code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Top-level imports for the notebook.  Fixed: `torch` was imported twice.
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import shutil
import copy
import glob
import tqdm
import random
import pickle
from collections import defaultdict
import requests
import PIL
import sklearn
import sklearn.svm
import sklearn.model_selection
from sklearn.model_selection import train_test_split

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# # Different Models and Layers
# +
batch_size = 8
num_epochs = 5
feature_extract = True

def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of *model* when feature_extracting is truthy.

    Used so that only newly attached layers receive gradient updates when the
    backbone is treated as a fixed feature extractor.
    """
    if not feature_extracting:
        return
    for param in model.parameters():
        param.requires_grad = False
def initialize_model(model_name, num_classes=1000, feature_extract=True, use_pretrained=True):
    """Create a pretrained torchvision backbone by name.

    Parameters
    ----------
    model_name : str
        One of "resnet", "alexnet", "vgg", "squeezenet", "densenet".
    num_classes : int
        Kept for interface compatibility; the classifier head is currently
        left at its pretrained 1000-class shape.  TODO(review): reshape the
        head if a different value is ever passed.
    feature_extract : bool
        If True, freeze all backbone parameters.
    use_pretrained : bool
        If True, load ImageNet weights.

    Returns
    -------
    (model, input_size)
        The model and the square input resolution it expects.

    Raises
    ------
    ValueError
        If model_name is not recognised.  The original called exit(), which
        kills the whole interpreter / notebook kernel.
    """
    # Map each supported name to its torchvision constructor; this replaces
    # five near-identical if/elif branches.
    constructors = {
        "resnet": models.resnet18,            # Resnet18
        "alexnet": models.alexnet,            # Alexnet
        "vgg": models.vgg11_bn,               # VGG11_bn
        "squeezenet": models.squeezenet1_0,   # Squeezenet
        "densenet": models.densenet121,       # Densenet
    }
    if model_name not in constructors:
        raise ValueError("Invalid model name: {!r}".format(model_name))
    model_ft = constructors[model_name](pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    input_size = 224  # all five architectures take 224x224 inputs
    return model_ft, input_size
# +
# Split DenseNet-121 into a feature extractor ("bottom") and classifier ("top").
densenet, input_size = initialize_model("densenet")
densenet.eval()  # inference mode: fixes batch-norm / dropout behaviour
densenet_bottom = copy.deepcopy(densenet)  # deep copy so the original model stays intact
densenet_top = densenet.classifier  # final linear layer (shares weights with densenet)
class Identity(nn.Module):
    """Pass-through module: forward(x) returns x unchanged.

    Used to replace a network's classifier head so the remaining layers act
    as a feature extractor.
    """
    def forward(self, inputs):
        return inputs
densenet_bottom.classifier = Identity()  # bottom half now outputs raw features
# +
# SqueezeNet: the classifier includes pooling, so the "bottom" output must be
# reshaped back into a (512, 13, 13) feature map before feeding the top.
squeezenet, input_size = initialize_model("squeezenet")
squeezenet.eval()
squeezenet_bottom_ = copy.deepcopy(squeezenet)
squeezenet_top = squeezenet.classifier
squeezenet_bottom_.classifier = Identity()

def squeezenet_bottom(t):
    # un-flatten the feature vector into the map shape squeezenet_top expects
    return squeezenet_bottom_(t).view((-1, 512, 13, 13))
# +
# ResNet-18: head is `fc`, not `classifier`.
resnet, input_size = initialize_model("resnet")
resnet.eval()
resnet_bottom = copy.deepcopy(resnet)
resnet_top = resnet.fc
resnet_bottom.fc = Identity()
# +
# VGG11-bn split.
vggnet, input_size = initialize_model("vgg")
vggnet.eval()
vggnet_bottom = copy.deepcopy(vggnet)
vggnet_top = vggnet.classifier
vggnet_bottom.classifier = Identity()
# +
# Data augmentation and normalization for training
# Just normalization for validation
# (mean/std below are the standard ImageNet statistics)
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -
# NOTE(review): machine-specific absolute path — will only run on this machine.
img = PIL.Image.open("C:\\Users\\islam\\downloads\\lion.jpg").convert("RGB")
tensor = data_transforms["val"](img).unsqueeze(0)
squeezenet_bottom(tensor).shape
# Sanity checks: top(bottom(x)) must reproduce the full model's output
# for each split (squeezenet needs squeezing because of its pooling head).
print(np.allclose(densenet(tensor), densenet_top(densenet_bottom(tensor))))
print(np.allclose(resnet(tensor), resnet_top(resnet_bottom(tensor))))
print(np.allclose(vggnet(tensor), vggnet_top(vggnet_bottom(tensor))))
print(np.allclose(squeezenet(tensor).squeeze(), squeezenet_top(squeezenet_bottom(tensor)).squeeze()))
# +
# Train one linear SVM per concept folder on each backbone's embeddings and
# cache the resulting concept vectors (the SVM normals) to disk.
# FIX: renamed the list from `models` to `model_list` — the old name shadowed
# torchvision's `models` module, breaking any later `models.<arch>(...)` call.
model_list = [densenet, resnet, vggnet, squeezenet]
tops = [densenet_top, resnet_top, vggnet_top, squeezenet_top]
bottoms = [densenet_bottom, resnet_bottom, vggnet_bottom, squeezenet_bottom]
names = ['densenet', 'resnet', 'vgg', 'squeezenet']
for model, top, bottom, name in zip(model_list, tops, bottoms, names):
    print(name)
    concept_dict = {}
    concept_dir = "concept_data/"
    transform = data_transforms['val']
    # iterate through each concept folder (one ImageFolder per concept,
    # with positive/negative sub-folders as class labels)
    for concept in tqdm.tqdm(os.listdir(concept_dir)):
        if concept in concept_dict:
            continue
        all_embeddings = []
        all_labels = []
        image_dataset = datasets.ImageFolder(os.path.join(concept_dir, concept), transform)
        dataloaders = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
        # load all of the images, get the embeddings
        for inputs, labels in dataloaders:
            embeddings = bottom(inputs)
            all_embeddings.extend(embeddings.detach().numpy())
            all_labels.extend(labels.detach().numpy())
        # train a linear SVM to separate positive/negative concept examples
        X_train, X_test, y_train, y_test = train_test_split(all_embeddings, all_labels)
        svm = sklearn.svm.SVC(kernel="linear", C=0.001, probability=True)
        svm.fit(X_train, y_train)
        train_acc = svm.score(X_train, y_train)
        test_acc = svm.score(X_test, y_test)
        # print train/test accuracy
        print(train_acc, test_acc)
        # store the svm coefficients (the concept direction) plus accuracies
        concept_dict[concept] = (svm.coef_, train_acc, test_acc)
    # FIX: use a context manager — the original leaked the open file handle
    with open('{}_concepts_170.pkl'.format(name), 'wb') as fh:
        pickle.dump(concept_dict, fh)
# -
# # Layer Accuracies
# Keep a copy before `concept_dict` is overwritten by the per-layer loads below.
dense_concept_dict = copy.deepcopy(concept_dict)
# +
# Load the cached per-layer concept SVMs and collect each concept's
# validation accuracy (index 2 of the stored tuple) per layer.
layers = [0, 3, 7, 9, 10]
concepts = ['bed', 'stripes', 'dog', 'flag', 'cow']
scores = []
for layer_idx in layers:
    # NOTE(review): pickle files produced by a separate per-layer run — confirm they exist
    concept_dict = pickle.load(open('sq_layer_{}_concepts_170.pkl'.format(layer_idx), 'rb'))
    score = [concept_dict[c][2] for c in concepts]
    scores.append(score)
concept_scores = {'Layer ' + str(layer): scores[l] for l, layer in enumerate(layers)}
# +
# Grouped horizontal bar chart: one bar group per concept, one bar per layer.
import pandas
import matplotlib.pyplot as plt
import numpy as np

df = pandas.DataFrame(dict(graph=concepts, **concept_scores))
ind = np.arange(len(df))
width = 0.15  # bar thickness; 5 layers fit within each concept's slot
fig, ax = plt.subplots(figsize=[5.5, 9])
for l, layer in enumerate(layers):
    # legend shows 1-based layer numbers (hence `layer + 1`)
    ax.barh(ind + l*width, df['Layer ' + str(layer)], width, label='Layer ' + str(layer + 1))
ax.set(yticks=ind + 2*width, yticklabels=df.graph, ylim=[2*width - 1, len(df)])
ax.legend(prop={'family': 'Arial', 'size': 16})
ax.set_xlim([0.5, 1])  # accuracies below 0.5 are no better than chance
plt.xticks(fontname='Arial', fontsize=16)
plt.xlabel('Validation Accuracy', fontname='Arial', fontsize=18)
plt.yticks(fontname='Arial', fontsize=18)
# -
# # Set up the Model
# +
model_name = "squeezenet"
batch_size = 8
num_epochs = 5
feature_extract = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# NOTE(review): re-defines the helpers from the first section; this version
# is hard-wired to SqueezeNet 1.0.
def set_parameter_requires_grad(model, feature_extracting):
    # Freeze all parameters when using the model as a fixed feature extractor.
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

def initialize_model(use_pretrained=True):
    # Load SqueezeNet 1.0 with ImageNet weights and optionally freeze it.
    # NOTE(review): make sure torchvision's `models` module has not been
    # shadowed by an earlier cell (e.g. a list also named `models`).
    model_ft = models.squeezenet1_0(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    input_size = 224
    return model_ft, input_size

model_ft, input_size = initialize_model()
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = transforms.Compose([
    transforms.Resize(input_size),
    transforms.CenterCrop(input_size),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# +
class SqueezenetBottom(nn.Module):
    """Feature-extractor half of a SqueezeNet.

    Keeps the first 15 children of the model's `features` stack (for
    SqueezeNet 1.0 that is the whole stack) and appends a Flatten, so the
    forward pass yields one flat feature vector per image.
    """
    def __init__(self, original_model):
        super(SqueezenetBottom, self).__init__()
        feature_stack = list(original_model.children())[0]
        kept_layers = list(feature_stack.children())[:15]
        self.features = nn.Sequential(*kept_layers, nn.Flatten())

    def forward(self, x):
        return self.features(x)
class SqueezenetTop(nn.Module):
    """Classifier half of a SqueezeNet.

    Reshapes a flat embedding back into the (512, 13, 13) feature map the
    classifier expects, applies the classifier layers, and returns softmax
    probabilities over the 1000 ImageNet classes.
    """
    def __init__(self, original_model):
        super(SqueezenetTop, self).__init__()
        self.features = nn.Sequential(*list(original_model.children())[1])

    def forward(self, x):
        feature_map = x.view((-1, 512, 13, 13))
        logits = self.features(feature_map)
        logits = logits.view((-1, 1000))
        return nn.Softmax(dim=-1)(logits)
def get_model_parts(model):
    """Return the (bottom, top) halves of a SqueezeNet model."""
    return SqueezenetBottom(model), SqueezenetTop(model)

model_bottom, model_top = get_model_parts(model_ft)
model_bottom.eval()
model_top.eval();
# -
# # Evaluation Methods
# Fetch the 1000 ImageNet class labels (one per line) from a public gist.
response = requests.get("https://git.io/JJkYN")
class_labels = response.text.split("\n")

def get_concept_scores(tensor, label):
    """Score every concept by how much nudging the embedding along the
    concept's SVM normal raises the probability of `label`.

    Reads globals: class_labels, model_bottom, model_top, concept_dict.
    Returns (original probability for label, {concept: score}, concepts
    sorted by descending score).
    """
    correct_idx = class_labels.index(label)
    concept_scores = {}
    embedding = model_bottom(tensor)
    original_preds = model_top(embedding).detach().numpy().squeeze()
    multiplier = 100000  # step size along the concept direction
    for key in concept_dict:
        coef, _, _ = concept_dict[key]
        # move the embedding along the concept direction, re-run the top half
        plus = torch.from_numpy(embedding.detach().numpy() + multiplier*coef).float()
        plus_preds = model_top(plus)
        # score = change in the probability assigned to the correct class
        plus_diff = plus_preds.squeeze()[correct_idx] - original_preds.squeeze()[correct_idx]
        concept_scores[key] = float(plus_diff)
    concept_scores_list = sorted(concept_scores, key=concept_scores.get, reverse=True)
    return original_preds[correct_idx], concept_scores, concept_scores_list

# NOTE(review): machine-specific absolute path.
img = PIL.Image.open("C:\\Users\\islam\\downloads\\zebra-fig1a.jpg").convert("RGB")
tensor = data_transforms(img).unsqueeze(0)
# +
# Compare concept scores computed from layer 10 vs layer 7 concept SVMs.
# (get_concept_scores reads the global `concept_dict`, hence the reloads.)
concept_dict = pickle.load(open('sq_layer_{}_concepts_170.pkl'.format(10), 'rb'))
_, scores1, _ = get_concept_scores(tensor, 'zebra')
concept_dict = pickle.load(open('sq_layer_{}_concepts_170.pkl'.format(7), 'rb'))
_, scores2, _ = get_concept_scores(tensor, 'zebra')
# +
# Collect both score sets in the same concept order, then normalise each
# to [-1, 1] by its own max magnitude before comparing.
# NOTE(review): assumes both pickles contain the same concept keys.
l1 = []
l2 = []
for c in scores1:
    l1.append(scores1[c])
    l2.append(scores2[c])
from sklearn.metrics import r2_score
l1 = np.array(l1)
l1 = l1 / np.max(np.abs(l1))
l2 = np.array(l2)
l2 = l2 / np.max(np.abs(l2))
print(r2_score(l1, l2))
# -
from scipy.stats import pearsonr
# +
# Scatter + regression of the normalised scores; title reports Pearson R.
# (1-based layer numbers in the labels: stored layers 10 and 7 are "11" and "8".)
import seaborn as sns
plt.figure(figsize=[6, 5])
sns.regplot(x=l1, y=l2)
# plt.plot(l1, l2, '.')
plt.title('CES from Layers 8 and 11: $R=$ ' + str(round(pearsonr(l1, l2)[0], 3)), fontname='Arial', fontsize=16)
plt.xticks(fontname='Arial', fontsize=14)
plt.xlabel('Layer 11', fontname='Arial', fontsize=16)
plt.ylabel('Layer 8', fontname='Arial', fontsize=16)
plt.yticks(fontname='Arial', fontsize=14);
# +
# Same comparison as above, but layer 10 vs layer 0 (first layer).
concept_dict = pickle.load(open('sq_layer_{}_concepts_170.pkl'.format(10), 'rb'))
_, scores1, _ = get_concept_scores(tensor, 'zebra')
concept_dict = pickle.load(open('sq_layer_{}_concepts_170.pkl'.format(0), 'rb'))
_, scores2, _ = get_concept_scores(tensor, 'zebra')
# +
# Normalise both score sets to [-1, 1] and report R^2 between them.
l1 = []
l2 = []
for c in scores1:
    l1.append(scores1[c])
    l2.append(scores2[c])
from sklearn.metrics import r2_score
l1 = np.array(l1)
l1 = l1 / np.max(np.abs(l1))
l2 = np.array(l2)
l2 = l2 / np.max(np.abs(l2))
print(r2_score(l1, l2))
# +
# Scatter + regression for the layer 1 vs layer 11 comparison.
import seaborn as sns
plt.figure(figsize=[6, 5])
sns.regplot(x=l1, y=l2)
# plt.plot(l1, l2, '.')
plt.title('CES from Layers 1 and 11: $R=$ ' + str(round(pearsonr(l1, l2)[0], 3)), fontname='Arial', fontsize=16)
plt.xticks(fontname='Arial', fontsize=14)
plt.xlabel('Layer 11', fontname='Arial', fontsize=16)
plt.ylabel('Layer 1', fontname='Arial', fontsize=16)
plt.yticks(fontname='Arial', fontsize=14);
# -
concept_dict = pickle.load(open('sq_concepts_170.pkl', 'rb'))

def get_concept_scores_with_delta(tensor, label, multiplier):
    """Variant of get_concept_scores with an explicit step size.

    `multiplier` is the step taken along each concept direction; scores are
    normalised by the largest magnitude so the top concept is always +/-1.
    Reads globals: class_labels, model_bottom, model_top, concept_dict.
    """
    correct_idx = class_labels.index(label)
    concept_scores = {}
    embedding = model_bottom(tensor)
    original_preds = model_top(embedding).detach().numpy().squeeze()
    for key in concept_dict:
        coef, _, _ = concept_dict[key]
        # perturb the embedding along the concept direction
        plus = torch.from_numpy(embedding.detach().numpy() + multiplier*coef).float()
        plus_preds = model_top(plus)
        plus_diff = plus_preds.squeeze()[correct_idx] - original_preds.squeeze()[correct_idx]
        concept_scores[key] = float(plus_diff)
    # normalise by the largest absolute score
    m = np.max(np.abs(list(concept_scores.values())))
    for c in concept_scores:
        concept_scores[c] = concept_scores[c] / m
    concept_scores_list = sorted(concept_scores, key=concept_scores.get, reverse=True)
    return original_preds[correct_idx], concept_scores, concept_scores_list
# +
# Sweep the step size and record the (normalised) concept scores at each delta.
all_scores = []
deltas = [1, 10, 100, 1000, 10000, 100000]
for delta in deltas:
    _, scores, lst = get_concept_scores_with_delta(tensor, 'zebra', delta)
    all_scores.append(scores)
# -
# `lst` is left over from the largest delta; take its top-5 concepts.
top_concepts = lst[:5]
# +
plt.figure(figsize=[9, 4])
for c in top_concepts:
    if c == "stripes":
        # "stripes" is the max-magnitude concept, so its normalised score is 1 by construction
        plt.semilogx(deltas, [1 for i in range(len(all_scores))], '-o', label=c)
    else:
        plt.semilogx(deltas, [all_scores[i][c] for i in range(len(all_scores))], '-o', label=c)
plt.legend(prop={'family': 'Arial', 'size': 16})
plt.xticks(fontname='Arial', fontsize=14)
# FIX: raw string — '\d' in a plain literal is an invalid escape sequence
# (DeprecationWarning, a SyntaxError in future Python versions)
plt.xlabel(r'Step size ($\delta$)', fontname='Arial', fontsize=16)
plt.ylabel('CES', fontname='Arial', fontsize=16)
plt.yticks(fontname='Arial', fontsize=14);
| Appendix A4. Other Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reading isotherms
#
# The first thing to do is to read previously created isotherms. Example data can
# be found in the
# [data](https://github.com/pauliacomi/pyGAPS/tree/master/docs/examples/data)
# directory, saved in the pyGAPS JSON format, which we will now open. First, we'll
# do the necessary top-level imports for the session.
# +
from pathlib import Path

import pygaps.parsing as pgp

# All example isotherms live under ./data relative to the working directory.
json_path = Path.cwd() / 'data'
# -
# Then we'll import the json files, by using the `isotherm_from_json` method which
# reads an isotherm from a file (or a string). There are four folders:
#
# - One containing nitrogen adsorption data at 77 kelvin
# Get the nitrogen data at 77 kelvin
isotherms_n2_77k_paths = Path(json_path / 'characterisation').rglob("*.json")
isotherms_n2_77k = [
    pgp.isotherm_from_json(filepath)
    for filepath in isotherms_n2_77k_paths
]
print('Selected', len(isotherms_n2_77k), 'isotherms with nitrogen at 77K')
# - Another with room-temperature adsorption of $CO_2$ combined with microcalorimetry
# Get the combined isotherm-calorimetry data
isotherms_calorimetry_paths = Path(json_path / 'calorimetry').rglob("*.json")
isotherms_calorimetry = [
    pgp.isotherm_from_json(filepath)
    for filepath in isotherms_calorimetry_paths
]
print('Selected', len(isotherms_calorimetry), 'room temperature calorimetry isotherms')
# - Some room-temperature isotherms which we will use for IAST calculations
# Get the isotherms for IAST calculations
isotherms_iast_paths = Path(json_path / 'iast').rglob("*.json")
isotherms_iast = [
    pgp.isotherm_from_json(filepath)
    for filepath in isotherms_iast_paths
]
print('Selected', len(isotherms_iast), 'isotherms for IAST calculation')
# - Finally a set of isotherms with $C_4H_{10}$ at different temperature, for isosteric enthalpy calculations
# Get the isotherms for isosteric enthalpy calculations
# NOTE(review): only this one is wrapped in list(); the rglob generators above
# are consumed by their list comprehensions, so behaviour is the same.
isotherms_isosteric_paths = list(Path(json_path / 'isosteric').rglob("*.json"))
isotherms_isosteric = [
    pgp.isotherm_from_json(filepath)
    for filepath in isotherms_isosteric_paths
]
print('Selected', len(isotherms_isosteric), 'isotherms for isosteric enthalpy calculation')
| docs/examples/import.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # View Northeast Pacific SST based on an Ensemble Empirical Mode Decomposition
# The oscillation of sea surface temperature (SST) has substantial impacts on the global climate. For example, anomalously high SST near the equator (between 5°S and 5°N and the Peruvian coast) causes the El Niño phenomenon, while low SST in this area brings about the La Niña phenomenon, both of which impose considerable influence on temperature, precipitation and wind globally.
#
# In this notebook, an adaptive and temporal local analysis method, the recently developed ensemble empirical mode decomposition (EEMD) method (Huang and Wu 2008; Wu and Huang 2009) is applied to study the oscillation of SST over Northeast Pacific(40°–50°N, 150°–135°W). The EEMD is the most recent improvement of the EMD method (Huang et al. 1998; Huang and Wu 2008). The package of [PyEMD](https://pyemd.readthedocs.io/en/latest/index.html) is used, which is a Python implementation of Empirical Mode Decomposition (EMD) and its variations. One of the most popular expansion is Ensemble Empirical Mode Decomposition (EEMD), which utilises an ensemble of noise-assisted executions. As a result of EMD one will obtain a set of components that possess oscillatory features. In case of plain EMD algorithm, these are called Intrinsic Mode Functions (IMFs) as they are expected to have a single mode. In contrary, EEMD will unlikely produce pure oscillations as the effects of injected noise can propagate throughout the decomposition.
#
# The SST data is extracted from the latest version of the Extended Reconstructed Sea Surface Temperature (ERSST) dataset, version 5. It is a global monthly sea surface temperature dataset derived from the International Comprehensive Ocean–Atmosphere Dataset (ICOADS). Production of the ERSST is on a 2° × 2° grid. For more information see https://www.ncdc.noaa.gov/data-access/marineocean-data/extended-reconstructed-sea-surface-temperature-ersst-v5.
# ## 1. Load all needed libraries
# +
# %matplotlib inline
import xarray as xr
from PyEMD import EEMD
import numpy as np
import pylab as plt

plt.rcParams['figure.figsize'] = (9, 5)  # default figure size for all plots below
# -
# ## 2. Load SST data
# ### 2.1 Load time series SST
#
# Select the region (40°–50°N) and the period (1981-2015)
# NOTE(review): lon=slice(190, 240) corresponds to 170°W–120°W, not the
# 150°–135°W quoted in the introduction — confirm which region is intended.
# FIX: raw string for the path — '\s' in a plain literal is an invalid
# escape sequence (DeprecationWarning, a SyntaxError in future Python).
ds = xr.open_dataset(r'data\sst.mnmean.v5.nc')
sst = ds.sst.sel(lat=slice(50, 40), lon=slice(190, 240), time=slice('1981-01-01', '2015-12-31'))
#sst.mean(dim='time').plot()
# ### 2.2 Calculate climatology between 1981-2010
sst_clm = sst.sel(time=slice('1981-01-01', '2010-12-31')).groupby('time.month').mean(dim='time')
#sst_clm = sst.groupby('time.month').mean(dim='time')
# ### 2.3 Calculate SSTA (anomaly = each month minus that month's climatology)
sst_anom = sst.groupby('time.month') - sst_clm
# spatial mean over the box, ignoring missing ocean cells
sst_anom_mean = sst_anom.mean(dim=('lon', 'lat'), skipna=True)
# Assign EEMD to `eemd` variable
eemd = EEMD()
# Execute EEMD on S
eIMFs = eemd.eemd(S)
# ## 4. Visualize
#
# ### 4.1 Plot IMFs
# +
nIMFs = eIMFs.shape[0]
plt.figure(figsize=(11,20))
plt.subplot(nIMFs+1, 1, 1)
# plot original data
plt.plot(t, S, 'r')
# plot IMFs
for n in range(nIMFs):
plt.subplot(nIMFs+1, 1, n+2)
plt.plot(t, eIMFs[n], 'g')
plt.ylabel("eIMF %i" %(n+1))
plt.locator_params(axis='y', nbins=5)
plt.xlabel("Time [s]")
# -
# ### 4.2 Error of reconstruction
reconstructed = eIMFs.sum(axis=0)
plt.plot(t, reconstructed-S)
# ### 4.3 Summary
# Using the EEMD method, the original SSTA sequence is decomposed into nine modes, including eight IMFs and the remainder mode, R (IMF9). It is observed that the frequencies of the nine IMFs decrease successively and the corresponding periods increase. Also, the remainder mode, R, is monotonically increasing, in accordance with the upward trend of the original SSTA data.
#
# Obviously, the modes IMF4–IMF8 are intrinsically regular in time since they have their own characteristic frequencies, while the characteristic frequencies of the first few IMFs, especially IMF1, are indistinct. In other words, the non-periodic and strongly stochastic properties of the original SSTA data are mainly inherited in the first several IMFs, while the regular components in the SSTA data are separated within the other modes, such as IMF4–IMF8 and the remainder mode, R (IMF9).
# ## References
#
# <NAME>zuk (2017-), Python implementation of Empirical Mode Decomposition algorithm. http://www.laszukdawid.com/codes.
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> (2017): NOAA Extended Reconstructed Sea Surface Temperature (ERSST), Version 5. [indicate subset used]. NOAA National Centers for Environmental Information. doi:10.7289/V5T72FNM.
#
# <NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, 1998: The empirical mode decomposition and the Hilbert spectrum for nonlinear and nonstationary time series analysis. Proc. Roy. Soc. London, 454A, 903–995.
#
# <NAME>., and <NAME>, 2008: A review on Hilbert-Huang transform: Method and its applications to geophysical studies. Rev. Geophys., 46, RG2006, doi:10.1029/2007RG000228.
#
# <NAME>., <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, 2008: The modulated annual cycle: An alternative reference frame for climate anomalies. Climate Dyn., 31, 823–841.
| ex33-View Northeast Pacifc sea surface temperature based on an ensemble empirical mode decomposition.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Julia 檔案處理與資料庫連線
#
# ## Day 014 作業1:讀取 Nested Dict 內的資料
#
# Day 010 時我們介紹了字典 (Dict),字典內的資料可以是巢狀 (nested) 的,包含較複雜的資料階層結構。在今天的下載檔 CityCountyData.json 是台灣各縣市鄉鎮區及路名的中英文資料,檔案為 JSON 格式,範例內容如下:
#
# ```json
# {
# "CityName": "臺北市",
# "CityEngName": "Taipei City",
# "AreaList": [
# {
# "ZipCode": "100",
# "AreaName": "中正區",
# "AreaEngName": "Zhongzheng Dist."
# },
# ...
# }
# ```
#
# 作業內容為讀取 JSON 檔案,並列出台北市所有行政區的中英文名稱。範例答案將以 JSON.jl 套件作為示範。
#
# 檔案資料來源:[台灣 縣市,鄉鎮,地址 中英文 JSON](https://github.com/donma/TaiwanAddressCityAreaRoadChineseEnglishJSON)
using JSON

# List the Chinese and English names of every district of Taipei City.
# The JSON file is an array of cities; index 1 is Taipei (Julia is 1-based).
arr = JSON.parsefile("../data/CityCountyData.json")
for dist in arr[1]["AreaList"]
    println((dist["AreaName"], dist["AreaEngName"]))
end
# ## Exercise 2: Save dictionary data as JSON
#
# Build a dictionary (Dict) and save it in JSON format. You may create your
# own dictionary, or use the string below to count character frequencies
# (the Day 010 exercise program).
#
# Hint: see today's example code on saving array data in JSON format.
# +
str = "永和有永和路,中和也有永和路,
中和有中和路,永和也有中和路;
中和的中和路有接永和的中和路,
永和的永和路沒接中和的永和路;
永和的中和路有接永和的永和路,
中和的永和路沒接中和的中和路。
永和有中正路,中和也有中正路;
永和的中正路用景平路接中和的中正路。
永和有中山路,中和也有中山路;
永和的中山路直接接上了中和的中山路。
永和的中正路接上了永和的中山路;
中和的中正路卻不接中和的中山路。
中正橋下來不是中正路,但永和有中正路;
秀朗橋下來不是秀朗路,但永和有秀朗路。
永福橋下來不是永福路,永和沒有永福路;
福和橋下來不是福和路,但福和路接的卻是永福橋。
中和中和路永和永和路
永和中和路中和永和路
中和中山路永和中正路
永和中山路中和中正路"
# +
# Count how many times each character occurs in the string.
d = Dict{String, Int64}()
for c ∈ str
    d[string(c)] = get(d, string(c), 0) + 1
end
# -
d
# Show JSON.json's documentation, then serialise the counts to a JSON string.
@doc JSON.json
JSON.json(d)
| homework/julia_014_hw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time Series
# Grid-searching three univariate forecasters (naive averaging, SARIMA,
# Holt-Winters exponential smoothing) with walk-forward validation.
from numpy import mean
from numpy import median
from math import sqrt
from sklearn.metrics import mean_squared_error
import pandas as pd
from matplotlib import pyplot
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from numpy import array
import matplotlib.pyplot as plt
import random
# # Methods
def train_test_split(data, n_test):
    """Split a series into (train, test); the final n_test observations form the test set.

    >>> train_test_split([1, 2, 3, 4, 5], 2)
    ([1, 2, 3], [4, 5])
    """
    # FIX: slice by explicit index — the original used data[:-n_test], and
    # data[:-0] returns an EMPTY train set (with data[-0:] the full series)
    # when n_test == 0.
    split_at = len(data) - n_test
    return data[:split_at], data[split_at:]
def measure_rmse(actual, predicted):
    """Return the root-mean-squared error between two equal-length series."""
    mse = mean_squared_error(actual, predicted)
    return sqrt(mse)
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, method, cfg):
    """Evaluate one forecaster config with walk-forward validation.

    At each test step the model forecasts one value from the full history,
    then the true observation is appended to the history.

    Returns [rmse, predictions].
    Raises ValueError for an unknown `method` (the original fell through and
    raised a confusing NameError on `yhat`).
    """
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        if method == "average":
            yhat = average_forecast(history, cfg)
        elif method == "sarima":
            yhat = sarima_forecast(history, cfg)
        elif method == "exp":
            yhat = exp_smoothing_forecast(history, cfg)
        else:
            raise ValueError("unknown forecast method: %r" % (method,))
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error over the whole test window
    error = measure_rmse(test, predictions)
    return [error, predictions]
def grid_search(data, cfg_list, n_test, method="sarima", parallel=True):
    """Score every config in cfg_list via score_model.

    Returns the list of (key, error, predictions) tuples, dropping configs
    whose error is None (i.e. the model failed to fit).
    """
    if parallel:
        # execute configs in parallel, one process per CPU core
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, method, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, method, cfg) for cfg in cfg_list]
    # remove empty results; FIX: `is not None` (identity), not `!= None`
    scores = [r for r in scores if r[1] is not None]
    return scores
def score_model(data, n_test, method, cfg, debug=False):
    """Score one config; returns (key, error, predictions).

    With debug=False a failing fit yields (key, None, None) instead of
    propagating, so a grid search survives unstable configs.  With
    debug=True exceptions propagate for inspection.
    """
    result = None
    predictions = None
    # convert config to a key
    key = str(cfg)
    if debug:
        result, predictions = walk_forward_validation(data, n_test, method, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result, predictions = walk_forward_validation(data, n_test, method, cfg)
        # FIX: catch Exception, not a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit); the old body assigned an unused
        # `error` variable — result is deliberately left as None here.
        except Exception:
            result = None
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result, predictions)
def find_best_solution(series, name, method="sarima", sample=False):
    """Grid-search one forecaster family on `series`, print the top configs,
    and save a plot of the best config's predictions as <name>_<method>.png.

    series : pandas object with a `.values` array.
    method : "sarima", "average", or "exp".
    sample : if True, score random 25-config samples until at least 5
             configs succeed, instead of the full grid.
    """
    data = series.values
    scores = []
    # data split
    n_test = int(len(data)*0.2)  # percentage used for test
    max_length = len(data) - n_test
    print("Training size %s " % max_length)
    print("Test size %s " % n_test)
    # model configs
    if method == "sarima":
        print("Finding best solution for %s" % method)
        cfg_list = sarima_configs()
    elif method == "average":
        print("Finding best solution for %s" % method)
        cfg_list = simple_configs(max_length)
    elif method == "exp":
        print("Finding best solution for %s" % method)
        cfg_list = exp_smoothing_configs()
        # ExponentialSmoothing needs a 1-D array
        data = data[:, 0]
    # sample some
    if sample:
        # NOTE(review): the first random.sample result is immediately
        # overwritten inside the loop — the initial draw is dead code.
        cfg_list_rand = random.sample(cfg_list, k=25)
        while len(scores) < 5:
            cfg_list_rand = random.sample(cfg_list, k=25)
            scores += grid_search(data, cfg_list_rand, n_test, method)
    else:
        # grid search
        scores = grid_search(data, cfg_list, n_test, method)
    # sort configs by error, asc (failed configs were already filtered out)
    scores.sort(key=lambda tup: tup[1])
    print('done')
    # list top 3 configs
    for cfg, error, predictions in scores[:3]:
        print(cfg, error)
    # display: plot the original series with the best config's walk-forward
    # predictions appended after the training segment
    train, test = train_test_split(data, n_test)
    prediction = pd.DataFrame(list(train.flatten())+scores[0][2])
    ax = pd.DataFrame(data).plot(label="Original")  # main data
    prediction.plot(ax=ax, alpha=.7, figsize=(14,7))
    # Hide grid lines
    # ax.grid(False)
    # Hide axes ticks
    #ax.set_xticks([])
    #ax.set_yticks([])
    plt.savefig("%s_%s.png" % (name, method), transparent=True)
    plt.show()
# # Average Forecast
# +
def simple_configs(max_length, offsets=[1]):
    """Enumerate [window_size, avg_type] configs for the average forecaster.

    One config per (window in 1..max_length) x ('median', 'mean') pair.
    `offsets` is kept for interface compatibility; it is unused.
    """
    return [[window, avg_type]
            for window in range(1, max_length + 1)
            for avg_type in ('median', 'mean')]
# one-step average forecast
def average_forecast(history, config):
    """Forecast the next value as the mean or median of the last n observations.

    config : [n, avg_type] with avg_type 'mean' or 'median'.
    """
    n, avg_type = config
    # FIX: '==' not 'is' — identity comparison against a string literal only
    # works by accident of CPython interning (SyntaxWarning on 3.8+)
    if avg_type == 'mean':
        return mean(history[-n:])
    return median(history[-n:])
# -
# # Dummy Example
# Iteratively show the algorithm one more observation from the test set and
# let it predict the next one; then compare the whole prediction sequence to
# the test set using root-mean-squared error.
# +
series = pd.read_csv('daily-total-female-births.csv', header=0, index_col=0)
data = series.values
n_test = 100
max_length = len(data) - n_test
cfg = simple_configs(max_length)[1]  # config [1, 'mean']: forecast = last observation
train = data[:-n_test]
test = data[-n_test:]
history = [x for x in train]
predictions = list()
for i in range(len(test)):
    yhat = average_forecast(history, cfg)
    predictions.append(yhat)
    history.append(test[i])
error = sqrt(mean_squared_error(test, predictions))
error
# -
# # SARIMA Forecast
# +
def sarima_forecast(history, config):
    """One-step SARIMA forecast from the full history.

    config : [order, seasonal_order, trend] as accepted by SARIMAX.
    """
    order, seasonal_order, trend = config
    # define and fit the model (constraints relaxed so odd configs still fit)
    sarima = SARIMAX(history, order=order, seasonal_order=seasonal_order, trend=trend,
                     enforce_stationarity=False, enforce_invertibility=False)
    fitted = sarima.fit(disp=False)
    # make one step forecast
    forecast = fitted.predict(len(history), len(history))
    return forecast[0]
def sarima_configs(seasonal=[0]):
    """Enumerate SARIMA configs [(p,d,q), (P,D,Q,m), t] over a small grid.

    `seasonal` supplies the candidate seasonal periods m.
    Replaces eight levels of nested loops with itertools.product (same
    ordering as the original nesting).
    """
    from itertools import product
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    return [[(p, d, q), (P, D, Q, m), t]
            for p, d, q, t, P, D, Q, m in product(
                p_params, d_params, q_params, t_params,
                P_params, D_params, Q_params, m_params)]
# -
# # Exponential Smoothing - <NAME>
# +
def exp_smoothing_forecast(history, config):
    """One-step Holt-Winters forecast from the full history.

    config : [trend, damped, seasonal, seasonal_periods, use_boxcox, remove_bias].
    """
    t, d, s, p, b, r = config
    # define model
    history = array(history)
    # NOTE(review): `damped` was renamed `damped_trend` (and fit's
    # `use_boxcox` moved to the constructor) in newer statsmodels — this
    # code targets the older API; confirm the pinned version.
    model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p)
    # fit model
    model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]
def exp_smoothing_configs(seasonal=[None]):
    """Enumerate Holt-Winters configs [t, d, s, p, b, r].

    t/s: trend/seasonal type, d: damped, p: seasonal period (from
    `seasonal`), b: use_boxcox, r: remove_bias.  Replaces six levels of
    nested loops with itertools.product (same ordering as the original).
    """
    from itertools import product
    # define config lists
    t_params = ['add', 'mul', None]
    d_params = [True, False]
    s_params = ['add', 'mul', None]
    p_params = seasonal
    b_params = [True, False]
    r_params = [True, False]
    # create config instances
    return [list(combo) for combo in product(
        t_params, d_params, s_params, p_params, b_params, r_params)]
# -
# # No-Trend: Female Births
# Daily female births: a series with no trend or seasonality.
series = pd.read_csv('daily-total-female-births.csv', header=0, parse_dates=["Date"], index_col=0)
#import seaborn; seaborn.set()
series.plot(figsize=(16,10))
plt.savefig('female.png', transparent=True)
# re-read without date parsing for the grid searches
series = pd.read_csv('daily-total-female-births.csv', header=0, index_col=0)
# NOTE(review): "famale" looks like a typo for "female"; output plot files
# will carry the misspelling.
name = "famale-births"
find_best_solution(series, name, method="average")
find_best_solution(series, name, method="sarima")
find_best_solution(series, name, method="exp")
# # Trend: Shampoo Sales
# +
# Shampoo sales: 36 monthly values with a clear upward trend.  The CSV has
# no usable dates, so synthesise a monthly index.
# NOTE(review): the years 1998-2000 are arbitrary placeholders; the classic
# dataset is usually dated 1901-1903.
series = pd.read_csv('shampoo.csv', header=0)
series["Year"] = [1998]*12 + [1999]*12 + [2000]*12
series["Month"] = list(range(1, 13))*3
series["Date"] = series["Month"].apply(str) + "-" + series["Year"].apply(str)
series['date'] = pd.to_datetime(series["Date"])
series = series[["date", "Sales"]]
series.set_index(['date'], inplace=True)
import seaborn; seaborn.set()
series.plot(figsize=(16,10))
plt.savefig('shampoo.png')
# -
# re-read without the synthetic index for the grid searches
series = pd.read_csv('shampoo.csv', header=0, index_col=0)
name = "shampoo"
find_best_solution(series, name, method="average")
find_best_solution(series, name, method="sarima")
find_best_solution(series, name, method="exp")
# # Seasonality: Temp
# Monthly mean temperature: a strongly seasonal series.
series = pd.read_csv('monthly-mean-temp.csv', header=0, parse_dates=["Month"])
series.set_index(['Month'], inplace=True)
import seaborn; seaborn.set()
series.plot(figsize=(16,10))
plt.savefig('temp.png')
# re-read without date parsing for the grid searches
series = pd.read_csv('monthly-mean-temp.csv', header=0, index_col=0)
name = "temp"
find_best_solution(series, name, method="average")
find_best_solution(series, name, method="sarima")
find_best_solution(series, name, method="exp")
# # Trend and Seasonality: Monthly car sales
# Monthly car sales: both an upward trend and yearly seasonality.
series = pd.read_csv('monthly-car-sales.csv', header=0, parse_dates=["Month"])
series.set_index(['Month'], inplace=True)
import seaborn; seaborn.set()
series.plot(figsize=(16,10))
plt.savefig('cars.png')
# re-read without date parsing for the grid searches
series = pd.read_csv('monthly-car-sales.csv', header=0, index_col=0)
name = "cars"
find_best_solution(series, name, method="average")
find_best_solution(series, name, method="sarima")
find_best_solution(series, name, method="exp")
| Time series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bIDAUOH-5kMI"
# # Assignment 4
#
# <p style="color:red;"><strong>Show all work, add comments and docstrings!!!</strong></p>
#
# Produce a new class named ``Roster`` that keeps track of the students enrolled in a course. A new instance of the ``Roster`` class must be initialized with the course name and term only, such as:
#
# ```python
# >>> ceuy3013 = Roster('Computing in Civil Engineering', 'Spring 2021')
# ```
#
# No students should be added at initialization, instead, a new student can be added with an ``add_student`` method which takes the name, email, level and current GPA of the student, such as:
#
# ```python
# >>> ceuy3013.add_student('<NAME>', '<EMAIL>', 'Junior', 3.86)
# '<NAME> was added to the roster'
# ```
#
# Notice that the ``add_student`` returns a success message '<NAME> was added to the roster'. In fact, we have a 2.0 threshold for adding students to the roster. Which means that adding a student with a GPA below 2.0 will return a different message:
#
# ```python
# >>> ceuy3013.add_student('<NAME>', '<EMAIL>', 'Junior', 1.65)
# '<NAME> could not be added because their current GPA is below 2.0'
# ```
#
# At which point we can use ``print`` and see all students in our roster.
#
# ```python
# >>> print(ceuy3013)
# "[['<NAME>', '<EMAIL>', 'Junior', 3.86]]"
# ```
#
# And if we add a few more with GPA above 2.0, the print statement will return the following:
#
# ```python
# >>> print(ceuy3013)
# "[['<NAME>', '<EMAIL>', 'Junior', 3.86], ['<NAME>', '<EMAIL>', 'Sophomore', 2.8], ['<NAME>', '<EMAIL>', 'Senior', 3.34], ['<NAME>', '<EMAIL>', 'Sophomore', 2.95]]"
# ```
#
# We should get total enrollment with ``len``:
#
# ```python
# >>> len(ceuy3013)
# 4
# ```
#
# Next, the ``Roster`` class should also have a method that removes a student by their email, ``remove_student`` (bonus points if your ``add_student`` method also makes sure that all emails in the roster are unique). The method should work like this:
#
# ```python
# >>> ceuy3013.remove_student('<EMAIL>')
# '<NAME> was removed from the roster'
# ```
#
# And we can verify by printing the roster again:
#
# ```python
# >>> print(ceuy3013)
# "[['<NAME>', '<EMAIL>', 'Sophomore', 2.8], ['<NAME>', '<EMAIL>', 'Senior', 3.34], ['<NAME>', '<EMAIL>', 'Sophomore', 2.95]]"
# ```
#
# Finally, there needs to be a method, ``avg_gpa``, that returns the average GPA of the enrolled students. Assuming we have the three students shown above, the result should be:
#
# ```python
# >>> ceuy3013.avg_gpa()
# 3.03
# ```
#
# Once again, **show all work!!!**. You do not need any external libraries to complete this assignment. Fill in your code below. Good luck!
#
# ---
# + id="C0KUL1IB5e8B"
class Roster:
    """Track the students enrolled in a course for a given term.

    Students are stored as ``[name, email, level, gpa]`` lists. A student
    is only admitted when their GPA meets the 2.0 threshold, and emails
    are kept unique within the roster.
    """

    GPA_THRESHOLD = 2.0  # minimum GPA required to join the roster

    def __init__(self, course, term):
        """Initialize an empty roster for *course* offered in *term*."""
        self.course = course
        self.term = term
        self.students = []  # each entry: [name, email, level, gpa]

    def __str__(self):
        """Return the enrolled students as a printable list of lists."""
        return str(self.students)

    def __len__(self):
        """Return the total enrollment."""
        return len(self.students)

    def add_student(self, name, email, level, gpa):
        """Add a student to the roster and return a status message.

        Students with a GPA below 2.0 are rejected, as are duplicate
        emails (emails must be unique within the roster).
        """
        if gpa < self.GPA_THRESHOLD:
            return f'{name} could not be added because their current GPA is below 2.0'
        if any(student[1] == email for student in self.students):
            return f'{name} could not be added because {email} is already on the roster'
        self.students.append([name, email, level, gpa])
        return f'{name} was added to the roster'

    def remove_student(self, email):
        """Remove the student with *email* and return a status message."""
        for student in self.students:
            if student[1] == email:
                self.students.remove(student)
                return f'{student[0]} was removed from the roster'
        return f'No student with email {email} is on the roster'

    def avg_gpa(self):
        """Return the average GPA of enrolled students, rounded to 2 places."""
        if not self.students:
            return 0.0  # avoid ZeroDivisionError on an empty roster
        return round(sum(student[3] for student in self.students) / len(self.students), 2)
# -- Sample Data --------------------------------------------------------
# Exercise the Roster API exactly as described in the assignment text above.
ceuy3013 = Roster('Computing in Civil Engineering', 'Spring 2021')
ceuy3013.add_student('<NAME>', '<EMAIL>', 'Junior', 3.86)
ceuy3013.add_student('<NAME>', '<EMAIL>', 'Junior', 1.65)  # rejected: GPA below 2.0
print(ceuy3013)
ceuy3013.add_student('<NAME>', '<EMAIL>', 'Sophomore', 2.8)
ceuy3013.add_student('<NAME>', '<EMAIL>', 'Senior', 3.34)
ceuy3013.add_student('<NAME>', '<EMAIL>', 'Sophomore', 2.95)
print(ceuy3013)
print(len(ceuy3013))  # total enrollment
ceuy3013.remove_student('<EMAIL>')
print(ceuy3013)
print(ceuy3013.avg_gpa())  # average GPA of remaining students
# + id="dp-7V0StJL1d"
| Homework/Shen_Matthew_Assignment4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Decoding sensor space data with generalization across time and conditions
#
#
# This example runs the analysis described in [1]_. It illustrates how one can
# fit a linear classifier to identify a discriminatory topography at a given time
# instant and subsequently assess whether this linear model can accurately
# predict all of the time samples of a second set of conditions.
#
# References
# ----------
#
# .. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental
# representations: the Temporal Generalization method', Trends In
# Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.
#
# +
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import GeneralizingEstimator
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals
events = mne.read_events(events_fname)
# Four conditions: auditory/visual crossed with left/right stimulation.
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
            'Visual/Left': 3, 'Visual/Right': 4}
# Short peri-stimulus window; decimation speeds up the example.
tmin = -0.050
tmax = 0.400
decim = 2 # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,
                    proj=True, picks=picks, baseline=None, preload=True,
                    reject=dict(mag=5e-12), decim=decim)
# -
# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.
#
#
# +
# One z-scored logistic-regression classifier is trained per time sample.
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1,
                                 verbose=True)
# Fit classifiers on the epochs where the stimulus was presented to the left.
# Note that the experimental condition y indicates auditory or visual
# (event codes > 2 are the visual conditions).
time_gen.fit(X=epochs['Left'].get_data(),
             y=epochs['Left'].events[:, 2] > 2)
# -
# Score on the epochs where the stimulus was presented to the right.
#
#
# Test generalization: score the left-trained classifiers on right-stimulus
# epochs, at every (train time, test time) pair.
scores = time_gen.score(X=epochs['Right'].get_data(),
                        y=epochs['Right'].events[:, 2] > 2)
# Plot the full temporal-generalization matrix
# (rows: training time, columns: testing time; color: ROC AUC).
#
fig, ax = plt.subplots(1)
im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower',
                extent=epochs.times[[0, -1, 0, -1]])
ax.axhline(0., color='k')
ax.axvline(0., color='k')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Generalization across time and condition')
plt.colorbar(im, ax=ax)
plt.show()
| dev/_downloads/e807ecd348331c3d789aa11b874576a5/plot_decoding_time_generalization_conditions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from TweetSearch import TweetSearch
# Hashtags that scope the search to Reading, UK.
search_words = ['#rdguk','#Reading2050']
# Transport-related keywords; quoted terms are matched as exact phrases.
search_terms = ['Traffic','Transport','Mobility','Bicycle','Congestion',
'Parking','\"Private Vehicles\"','\"Rapid Transit\"','Pollution','\"Multi Modal\"',
'Roads','\"Electric Vehicles\"','Bus','\"Smart Cities\"','Car','Road','Walk',
'Pavement','Pedestrian','Electric','\"Traffic lights\"','Motorway']
filename = 'Reading_TweetList.txt'
# -
# Run the search and write matching tweets to the output file.
searcher = TweetSearch(filename,search_words,search_terms)
| Reading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Lists
#
# The goals of this lab are to help you understand:
#
# - List indexing and slicing
# - List methods such as insert, append, find, delete
# - How to iterate over lists with loops
#
# ## Python Lists work like Real-Life Lists
#
# In real life, we make lists all the time. To-Do lists. Shopping lists. Reading lists. These lists are collections of items, for example here's my shopping list:
#
# ```
# Milk, Eggs, Bread, Beer
# ```
#
# There are 4 items in this list.
#
# Likewise, we can make a similar list in Python, and count the number of items in the list using the `len()` function:
# + code_cell_type="run_code"
# A Python list literal; len() returns the number of items it holds.
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
item_count = len(shopping_list)
print("List: %s has %d items" % (shopping_list, item_count))
# -
# ## Enumerating the Items in a List
#
# In real-life, we *enumerate* lists all the time. We go through the items on our list one at a time and make a decision, for example: "Did I add that to my shopping cart yet?"
#
# In Python we go through items in our lists with the `for` loop. We use `for` because the number of items is pre-determined and thus a **definite** loop is the appropriate choice.
#
# Here's an example:
# + code_cell_type="run_code"
for item in shopping_list:
print("I need to buy some %s " % (item))
# + code_cell_type="run_code"
# or with f-strings
for item in shopping_list:
print(f"I need to buy some {item}")
# -
# ### 1.1 You Code
#
# Write code in the space below to print each stock on its own line. Use a `for` loop and an f-string to print `You own ` before the name of the stock.
# + code_cell_type="debug_code" label="1.1" solution=["stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']\n", "#TODO: Write code here\n", "for stock in stocks:\n", " print(f\"You own {stock}\")\n"]
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
# Print each stock on its own line, using a for loop and an f-string.
for stock in stocks:
    print(f"You own {stock}")
# -
# ## Indexing Lists
#
# Sometimes we refer to our items by their place in the list. For example "Milk is the first item on the list" or "Beer is the last item on the list."
#
# We can also do this in Python, and it is called *indexing* the list. It works the same as a **string slice.**
#
# **IMPORTANT** The first item in a Python lists starts at index **0**.
# + code_cell_type="run_code"
print("The first item in the list is:", shopping_list[0])
print("The last item in the list is:", shopping_list[3])
print("This is also the last item in the list:", shopping_list[-1])
print("This is the second to last item in the list:", shopping_list[-2])
# -
# ## For Loop with Index
#
# You can also loop through your Python list using an index. In this case we use the `range()` function to determine how many times we should loop, then index the item in the list using the iterator variable from the `for` loop.
# + code_cell_type="run_code"
for i in range(len(shopping_list)):
print("I need to buy some %s " % (shopping_list[i]))
# -
# ### 1.2 You Code
#
# Write code to print the 2nd and 4th stocks in the list variable `stocks`. Print them on the same line:
#
# For example:
#
# `AAPL MSFT`
# + code_cell_type="debug_code" label="1.2" solution=["stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']\n", "#TODO: Write code here\n", "print(stocks[1],stocks[3])\n"]
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
# The 2nd and 4th stocks live at indexes 1 and 3 (indexing starts at 0);
# passing both to one print() puts them on the same line.
print(stocks[1], stocks[3])
# -
# ## Lists are Mutable
#
# Unlike strings, lists are **mutable**. This means we can change a value in the list.
#
# For example, I want `'Craft Beer'` not just `'Beer'`. I need `Organic Eggs` not `Eggs`.
# + code_cell_type="run_code"
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print(f"Before: {shopping_list}")
# Lists are mutable: assign through an index to replace an item in place.
shopping_list[-1] = 'Craft Beer'
shopping_list[1] = 'Organic Eggs'
print(f"After {shopping_list}")
# -
# ## List Methods
#
# In your readings and class lecture, you encountered some list methods. These allow us to manipulate the list by adding or removing items.
# + code_cell_type="run_code"
def print_shopping_list(mylist):
    """Print the current contents of the shopping list."""
    print(f"My shopping list: {mylist}")
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print_shopping_list(shopping_list)
print("Adding 'Cheese' to the end of the list...")
shopping_list.append('Cheese') #add to end of list
print_shopping_list(shopping_list)
print("Adding 'Cereal' to position 0 in the list...")
shopping_list.insert(0,'Cereal') # add to the beginning of the list (position 0)
print_shopping_list(shopping_list)
print("Removing 'Cheese' from the list...")
shopping_list.remove('Cheese') # remove 'Cheese' from the list
print_shopping_list(shopping_list)
print("Removing item from position 0 in the list...")
del shopping_list[0] # remove item at position 0
print_shopping_list(shopping_list)
# -
# ### 1.3 You Code: Debug
#
# Debug this program which allows you to manage a list of stocks.
#
#
# This program will loop indefinitely. When you enter:
#
# - `A` it will ask you for a stock Symbol to add to the beginning of the list, then print the list.
# - `R` it will ask you for a stock Symbol to remove from the list, then print the list.
# - `Q` it will quit the program.
#
# Example Run:
#
# Enter Command: A, R, Q ?a
# Enter symbol to ADD: appl
# Your Stocks ['APPL']
# Enter Command: A, R, Q ?a
# Enter symbol to ADD: msft
# Your Stocks ['MSFT', 'APPL']
# Enter Command: A, R, Q ?a
# Enter symbol to ADD: amzn
# Your Stocks ['AMZN', 'MSFT', 'APPL']
# Enter Command: A, R, Q ?r
# Enter symbol to REMOVE: msft
# Your Stocks ['AMZN', 'APPL']
# Enter Command: A, R, Q ?q
#
# + code_cell_type="debug_code" label="1.3" solution=["stocks = []\n", "while True:\n", " choice = input(\"Enter Command: A, R, Q ?\").upper()\n", " if choice == 'Q':\n", " break\n", " elif choice == 'A':\n", " stock = input(\"Enter symbol to ADD: \").upper()\n", " stocks.insert(0, stock)\n", " print(f\"Your Stocks {stocks}\")\n", " elif choice == 'R':\n", " stock = input(\"Enter symbol to REMOVE: \").upper()\n", " stocks.remove(stock)\n", " print(f\"Your Stocks {stocks}\")\n", " else:\n", " print(\"Invalid Command!\") \n"]
# Fixed version of the buggy stock-list manager. Bugs corrected:
# - `while false` -> `while True` (there is no lowercase `false` in Python,
#   and we want an infinite loop ended by the Q command)
# - `stocks.insert(stock, 0)` -> `stocks.insert(0, stock)` (index comes first)
# - missing braces / missing `f` prefix in the two print statements
# - `stoscks.delete(stock)` -> `stocks.remove(stock)` (variable typo; lists
#   have no delete() method)
stocks = []
while True:
    choice = input("Enter Command: A, R, Q ?").upper()
    if choice == 'Q':
        break
    elif choice == 'A':
        stock = input("Enter symbol to ADD: ").upper()
        stocks.insert(0, stock)
        print(f"Your Stocks {stocks}")
    elif choice == 'R':
        stock = input("Enter symbol to REMOVE: ").upper()
        stocks.remove(stock)
        print(f"Your Stocks {stocks}")
    else:
        print("Invalid Command!")
# -
# ## Sorting
#
# Since Lists are mutable. You can use the `sort()` method to re-arrange the items in the list alphabetically (or numerically if it's a list of numbers)
# + code_cell_type="run_code"
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
print("Before Sort:", shopping_list)
shopping_list.sort()  # in-place; alphabetical order for strings
print("After Sort:", shopping_list)
# -
# ## The Magic behind `S.split()` and `S.join(list)`
#
# Now that we know about lists, we can revisit some of the more confusing string methods like `S.split()` and `S.join(list)`
#
# `S.split()` takes a string `S` and splits the string into a `list` of values.
#
# The split is based on the argument. For example, this splits a string `sentence` into a list `words`, splitting on whitespace.
# + code_cell_type="run_code"
sentence = "I like cheese"
words = sentence.split()
print(f"words is a {type(words)} values: {words}")
# -
# To demonstrate it's really a list, let's add a word to the list and then regenerate the sentence with the `S.join(list)` method.
#
#
# `S.join(list)` does the opposite of `split()` joins the `list` back together delimiting each item in the list with `S`.
#
# For example: `"-".join([1,2,3])` outputs: `1-2-3`
#
#
# Here we add `'swiss` to the list of `words` before `join()`ing back into a string `i like swiss cheese`.
#
# + code_cell_type="run_code"
words.insert(2,'swiss')  # put 'swiss' just before 'cheese'
print(words)
new_sentence = " ".join(words)  # re-join the tokens with single spaces
print(f"Joined back into a sentence: {new_sentence}")
# -
# ## The Magic behind `file.readlines()`
#
# With an understanding of lists, we can now better understand how `file.readlines()` actually works.
#
# The `file.readlines()` function reads in the entire contents of the file, spliting it into a list of lines. Each item in the list is a line in the file.
# + code_cell_type="run_code"
# readlines() returns the whole file as a list of lines
# (each line still ends with its '\n').
with open('shopping_list.txt','r') as f:
    lines = f.readlines()
print(f"This is a list: {lines}")
# -
# ## List Comprehensions
#
# If you look at the output of the previous example, you see the newline character `\n` at the end of some items in the list. To remove this, we could write a `for` loop to `strip()` the newline and then add it to another list. This is so common that Python has a shortcut way to do it, called a **list comprehension**.
#
# The list comprehension applies a function to each item in the list. It looks like this:
#
# `new_list = [ function for item in current_list ]`
#
# For example, to strip the newline:
# + code_cell_type="run_code"
print(f"Unstripped: {lines}")
# List comprehension
stripped_lines = [ line.strip() for line in lines ]
print(f"Stripped: {stripped_lines}")
# -
# In the above example:
#
# - The current list is `lines`
# - The new list is `stripped_lines` and
# - The function we apply is `strip()` to each `line` in the list of `lines`.
#
# List comprehension are handy when we need to parse and tokenize. With Python, we can do this in 2 lines of code.
#
#
# When you run this example, input exactly this:
#
# `1, 3.4, 5 ,-4`
#
# And marvel at how it gets converted into a list of actual numbers!
# + code_cell_type="run_code"
raw_input = input("Enter a comma-separated list of numbers: ")
raw_list = raw_input.split(',')
number_list = [ float(number) for number in raw_list ]
print(f"Raw Input: {raw_input}")
print(f"Tokenized Input {raw_list}")
print(f"Parsed to Numbers: {number_list}")
# -
# ## Putting it all together
#
# Winning Lotto numbers. When the lotto numbers are drawn, they are in *any* order, when they are presented they're always sorted lowest to highest. Let's write a program to input numbers, separated by a `,` then storing each to a list, coverting that list to a list of numbers, and then sorting/printing it.
#
# ALGORITHM:
#
# ```
# 1. input a comma-separated string of numbers
# 2. split the string into a list
# 3. parse the list of strings into a list of numbers
# 4. sort the list of numbers
# 5. print the sorted list of numbers like this:
# 'today's winning numbers are [1, 5, 17, 34, 56]'
# ```
#
#
# Sample Code Run:
#
# Enter lotto number drawing: 45, 13, 56, 8, 2
# Winning numbers are: [2, 8, 13, 45, 56]
#
# ### 1.4 You Code
# + code_cell_type="write_code" label="1.4" solution=["text = input(\"Enter winning lotto numbers: \")\n", "text_list = text.split(',')\n", "numbers = [ int(item) for item in text_list]\n", "numbers.sort()\n", "print(f\"Winning numbers are: {numbers}\")\n"]
## TODO: Write program here:
# -
# # Metacognition
#
#
# + [markdown] label="comfort_cell"
#
# ### Rate your comfort level with this week's material so far.
#
# **1** ==> I don't understand this at all yet and need extra help. If you choose this please try to articulate that which you do not understand to the best of your ability in the questions and comments section below.
# **2** ==> I can do this with help or guidance from other people or resources. If you choose this level, please indicate HOW this person helped you in the questions and comments section below.
# **3** ==> I can do this on my own without any help.
# **4** ==> I can do this on my own and can explain/teach how to do it to others.
#
# `--== Double-Click Here then Enter a Number 1 through 4 Below This Line ==--`
#
#
# + [markdown] label="questions_cell"
# ### Questions And Comments
#
# Record any questions or comments you have about this lab that you would like to discuss in your recitation. It is expected you will have questions if you did not complete the code sections correctly. Learning how to articulate what you do not understand is an important skill of critical thinking. Write them down here so that you remember to ask them in your recitation. We expect you will take responsibility for your learning and ask questions in class.
#
# `--== Double-click Here then Enter Your Questions Below this Line ==--`
#
# -
# run this code to turn in your work!
from coursetools.submission import Submission
Submission().submit()
| lessons/08-Lists/LAB-Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook reproduces the 'leave-one-feature-out' analyses
# ## Imports
# +
import numpy as np
import pandas as pd
from sherlock_helpers.constants import DATA_DIR, FIG_DIR
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# ## Set plotting params
# Global seaborn/matplotlib styling for the figure.
sns.set_context('poster')
sns.set_style('ticks')
sns.set_palette('muted')
# fonttype 42 (TrueType) keeps text editable in the exported PDF.
mpl.rcParams['pdf.fonttype'] = 42
# ## Load & format data
# +
# Load the precomputed leave-one-feature-out results.
feature_similarity = pd.read_pickle(DATA_DIR.joinpath('feature_similarity.p'))
feature_contribution = pd.read_pickle(DATA_DIR.joinpath('feature_contribution.p'))
feature_contribution.sort_values('full vid corr', inplace=True)
# One palette color per row, reused consistently across panels A and B.
feature_contribution['color'] = sns.color_palette()
# Order both axes of the similarity matrix by the 'All features' row/column.
feature_similarity.sort_values('All features', axis=0, ascending=False, inplace=True)
feature_similarity.sort_values('All features', axis=1, ascending=False, inplace=True)
all_features_corr = feature_contribution.loc['All features', 'vid rec corr':'color']
single_feat_corrs = feature_contribution.loc[:, 'vid rec corr':'color'][:-1]
# A, B, C hold the data for the three figure panels below.
A = feature_contribution[['full vid corr', 'color']][:-1]
B = pd.concat([all_features_corr.to_frame().T,
               single_feat_corrs.sort_values('vid rec corr')],
              axis=0)
C = feature_similarity
# -
# ## Plot
# +
# Three-panel summary figure of the leave-one-feature-out analyses.
fig, (ax_a, ax_b, ax_c) = plt.subplots(1, 3)
fig.set_size_inches(55, 10)
plt.subplots_adjust(wspace=.3)
# PANEL A: correlation with the full trajectory after removing each feature.
A['full vid corr'].plot(kind='bar', ylim=[0, 1], color=A['color'], ax=ax_a)
ax_a.text(-1, 1.05, 'A')
# Annotate each bar with its exact value.
for idx, corr in enumerate(A['full vid corr']):
    ax_a.text(idx - .1, 0.4, f'{corr:.5f}', rotation='vertical')
ax_a.set_ylabel('Correlation with full\ntrajectory structure', size=35, labelpad=15)
ax_a.set_xlabel('Feature removed', size=35, labelpad=20)
# PANEL B: video-recall correlation per removed feature, with error bars.
B['vid rec corr'].plot(kind='bar',
                       ylim=[0, .7],
                       yerr=B['vid rec sem'],
                       color=B['color'],
                       ax=ax_b)
# Dashed line separates the 'All features' bar from the leave-one-out bars.
ax_b.axvline(0.5, color='k', linestyle='dashed')
ax_b.text(-1, .74, 'B')
for idx, corr in enumerate(B['vid rec corr']):
    ax_b.text(idx - .13, 0.28, f'{corr:.5f}', rotation='vertical')
ax_b.set_ylabel('Average video-recall\ntrajectory structure correlation',
                size=35,
                labelpad=15)
ax_b.set_xlabel('Feature removed', size=35, labelpad=20)
# PANEL C: feature-by-feature similarity heatmap on a diverging scale.
cmap = sns.diverging_palette(240, 10, as_cmap=True)
sns.heatmap(C, vmin=-1, vmax=1, cmap=cmap, ax=ax_c)
ax_c.text(-.5, -.5, 'C')
plt.subplots_adjust()
# plt.savefig(FIG_DIR.joinpath('feature_value.pdf'), bbox_inches='tight')
plt.show()
| code/notebooks/supp/feature_importance_fig.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
# + code_folding=[]
def sigmoid(x):
    """Logistic function: map a scalar or ndarray into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
# -
# Evaluate the sigmoid on a dense grid covering [-10, 10).
z=np.arange(-10,10,0.1)
z
sig=sigmoid(z)
plt.plot(z,sig)
plt.yticks([0.0,0.5,1.0])
plt.grid(True)
# Overlay a straight line of slope 0.5 for comparison with the
# near-linear central region of the sigmoid.
plt.plot(z,0+0.5*z)
plt.grid(True)
| Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MARKETING_DATA_SCIENCE/01_Data_Science_for_Marketing_Analytics/Part_07/26_Exercise_26.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rotate GEM2.5 Research Forecast Winds
#
# This notebook documents the process for correcting the wind direction
# issue in the GEM2.5 research model forecast files
# (/results/forcing/atmospheric/GEM2.5/research/).
#
#
# Our collaborators at EC discovered in early March 2016 that:
#
# > [...]the U and V components of the wind in the netcdfs are labelled as the zonal and
# > meridional components of the wind while, in fact, they are the U and V components of the wind
# > on the model grid. In other words, the U (V) component of the wind does [not] lies[sic] in
# > the East-West (North-South) direction but along the horizontal (vertical) axis of the model
# > grid.
#
# Starting on 7-Apr-2016 the files we download from EC have correct zonal/meridional wind components.
# The files that we downloaded prior to that date need to have a rotational correction algorithm applied.
# The date range of the affected files is 10-Sep-2014 through 6-Apr-2016.
# The rotational correction algorithm provided by EC is:
#
# 1. Read the U and V components of the wind on the model grid.
# Let's call them `u_grid` and `v_grid` which are both 2D arrays.
#
# 2. Convert to polar coordinates:
# ```python
# modulus = sqrt(u_grid^2 + v_grid^2)
# rad_to_deg = 180 / pi
# angle_grid = arctan2(-u_grid, -v_grid) * rad_to_deg
# ```
# where:
# * Angles here are defined according to the meteorological wind direction convention.
# * We use the 2 argument 4 quadrant arctangent function (the `ATAN(Y,X)` of IDL or the `atan2(Y,X)` of Matlab)
#
# 3. Read the `correction_angle` variable in the provided `corr_fact_west.nc` file
# (stored as `/results/forcing/atmospheric/GEM2.5/research/corr_fact_west.nc`).
# This is a 2D array of the same dimension as u_grid and v_grid.
#
# 4. Apply the correction angle using the following formula:
# ```python
# angle_WESN = angle_grid + correction_angle
# ```
#
# 5. Calculate the W-E and S-N components of the wind:
# ```python
# deg_to_rad = pi / 180
# u_WE = -modulus * sin(angle_WESN * deg_to_rad) # the zonal component of the wind
# v_SN = -modulus * cos(angle_WESN * deg_to_rad) # the meridional component of the wind
# ```
import arrow
import numpy as np
from pathlib import Path
import xarray as xr
# +
# Apply EC's rotational correction (see the algorithm described above) to one
# forecast file, writing the result to a sibling *.corr.nc file.
corrections = '/results/forcing/atmospheric/GEM2.5/research/corr_fact_west.nc'
forecast = '/results/forcing/atmospheric/GEM2.5/research/res_y2016m04d06.nc'
corrected_forecast = str(Path(forecast).with_suffix('.corr.nc'))
with xr.open_dataset(corrections) as corr:
    with xr.open_dataset(forecast) as ds:
        # Step 2: convert the grid-aligned components to polar form
        # (meteorological wind-direction convention, hence the negations).
        modulus = np.sqrt(ds.u_wind**2 + ds.v_wind**2)
        angle_grid = np.rad2deg(np.arctan2(-ds.u_wind, -ds.v_wind))
        # Steps 3-4: rotate by the EC-supplied correction angle.
        angle_WESN = angle_grid + corr.correction_angle
        # Step 5: recover the true zonal (W-E) and meridional (S-N) components.
        u_WE = -modulus * np.sin(np.deg2rad(angle_WESN))
        v_SN = -modulus * np.cos(np.deg2rad(angle_WESN))
        ds.u_wind.values = u_WE
        ds.v_wind.values = v_SN
        # Record the correction in the file's provenance metadata.
        ds.attrs['history'] = '\n'.join((
            ds.history,
            '{} Correct wind components to true zonal/meridional values '
            'using EC supplied angles and algorithm.'
            .format(arrow.now().format('YYYY-MM-DD HH:mm:ss'))
        ))
        ds.to_netcdf(corrected_forecast, 'w')
| notebooks/RotateGEM2.5ResearchForecastWinds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="DTC2dIywpJVr"
# This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
#
# **If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
#
# This notebook was generated for TensorFlow 2.6.
#
# [](https://colab.research.google.com/github/achimr/deep-learning-with-python-notebooks/blob/master/chapter11_part01_introduction.ipynb)
# + [markdown] id="06krcg0_pJVv"
# # Deep learning for text
# + [markdown] id="8U44tCvjpJVy"
# ## Natural-language processing: The bird's eye view
# + [markdown] id="hgyFMchDpJV3"
# ## Preparing text data
# + [markdown] id="mkorNRsNpJV6"
# ### Text standardization
# + [markdown] id="1Aq7vakEpJV7"
# ### Text splitting (tokenization)
# + [markdown] id="xOhbwQ9opJV7"
# ### Vocabulary indexing
# + [markdown] id="NvXxeKR6pJV8"
# ### Using the TextVectorization layer
# + id="uo4LlSgBpJV-"
import string
class Vectorizer:
    """Minimal text vectorizer: lowercase/strip punctuation, build a
    vocabulary, and convert text to and from lists of integer indices.

    Index 0 is reserved for the empty string and index 1 for unknown
    tokens ("[UNK]").
    """

    def standardize(self, text):
        """Lowercase *text* and drop all ASCII punctuation."""
        lowered = text.lower()
        kept = [ch for ch in lowered if ch not in string.punctuation]
        return "".join(kept)

    def tokenize(self, text):
        """Standardize *text* and split it on whitespace."""
        return self.standardize(text).split()

    def make_vocabulary(self, dataset):
        """Assign an integer index to every token seen in *dataset*."""
        vocab = {"": 0, "[UNK]": 1}
        for sample in dataset:
            cleaned = self.standardize(sample)
            for token in self.tokenize(cleaned):
                vocab.setdefault(token, len(vocab))
        self.vocabulary = vocab
        self.inverse_vocabulary = {index: token for token, index in vocab.items()}

    def encode(self, text):
        """Map *text* to token indices; unknown tokens become 1."""
        cleaned = self.standardize(text)
        return [self.vocabulary.get(token, 1) for token in self.tokenize(cleaned)]

    def decode(self, int_sequence):
        """Map indices back to tokens; unknown indices become "[UNK]"."""
        return " ".join(
            self.inverse_vocabulary.get(index, "[UNK]") for index in int_sequence)
# Build the vocabulary from a tiny three-line corpus.
vectorizer = Vectorizer()
dataset = [
    "I write, erase, rewrite",
    "Erase again, and then",
    "A poppy blooms.",
]
vectorizer.make_vocabulary(dataset)
# + id="z09Lr0utpJWB"
test_sentence = "I write, rewrite, and still rewrite again"
encoded_sentence = vectorizer.encode(test_sentence)
print(encoded_sentence)
# + id="jRS9tyyWpJWD"
decoded_sentence = vectorizer.decode(encoded_sentence)
print(decoded_sentence)
# + id="ujpoqso2pJWE"
from tensorflow.keras.layers import TextVectorization
text_vectorization = TextVectorization(
output_mode="int",
)
# + id="sfJgIXbjpJWF"
import re
import string
import tensorflow as tf
def custom_standardization_fn(string_tensor):
    """Lowercase the tensor and strip all ASCII punctuation (TF string ops)."""
    lowercase_string = tf.strings.lower(string_tensor)
    return tf.strings.regex_replace(
        lowercase_string, f"[{re.escape(string.punctuation)}]", "")
def custom_split_fn(string_tensor):
    """Split the standardized tensor on whitespace."""
    return tf.strings.split(string_tensor)
# Same layer as above, but with our own standardization/splitting callables.
text_vectorization = TextVectorization(
    output_mode="int",
    standardize=custom_standardization_fn,
    split=custom_split_fn,
)
# + id="_WzuKiQtpJWG"
dataset = [
"I write, erase, rewrite",
"Erase again, and then",
"A poppy blooms.",
]
text_vectorization.adapt(dataset)
# + [markdown] id="5JboPSEDpJWI"
# **Displaying the vocabulary**
# + id="nNvWZAU3pJWJ"
text_vectorization.get_vocabulary()
# + id="D8M5xc8JpJWK"
vocabulary = text_vectorization.get_vocabulary()
test_sentence = "I write, rewrite, and still rewrite again"
encoded_sentence = text_vectorization(test_sentence)
print(encoded_sentence)
# + id="ap3Hf7DtpJWL"
inverse_vocab = dict(enumerate(vocabulary))
decoded_sentence = " ".join(inverse_vocab[int(i)] for i in encoded_sentence)
print(decoded_sentence)
# + [markdown] id="UuuzeCRT0LC1"
# Back to slides ↩
# + [markdown] id="55H8EreApJWL"
# ## Two approaches for representing groups of words: Sets and sequences
# + [markdown] id="yjpJZTEWpJWL"
# ### Preparing the IMDB movie reviews data
# + id="eHAxwSM7pJWM"
# !curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# !tar -xf aclImdb_v1.tar.gz
# + id="vyKQpO9jpJWM"
# !rm -r aclImdb/train/unsup
# + id="I6aM0u9PpJWN"
# !cat aclImdb/train/pos/4077_10.txt
# + [markdown] id="O4TtdCTtm0i7"
# Notice the shuffling of the files before holding out the validation dataset to avoid bias/unbalanced data between the validation and training dataset.
# + id="28uWURYdpJWO"
import os, pathlib, shutil, random
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
# Carve a validation set out of train/: shuffle each class with a fixed
# seed (reproducible split) and move the last 20% of files to val/.
for category in ("neg", "pos"):
    os.makedirs(val_dir / category)
    files = os.listdir(train_dir / category)
    random.Random(1337).shuffle(files)
    num_val_samples = int(0.2 * len(files))
    val_files = files[-num_val_samples:]
    for fname in val_files:
        shutil.move(train_dir / category / fname,
                    val_dir / category / fname)
# + [markdown] id="w_JiUnJEnKYs"
# Data preparation with the [tf.keras.utils.text_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/utils/text_dataset_from_directory) function.
# Targets are `int32` tensors encoding values 0 and 1.
# + id="fUh7lwLCpJWP"
from tensorflow import keras
batch_size = 32
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
# + [markdown] id="0Hhv5XGapJWQ"
# **Displaying the shapes and dtypes of the first batch**
# + id="KSstihJupJWR"
for inputs, targets in train_ds:
print("inputs.shape:", inputs.shape)
print("inputs.dtype:", inputs.dtype)
print("targets.shape:", targets.shape)
print("targets.dtype:", targets.dtype)
print("inputs[0]:", inputs[0])
print("targets[0]:", targets[0])
break
# + [markdown] id="UdhlzDnSpJWR"
# ### Processing words as a set: The bag-of-words approach
# + [markdown] id="fD8EHOWDpJWS"
# #### Single words (unigrams) with binary encoding
# + [markdown] id="BJY8xTfTpJWS"
# **Preprocessing our datasets with a `TextVectorization` layer**
#
# `multi_hot` encoding possible because there is no order in a unigram bag-of-words model, e.g. "the cat sat on the mat" is the unigram set {"cat", "mat", "on", "sat", "the"}.
#
# Notice the use of multiple CPU kernels for this preprocessing.
# + id="omIgAhTppJWT"
from tensorflow.keras.layers import TextVectorization
text_vectorization = TextVectorization(
max_tokens=20000,
output_mode="multi_hot",
)
text_only_train_ds = train_ds.map(lambda x, y: x)
text_vectorization.adapt(text_only_train_ds)
binary_1gram_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
binary_1gram_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
binary_1gram_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
# + [markdown] id="Hthn17sVpJWT"
# **Inspecting the output of our binary unigram dataset**
# + id="77X9KMOvpJWU"
for inputs, targets in binary_1gram_train_ds:
print("inputs.shape:", inputs.shape)
print("inputs.dtype:", inputs.dtype)
print("targets.shape:", targets.shape)
print("targets.dtype:", targets.dtype)
print("inputs[0]:", inputs[0])
print("targets[0]:", targets[0])
break
# + [markdown] id="14Vn7xVWpJWV"
# **Our model-building utility**
# + [markdown] id="qCdhdt2uqyrG"
# Reusable model building function.
# + id="uQwUFr4-pJWW"
from tensorflow import keras
from tensorflow.keras import layers
def get_model(max_tokens=20000, hidden_dim=16):
    """Build and compile a small dense classifier for multi-hot text vectors.

    Args:
        max_tokens: length of the input vector (vocabulary size).
        hidden_dim: width of the single hidden layer.

    Returns:
        A compiled binary-classification `keras.Model` (rmsprop optimizer,
        binary cross-entropy loss, accuracy metric).
    """
    bag_of_words = keras.Input(shape=(max_tokens,))
    hidden = layers.Dense(hidden_dim, activation="relu")(bag_of_words)
    hidden = layers.Dropout(0.5)(hidden)
    prediction = layers.Dense(1, activation="sigmoid")(hidden)
    classifier = keras.Model(bag_of_words, prediction)
    classifier.compile(optimizer="rmsprop",
                       loss="binary_crossentropy",
                       metrics=["accuracy"])
    return classifier
# + [markdown] id="e24PYyxupJWX"
# **Training and testing the binary unigram model**
# + id="-JXnODIYpJWY"
model = get_model()
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("binary_1gram.keras",
save_best_only=True)
]
model.fit(binary_1gram_train_ds.cache(),
validation_data=binary_1gram_val_ds.cache(),
epochs=10,
callbacks=callbacks)
model = keras.models.load_model("binary_1gram.keras")
print(f"Test acc: {model.evaluate(binary_1gram_test_ds)[1]:.3f}")
# + [markdown] id="y9dSJ7eXrjr4"
# Naive baseline: Random pick/coin toss. Test accuracy: 50%
#
# Maximum accuracy on this dataset without use of external data: 95% (this was established with experimentation, most likely)
# + [markdown] id="Zt_mXeEwpJWZ"
# #### Bigrams with binary encoding
#
# With bigram encoding our sentence becomes {"the", "the cat", "cat", "cat sat", "sat", "sat on", "on", "on the", "the mat", "mat"}. This is inserting local order information.
# + [markdown] id="2pjeSaQ6pJWa"
# **Configuring the `TextVectorization` layer to return bigrams**
#
# Tokens are unigrams and bigrams - reduction in vocabulary.
# + id="WauGyb7KpJWb"
text_vectorization = TextVectorization(
ngrams=2,
max_tokens=20000,
output_mode="multi_hot",
)
# + id="KayUCpGKsSFA"
text_vectorization.adapt(text_only_train_ds)
binary_2gram_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
binary_2gram_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
binary_2gram_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
# + id="FvQCrVmKsJOQ"
for inputs, targets in binary_2gram_train_ds:
print("inputs.shape:", inputs.shape)
print("inputs.dtype:", inputs.dtype)
print("targets.shape:", targets.shape)
print("targets.dtype:", targets.dtype)
print("inputs[0]:", inputs[0])
print("targets[0]:", targets[0])
break
# + [markdown] id="HKO0Fw_MpJWb"
# **Training and testing the binary bigram model**
# + id="ZGp625UepJWd"
model = get_model()
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("binary_2gram.keras",
save_best_only=True)
]
model.fit(binary_2gram_train_ds.cache(),
validation_data=binary_2gram_val_ds.cache(),
epochs=10,
callbacks=callbacks)
model = keras.models.load_model("binary_2gram.keras")
print(f"Test acc: {model.evaluate(binary_2gram_test_ds)[1]:.3f}")
# + [markdown] id="E3EWPuQB5eHA"
# Back to slides ↩
# + [markdown] id="hyjzMU-0pJWe"
# #### Bigrams with TF-IDF encoding
# + [markdown] id="GjwGwvjypJWe"
# **Configuring the `TextVectorization` layer to return token counts**
# + id="2oo7oX_upJWf"
text_vectorization = TextVectorization(
ngrams=2,
max_tokens=20000,
output_mode="count"
)
# + [markdown] id="pe-PZEaipJWf"
# **Configuring `TextVectorization` to return TF-IDF-weighted outputs**
# + id="vgyXSr63pJWg"
text_vectorization = TextVectorization(
ngrams=2,
max_tokens=20000,
output_mode="tf_idf",
)
# + [markdown] id="GQoqjkcZpJWg"
# **Training and testing the TF-IDF bigram model**
# + id="vqZw6mwTpJWh"
text_vectorization.adapt(text_only_train_ds)
tfidf_2gram_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
tfidf_2gram_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
tfidf_2gram_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
model = get_model()
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("tfidf_2gram.keras",
save_best_only=True)
]
model.fit(tfidf_2gram_train_ds.cache(),
validation_data=tfidf_2gram_val_ds.cache(),
epochs=10,
callbacks=callbacks)
model = keras.models.load_model("tfidf_2gram.keras")
print(f"Test acc: {model.evaluate(tfidf_2gram_test_ds)[1]:.3f}")
# + [markdown] id="oaOYp-8u52Nk"
# #### Incorporating text preprocessing in the model
# + id="2f7J3c91pJWi"
inputs = keras.Input(shape=(1,), dtype="string")
processed_inputs = text_vectorization(inputs)
outputs = model(processed_inputs)
inference_model = keras.Model(inputs, outputs)
# + id="YlDyubkfpJWj"
import tensorflow as tf
raw_text_data = tf.convert_to_tensor([
["That was an excellent movie, I loved it."],
])
predictions = inference_model(raw_text_data)
print(f"{float(predictions[0] * 100):.2f} percent positive")
| chapter11_part01_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Prompt for three values; end=' ' keeps each answer on the same line as its question.
print("How old are you?", end=' ')
age = input()
print("How tall are you?", end=' ')
height = input()
print("How much do you weigh?", end=' ')
weight = input()
# BUG FIX: `f "..."` (whitespace between the prefix and the quote) is a
# SyntaxError; the f prefix must touch the opening quote.
print(f"So, you're {age} old, {height} tall and {weight} heavy.")

from sys import argv

script, filename = argv
# Use context managers so the files are closed even if reading fails.
with open(filename) as txt:
    print(f"Here's your file (unknown):")
    print(txt.read())

print("Type the filename again:")
file_again = input("> ")
# BUG FIX: `txt_again_read()` was an undefined name; the intended call is
# the `.read()` method of the file object.
with open(file_again) as txt_again:
    print(txt_again.read())

print("Let's practice everything.")
print('You\'d need to know \'bout escapes with \\ that do:')
print('\n newlines and \t tabs.')

poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""

print("--------------")
print(poem)
print("--------------")

five = 10 - 2 + 3 - 6
print(f"This should be five: {five}")
def secret_formula(started):
    """Scale a starting count into jelly beans, jars, and crates.

    Each quantity is derived from the previous one: 500 beans per
    starting unit, 1000 beans per jar, 100 jars per crate.

    Returns:
        A (jelly_beans, jars, crates) tuple.
    """
    bean_count = started * 500
    jar_count = bean_count / 1000
    crate_count = jar_count / 100
    return bean_count, jar_count, crate_count
start_point = 10000
beans, jars, crates = secret_formula(start_point)

# remember that this is another way to format a string
print("With a starting point of: {}".format(start_point))
# it's just like with an f"" string
# BUG FIX: the f prefix must touch the opening quote (`f "..."` is a SyntaxError).
print(f"We'd have {beans} beans, {jars} jars, and {crates} crates.")

start_point = start_point / 10
print("We can also do that this way:")
formula = secret_formula(start_point)
# this is an easy way to apply a tuple of results to a format string
print("We'd have {} beans, {} jars, and {} crates.".format(*formula))

# BUG FIX: the cat count was assigned to a misspelled name (`cates`), so
# every `cats` comparison below raised NameError.
people = 20
cats = 30
dogs = 15

if people < cats:
    print("Too many cats! The world is doomed!")
if people > cats:
    print("Not many cats! The world is saved!")
if people < dogs:
    print("The world is drooled on!")
# BUG FIX: missing ':' after the condition.
if people > dogs:
    print("The world is dry!")

dogs += 5
if people >= dogs:
    print("People are greater than or equal to dogs.")
# BUG FIX: missing ':' and an unterminated string literal.
if people <= dogs:
    print("People are less than or equal to dogs.")
if people == dogs:
    print("People are dogs.")
| E26.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## ================ Muti-layer Perceptron (Boston dataset) ===================
# st8_Zhang_Dong_code
#
# Created on 30th June, Wed, 2021
#
# - To predict the crime rate in each town
# - regression problem
#
# ### please read the readme.txt file first.
# +
# import packages
from sklearn import datasets
from sklearn import metrics
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import cross_val_score
from sklearn.exceptions import ConvergenceWarning
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# +
# always print floating point numbers using fixed point notation
np.set_printoptions(suppress=True)
# set plotting style
plt.style.use('ggplot')
# import warnings filter
from warnings import simplefilter
# ignore all future warnings and Convergence Warning
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=ConvergenceWarning)
# -
# Load the Boston dataset; header=0 treats the first CSV row as column names.
df = pd.read_csv('../Boston.csv', header=0)
df.head()
# Convert the pandas DataFrame to a plain numpy array (column labels are dropped).
df = np.array(df)
# Number of columns (features + target), not the number of rows.
nums = df.shape[1]
# Show a small sample of the data and its width.
print('The first three rows of the dataset:\n', df[:3,:])
print('\nThe total columns of dataset is: {}'.format(nums))
# +
# Shuffle rows in place so the 80/20 split below is not order-biased.
np.random.shuffle(df)
# split the dataset: train_set & test_set
ratio = 0.8
offset = int(df.shape[0] * ratio)
train_data = df[:offset]
# Standardize every column using statistics computed on the TRAINING rows only,
# then apply them to the whole array (avoids leaking test-set statistics).
# Note that the target column (index 0) is standardized as well.
std = train_data.std(axis=0)
mean = train_data.mean(axis=0)
for i in range(nums):
    # x <- (x-u)/s  (z-score normalization, column by column, in place)
    df[:, i] = (df[:, i] - mean[i]) / (std[i])
# Re-slice after normalization so both splits see the scaled values.
train_data = df[:offset]
test_data = df[offset:]
# The first column ("crim", the per-town crime rate) is the regression target.
X_train = train_data[:, 1:] # training set: data
y_train = train_data[:, 0] # training set: target
X_test = test_data[:, 1:] # testing set: data
y_test = test_data[:, 0] # testing set: target
# -
print('The shape of X_train: ', X_train.shape)
print(X_train[0])
print('The shape of y_train: ', y_train.shape)
print(y_train[0])
print('The shape of X_test ', X_test.shape)
print(X_test[0])
print('The shape of y_test: ', y_test.shape)
print(y_test[0])
# ### ===================== Multilayer perceptron ======================
# +
# MLP model
# alpha_l1: the alpha for L1 regularization
# alpha_l2: the alpha for L2 regularization
# to set alpha_l1=0 and alpha_l2=0, so there is no regularization used in the model
model_regression = MLPRegressor(alpha_l1 = 0.0, alpha_l2=0.0)
# Train the model
model_regression.fit(X_train, y_train)
# train set
# expected_x = y_train
# predicted_x = model_regression.predict(X_train)
# print('MLP: The R2 score for training set is: ', metrics.r2_score(expected_x, predicted_x))
# print('MLP: MSE on training set:', metrics.mean_squared_error(expected_x, predicted_x))
# print('')
# test set
expected_y = y_test
predicted_y = model_regression.predict(X_test)
print('MLP: The R2 score for testing is: ', metrics.r2_score(expected_y, predicted_y))
print('MLP: MSE on testing set:', metrics.mean_squared_error(expected_y, predicted_y))
# -
# ### ================== Multilayer perceptron with Lasso ===================
# +
# Grid-search the L1 penalty weight alpha_l1 over (0, 2] in steps of 0.1.
# NOTE(review): alpha_l1/alpha_l2 are not standard sklearn MLPRegressor
# parameters; this relies on the project's modified sklearn build (see
# readme.txt referenced at the top of the notebook) — confirm before reuse.
# Row layout of both result lists: [alpha_l1, R^2, MSE].
l1_param_train = [[], [], []] # alpha_l1, score_l1_R2, score_l1_RME
l1_param_test = [[], [], []] # alpha_l1, score_l1_R2, score_l1_RME
expected_l1_x = y_train
expected_l1_y = y_test
# Here, the idea of grid search is applied to train the Lasso-style model
# to find the optimal alpha value in the interval of (0, 2], step 0.1.
for alpha_train_l1 in range(1,21):
    # L1 penalty only: alpha_l2 is pinned to 0 for the lasso variant.
    mlp_l1 = MLPRegressor(alpha_l1 = alpha_train_l1*0.1, alpha_l2=0.0)
    mlp_l1.fit(X_train, y_train)
    # Score on the training split.
    predicted_l1_x = mlp_l1.predict(X_train)
    s_l1_train_r2 = metrics.r2_score(expected_l1_x, predicted_l1_x)
    s_l1_train_mse = metrics.mean_squared_error(expected_l1_x, predicted_l1_x)
    # append training results to the l1_param_train
    l1_param_train[0].append(alpha_train_l1*0.1)
    l1_param_train[1].append(s_l1_train_r2)
    l1_param_train[2].append(s_l1_train_mse)
    # Score on the held-out test split.
    predicted_l1_y = mlp_l1.predict(X_test)
    s_l1_test_r2 = metrics.r2_score(expected_l1_y, predicted_l1_y)
    s_l1_test_mse = metrics.mean_squared_error(expected_l1_y, predicted_l1_y)
    # append testing results to the l1_param_test
    l1_param_test[0].append(alpha_train_l1*0.1)
    l1_param_test[1].append(s_l1_test_r2)
    l1_param_test[2].append(s_l1_test_mse)
# -
# %matplotlib inline
# plot alpha vs. R^2
plt.title('MLP with L1')
plt.plot(l1_param_train[0], l1_param_train[1], label='R^2_train')
plt.plot(l1_param_train[0], l1_param_test[1], label='R^2_test')
plt.xlabel("alpha")
plt.legend()
plt.show()
# %matplotlib inline
# plot alpha vs. RMSE
plt.title('MLP with L1')
plt.plot(l1_param_train[0], l1_param_train[2], label='MSE_train')
plt.plot(l1_param_train[0], l1_param_test[2], label='MSE_test')
plt.xlabel("alpha")
plt.legend()
plt.show()
# Report the best L1 result on the testing data over alpha in (0, 2].
l1_param_test = np.array(l1_param_test)
# argmin along axis=1 gives, per row, the index of that row's minimum;
# row 2 holds MSE, so a_l1_test[2] indexes the smallest test MSE.
# R^2 = 1 - SSR/SST, so R^2 is maximal exactly where MSE is minimal.
a_l1_test = np.argmin(l1_param_test, axis=1)
# print(a_l1_test)
print('The best result for MLP with L1 regularization: \nalpha_l1 = {} \nR^2 = {} \nMSE = {}'
      .format(l1_param_test[0][a_l1_test[2]], l1_param_test[1][a_l1_test[2]], l1_param_test[2][a_l1_test[2]]))
# ### ====================== MLP with Ridge ======================
# +
# train alpha_l2
# create an empty list to store training result
l2_param_train = [[], [], []] # alpha_l2, score_l2_R2, score_l2_RME
l2_param_test = [[], [], []] # alpha_l2, score_l2_R2, score_l2_RME
expected_l2_x = y_train
expected_l2_y = y_test
# grid search
# to find the optimal alpha value in the interval of (0,2]
# step length is 0.1
for alpha_train_l2 in range(1,21):
# using Grid Search to search the best alpha in [0,2]
# MLP model with L1 penalty
# in ridge model, set alpha_l1 = 0.0
mlp_l2 = MLPRegressor(alpha_l1 = 0.0, alpha_l2=alpha_train_l2*0.1)
mlp_l2.fit(X_train, y_train)
# training set
predicted_l2_x = mlp_l2.predict(X_train)
s_l2_train_r2 = metrics.r2_score(expected_l2_x, predicted_l2_x)
s_l2_train_mse = metrics.mean_squared_error(expected_l2_x, predicted_l2_x)
# append training results to the l2_param_train
l2_param_train[0].append(alpha_train_l2*0.1)
l2_param_train[1].append(s_l2_train_r2)
l2_param_train[2].append(s_l2_train_mse)
# testing set
predicted_l2_y = mlp_l2.predict(X_test)
s_l2_test_r2 = metrics.r2_score(expected_l2_y, predicted_l2_y)
s_l2_test_mse = metrics.mean_squared_error(expected_l2_y, predicted_l2_y)
# append testing results to the l2_param_train
l2_param_test[0].append(alpha_train_l2*0.1)
l2_param_test[1].append(s_l2_test_r2)
l2_param_test[2].append(s_l2_test_mse)
# -
# %matplotlib inline
# plot alpha vs. R^2
plt.title('MLP with L2')
plt.plot(l2_param_train[0], l2_param_train[1], label='R^2_train')
plt.plot(l2_param_train[0], l2_param_test[1], label='R^2_test')
plt.xlabel("alpha")
plt.legend()
plt.show()
# %matplotlib inline
# plot alpha vs. RMSE
plt.title('MLP with L2')
plt.plot(l2_param_train[0], l2_param_train[2], label='MSE_train')
plt.plot(l2_param_train[0], l2_param_test[2], label='MSE_test')
plt.xlabel("alpha")
plt.legend()
plt.show()
# print the result of MLP_l2 on testing data in [0,2]
l2_param_test = np.array(l2_param_test)
# get the index of minimum mse
a_l2_test = np.argmin(l2_param_test, axis=1)
# print(a_l2_test)
print('The best result for MLP with L2 regularization: \nalpha_l2 = {} \nR^2 = {} \nMSE = {}'
.format(l2_param_test[0][a_l2_test[2]], l2_param_test[1][a_l2_test[2]], l2_param_test[2][a_l2_test[2]]))
# ### ============== MLP with elastic net ================
# +
# train alpha_l1_l2
# create an empty list to store training result
l1_l2_param_train = [[], [], [], []] # alpha_l1_l2, score_l1_l2_R2, score_l1_l2_RME
l1_l2_param_test = [[], [], [], []] # alpha_l1_l2, score_l1_l2_R2, score_l1_l2_RME
expected_l1_l2_x = y_train
expected_l1_l2_y = y_test
# grid search
# to find the optimal alpha value in the interval of (0,2]
# step length is 0.1
for alpha_l1 in range(1,21):
for alpha_l2 in range(1,21):
# using Grid Search to search the best alpha in [0,2]
# MLP model with L1 & L2 penalty
mlp_l1_l2 = MLPRegressor(alpha_l1=alpha_l1*0.1, alpha_l2=alpha_l2*0.1)
mlp_l1_l2.fit(X_train, y_train)
# training set
predicted_l1_l2_x = mlp_l1_l2.predict(X_train)
s_l1_l2_train_r2 = metrics.r2_score(expected_l1_l2_x, predicted_l1_l2_x)
s_l1_l2_train_mse = metrics.mean_squared_error(expected_l1_l2_x, predicted_l1_l2_x)
# append training results to the l1_l2_param_train
l1_l2_param_train[0].append(alpha_l1*0.1)
l1_l2_param_train[1].append(alpha_l2*0.1)
l1_l2_param_train[2].append(s_l1_l2_train_r2)
l1_l2_param_train[3].append(s_l1_l2_train_mse)
# testing set
predicted_l1_l2_y = mlp_l1_l2.predict(X_test)
s_l1_l2_test_r2 = metrics.r2_score(expected_l1_l2_y, predicted_l1_l2_y)
s_l1_l2_test_mse = metrics.mean_squared_error(expected_l1_l2_y, predicted_l1_l2_y)
# append testing results to the l1_l2_param_test
l1_l2_param_test[0].append(alpha_l1*0.1)
l1_l2_param_test[1].append(alpha_l2*0.1)
l1_l2_param_test[2].append(s_l1_l2_test_r2)
l1_l2_param_test[3].append(s_l1_l2_test_mse)
# +
# you can save the result to a local .csv file by the following code
# np.savetxt("MLP_L1&L2_Boston.csv", l1_l2_param_test.T, delimiter =",",fmt ='% s')
# -
# print the result of MLP_l1_l2 on testing data in [0,2]
l1_l2_param_test = np.array(l1_l2_param_test)
a_l1_l2_test = np.argmin(l1_l2_param_test, axis=1)
# print(a_l1_l2_test)
print('The best result for MLP with L1&L2 regularization: \nalpha_l1 = {} \nalpha_l2 = {} \nR^2 = {} \nMSE = {}'
.format(l1_l2_param_test[0][a_l1_l2_test[3]], l1_l2_param_test[1][a_l1_l2_test[3]],
l1_l2_param_test[2][a_l1_l2_test[3]], l1_l2_param_test[3][a_l1_l2_test[3]]))
# +
# plot the result of MLP with L1&L2 regularization
# %matplotlib inline
fig = plt.figure()
ax = Axes3D(fig)
plt.rcParams['axes.unicode_minus']=False # to make "-" show normally
# plotting
surf = ax.scatter(l1_l2_param_test[0],l1_l2_param_test[1],l1_l2_param_test[2],cmap="Blues")
# plt.colorbar(surf, shrink=0.5, aspect=5)
plt.title('MLP with L1 & L2: testing set R^2')
ax.view_init(elev=15, azim=15)
ax.set_zlabel('score', fontdict={'size': 10, 'color': 'red'})
ax.set_ylabel('alpha_l2', fontdict={'size': 10, 'color': 'red'})
ax.set_xlabel('alpha_l1', fontdict={'size': 10, 'color': 'red'})
# +
# plot the result of MLP with L1&L2 regularization
# %matplotlib inline
fig = plt.figure()
ax = Axes3D(fig)
plt.rcParams['axes.unicode_minus']=False # to make "-" show normally
# plotting
surf = ax.scatter(l1_l2_param_test[0],l1_l2_param_test[1],l1_l2_param_test[3],cmap="Blues")
ax.view_init(elev=15, azim=15)
# setting title and labels
plt.title('MLP with L1 & L2: testing set SME')
ax.set_zlabel('score', fontdict={'size': 10, 'color': 'red'})
ax.set_ylabel('alpha_l2', fontdict={'size': 10, 'color': 'red'})
ax.set_xlabel('alpha_l1', fontdict={'size': 10, 'color': 'red'})
# -
# ### ================ Grid search for other parameters ===================
# +
param_train = [[],[],[],[]] # alpha_l1, alpha_l2, score_l1_l2_R2, score_l1_l2_RME
param_test = [[],[],[],[]] # alpha_l1, alpha_l2, score_l1_l2_R2, score_l1_l2_RME
expected_x = y_train
expected_y = y_test
# here, we test two different hidden layer sizes and three different solvers
# hidden_layer_sizes: (100,), (100, 30)
# solver: 'adam', 'sgd', 'lbfgs'
# the value of alpha_l1 and alpha_l2 are from the well-trained model before
for hidden_layer_sizes in [(100,), (100, 30)]:
for solver in ['adam', 'sgd', 'lbfgs']:
MLP = MLPRegressor(alpha_l1=1.8, alpha_l2=0.4,
hidden_layer_sizes=hidden_layer_sizes, solver=solver)
MLP.fit(X_train, y_train)
# training set
predicted_x = MLP.predict(X_train)
s_train_r2 = metrics.r2_score(expected_x, predicted_x)
s_train_mse = metrics.mean_squared_error(expected_x, predicted_x)
# append training result to the param_train list
param_train[0].append(hidden_layer_sizes)
param_train[1].append(solver)
param_train[2].append(s_train_r2)
param_train[3].append(s_train_mse)
# testing set
predicted_y = MLP.predict(X_test)
s_test_r2 = metrics.r2_score(expected_y, predicted_y)
s_test_mse = metrics.mean_squared_error(expected_y, predicted_y)
# append testing result to the param_test list
param_test[0].append(hidden_layer_sizes)
param_test[1].append(solver)
param_test[2].append(s_test_r2)
param_test[3].append(s_test_mse)
# -
# print the result of MLP
param_test = np.array(param_test)
# get the index of minimum mse
a_test = np.argmin(param_test, axis=1)
# print(a_test)
print('The best result for MLP:')
print('alpha_l1 = {} \nalpha_l2 = {}'.format(1.8, 0.4))
print('hidden_layer_sizes = {} \nsolver = {} \nR^2 = {} \nMSE = {}'
.format(param_test[0][a_test[3]], param_test[1][a_test[3]],
param_test[2][a_test[3]], param_test[3][a_test[3]]))
# +
# utilizing the trained parameters
MLP_op = MLPRegressor(alpha_l1=1.8, alpha_l2=0.4, hidden_layer_sizes=(100,), solver='lbfgs')
MLP_op.fit(X_train, y_train)
expected_op_y = y_test
predicted_op_y = MLP_op.predict(X_test)
# plot the fitting diagram of y and yhat on testing dataset
plt.figure(figsize=(8, 8))
plt.xlabel("y")
plt.ylabel("yhat")
sns.regplot(expected_op_y, predicted_op_y, fit_reg=True, scatter_kws={"s": 30})
# -
# ### k-fold validation
# - Below show the k-fold (k=5 & k=10) cross validation for the MLP_op model.
# - In the previous experiment, the whole data set was divided into training set and test set, so it was no longer necessary to use k-fold for validation.
# - The results below are only for the purpose of showing the results of model training.
# +
# K-fold cross validation k = 5
print('k-fold cross validation:\n')
scores_5 = []
scores_5 = cross_val_score(MLP_op, X_train, y_train, cv=5, scoring='neg_mean_squared_error', n_jobs=1)
print('MSE Loss for k = 5:\n %s\n'% scores_5)
# neg_mean_squared_error return the negative mse, but mse should be a positive number
# fix the sign of MSE scores
mse_scores_5 = -scores_5
print ('Fixed MSE Loss for k = 5:\n %s\n'% mse_scores_5)
print('In conclusion: MSE Loss for k = 5: %3f +/- %3f'% (np.mean(mse_scores_5), np.std(mse_scores_5)))
# +
# K-fold cross validation k = 10
print('k-fold cross validation:\n')
scores_10 = []
scores_10 = cross_val_score(MLP_op, X_train, y_train, cv=10, scoring='neg_mean_squared_error', n_jobs=1)
print('MSE Loss for k = 10:\n %s\n'% scores_10)
# neg_mean_squared_error return the negative mse, but mse should be a positive number
# fix the sign of MSE scores
mse_scores_10 = -scores_10
print ('Fixed MSE Loss for k = 10:\n %s\n'% mse_scores_10)
print('In conclusion: MSE Loss for k = 10: %3f +/- %3f'% (np.mean(mse_scores_10), np.std(mse_scores_10)))
| data_analytics/MLPRegression_Boston_crime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # A Guided Tour of Ray Core: Remote Objects
# [*Remote Objects*](https://docs.ray.io/en/latest/walkthrough.html#objects-in-ray)
# implement a [*shared-memory object store*](https://en.wikipedia.org/wiki/Shared_memory) pattern.
#
# Objects are immutable, and can be accessed from anywhere on the cluster, as they are stored in the cluster shared memory.
#
# <img src="images/shared_memory.png" height=350, width=650>
#
# In general, small objects are stored in their owner’s **in-process store** while large objects are stored in the **distributed object store**. This decision is meant to reduce the memory footprint and resolution time for each object. Note that in the latter case, a placeholder object is stored in the in-process store to indicate the object has been promoted to shared memory.
#
# [Ray Architecture Reference](https://docs.google.com/document/d/1lAy0Owi-vPz2jEqBSaHNQcy2IBSDEHyXNOQZlGuj93c/preview#)
#
# ---
# First, let's start Ray…
# +
import logging
import ray
ray.init(
ignore_reinit_error=True,
logging_level=logging.ERROR,
)
# -
# ## Remote Objects example
# To start, we'll define a remote object...
# +
# %%time
num_list = [ 23, 42, 93 ]
obj_ref = ray.put(num_list)
obj_ref
# -
# Then retrieve the value of this object reference. This follows an object resolution protocol.
#
# <img src="images/object_resolution.png" height=350, width=650>
#
# Small objects are resolved by copying them directly from the owner’s **in-process store**. For example, if the owner calls `ray.get`, the system looks up and deserializes the value from the local in-process store. If the owner submits a dependent task, it inlines the object by copying the value directly into the task description. Note that these objects are local to the owner process: if a borrower attempts to resolve the value, the object is promoted to shared memory, where it can be retrieved through the distributed object resolution protocol described next.
#
# Resolving a large object. The object x is initially created on Node 2, e.g., because the task that returned the value ran on that node. This shows the steps when the owner (the caller of the task) calls `ray.get`:
#
# 1) Lookup object’s locations at the owner.
# 2) Select a location and send a request for a copy of the object.
# 3) Receive the object.
#
#
# +
# %%time
ray.get(obj_ref)
# -
# Let's combine use of a remote function with a remote object, to illustrate *composable futures*:
@ray.remote
def my_function (num_list):
    """Remote task that returns the sum of *num_list*.

    When invoked as ``my_function.remote(obj_ref)``, Ray resolves the
    ObjectRef argument to the underlying list before the task runs.
    """
    return sum(num_list)
# In other words, the remote function `myfunction()` will sum the list of integers in the remote object `num_list`:
# +
# %%time
calc_ref = my_function.remote(obj_ref)
# +
# %%time
ray.get(calc_ref)
# -
# You can gather the values of multiple object references in parallel using collections:
# +
# %%time
ray.get([ray.put(i) for i in range(3)])
# -
# Now let's set a timeout to return early from attempted access of a remote object that is blocking for too long...
# +
import time
@ray.remote
def long_running_function ():
    """Remote task that sleeps 10 s and then returns 42.

    Used below to demonstrate ``ray.get(..., timeout=6)`` raising
    ``GetTimeoutError`` before the task completes.
    """
    time.sleep(10)
    return 42
# +
# %%time
from ray.exceptions import GetTimeoutError
obj_ref = long_running_function.remote()
try:
ray.get(obj_ref, timeout=6)
except GetTimeoutError:
print("`get` timed out")
# -
# Then shutdown Ray
ray.shutdown()
# ## References
#
# [Ray Architecture Reference](https://docs.google.com/document/d/1lAy0Owi-vPz2jEqBSaHNQcy2IBSDEHyXNOQZlGuj93c/preview#)
# Ray 1.x Architecture Technical Paper
#
# [Ray Internals: A peek at ray,get](https://www.youtube.com/watch?v=a1kNnQu6vGw)
#
# [Ray Internals: Object management with Ownership Model](https://www.anyscale.com/events/2021/06/22/ray-internals-object-management-with-the-ownership-model)
| ex_02_remo_objs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/joaochenriques/MCTE_2022/blob/main/ChannelFlows/DiskActuator/SensitivityAnalysis_V02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="lCq1WmfN0gmp"
import numpy as np
import matplotlib.pyplot as mpl
# + id="QTeLN4C7QsSU"
import pathlib
if not pathlib.Path("mpl_utils.py").exists():
# !curl -O https://raw.githubusercontent.com/joaochenriques/MCTE_2022/main/libs/mpl_utils.py &> /dev/null
import mpl_utils as mut
mut.config_plots()
# %config InlineBackend.figure_formats = ['svg']
mpl.rcParams["figure.figsize"] = (14, 3.5) # (12.5,3)
# + [markdown] id="vUDelQzgsuZA"
# <img src="https://raw.githubusercontent.com/joaochenriques/MCTE_2022/main/ChannelFlows/DiskActuator/Figures/sub_domains.svg" width="500px" style="display:inline">
# + id="fsfeFWtUxhLK"
def CardanoRoots( aa, bb ):
    """Solve a depressed cubic with Cardano's formula.

    Returns the pair ``(x2, x3)`` with ``x2 <= x3`` of real roots of

        x**3 - 2*aa*x + 2*bb = 0

    (the third root ``x1`` is checked to be non-positive and discarded).
    The computation runs through complex arithmetic; imaginary parts that
    are only floating-point noise are stripped before the reality checks.

    See: https://www.shsu.edu/kws006/professional/Concepts_files/SolvingCubics.pdf
    """
    # Coefficients of the equivalent monic cubic t^3 + P*t + Q = 0 (t = -x).
    P = -2.0*aa
    Q = -2.0*bb
    # Discriminant-like quantity: Δ < 0 means three distinct real roots.
    Δ = (P/3.0)**3 + (Q/2)**2
    if Δ < 0.0: Δ = Δ + 0J  # force the complex branch of sqrt below
    β = ( -Q/2.0 - np.sqrt(Δ) )**(1.0/3.0)
    α = P/(3.0*β)
    # Primitive cube root of unity, used to rotate between the three roots.
    ω = ( -1.0 + np.sqrt(3.0)*1J) / 2.0
    x1 = α - β
    x2 = (α*ω - β)*ω
    x3 = (α - β*ω)*ω
    # BUG FIX: the original compared `np.imag(x) < 1E-15`, which also strips
    # large *negative* imaginary parts and would silently corrupt a genuinely
    # complex root before the asserts. Compare the magnitude instead.
    if abs(np.imag(x1)) < 1E-15: x1 = np.real( x1 )
    if abs(np.imag(x2)) < 1E-15: x2 = np.real( x2 )
    if abs(np.imag(x3)) < 1E-15: x3 = np.real( x3 )
    # Applies only to the solution family of this notebook: all three roots
    # real, the discarded root non-positive, and the returned pair ordered.
    assert( np.imag( x1 ) == 0 )
    assert( np.imag( x2 ) == 0 )
    assert( np.imag( x3 ) == 0 )
    assert( x1 <= 0.0 )
    assert( x2 <= x3 )
    return (x2, x3)
# + id="Nfek_RyrkwI2" colab={"base_uri": "https://localhost:8080/", "height": 460} outputId="ea5aa676-bf22-4382-ccca-c03c80d342e0"
# Sweep the bypass Froude number Fr4b for a single blockage B and upstream
# Froude number Fr1; plot power coefficient C_P vs thrust coefficient C_T
# (left) and the sectional Froude numbers vs C_T (right).
# NOTE(review): symbols (ζ: depths, Fr: Froude numbers, B: blockage)
# presumably follow the actuator-disc sub-domain figure above -- confirm
# against the accompanying derivation.
fig, (ax1, ax2) = mpl.subplots(1,2, figsize=(12, 4.5) )
fig.subplots_adjust( wspace = 0.17 )

# Accumulators: one value per admissible Fr4b.
C_T_lst = []
C_P_lst = []
Fr2t_lst = []
Fr4b_lst = []
Fr4t_lst = []
Fr5_lst = []

B = 0.1    # blockage ratio
Fr1 = 0.1  # upstream Froude number

for Fr4b in np.linspace( Fr1*1.001, Fr1*2.6, 1000 ):
    # Section-4 free-surface height.
    ζ4 = (1/2.)*Fr1**2 - 1/2.*Fr4b**2 + 1
    C1 = Fr1 - Fr4b*ζ4
    C2 = B**2*Fr4b**2 - 2*B*Fr1**2 + 2*B*Fr1*Fr4b \
       + B*ζ4**2 - B + Fr1**2 - 2*Fr1*Fr4b*ζ4 + Fr4b**2*ζ4**2
    # Core-stream Froude number at section 4 (positive branch).
    Fr4t = ( C1 + np.sqrt(C2) ) / B
    # Split of the section-4 depth between core (t) and bypass (b) streams.
    ζ4t = ( Fr4b*ζ4 - Fr1 ) / ( Fr4b - Fr4t )
    ζ4b = ζ4 - ζ4t
    Fr2t = Fr4t*ζ4t/B
    # Thrust and power coefficients.
    C_T = (Fr4b**2 - Fr4t**2)/Fr1**2
    C_P = C_T*Fr2t/Fr1
    # Far-wake state (section 5): depth ζ5 from the cubic solved by Cardano.
    mb = Fr4b*ζ4b + Fr4t*ζ4t
    bb = mb**2
    aa = (Fr4b**2*ζ4b + Fr4t**2*ζ4t + 1/2*ζ4**2)
    ζs = CardanoRoots( aa, bb )
    ζ5 = ζs[1]  # larger of the two returned roots
    Fr5 = mb / ζ5
    if C_P <= 0.0: break  # past the physically meaningful range -- stop the sweep
    C_P_lst.append( C_P )
    C_T_lst.append( C_T )
    Fr2t_lst.append( Fr2t )
    Fr4t_lst.append( Fr4t )
    Fr4b_lst.append( Fr4b )
    Fr5_lst.append( Fr5 )

# Left panel: C_P vs C_T.
ax1.set_title( "B=%.2f" % B )
ax1.plot( C_T_lst, C_P_lst, label="$\mathrm{Fr}_1=%.2f$" % Fr1 )
ax1.set_ylabel( "$C_P$" )
ax1.grid()
ax1.legend(loc="lower center");
#ax1.set_xticklabels( [] )
ax1.set_xlabel( "$C_T$" )

# Right panel: sectional Froude numbers vs C_T.
ax2.set_axisbelow(True)
ax2.set_title( "B=%.2f" % B )
ax2.plot( C_T_lst, Fr2t_lst, label="$\mathrm{Fr}_\mathrm{2t}$" )
ax2.plot( C_T_lst, Fr4t_lst, label="$\mathrm{Fr}_\mathrm{4t}$" )
ax2.plot( C_T_lst, Fr4b_lst, label="$\mathrm{Fr}_\mathrm{4b}$" )
ax2.plot( C_T_lst, Fr5_lst, label="$\mathrm{Fr}_{5}$" )
ax2.set_xlabel( "$C_T$" )
ax2.set_ylabel( "$\mathrm{Fr}$" )
ax2.grid()
ax2.legend(loc="lower left")
ax2.set_axisbelow(True)

mpl.savefig('ChannelFlowLimits_Ex.pdf', bbox_inches='tight', pad_inches=0.02);
# + colab={"base_uri": "https://localhost:8080/", "height": 480} id="iczMHC2UQNUO" outputId="72870083-f808-451a-ced5-dd49f9b9ff5f"
# Same sweep as above, repeated for several upstream Froude numbers Fr1 at a
# fixed blockage B, to show the sensitivity of C_P(C_T) and of the sectional
# Froude numbers to Fr1.
fig, (ax1, ax2) = mpl.subplots(1,2, figsize=(12, 4.5) )
fig.subplots_adjust( wspace = 0.17 )

B = 0.15  # blockage ratio (fixed)
for Fr1 in ( 0.05, 0.10, 0.20, 0.30, 0.40 ):
    # Fresh accumulators for each Fr1 curve.
    C_T_lst = []
    C_P_lst = []
    Fr2t_lst = []
    Fr4b_lst = []
    Fr4t_lst = []
    Fr5_lst = []
    for Fr4b in np.linspace( Fr1*1.001, Fr1*2, 200 ):
        ζ4 = (1/2.)*Fr1**2 - 1/2.*Fr4b**2 + 1
        C1 = Fr1 - Fr4b*ζ4
        C2 = B**2*Fr4b**2 - 2*B*Fr1**2 + 2*B*Fr1*Fr4b \
           + B*ζ4**2 - B + Fr1**2 - 2*Fr1*Fr4b*ζ4 + Fr4b**2*ζ4**2
        Fr4t = ( C1 + np.sqrt(C2) ) / B
        ζ4t = ( Fr4b*ζ4 - Fr1 ) / ( Fr4b - Fr4t )
        ζ4b = ζ4 - ζ4t
        Fr2t = Fr4t*ζ4t/B
        C_T = (Fr4b**2 - Fr4t**2)/Fr1**2
        C_P = C_T*Fr2t/Fr1
        # Far-wake depth ζ5: larger root of the Cardano-solved cubic.
        mb = Fr4b*ζ4b + Fr4t*ζ4t
        bb = mb**2
        aa = (Fr4b**2*ζ4b + Fr4t**2*ζ4t + 1/2*ζ4**2)
        ζs = CardanoRoots( aa, bb )
        ζ5 = ζs[1]
        Fr5 = mb / ζ5
        if C_P <= 0.0: break  # stop once past the physical range
        C_P_lst.append( C_P )
        C_T_lst.append( C_T )
        Fr2t_lst.append( Fr2t )
        Fr4t_lst.append( Fr4t )
        Fr4b_lst.append( Fr4b )
        Fr5_lst.append( Fr5 )
    ax1.plot( C_T_lst, C_P_lst, label="$\mathrm{Fr}_1=%.2f$" % Fr1 )
    # Froude curves only for two representative Fr1 values so the right-hand
    # panel stays readable.
    if Fr1 == 0.1 or Fr1 == 0.4:
        # Invisible line: acts as a bold group header in the legend.
        ax2.plot(np.NaN, np.NaN, '-', color='none', label="$\\bf \mathrm{\\bf Fr}_1=%.2f$" % Fr1 )
        ax2.plot( C_T_lst, Fr2t_lst, dashes=(2,1), label="$\mathrm{Fr}_\mathrm{2t}$" )
        ax2.plot( C_T_lst, Fr4t_lst, label="$\mathrm{Fr}_{4t}$" )
        ax2.plot( C_T_lst, Fr4b_lst, dashes=(5,1), label="$\mathrm{Fr}_\mathrm{4b}$" )
        ax2.plot( C_T_lst, Fr5_lst, dashes=(5,1), label="$\mathrm{Fr}_\mathrm{5}$" )
        if Fr1 == 0.1: # empty entry separates the two legend groups
            ax2.plot(np.NaN, np.NaN, '-', color='none', label=' ')

ax1.set_ylabel( "$C_P$" )
ax1.grid()
ax1.set_title( "B=%.2f" % B );
ax1.set_xlabel( "$C_T$" )
ax1.legend(loc="upper right",fontsize=12 )
ax1.set_xlim( (0,4) )
ax1.set_ylim( (0,1) )
ax2.set_title( "B=%.2f" % B )
ax2.set_xlabel( "$C_T$" )
ax2.set_ylabel( "$\mathrm{Fr}$" )
ax2.grid()
# Legend placed outside the axes since it carries two curve groups.
ax2.legend( bbox_to_anchor=(1.05, 1), loc=2,fontsize=12, borderaxespad=0.0,handlelength=2,numpoints=1,labelspacing=0.15 )
ax2.set_xlim( (0,4) )
ax2.set_ylim( (0,0.8) )
mpl.savefig('Sensitivity_B%4.2f.pdf' % B, bbox_inches='tight', pad_inches=0.02);
# + id="4kuf1z8tVMmX" colab={"base_uri": "https://localhost:8080/", "height": 460} outputId="69a71b53-ff21-40cf-fa52-e78d2e6c6012"
# Same sweep once more, now varying the blockage B at a fixed upstream
# Froude number Fr1.
fig, (ax1, ax2) = mpl.subplots(1,2, figsize=(12, 4.5) )
fig.subplots_adjust( wspace = 0.17 )

Fr1 = 0.10  # upstream Froude number (fixed)
for B in ( 0.05, 0.10, 0.15 ):
    # Fresh accumulators for each B curve.
    C_T_lst = []
    C_P_lst = []
    Fr2t_lst = []
    Fr4b_lst = []
    Fr4t_lst = []
    Fr5_lst = []
    for Fr4b in np.linspace( Fr1*1.001, Fr1*2, 200 ):
        ζ4 = (1/2.)*Fr1**2 - 1/2.*Fr4b**2 + 1
        C1 = Fr1 - Fr4b*ζ4
        C2 = B**2*Fr4b**2 - 2*B*Fr1**2 + 2*B*Fr1*Fr4b \
           + B*ζ4**2 - B + Fr1**2 - 2*Fr1*Fr4b*ζ4 + Fr4b**2*ζ4**2
        Fr4t = ( C1 + np.sqrt(C2) ) / B
        ζ4t = ( Fr4b*ζ4 - Fr1 ) / ( Fr4b - Fr4t )
        ζ4b = ζ4 - ζ4t
        Fr2t = Fr4t*ζ4t/B
        C_T = (Fr4b**2 - Fr4t**2)/Fr1**2
        C_P = C_T*Fr2t/Fr1
        mb = Fr4b*ζ4b + Fr4t*ζ4t
        bb = mb**2
        aa = (Fr4b**2*ζ4b + Fr4t**2*ζ4t + 1/2*ζ4**2)
        ζs = CardanoRoots( aa, bb )
        ζ5 = ζs[1]
        Fr5 = mb / ζ5  # accumulated but not plotted in this figure
        if C_P <= 0.0: break  # stop once past the physical range
        C_P_lst.append( C_P )
        C_T_lst.append( C_T )
        Fr2t_lst.append( Fr2t )
        Fr4t_lst.append( Fr4t )
        Fr4b_lst.append( Fr4b )
        Fr5_lst.append( Fr5 )
    ax1.plot( C_T_lst, C_P_lst, label="B=%.2f" % B )
    # Invisible line: bold group header (one per B) in the ax2 legend.
    ax2.plot(np.NaN, np.NaN, '-', color='none', label="$\\bf {\\it\\bf B}=%.2f$" % B)
    ax2.plot( C_T_lst, Fr2t_lst, dashes=(2,1), label="$\mathrm{Fr}_{2t}$" )
    ax2.plot( C_T_lst, Fr4t_lst, label="$\mathrm{Fr}_{4t}$" )
    ax2.plot( C_T_lst, Fr4b_lst, dashes=(5,1), label="$\mathrm{Fr}_{4b}$" )
    if B != 0.15: # empty entry in the legend (skipped after the last group)
        ax2.plot(np.NaN, np.NaN, '-', color='none', label=" ")

ax1.set_title( r"$\mathrm{Fr}_1=%.2f$" % Fr1 )
ax1.set_ylabel( "$C_P$" )
ax1.set_xlabel( "$C_T$" )
ax1.grid()
ax1.legend(loc="upper right",fontsize=12 )
ax1.set_xlim( (0,4) )
ax1.set_ylim( (0,1) )
ax2.set_title( r"$\mathrm{Fr}_1=%.2f$" % Fr1 )
ax2.set_xlabel( "$C_T$" )
ax2.set_ylabel( "$\mathrm{Fr}$" )
ax2.grid()
# Legend placed outside the axes since it carries three curve groups.
ax2.legend( bbox_to_anchor=(1.05, 1), loc=2,fontsize=12, borderaxespad=0.0,handlelength=2,numpoints=1,labelspacing=0.15 )
ax2.set_xlim( (0,4) )
ax2.set_ylim( (0,0.8) )
mpl.savefig('Sensitivity_Fr%4.2f.pdf' % Fr1, bbox_inches='tight', pad_inches=0.02);
# + id="t3qrDQ3S7K8j"
| ChannelFlows/DiskActuator/SensitivityAnalysis_V02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 9.2
# language: sagemath
# metadata:
# cocalc:
# description: Open-source mathematical software system
# priority: 10
# url: https://www.sagemath.org/
# name: sage-9.2
# resource_dir: /ext/jupyter/kernels/sage-9.2
# ---
# # Lecture 23: High Performance Computing and Cython
#
# ### Please note: This lecture will be recorded and made available for viewing online. If you do not wish to be recorded, please adjust your camera settings accordingly.
#
# # Reminders/Announcements:
# - Assignment 8 is due tomorrow at 8pm Pacific (last one!)
# - Please do CAPES!
# - Final Project "examples" from last year available
# - Warning: I make no claims that these are "perfect" projects (in particular, they are missing participation checks, etc.) but I think the presentations are quite good
# - Some final project remarks:
# - If you have tried getting in touch with your groupmates and have had no response, please email me
# - I recommend *making a plan* with your groupmates by the end of next week (things will be collected on Wednesday of finals week, so presentations should occur *before* then)
# ## Python: Good and Bad
#
# At the very beginning of the class we mentioned some benefits of Python: it is a very flexible language, it has syntax which is easy to read, etc. etc. Some of this comes at a cost though! It is *very* slow. This comes from two main factors:
# - The code is *interpreted* instead of *compiled*
# - The code is *dynamically typed*.
#
# Being "interpreted" is the process of translating your typed out syntax into something that a computer can execute (bytecode). This is similar to "compiling" but interpreted code is platform independent, whereas compilation is specific to your local machine. The following StackExchange post: https://stackoverflow.com/questions/1694402/why-are-interpreted-languages-slow has a good analogy:
#
# > If you can talk in your native language to someone, that would generally work faster than having an interpreter having to translate your language into some other language for the listener to understand.
#
# In other words, two native Spanish speakers will communicate to each other more quickly than a Spanish speaker talking to an English speaker using an interpreter.
#
# Being "dynamically typed" means that variables in Python can be *anything* you want them to be (essentially).
# Demo: the same name x can be rebound to values of entirely different types.
#Integer
x = 5
print(x,type(x))
#List
x = [5]
print(x,type(x))
#String
x = '5'
print(x,type(x))
#Function (This is a stupid example that I just really like, but in general function assignments to a variable *CAN BE* advantageous)
x = print
x(x,type(x))  # x *is* print here, so this prints the print function and its type
#Real number
x = 5.0
print(x,type(x))
# Dynamic typing allows a lot of flexibility in your code, but that comes at a price. Think about the following code blocks:
# Demo: the + operator is overloaded -- its meaning depends on the operand types.
x = 5
y = 7
print(x+y)    # integer addition -> 12
x = [5]
y = [7]
print(x+y)    # list concatenation -> [5, 7]
x = '5'
y = '7'
print(x+y)    # string concatenation -> '57'
# The "instructions" of addition here are the *exact same*, but the addition operator *works differently* in each circumstance. This was called *overloading*. Because the machine that executes your code does not know what type your variables are, to execute `x+y` it has to:
# - first look up the type of x
# - then look up the type of y
# - ensure that these can be "added" in a consistent way
# - potentially convert x or y to allow for this
# - figure out how to add them
# - add them
# Does this really have a huge impact on runtime? Well, let's compare Python to...
#
# ## Cython
#
# From Wikipedia: https://en.wikipedia.org/wiki/Cython, Cython is "a programming language that aims to be a superset of the Python programming language, designed to give C-like performance with code that is written mostly in Python with optional additional C-inspired syntax."
#
# It is a *compiled* language which *looks* similar to Python but *performs* more closely to C.
#
# In the Sage kernel, we will actually be able to compare Python and Cython *directly* in the same file.
#
# Let's add the numbers from 1 to 1000000 in both languages. First in Python:
# Time a pure-Python sum of one million terms; this is the baseline that the
# Cython cell below is compared against.
import time
t = time.time() # Current time in seconds
print('hello world!')
m = sum(1 for _ in range(1000000))
print(time.time()-t) # Elapsed time in seconds
# Now we will load Cython (made possible by working in Sage)
%load_ext Cython
# And we will do the same calculation in Cython using %%cython
# + language="cython"
#
# import time
# t = time.time()
# print('hello world')
# m = sum(1 for _ in range(1000000))
# print(time.time()-t)
# -
# Note that there was a considerable *lag* in the execution of the above cell; this is due to the compilation. But this is a one time cost. In general once you compile something you can run it again directly as much as you want.
#
# The key takeaway is that the internal timing itself was *much faster* (depending on the specific run, I obtained speedups of ~2x - 10x). And this is only the very very beginning.
# ## A Word of Warning
#
# In your day to day programming life, you *may not need* the speedups that languages like Cython or C offer. In particular, a *very important* thing to take into account is the *human factor* of programming. If optimizing your code takes 10 hours of frustration, and you only shave off 10 seconds on the runtime, that may not be worth it.
#
# ## *********** Participation Check ********************
# Run the command `xkcd(1205)` in the code cell below. If you do a task once a week for 5 years and can shave off 30 seconds on doing that task, how much time is worth spending on optimizing that task?
xkcd(1205)
# Answer here:
# ## ********************************************************
# A quote attributed to Donald Knuth: "Premature optimization is the root of all evil."
#
# Python *isn't slow/bad at everything*.
# - Iterators are great at saving on memory
# - Some Python functions essentially call underlying C code directly (for instance, the `pow` function) and are already quite fast
# - Numpy arrays are designed to be efficient already
# - Certain parts of libraries (such as Sage) already use Cython for speedups
#
# In the remainder of this lecture we will try to show *when* such optimizations can be fruitful, and how to achieve that.
# ## Static Typing
#
# The main/easiest speedup that Cython offers is static typing. Let's start with the following, which is "just Python" running in the Cython environment.
# + language="cython"
# import time
# t = time.time()
# a = 0
# for i in range(10000):
# a += i
# print(a)
# print(time.time()-t)
# -
# Note that the variable `a` is dynamically typed. Let's add a short modification:
# + language="cython"
# import time
# t = time.time()
# cdef int a = 0
# for i in range(10000):
# a += i
# print(a)
# print(time.time()-t)
# -
# The extra `cdef int` tells Cython "I only want `a` to be an integer." This allows Cython to speedup interactions with `a` at the cost of some flexibility:
# + language="cython"
# import time
# t = time.time()
# cdef int a = 0
# a = 'string'
# print(a)
# print(time.time()-t)
# -
# In general, if you want a "quick" way to try and get faster runtime, you can try to statically type your variables.
# ## Annotating
#
# Ok, what if you've tried a quick pass at making your code more efficient, and want other ideas? Cython allows an `annotate` tag which can help:
# + magic_args="--annotate" language="cython"
# import time
# t = time.time()
# cdef int a = 0
# for i in range(10000):
# a += i
# print(a)
# print(time.time()-t)
# -
# A general rule is that "more yellow" is "more slow." It's not always possible to do something about this, but in this short example there *is* a big thing we can do:
# + magic_args="--annotate" language="cython"
# import time
# t = time.time()
# cdef int a = 0
# cdef int i
# for i in range(10000):
# a += i
# print(a)
# print(time.time()-t)
# -
# Note that we didn't have to assign the variable `i` to anything; all we had to do is tell Cython that `i` would be an integer.
#
# Another warning: We're not only telling Cython that `i` is an integer; we're declaring it to be a *C integer*. In general, using `cdef` creates *static C typed* variables. A result of this is that they are subject to C restrictions. For instance; an integer in Python cannot "overflow" (get too big) but a C integer can:
# + language="cython"
#
# cdef int a = 1
# for _ in range(24):
# a *= 4
# print(a)
# -
# ## Function Calls
#
# The use of `cdef` can extend to function calls. The following code approximates the integral of `f(x) = x^2 - x` on an interval `[a,b]`
# +
# Sage version (with Rationals)
def f(x):
return x ** 2 - x
def integrate_f(a, b, N):
s = 0
dx = (b - a) / N
for i in range(N):
s += f(a + i * dx)
return s * dx
import time
t = time.time()
m = integrate_f(0, 1, 1000000)
print(m)
print(time.time()-t)
# +
# Python version
def f(x):
return x ** 2 - x
def integrate_f(a, b, N):
s = 0
dx = (b - a) / N
for i in range(N):
s += f(a + i * dx)
return s * dx
import time
t = time.time()
m = integrate_f(int(0), int(1), int(1000000))
print(m)
print(time.time()-t) #Good improvement...
# + language="cython"
#
# #Python version, in cython
# def f(x):
# return x ** 2 - x
#
# def integrate_f(a, b, N):
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# import time
# t = time.time()
# m = integrate_f(0, 1, 1000000)
# print(m)
# print(time.time()-t) # Some more improvement...
# -
# ## ***** Participation Check *****************
# Look at the code cell above. What variables are we using that could have a `cdef` applied to them?
#
# List them here:
#
# ## ****************************************
# + language="cython"
#
# #Cython with basic static typing
# def f(double x):
# return x ** 2 - x
#
# def integrate_f(double a,double b,int N):
# cdef int i
# cdef double s
# cdef double dx
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# import time
# t = time.time()
# m = integrate_f(0, 1, 1000000)
# print(m)
# print(time.time()-t) # Some more improvement...
# + language="cython"
#
# #Cython with static typing on functions as well
# cdef double f(double x):
# return x ** 2 - x
#
# cdef double integrate_f(double a, double b, int N):
# cdef int i
# cdef double s, dx
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# import time
# t = time.time()
# m = integrate_f(0, 1, 1000000)
# print(m)
# print(time.time()-t) # Way better!
# -
# In testing during prep for the lecture, I got ~1500x speedup from Sage and ~100x speedup from Python!
# + magic_args="--annotate" language="cython"
#
# def f(x):
# return x ** 2 - x
#
# def integrate_f(a, b, N):
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# import time
# t = time.time()
# m = integrate_f(0, 1, 1000000)
# print(m)
# print(time.time()-t)
# + magic_args="--annotate" language="cython"
#
# cdef double f(double x):
# return x ** 2 - x
#
# cdef double integrate_f(double a, double b, int N):
# cdef int i
# cdef double s, dx
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# import time
# t = time.time()
# m = integrate_f(0, 1, 1000000)
# print(m)
# print(time.time()-t)
# -
# ## A Catch
#
# A problem with `cdef` is that it *only* defines it with respect to Cython. You will not be able to call it "in Python"
# + language="cython"
#
# cdef double f(double x):
# return x ** 2 - x
#
# cdef c_integrate_f(double a, double b, int N):
# cdef int i
# cdef double s, dx
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
#
# print(c_integrate_f(0,1,1000))
# -
#This should fail: c_integrate_f was declared with cdef in the Cython cell
#above, so the name is not exported to Python (NameError here).
print(c_integrate_f(0,1,1000))
# One option would be to write separate instances for Cython and Python, but that's just messy. Instead you can use `cpdef`; this creates two versions of the function. A "fast C version" and a "slow Python version"
# + language="cython"
#
# cdef double f(double x):
# return x ** 2 - x
#
# cpdef cp_integrate_f(double a, double b, int N):
# cdef int i
# cdef double s, dx
# s = 0
# dx = (b - a) / N
# for i in range(N):
# s += f(a + i * dx)
# return s * dx
# -
print(cp_integrate_f(0,1,1000))
# ## Bottlenecks
#
# A key use of optimization is to reduce "bottlenecks"
#
# 
#
# For this road, you shouldn't bother trying to make the highway paved better on either end; you should focus on adding more lanes into the middle section.
#
# Trying to find the bottlenecks in your code is called "profiling." Cython has a tutorial here: https://cython.readthedocs.io/en/latest/src/tutorial/profiling_tutorial.html
#
# There are many useful tools for this. An old-school way is simply to add print statements in the middle of your functions to allow for granular timing; this is a crude technique, though, and it does not scale well. Jupyter provides a much nicer tool: the `prun` command. We will use it to approximate $\pi$ via the following formula:
#
# $$
# \sum_{k=1}^\infty\frac{1}{k^2} = \frac{\pi^2}{6}.
# $$
#
# +
def recip_square(i):
    """Return the reciprocal of the square of *i* as a float."""
    return 1. / i ** 2

def approx_pi(n=10000000):
    """Approximate pi from the Basel series: sum 1/k^2 for k=1..n equals
    pi^2/6 in the limit, so pi ~= sqrt(6 * partial_sum)."""
    partial = 0.
    k = 1
    while k <= n:
        partial += recip_square(k)
        k += 1
    return (6 * partial) ** .5
# -
%time approx_pi()
prun approx_pi()
# We can see that a lot of time is spent computing reciprocal squares and "creating real numbers"! Let's start by statically typing some things:
# + language="cython"
#
# # cython: profile=True
#
# def c_recip_square(int i):
# return 1. / i ** 2
#
# def c_approx_pi(int n=10000000):
# cdef double val = 0.
# cdef int k
# for k in range(1, n + 1):
# val += c_recip_square(k)
# return (6 * val) ** .5
# -
prun c_approx_pi()
# Can we do better?
# + language="cython"
#
# # cython: profile=True
#
# cdef double c2_recip_square(int i):
#
# return 1. / (i*i)
#
# def c2_approx_pi(int n=10000000):
# cdef double val = 0.
# cdef int k
# for k in range(1, n+1):
# val += c2_recip_square(k)
# return (6 * val) ** .5
# -
prun c2_approx_pi()
# Be careful!!!
# NOTE(review): in the Cython cell above, i*i is a C int product; for large i
# it exceeds the C int range, corrupting the result -- the next cell fixes
# this by converting to double before squaring. Confirm against the output.
c2_approx_pi()
# + language="cython"
#
# # cython: profile=True
#
# cdef double c2_recip_square(int i):
# cdef double j = i
# return 1. / (j*j)
#
# def c2_approx_pi(int n=10000000):
# cdef double val = 0.
# cdef int k
# for k in range(1, n+1):
# val += c2_recip_square(k)
# return (6 * val) ** .5
# -
c2_approx_pi()
prun c2_approx_pi()
# This improvement looks good, but not *that good*. That's because a lot of overhead is introduced from the profiler. If we just time it:
%time c2_approx_pi()
# We get a *much* better result!
# ## One More Profiling Technique: lprun
#
# You can also profile *line by line* if you are really serious about this (again; not always necessary for you to get this serious). Let's go back to sets vs lists!
def sum1(S, bad):
    """Sum the elements of S that are not contained in ``bad``.

    The membership test in ``bad`` dominates the cost: O(1) per element for
    a set, O(len(bad)) for a list -- which is the point of the profiling
    demo in this lecture.
    """
    j = 0
    for i in S:
        if i not in bad:
            j += i
    return j  # previously computed but discarded; returning it makes the function usable
%load_ext line_profiler
lprun -f sum1 sum1({i for i in range(100000)}, {i**2 for i in range(10000)})
lprun -f sum1 sum1([i for i in range(100000)], [i**2 for i in range(10000)])
# What we see from the above examples is that lookup in sets is *much much faster* than lookup in lists. So maybe you want to use this knowledge. Be careful! If you do this carelessly, you'll do worse!
def sum2(S, bad):
    """Like sum1, but rebuilds ``set(bad)`` on every loop iteration.

    The conversion inside the loop is *intentional*: it is the anti-pattern
    this lecture demonstrates (the O(len(bad)) set construction is paid once
    per element of S, making this slower than plain list lookup).
    """
    j = 0
    for i in S:
        if i not in set(bad):  # deliberately inefficient -- see lecture text
            j += i
    return j  # previously computed but discarded
lprun -f sum2 sum2([i for i in range(100000)], [i**2 for i in range(10000)])
# What's going on here!?!?!
def sum3(S, bad):
    """Sum the elements of S not in ``bad``, converting ``bad`` to a set once.

    This is the corrected version of sum2: the set is built a single time
    before the loop, so each membership test is O(1).
    """
    j = 0
    T = set(bad)
    for i in S:
        if i not in T:
            j += i
    return j  # previously computed but discarded
lprun -f sum3 sum3([i for i in range(100000)], [i**2 for i in range(10000)])
# Muuuuch better
# ## What's Next?
#
# Profiling is a subtle art, and speeding up your code can be achieved in *many* different ways which we won't get into.
#
# A common one that you may want to look into is *parallelization*. Suppose I chose 5 of you and gave your group 5 lists of numbers. Your task is to give me the sum of all the numbers in those 5 lists. You could either:
# - pick an unlucky member of your group to add them all, while four of you gossip about flowers or something
# - each take one of the lists and add the numbers in that list. Then get the final result by adding your individual results.
#
# There is a clear winner here (Option A, I hate adding and would prefer one of you to do it for me)
#
# This is the idea of "parallelization." Split your job into multiple "threads" and have a worker for each thread. Then combine your results from each worker in the end.
#
# True parallelization is not directly allowed by Python, as it uses a *Global Interpreter Lock*:https://en.wikipedia.org/wiki/Global_interpreter_lock. There are various reasons for this; chief among them are that often processes are not "thread safe" unless you are careful.
#
# You *can* get around this using Cython if you know what you are doing. Python code which utilizes Cython can allow parallel execution using OpenMP. I don't know nearly enough about this to teach you anything useful, but here are the docs if you are interested: https://cython.readthedocs.io/en/latest/src/userguide/parallelism.html
#
# ## Julia
#
# Another thing you may want to explore is the Julia programming language: https://julialang.org/ . It is supposed to be a "Python ish" language which comes with many of these speedups builtin, but I know *nothing about it*. Word on the street is that Math157 *may* transition to Julia one day, but who knows.
| Lectures/Lecture23/Lecture23_Mar03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Spyder)
# language: python3
# name: python3
# ---
# Import needed libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics
# +
# Useful functions
def get_score(X_train, y_train, X_test, y_test, Classifier, options=None):
    """Fit ``Classifier(**options)`` on the training split and return its
    accuracy on the test split.

    options: dict of keyword arguments forwarded to the classifier
    constructor (defaults to no arguments).
    """
    if options is None:  # avoid a shared mutable default argument
        options = {}
    model = Classifier(**options)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return metrics.accuracy_score(y_test, y_pred)
def get_cv_score(X, y, Classifier, /, options=None, *, cv=10, scoring='accuracy'):
    """Return the mean cross-validated score of ``Classifier(**options)``.

    options: dict of keyword arguments forwarded to the classifier
    constructor (defaults to no arguments).
    cv / scoring: forwarded to sklearn's ``cross_val_score``.
    """
    if options is None:  # avoid a shared mutable default argument
        options = {}
    scores = cross_val_score(Classifier(**options), X, y, cv=cv, scoring=scoring)
    return scores.mean()
# -
# Read data
# 'Date' becomes the index; the wind-direction columns are categorical and
# are dropped before modelling.
df = pd.read_csv('weather.csv', parse_dates=True, index_col='Date')
data = df.drop(columns=["WindGustDir", "WindDir9am", "WindDir3pm"], axis=1)

# Remove columns with too many NaNs
# Find number of missing values in each column
na_nums = np.array([data[col].isna().sum() for col in data.columns])
na_nums_gt500 = np.select([na_nums <= 500, na_nums > 500], [0, 1]) # 1 where a column has >500 NaNs
cols_na_gt500 = [col for n, col in zip(na_nums_gt500, data.columns) if n] # corresponding columns
data = data.drop(columns=cols_na_gt500, axis=1)
data_clean = data.copy().dropna()  # drop remaining rows with any NaN

# Convert categorical features to numeric
# factorize(sort=True) gives a deterministic encoding (presumably
# 'No'/'Yes' -> 0/1 -- confirm against the data).
data_clean["RainTomorrow"], _ = data_clean["RainTomorrow"].factorize(sort=True)
data_clean["RainToday"], _ = data_clean["RainToday"].factorize(sort=True)

# Split data into train and test sets
X = data_clean
y = X.pop("RainTomorrow")  # pop removes the target column from X in place
X_train, X_test, y_train, y_test = train_test_split(X, y)
# +
# K-nearest neighbors model: find best hyperparameter k and weight method.
scores_u = [] # uniform weights
scores_d = [] # weights inversely related to distance
for k in range(1, 50):
    for weight in ['uniform', 'distance']:
        # Hold-out scoring kept for reference; cross-validation is used instead.
        # score = get_score(X_train, y_train, X_test, y_test,
        #                   KNeighborsClassifier, {'n_neighbors': k, 'weights': weight})
        score = get_cv_score(X, y, KNeighborsClassifier, {'n_neighbors': k, 'weights': weight})
        scores_u.append(score) if weight == 'uniform' else scores_d.append(score)

# Plot both accuracy curves against k.
fig, ax = plt.subplots()
ax.plot(scores_u)
ax.plot(scores_d)
ax.legend(['uniform', 'distance'])
ax.set(title='Accuracy scores for different weight methods',
       xlabel='$k$ neighbors', ylabel='Accuracy', xlim=(0, 50))

# Best list index; +1 converts back to the k value (k starts at 1).
k_best_u = scores_u.index(np.max(scores_u))
k_best_d = scores_d.index(np.max(scores_d))
print(f"Optimal k={k_best_u + 1} with an accuracy of {scores_u[k_best_u]} with uniform weight "
      "method.")
print(f"Optimal k={k_best_d + 1} with an accuracy of {scores_d[k_best_d]} with distance weight "
      "method.")
# -
# Try a random forest classifier
# Shallow trees (depth and leaf count capped at 3) as a quick baseline.
# score = get_score(X_train, y_train, X_test, y_test,
#                   RandomForestClassifier, {'max_depth': 3, 'max_leaf_nodes': 3})
score = get_cv_score(X, y, RandomForestClassifier, {'max_depth': 3, 'max_leaf_nodes': 3})
score
| Rain_Kneighbors/rain_kneighbors-rf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import pyecog as pg
import os
import pandas as pd
import numpy as np
from datetime import datetime
from ipywidgets import FloatProgress # For tracking progress of power data extraction
from IPython.display import display # For tracking progress of power data extraction
###############################
### INSERT VARIABLES HERE ###
### Select NDF directory
ndf_dir = r'C:\Users\asnowball\Desktop\TESTING\NDFs'
### Select the sampling frequency in Hz
fs = 512
### Select transmitter IDs of interest (IN LIST FORMAT)
### tid_list = 'all' ... gives power outputs for all transmitters in an NDF file
tid_list = [1,2]
### Select chunk length (length in seconds of chunk over which to calculate power statistics)
### Minimum value = approx. 4 / the lower limit of the lowest frequency band
### Eg. If lowest frequency band is 1-4Hz, 4s chunk length required for reliable read-out at 1Hz
### Eg. If lowest frequency band is 0.5-1Hz, 8s chunk length required for reliable read-out at 0.5Hz
chunk_len = 4
### Select power bands of interest as list of tuples
### bands = 'full' ... outputs the full power spectral density (PSD)
bands = [(1,4),(4,8),(8,12),(12,30),(30,50),(50,70),(70,120),(120,160)]
### Select desired output format
### averaged = True ... averages power values for each NDF file
### averaged = False ... gives complete data with no averaging
averaged = True
###############################
# Power calculation function
def calculate_powerbands(flat_data, chunk_len, fs, bands):
    '''
    Compute band power (or the full PSD) over successive time chunks.

    Inputs:
      - flat_data: 1-D np.array of samples
      - chunk_len: length in seconds of the chunk over which power is computed
      - fs       : sampling frequency in Hz
      - bands    : list of (low, high) frequency tuples,
                   e.g. [(1,4),(4,8),(8,12),(12,30),(30,50),(50,70),(70,120)],
                   or 'full' to return the whole power spectral density
    Returns:
      - 2-D numpy array, one row per chunk; columns are the requested power
        bands in "y unit"^2 (or PSD bins if bands == 'full').
    Notes:
      - rfft is used for efficiency, so a scaling factor enters the power
        computation (only positive-frequency amplitudes are returned).
      - a Hanning window is applied; half a chunk is reflected onto each end
        of the signal so every window is centred on a chunk of interest and
        no samples are discarded.
    '''
    # Reflect half a chunk of samples onto both ends of the signal.
    half_chunk_dp = int((chunk_len / 2) * fs)
    padded = np.pad(flat_data, pad_width=half_chunk_dp, mode='reflect')

    # One row per chunk, then stitch consecutive rows so each analysis
    # window spans two adjacent chunks.
    n_rows = int(len(padded) / fs / chunk_len)
    rows = np.reshape(padded, newshape=(n_rows, int(chunk_len * fs)), order='C')
    stitched = np.concatenate([rows[:-1], rows[1:]], axis=1)

    # Taper each window with a Hanning window.
    taper = np.hanning(stitched.shape[1])
    tapered = np.multiply(stitched, taper)

    # FFT -> power spectral density in "y unit"^2/Hz.
    n_points = int(chunk_len * fs) * 2  # doubled by the stitching above
    amplitude = np.abs(np.fft.rfft(tapered, axis=1))
    psd = 2 * chunk_len * (2 / .375) * (amplitude / (fs * chunk_len * 2)) ** 2
    # amplitude/(fs*chunk_len*2) is the normalised rfft of the tapered data.
    # rfft only returns positive-frequency amplitudes, and the Hanning window
    # has power .375 -- the factor 2/.375 corrects for both.
    # 2*chunk_len turns the result into a density ("y unit"^2/Hz); below,
    # bin_width * 2 * chunk_len equals 1, so band powers come out in "y unit"^2.

    # If the full PSD is requested, we are done.
    if bands == 'full':
        return psd

    # Otherwise integrate the PSD over each requested band.
    freqs = np.fft.rfftfreq(n_points, 1 / fs)
    bin_width = np.diff(freqs[:3])[1]
    pb_array = np.zeros(shape=(psd.shape[0], len(bands)))
    for col, (lower_freq, upper_freq) in enumerate(bands):
        band_indexes = np.where(np.logical_and(freqs > lower_freq, freqs <= upper_freq))[0]
        pb_array[:, col] = np.sum(psd[:, band_indexes], axis=1) * bin_width
    return pb_array
# Sets power band column titles
# For the full-PSD output there is one column per rfft frequency bin (bin
# spacing = 1 / (2*chunk_len) Hz); otherwise one column per requested band.
if bands == 'full':
    band_list = []
    for x in range(0,(chunk_len*fs)+1):
        band_list.append(x/(2*chunk_len))
    band_titles = ['0 Hz']
    for x in range(len(band_list)-1):
        band_titles.append(str(band_list[x]) + ' - ' + str(band_list[x+1]) + ' Hz')
else:
    band_titles = [(str(i[0]) + ' - ' + str(i[1]) + ' Hz') for i in bands]
# Creates new folder in NDF directory for non-averaged output data
# Creates intermediary storage dictionary for averaged output data
if averaged == False:
    power_dir = os.path.join(ndf_dir,'Power Outputs')
    os.makedirs(power_dir)  # NOTE(review): raises FileExistsError if the folder already exists
if averaged == True:
    batch_data = {}  # tid -> DataFrame of per-file mean band powers

# Tracks progress of power data extraction
progress = FloatProgress(min=0, max=len(os.listdir(ndf_dir)))
display(progress)
# Iterates through NDF directory to extract power statistics and sort into desired output format
for file in os.listdir(ndf_dir):
if file[-4:] == '.ndf':
datetime = datetime.fromtimestamp(int(file[1:11]))
ndf_file = pg.NdfFile(os.path.join(ndf_dir, file))
if tid_list == 'all':
ndf_file.load(read_ids='all', auto_glitch_removal=True, auto_resampling=True, auto_filter=True)
tids_to_load = list(ndf_file.tid_set)
else:
ndf_file.load(read_ids=tid_list, auto_glitch_removal=True, auto_resampling=True, auto_filter=True)
tids_to_load = tid_list
for tid in tids_to_load:
if averaged == True:
if tid not in batch_data.keys():
batch_data[tid] = pd.DataFrame(columns = band_titles)
raw_data = ndf_file[tid]['data']
power_data = calculate_powerbands(raw_data, chunk_len, fs, bands)
power_df = pd.DataFrame(power_data, columns=band_titles)
# Packages non-averaged data into a .csv file for output
if averaged == False:
power_df.to_csv(os.path.join(power_dir, file) + ' - TRANSMITTER {}.csv'.format(tid), index=False)
if averaged == True:
power_mean = power_df.mean()
batch_data[tid] = batch_data[tid].append(power_mean, ignore_index = True)
batch_data[tid].loc[(len(batch_data[tid]))-1, "Coastline"] = sum(abs(np.diff(raw_data))) # Add a coastline column (optional)
batch_data[tid].loc[(len(batch_data[tid]))-1, "Datetime"] = datetime # Add a datetime column (recommended)
batch_data[tid].loc[(len(batch_data[tid]))-1, "Filename"] = file # Add a filename column (recommended)
progress.value += 1 # Updates progress tracker
# Packages averaged data into an excel file for output
if averaged == True:
writer = pd.ExcelWriter(os.path.join(ndf_dir, 'Power Averages.xlsx'))
for tid in batch_data.keys():
batch_data[tid].sort_values(by = "Datetime").to_excel(writer, "Transmitter {}".format(tid), index=None)
writer.save()
| documentation_notebooks/Spell 1 - Automated Power Extraction (from NDF files) (AS, Nov 2016).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas_datareader as pdr
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# %matplotlib inline
# Download daily TSLA price history from Yahoo Finance.
# NOTE(review): pdr.get_data_yahoo has been unreliable since Yahoo's API
# changes in 2022; if it fails, a yfinance-based loader is the usual fix.
df_tesla = pdr.get_data_yahoo('TSLA')
df_tesla.head()
# +
## Simple moving average (SMA)
# -
df_tesla['Open'].plot()
df_tesla['Open'].plot(figsize=(12,7))
# Rolling mean over a fixed window; .mean() aggregates each window.
# min_periods=1 makes the first rows average over however many observations
# exist instead of being NaN.
df_tesla["Open:10 days rolling"]=df_tesla['Open'].rolling(window=10,min_periods=1).mean()
df_tesla[['Open',"Open:10 days rolling"]].plot(xlim=['2020-01-01','2022-01-01'],figsize=(15,6))
df_tesla["Open:30 days rolling"]=df_tesla['Open'].rolling(window=30,min_periods=1).mean()
df_tesla["Open:50 days rolling"]=df_tesla['Open'].rolling(window=50,min_periods=1).mean()
df_tesla[['Open',"Open:10 days rolling","Open:30 days rolling","Open:50 days rolling"]].plot(xlim=['2020-01-01','2022-01-01'],figsize=(15,6))
## Expanding window: cumulative moving average (CMA)
df_tesla['Open'].expanding().mean().plot(figsize=(15,7))
# +
## EWMA: exponentially weighted moving average
# -
# Smoothing factor alpha = 0.1
df_tesla['EMA_0.1']=df_tesla['Open'].ewm(alpha=0.1,adjust=False).mean()
df_tesla[['Open','EMA_0.1']].plot(xlim=['2020-01-01','2022-01-01'],figsize=(15,6))
# BUG FIX: this column is labeled EMA_0.3 but was computed with alpha=0.2;
# use alpha=0.3 so the data matches its label.
df_tesla['EMA_0.3']=df_tesla['Open'].ewm(alpha=0.3,adjust=False).mean()
df_tesla[['Open','EMA_0.1','EMA_0.3']].plot(xlim=['2020-01-01','2022-01-01'],figsize=(15,6))
# Span form: alpha = 2/(span+1), i.e. a 5-day EWMA here.
df_tesla['EMA_5days']=df_tesla['Open'].ewm(span=5).mean()
df_tesla[['Open','EMA_0.1','EMA_5days']].plot(figsize=(15,7))
| DAY -2 Time Series Moving average,Armima etc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Scikit-Learn packages
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# Visualisation and other packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# -
# Load the handwritten-digit dataset bundled with scikit-learn.
digits = datasets.load_digits()
print(digits.keys())
print(digits.DESCR)
print(f"Shapes of Images : {digits.images.shape}")
print(f"Shapes of Data : {digits.data.shape}")
# Visual EDA: render one sample as an 8x8 grayscale image.
plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
# +
# Feature matrix and target vector.
features, labels = digits.data, digits.target
# Hold out 20% of the samples, stratified so each digit keeps its share.
feat_train, feat_test, lab_train, lab_test = train_test_split(
    features, labels, test_size=.2, random_state=42, stratify=labels)
# k-NN classifier with 7 neighbours.
model = KNeighborsClassifier(n_neighbors=7)
# +
# Fit on the training split and report held-out accuracy.
model.fit(feat_train, lab_train)
print(model.score(feat_test, lab_test))
# +
# How does accuracy depend on the number of neighbours?
neighbor_counts = np.arange(1, 9)
acc_train = np.empty(len(neighbor_counts))
acc_test = np.empty(len(neighbor_counts))
for idx, k in enumerate(neighbor_counts):
    model_k = KNeighborsClassifier(n_neighbors=k).fit(feat_train, lab_train)
    # Accuracy on the training set
    acc_train[idx] = model_k.score(feat_train, lab_train)
    # Accuracy on the testing set
    acc_test[idx] = model_k.score(feat_test, lab_test)
# Plot accuracy against k for both splits.
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbor_counts, acc_test, label = 'Testing Accuracy')
plt.plot(neighbor_counts, acc_train, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy Score')
plt.show()
| Classification/Classifier-Digit-Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Análsis de Trayectorias de dinámica molecular
# ## RMSD y RMSF
#
# Como primer paso vamos a cargar los datos de la trayectoria de la DM, considerando cada una de las fases realizadas.
# Para ello, comenzamos importando el módulo pytraj.
import pytraj as pyt
# Load the initial structure of the protein: the linear conformation from
# which the minimization and the molecular dynamics were started.
# pyt.load() takes `filename` and `top`: the coordinate and topology files.
lineal_strc = pyt.load(
    filename = './1L2Y/1-topologia/tc5b.pdb',
    top = './1L2Y/1-topologia/tc5b.psf')
# Echoing the variable shows it is a pytraj.Trajectory object.
lineal_strc
# ### Structure visualization
# Use the nglview library to display the protein structure.
import nglview as nv
import warnings
lineal_strc_view = nv.show_pytraj(lineal_strc)
lineal_strc_view.background = 'black'
lineal_strc_view
# ## Loading the trajectory
# Load the .dcd trajectory files of every MD phase. First define the
# topology file to use.
f_topology = './1L2Y/4-run_dm_2/tc5b_wb.psf'
top_psf = pyt.load_topology(f_topology)
top_psf
# This topology includes every molecule in the system (protein + solvent);
# for this analysis only the protein matters: the first 20 residues.
top_prot_psf = top_psf[':1-20']
top_prot_psf
# Load the dcd files. Here we use dcd files from which the solvent was
# removed, to keep the files small enough to distribute online.
# The example below shows how those "reduced" files were generated from
# the original dcd files.
# +
# To optimize the analysis, only the protein atoms were previously
# extracted from the trajectories and saved in the 5_traj_analysis folder.
# Example of how to perform that extraction:
_min = pyt.load(filename = './1L2Y/4-run_dm_2/1_min/tc5b_wb_min.dcd',
                top = './1L2Y/4-run_dm_2/tc5b_wb.psf')
print(_min)
# Save a new trajectory file containing only residues 1-20 (the protein):
pyt.write_traj(filename = './1L2Y/5-traj_analysis/tc5b_PROT_MIN.dcd',
               traj = _min[':1-20'],
               overwrite=True)
print(F'Archivo guardado:\n{_min[":1-20"]}')
# -
# #### dcd files
# Locate the directory and file name of each dcd, plus the initial .pdb.
# Initial PDB
f_inicial = './1L2Y/2-solvatar_wt/tc5b_wb.pdb'
# Dynamics files
dir_traj = './1L2Y/5-traj_analysis' # directory holding the trajectory files
f_min = F'{dir_traj}/tc5b_PROT_MIN.dcd'
f_heat = F'{dir_traj}/tc5b_PROT_SA.dcd'
f_eq = F'{dir_traj}/tc5b_PROT_EQ.dcd'
f_prod = F'{dir_traj}/tc5b_PROT_PROD.dcd'
# Load each file using the topology objects. `mask` selects which atoms to
# keep; from the minimization onward it can be omitted because the
# protein-only selection was already made in top_prot_psf.
# +
# Initial structure
init_struc = pyt.load(filename = f_inicial, top = top_psf, mask = ':1-20')
print( F'Estructura inicial: {init_struc.n_frames} frames')
# Minimization
min_traj = pyt.load(filename = f_min, top = top_prot_psf)
print( F'Minimización: {min_traj.n_frames} frames')
# Heating (simulated annealing)
heat_traj = pyt.load(filename = f_heat, top = top_prot_psf)
print( F'Calentamiento: {heat_traj.n_frames} frames')
# Equilibration
eq_traj = pyt.load(filename = f_eq, top = top_prot_psf)
print( F'Equilibrado: {eq_traj.n_frames} frames')
# Production
prod_traj = pyt.load(filename = f_prod, top = top_prot_psf)
print(F'Producción: {prod_traj.n_frames} frames')
# -
# The n_frames attribute reports how many frames each trajectory holds.
# dir() lists every method and attribute an object exposes via its class.
# In this case, for a trajectory object:
print(type( prod_traj ))
print(dir( prod_traj ))
# ### Concatenate the trajectories
# Join all phases into a single trajectory to follow the conformational
# evolution across the whole run, and to save one file for later analyses.
# First build a list with the trajectory objects:
# Combine all stages to visualize a single trajectory
trajs_list = [min_traj, heat_traj, eq_traj, prod_traj]
# Iterate over trajs_list with a list comprehension to collect the number
# of frames of each phase.
# Total number of frames
n_frames_list = [ traj.n_frames for traj in trajs_list ] # note the syntax
n_frames_list
n_full_frames = sum(n_frames_list)
print( F'Número total de frames: {n_full_frames}')
# Create a new Trajectory initialized as a copy of init_struc, then append
# the coordinates of each phase with append_xyz:
# +
# Initialize the trajectory with a copy of the initial structure,
# which is a pytraj Trajectory object
full_traj = init_struc.copy()
# Iterate over the trajectory list, appending the coordinates
# to the full_traj object
for traj in trajs_list:
    full_traj.append_xyz(traj.xyz)
full_traj
# -
# ### Saving the trajectory file
# As in the minimization example, write_traj() saves the combined file.
name_file_full_traj = F'{dir_traj}/tc5b_PROT_FULL_TRAJ.dcd'
pyt.write_traj(filename = name_file_full_traj,
               traj = full_traj, overwrite = True)
# ### Trajectory analysis
# We follow the conformational evolution of the protein across every phase
# of the run (in general one would focus on the production phase only).
#
# #### Superposing the structures
# superpose() iteratively translates/rotates the frames, minimizing the
# RMSD between conformations. The alignment uses only the alpha carbons of
# residues 4-17, i.e. the first and last three residues are left out.
# Superpose the frames
full_traj.superpose(mask = ':4-17@CA', ref = 0)
# By default the first frame is taken as the reference
# #### Trajectory visualization
# Use nglview to display the trajectory with the frames superposed.
full_traj_sup_view = nv.show_pytraj(full_traj)
full_traj_sup_view.add_representation("licorice")
full_traj_sup_view
# ### RMSD calculation
# Compute the RMSD of every frame against a reference structure; as a
# first example the linear structure is used via the `ref` parameter.
# RMSD of the full trajectory,
# taking the initial (linear) structure as reference.
# NOTE(review): the mask here is ':4-17,@CA' while superpose used ':4-17@CA';
# confirm the comma is intended (cpptraj mask semantics differ).
rmsd_full_REF_lineal = pyt.rmsd(traj = full_traj,
                                mask = ':4-17,@CA',
                                ref = init_struc)
# #### RMSD plot
# Import matplotlib and seaborn and set some default plotting parameters.
from matplotlib import pyplot as plt
import seaborn as sns
sns.set( context = 'talk', style = 'ticks', palette = "Spectral")
# %pylab inline
pylab.rcParams['figure.figsize'] = (15, 5)
# Plot the RMSD of the full trajectory:
plt.scatter( x = range(full_traj.n_frames),
             y = rmsd_full_REF_lineal, s = 20, alpha = 0.3 )
plt.title("RMSD trayectoria TOTAL. Ref = conformación lineal")
plt.xlabel("# Frame")
plt.show()
# As expected, the RMSD relative to the initial conformation grows as the
# simulation advances.
# A more interesting reference is the experimental NMR structure: does the
# simulated protein approach it over time?
# Load the NMR structure
nmr_strc = pyt.load(filename = './1L2Y/5-traj_analysis/1l2y_Hs.pdb',
                    top = './1L2Y/5-traj_analysis/1l2y_Hs.psf')
# Compute the RMSD of the trajectory using nmr_strc as reference; then plot.
rmsd_full_REF_nmr = pyt.rmsd( traj = full_traj,
                              mask = ':4-17,@CA',
                              ref = nmr_strc)
plt.scatter( x = range(full_traj.n_frames), y = rmsd_full_REF_nmr,
             s = 20, alpha = 0.3, c='orange' )
plt.title("RMSD trayectoria TOTAL. Ref = conformación NMR 1l2y")
plt.xlabel("# Frame")
plt.show()
# Como ves, al principio de la simulación la conformación inicial y la estructura por NMR son muy distintas (entre 6 y 8 Å); sin embargo, a lo largo de la dinámica y sobre todo en la producción, la conformación de la proteína tiende a ser más parecida a la estructura NMR.
#
# Finalmente, visualicemos únicamente la fase de producción con ambas referencias:
import pandas as pd
# +
# Build a table with pandas.
# Production corresponds to the last 5000 frames, so slice [-5000:]
# to extract those last 5000 values.
rmsd_prod = pd.DataFrame({"Ref_lineal": rmsd_full_REF_lineal[-5000:],
                          "Ref_NMR": rmsd_full_REF_nmr[-5000:]})
# For variety, use seaborn for this plot:
ax = sns.lineplot(data = rmsd_prod)
ax.set_title("RMSD producción")
# -
# ### RMSF calculation
# Evaluate the per-residue RMSF over the production phase. First superpose
# the whole production trajectory onto an average frame.
#
# #### Average frame
# Compute the average frame
avg_prod_frame = pyt.mean_structure(traj = prod_traj)
# Superpose the trajectory onto that frame
prod_traj.superpose(ref = avg_prod_frame, mask = ':3-18@CA')
# RMSF over the whole production phase
rmsf_prod_traj = pyt.rmsf( prod_traj, mask = '@CA' )
# Plot the RMSF values (.T[1]: second column of the rmsf result)
plt.plot( rmsf_prod_traj.T[1], c = 'red')
plt.title("RMSF producción total")
plt.xlabel("# Residuo")
plt.show()
# It is also interesting to see how the RMSF changes across successive
# intervals of the production run.
# RMSF over different intervals of the production phase
num_interv = 5
interv_size = int(prod_traj.n_frames / num_interv)
df_rmsf = pd.DataFrame(
    {F'Interv. {i}': pyt.rmsf( prod_traj[i*interv_size : (i+1)*interv_size],
                               mask = '@CA').T[1] for i in range(num_interv)} )
sns.set_palette("husl")
ax = sns.lineplot(data = df_rmsf, dashes = False)
ax.set_title("RMSF producción")
# En general, podremos observar que hacia la última parte de la trayectoria, el backbone de la proteína tiende a ser más estable.
# +
# RMSF of the TOTAL trajectory.
# BUG FIX: the original referenced `cristal_strc`, a name never defined in
# this notebook (NameError). The experimental reference structure loaded
# above with pytraj is `nmr_strc`, so it is used here and in the PCA below.
full_traj.rmsfit(ref = nmr_strc)
rmsf_full_traj = pyt.rmsf(full_traj, mask = '@CA')
# Compare against the RMSF of the last 1000 production frames
rmsf_prod_traj = pyt.rmsf( prod_traj[-1000:], mask = '@CA' )
plt.plot( rmsf_full_traj.T[1], c = 'red')
plt.plot( rmsf_prod_traj.T[1], c = 'cyan')
#plt.plot( rmsf_prod_traj_3.T[1], c = 'blue')
plt.title("RMSF trayectoria total y producción")
plt.xlabel("# Residuo")
plt.show()
# -
# Principal Component Analysis over the whole trajectory
traj_plus_cristal = full_traj.copy()
traj_plus_cristal.append_xyz(nmr_strc.xyz)
# The last frame is now the experimental (NMR) structure
traj_plus_cristal.rmsfit( ref = nmr_strc)
# Run the PCA: first 5 eigenvectors, CA atoms of residues 3-18
pca_data = pyt.pca(traj_plus_cristal, mask=':3-18@CA', n_vecs = 5)
pca_projection = pca_data[0]
# BUG FIX: corrected string typo 'eigvenvalores'
print('eigenvalores de los primeros PCs', pca_data[1][0])
plt.figure(figsize= (10,8))
# Production frames only (every 5th of the last 5000), colored by frame index
plt.scatter(pca_projection[0][-5000::5], pca_projection[1][-5000::5], marker='o',
            c=range(traj_plus_cristal[-5000::5].n_frames), alpha=0.5, cmap='plasma')
cbar = plt.colorbar()
# BUG FIX: corrected label typo 'Prodcción'
cbar.set_label('Producción frame #')
# Experimental structure (the last appended frame), highlighted in red
plt.scatter(pca_projection[0][-1], pca_projection[1][-1], marker='o',
            color='red' )
plt.xlabel('PC1')
plt.ylabel('PC2')
# Note: the solvent was removed from this trajectory file to ease analysis.
# BUG FIX: `mda` was used here before `import MDAnalysis as mda` (which only
# appeared in a later cell), raising NameError when run top-to-bottom.
import MDAnalysis as mda

# Load the production trajectory with pytraj.
traj = pyt.load(filename = './1L2Y/5-traj_analysis/tc5b_PROT_PROD.dcd',
                top = './1L2Y/5-traj_analysis/1l2y_Hs.psf')
traj
# Visualize the pytraj trajectory *before* rebinding `traj` to an
# MDAnalysis Universe (BUG FIX: the original called nv.show_pytraj on the
# already-rebound MDAnalysis object).
nv.show_pytraj(traj)
# Load the initial (experimental) structure with MDAnalysis.
estruc_cristal = mda.Universe('./1L2Y/5-traj_analysis/1l2y_Hs.psf',
                              './1L2Y/5-traj_analysis/1l2y_Hs.pdb')
estruc_cristal
# Reload the same trajectory as an MDAnalysis Universe.
traj = mda.Universe('./1L2Y/5-traj_analysis/1l2y_Hs.psf',
                    './1L2Y/5-traj_analysis/tc5b_PROT_PROD.dcd'
                    )
traj
# Visualize the MDAnalysis trajectory
nv.show_mdanalysis(traj)
# BUG FIX: at this point `traj` is an MDAnalysis Universe, but pyt.rmsd needs
# a pytraj trajectory; also `estruc_lineal` was never defined (the linear
# structure loaded above is `lineal_strc`), and `estruc_cristal` is an
# MDAnalysis object (the pytraj NMR reference is `nmr_strc`). Reload the
# production trajectory with pytraj and use the pytraj references.
traj_pyt = pyt.load(filename = './1L2Y/5-traj_analysis/tc5b_PROT_PROD.dcd',
                    top = './1L2Y/5-traj_analysis/1l2y_Hs.psf')
# RMSD vs the first production frame (pytraj's default reference)
rmsd = pyt.rmsd(traj = traj_pyt, mask = '@CA')
plt.plot(rmsd); plt.title('Primer frame de produccion')
plt.show()
# RMSD vs the initial (linear) structure
rmsd = pyt.rmsd(traj = traj_pyt, mask = '@CA', ref = lineal_strc)
plt.plot(rmsd); plt.title('Estructura Inicial')
plt.show()
# RMSD vs the experimental structure
rmsd = pyt.rmsd(traj = traj_pyt, mask = '@CA', ref = nmr_strc)
plt.plot(rmsd); plt.title('Estructura Cristal')
plt.show()
# Visualize the linear structure (a pytraj object)
y = nv.show_pytraj(lineal_strc)
y
# Visualize the experimental structure (an MDAnalysis Universe)
w = nv.show_mdanalysis(estruc_cristal)
w
# +
# Ramachandran-style plots of the backbone dihedrals
# Experimental structure.
# BUG FIX: the original passed `estruc_cristal` (an MDAnalysis Universe) to
# pyt.multidihedral, which needs a pytraj object — use `nmr_strc`.
angles = pyt.multidihedral(nmr_strc, 'phi psi', resrange=range(20))
# take data for 'phi' and flatten to 1D array
phi = np.array([d.values for d in angles if 'phi' in d.key]).flatten()
# take data for 'psi' and flatten to 1D array
psi = np.array([d.values for d in angles if 'psi' in d.key]).flatten()
# NOTE(review): positional x/y and stat_func= were removed in seaborn >= 0.11;
# newer versions need sns.jointplot(x=phi, y=psi, kind='scatter').
sns.jointplot(phi, psi, kind='scatter', stat_func=None)
plt.show()
# +
# Next-to-last production frame.
# BUG FIX: `traj` is an MDAnalysis Universe here; use the pytraj production
# trajectory `prod_traj` loaded earlier (comment also mislabeled "Cristal").
angles = pyt.multidihedral(prod_traj[-2:-1], 'phi psi', resrange=range(20))
# take data for 'phi' and flatten to 1D array
phi = np.array([d.values for d in angles if 'phi' in d.key]).flatten()
# take data for 'psi' and flatten to 1D array
psi = np.array([d.values for d in angles if 'psi' in d.key]).flatten()
sns.jointplot(phi, psi, kind='scatter', stat_func=None)
plt.show()
# +
import MDAnalysis as mda
u = mda.Universe('./1L2Y/5-traj_analysis/1l2y_Hs.pdb')
# selection of atomgroups: phi dihedral atoms of residues 5-9 (0-based slice)
ags = [res.phi_selection() for res in u.residues[4:9]]
from MDAnalysis.analysis.dihedrals import Dihedral
# Compute those phi dihedrals for every frame of the Universe
R = Dihedral(ags).run()
# +
from MDAnalysis.analysis.dihedrals import Ramachandran
u = mda.Universe('./1L2Y/5-traj_analysis/1l2y_Hs.pdb')
# Phi/psi need a preceding and a following residue, so skip the termini
r = u.select_atoms("resid 2-19")
R = Ramachandran(r).run()
# NOTE(review): fig/ax are created but not passed to R.plot(), so the plot
# is drawn on its own axes — pass ax=ax if the figaspect sizing is wanted.
fig, ax = plt.subplots(figsize = plt.figaspect(1))
R.plot(ref = True)
# -
# Inspect the methods/attributes of the object returned by R.plot()
dir(R.plot())
| RMSD_y_RMSF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module Five Assignment: Cartpole Problem
# Review the code in this notebook and in the score_logger.py file in the *scores* folder (directory). Once you have reviewed the code, return to this notebook and select **Cell** and then **Run All** from the menu bar to run this code. The code takes several minutes to run.
# +
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from scores.score_logger import ScoreLogger
# Gym environment to solve.
ENV_NAME = "CartPole-v1"
# Discount factor applied to future rewards.
GAMMA = 0.95
# Learning rate for the Adam optimizer.
LEARNING_RATE = 0.001
# Replay-buffer capacity (number of stored transitions).
MEMORY_SIZE = 1000000
# Minibatch size sampled per replay step.
BATCH_SIZE = 20
# Epsilon-greedy exploration schedule: start, floor, per-replay decay.
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.995
class DQNSolver:
    """Deep Q-Network agent: epsilon-greedy policy over a small MLP Q-function."""

    def __init__(self, observation_space, action_space):
        self.exploration_rate = EXPLORATION_MAX
        self.action_space = action_space
        # Replay buffer; the oldest transitions fall off when full.
        self.memory = deque(maxlen=MEMORY_SIZE)
        # Q-network: two hidden ReLU layers of 24 units, linear Q-value outputs.
        network = Sequential()
        network.add(Dense(24, input_shape=(observation_space,), activation="relu"))
        network.add(Dense(24, activation="relu"))
        network.add(Dense(self.action_space, activation="linear"))
        network.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))
        self.model = network

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        """Epsilon-greedy action: random with prob. exploration_rate, else argmax Q."""
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.action_space)
        return np.argmax(self.model.predict(state)[0])

    def experience_replay(self):
        """Sample a minibatch, do one TD(0) update per transition, then decay epsilon."""
        if len(self.memory) < BATCH_SIZE:
            return  # not enough experience yet; epsilon is not decayed either
        for state, action, reward, state_next, terminal in random.sample(self.memory, BATCH_SIZE):
            target = reward
            if not terminal:
                # Bootstrap from the best Q-value of the successor state.
                target = reward + GAMMA * np.amax(self.model.predict(state_next)[0])
            q_values = self.model.predict(state)
            q_values[0][action] = target
            self.model.fit(state, q_values, verbose=0)
        # Decay exploration, clamped at the configured floor.
        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate * EXPLORATION_DECAY)
def cartpole():
    """Train a DQN agent on CartPole, logging each episode's score.

    Runs episodes indefinitely; ScoreLogger tracks scores and decides when
    the environment counts as solved. Rendering is disabled for speed.
    """
    env = gym.make(ENV_NAME)
    score_logger = ScoreLogger(ENV_NAME)
    observation_space = env.observation_space.shape[0]
    action_space = env.action_space.n
    dqn_solver = DQNSolver(observation_space, action_space)
    run = 0
    while True:
        run += 1
        state = env.reset()
        # Keras expects a batch dimension: (1, observation_space).
        state = np.reshape(state, [1, observation_space])
        step = 0
        while True:
            step += 1
            #env.render()
            action = dqn_solver.act(state)
            state_next, reward, terminal, info = env.step(action)
            # Penalize the terminal step so failure is discouraged.
            reward = reward if not terminal else -reward
            state_next = np.reshape(state_next, [1, observation_space])
            dqn_solver.remember(state, action, reward, state_next, terminal)
            state = state_next
            if terminal:
                # BUG FIX: the original used a Python 2 `print` statement with
                # a stray `+ ,` — a SyntaxError under Python 3.
                print("Run: " + str(run) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
                score_logger.add_score(step, run)
                break
            dqn_solver.experience_replay()
# Standard entry point when the notebook is exported/run as a script.
if __name__ == "__main__":
    cartpole()
# -
# Kick off training when running the notebook cells interactively.
cartpole()
# Note: If the code is running properly, you should begin to see output appearing above this code block. It will take several minutes, so it is recommended that you let this code run in the background while completing other work. When the code has finished, it will print output saying, "Solved in _ runs, _ total runs."
#
# You may see an error about not having an exit command. This error does not affect the program's functionality and results from the steps taken to convert the code from Python 2.x to Python 3. Please disregard this error.
| Undergrad/CS-370-T1045/Week 5 /Bailey_Samuel_Assignment5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p27)
# language: python
# name: conda_tensorflow_p27
# ---
# # Creating, training, and serving using SageMaker Estimators
#
# The **SageMaker Python SDK** helps you deploy your models for training and hosting in optimized, production ready containers in SageMaker. The SageMaker Python SDK is easy to use, modular, extensible and compatible with TensorFlow and MXNet. This tutorial focuses on **TensorFlow** and shows how we can train and host a TensorFlow DNNClassifier estimator in SageMaker using the Python SDK.
#
#
# TensorFlow's high-level machine learning API (tf.estimator) makes it easy to
# configure, train, and evaluate a variety of machine learning models.
#
#
# In this tutorial, you'll use tf.estimator to construct a
# [neural network](https://en.wikipedia.org/wiki/Artificial_neural_network)
# classifier and train it on the
# [Iris data set](https://en.wikipedia.org/wiki/Iris_flower_data_set) to
# predict flower species based on sepal/petal geometry. You'll write code to
# perform the following five steps:
#
# 1. Deploy a TensorFlow container in SageMaker
# 2. Load CSVs containing Iris training/test data from a S3 bucket into a TensorFlow `Dataset`
# 3. Construct a `tf.estimator.DNNClassifier` neural network classifier
# 4. Train the model using the training data
# 5. Host the model in an endpoint
# 6. Classify new samples invoking the endpoint
#
# This tutorial is a simplified version of TensorFlow's [get_started/estimator](https://www.tensorflow.org/get_started/estimator#fit_the_dnnclassifier_to_the_iris_training_data) tutorial **but using SageMaker and the SageMaker Python SDK** to simplify training and hosting.
# ## The Iris dataset
#
# The [Iris data set](https://en.wikipedia.org/wiki/Iris_flower_data_set) contains
# 150 rows of data, comprising 50 samples from each of three related Iris species:
# *Iris setosa*, *Iris virginica*, and *Iris versicolor*.
#
#  **From left to right,
# [*Iris setosa*](https://commons.wikimedia.org/w/index.php?curid=170298) (by
# [Radomil](https://commons.wikimedia.org/wiki/User:Radomil), CC BY-SA 3.0),
# [*Iris versicolor*](https://commons.wikimedia.org/w/index.php?curid=248095) (by
# [Dlanglois](https://commons.wikimedia.org/wiki/User:Dlanglois), CC BY-SA 3.0),
# and [*Iris virginica*](https://www.flickr.com/photos/33397993@N05/3352169862)
# (by [<NAME>](https://www.flickr.com/photos/33397993@N05), CC BY-SA
# 2.0).**
#
# Each row contains the following data for each flower sample:
# [sepal](https://en.wikipedia.org/wiki/Sepal) length, sepal width,
# [petal](https://en.wikipedia.org/wiki/Petal) length, petal width, and flower
# species. Flower species are represented as integers, with 0 denoting *Iris
# setosa*, 1 denoting *Iris versicolor*, and 2 denoting *Iris virginica*.
#
# Sepal Length | Sepal Width | Petal Length | Petal Width | Species
# :----------- | :---------- | :----------- | :---------- | :-------
# 5.1 | 3.5 | 1.4 | 0.2 | 0
# 4.9 | 3.0 | 1.4 | 0.2 | 0
# 4.7 | 3.2 | 1.3 | 0.2 | 0
# … | … | … | … | …
# 7.0 | 3.2 | 4.7 | 1.4 | 1
# 6.4 | 3.2 | 4.5 | 1.5 | 1
# 6.9 | 3.1 | 4.9 | 1.5 | 1
# … | … | … | … | …
# 6.5 | 3.0 | 5.2 | 2.0 | 2
# 6.2 | 3.4 | 5.4 | 2.3 | 2
# 5.9 | 3.0 | 5.1 | 1.8 | 2
#
# For this tutorial, the Iris data has been randomized and split into two separate
# CSVs:
#
# * A training set of 120 samples
# iris_training.csv
# * A test set of 30 samples
# iris_test.csv
#
# These files are provided in the SageMaker sample data bucket:
# **s3://sagemaker-sample-data-{region}/tensorflow/iris**. Copies of the bucket exist in each SageMaker region. When we access the data, we'll replace {region} with the AWS region the notebook is running in.
# ## Let us first initialize variables
# + isConfigCell=true
from sagemaker import get_execution_role
# S3 prefix where your custom training code is uploaded in tar.gz format.
# NOTE: replace <bucket-name> with a bucket your execution role can write to.
custom_code_upload_location = 's3://<bucket-name>/customcode/tensorflow_iris'
# S3 prefix where the trained model artifacts are saved.
model_artifacts_location = 's3://<bucket-name>/artifacts'
# IAM execution role that gives SageMaker access to resources in your AWS account.
role = get_execution_role()
# -
# # tf.estimator
# The tf.estimator framework makes it easy to construct and train machine learning models via its high-level Estimator API. Estimator offers classes you can instantiate to quickly configure common model types such as regressors and classifiers:
#
#
# * **```tf.estimator.LinearClassifier```**:
# Constructs a linear classification model.
# * **```tf.estimator.LinearRegressor```**:
# Constructs a linear regression model.
# * **```tf.estimator.DNNClassifier```**:
# Construct a neural network classification model.
# * **```tf.estimator.DNNRegressor```**:
# Construct a neural network regression model.
# * **```tf.estimator.DNNLinearCombinedClassifier```**:
# Construct a neural network and linear combined classification model.
# * **```tf.estimator.DNNRegressor```**:
# Construct a neural network and linear combined regression model.
#
# More information about estimators can be found [here](https://www.tensorflow.org/extend/estimators)
# # Construct a deep neural network classifier
# ## Complete neural network source code
#
# Here is the full code for the neural network classifier:
# !cat "iris_dnn_classifier.py"
# With few lines of code, using SageMaker and TensorFlow, you can create a deep neural network model, ready for training and hosting. Let's give a deeper look at the code.
# ### Using a tf.estimator in SageMaker
# Using a TensorFlow estimator in SageMaker is very easy, you can create one with few lines of code:
def estimator(model_path, hyperparameters):
    """Build the DNNClassifier SageMaker will train (3 classes, 4 numeric features)."""
    # A single continuous feature column holding the four iris measurements.
    columns = [tf.feature_column.numeric_column(INPUT_TENSOR_NAME, shape=[4])]
    return tf.estimator.DNNClassifier(
        feature_columns=columns,
        hidden_units=[10, 20, 10],  # three hidden layers of 10/20/10 neurons
        n_classes=3,                # the three iris species
        model_dir=model_path)       # where TensorFlow writes checkpoints
# The code above first defines the model's feature columns, which specify the data
# type for the features in the data set. All the feature data is continuous, so
# `tf.feature_column.numeric_column` is the appropriate function to use to
# construct the feature columns. There are four features in the data set (sepal
# width, sepal height, petal width, and petal height), so accordingly `shape`
# must be set to `[4]` to hold all the data.
#
# Then, the code creates a `DNNClassifier` model using the following arguments:
#
# * `feature_columns=feature_columns`. The set of feature columns defined above.
# * `hidden_units=[10, 20, 10]`. Three
# [hidden layers](http://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw),
# containing 10, 20, and 10 neurons, respectively.
# * `n_classes=3`. Three target classes, representing the three Iris species.
# * `model_dir=model_path`. The directory in which TensorFlow will save
# checkpoint data during model training.
# ### Describe the training input pipeline
#
# The `tf.estimator` API uses input functions, which create the TensorFlow
# operations that generate data for the model.
# We can use `tf.estimator.inputs.numpy_input_fn` to produce the input pipeline:
def train_input_fn(training_dir, hyperparameters):
    """Return the training input pipeline built from iris_training.csv."""
    csv_path = os.path.join(training_dir, 'iris_training.csv')
    dataset = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=csv_path,
        target_dtype=np.int,
        features_dtype=np.float32)
    features = {INPUT_TENSOR_NAME: np.array(dataset.data)}
    targets = np.array(dataset.target)
    # Shuffled, endlessly repeating numpy input fn, invoked immediately.
    return tf.estimator.inputs.numpy_input_fn(
        x=features, y=targets, num_epochs=None, shuffle=True)()
# ### Describe the serving input pipeline:
#
# After traininng your model, SageMaker will host it in a TensorFlow serving. You need to describe a serving input function:
def serving_input_fn(hyperparameters):
    """Describe how TensorFlow Serving should parse incoming prediction requests."""
    spec = {INPUT_TENSOR_NAME: tf.FixedLenFeature(dtype=tf.float32, shape=[4])}
    receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(spec)
    return receiver_fn()
# Now we are ready to submit the script for training.
# # Train a Model on Amazon SageMaker using TensorFlow custom code
#
# We can use the SDK to run our local training script on SageMaker infrastructure.
#
# 1. Pass the path to the iris_dnn_classifier.py file, which contains the functions for defining your estimator, to the sagemaker.TensorFlow init method.
# 2. Pass the S3 location that we uploaded our data to previously to the fit() method.
# +
from sagemaker.tensorflow import TensorFlow
# Configure a SageMaker TensorFlow estimator that runs the local script
# (iris_dnn_classifier.py) on managed training infrastructure.
iris_estimator = TensorFlow(entry_point='iris_dnn_classifier.py',
                            role=role,  # IAM role SageMaker assumes
                            framework_version='1.10.0',
                            output_path=model_artifacts_location,
                            code_location=custom_code_upload_location,
                            train_instance_count=1,
                            train_instance_type='ml.c4.xlarge',
                            training_steps=1000,
                            evaluation_steps=100)
# +
# %%time
import boto3
# use the region-specific sample data bucket
region = boto3.Session().region_name
train_data_location = 's3://sagemaker-sample-data-{}/tensorflow/iris'.format(region)
# Launches the managed training job; blocks until it finishes.
iris_estimator.fit(train_data_location)
# -
# # Deploy the trained Model
#
# The deploy() method creates an endpoint which serves prediction requests in real-time.
# %%time
# Create a real-time inference endpoint backed by one ml.m4.xlarge host.
iris_predictor = iris_estimator.deploy(initial_instance_count=1,
                                       instance_type='ml.m4.xlarge')
# # Invoke the Endpoint to get inferences
# Invoking prediction:
iris_predictor.predict([6.4, 3.2, 4.5, 1.5]) #expected label to be 1
# # (Optional) Delete the Endpoint
#
# After you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it.
print(iris_predictor.endpoint)
# +
import sagemaker
# Tears down the hosted endpoint so the instance stops accruing charges.
sagemaker.Session().delete_endpoint(iris_predictor.endpoint)
# -
| sagemaker-python-sdk/tensorflow_iris_dnn_classifier_using_estimators/tensorflow_iris_dnn_classifier_using_estimators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Pnn4rDWGqDZL"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="l534d35Gp68G"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="3TI3Q3XBesaS"
# # Training checkpoints
# + [markdown] colab_type="text" id="yw_a0iGucY8z"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/alpha/guide/checkpoints"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/checkpoints.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/checkpoints.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="LeDp7dovcbus"
#
# The phrase "Saving a TensorFlow model" typically means one of two things: (1) Checkpoints, OR (2) SavedModel.
#
# Checkpoints capture the exact value of all parameters (`tf.Variable` objects) used by a model. Checkpoints do not contain any description of the computation defined by the model and thus are typically only useful when source code that will use the saved parameter values is available.
#
# The SavedModel format on the other hand includes a serialized description of the computation defined by the model in addition to the parameter values (checkpoint). Models in this format are independent of the source code that created the model. They are thus suitable for deployment via TensorFlow Serving or programs in other programming languages (the C, C++, Java, Go, Rust, C# etc. TensorFlow APIs).
#
# This guide covers APIs for writing and reading checkpoints.
# + [markdown] colab_type="text" id="5vsq3-pffo1I"
# ## Saving from `tf.keras` training APIs
#
# See the [`tf.keras` guide on saving and
# restoring](./keras.ipynb#save_and_restore).
#
# `tf.keras.Model.save_weights`
# optionally saves in the TensorFlow checkpoint format. This guide explains the format in more depth, and introduces APIs for managing checkpoints in custom training loops.
# + [markdown] colab_type="text" id="XseWX5jDg4lQ"
# ## Writing checkpoints manually
# + [markdown] colab_type="text" id="1jpZPz76ZP3K"
# The persistent state of a TensorFlow model is stored in `tf.Variable` objects. These can be constructed directly, but are often created through high-level APIs like `tf.keras.layers`.
#
# The easiest way to manage variables is by attaching them to Python objects, then referencing those objects. Subclasses of `tf.train.Checkpoint`, `tf.keras.layers.Layer`, and `tf.keras.Model` automatically track variables assigned to their attributes. The following example constructs a simple linear model, then writes checkpoints which contain values for all of the model's variables.
# + colab={} colab_type="code" id="VEvpMYAKsC4z"
from __future__ import absolute_import, division, print_function
# !pip install tf-nightly-2.0-preview
import tensorflow as tf
# + colab={} colab_type="code" id="BR5dChK7rXnj"
class Net(tf.keras.Model):
  """A simple linear model: a single Dense layer with 5 output units.

  The attribute name `l1` is significant for checkpointing: it becomes the
  edge name under which the layer's variables are saved and restored by
  tf.train.Checkpoint.
  """
  def __init__(self):
    super(Net, self).__init__()
    # The kernel's shape is deferred until the first call, when the input
    # dimension becomes known.
    self.l1 = tf.keras.layers.Dense(5)
  def call(self, x):
    # Forward pass: apply the single dense layer.
    return self.l1(x)
# + [markdown] colab_type="text" id="fNjf9KaLdIRP"
# Although it's not the focus of this guide, to be executable the example needs data and an optimization step. The model will train on slices of an in-memory dataset.
# + colab={} colab_type="code" id="tSNyP4IJ9nkU"
def toy_dataset():
  """Build a tiny in-memory dataset of {'x', 'y'} batches for the demo.

  x is 10 scalar inputs (column vector); y is a linear function of x with
  5 output columns. The dataset repeats 10 times in batches of 2.
  """
  xs = tf.range(10.)[:, None]
  ys = xs * 5. + tf.range(5.)[None, :]
  examples = {'x': xs, 'y': ys}
  return tf.data.Dataset.from_tensor_slices(examples).repeat(10).batch(2)
# + colab={} colab_type="code" id="ICm1cufh_JH8"
def train_step(net, example, optimizer):
  """Run one optimization step of `net` on a single `example` batch.

  Computes a mean-absolute-error loss under a gradient tape, applies the
  gradients with `optimizer`, and returns the scalar loss.
  """
  with tf.GradientTape() as tape:
    predictions = net(example['x'])
    loss = tf.reduce_mean(tf.abs(predictions - example['y']))
  trainable = net.trainable_variables
  grads = tape.gradient(loss, trainable)
  optimizer.apply_gradients(zip(grads, trainable))
  return loss
# + [markdown] colab_type="text" id="NP9IySmCeCkn"
# The following training loop creates an instance of the model and of an optimizer, then gathers them into a `tf.train.Checkpoint` object. It calls the training step in a loop on each batch of data, and periodically writes checkpoints to disk.
# + colab={} colab_type="code" id="BbCS5A6K1VSH"
# Gather the step counter, optimizer state, and model variables into one
# Checkpoint; the manager keeps at most the 3 newest checkpoints on disk.
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
# restore() is a no-op when latest_checkpoint is None (fresh start).
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
  print("Restored from {}".format(manager.latest_checkpoint))
else:
  print("Initializing from scratch.")
for example in toy_dataset():
  loss = train_step(net, example, opt)
  ckpt.step.assign_add(1)
  # Save a checkpoint every 10 steps.
  if int(ckpt.step) % 10 == 0:
    save_path = manager.save()
    print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
    print("loss {:1.2f}".format(loss.numpy()))
# + [markdown] colab_type="text" id="lw1QeyRBgsLE"
# The preceding snippet will randomly initialize the model variables when it first runs. After the first run it will resume training from where it left off:
# + colab={} colab_type="code" id="UjilkTOV2PBK"
# Deliberately identical to the previous cell: re-running it demonstrates
# that training resumes from the latest saved checkpoint instead of
# starting from scratch.
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
  print("Restored from {}".format(manager.latest_checkpoint))
else:
  print("Initializing from scratch.")
for example in toy_dataset():
  loss = train_step(net, example, opt)
  ckpt.step.assign_add(1)
  if int(ckpt.step) % 10 == 0:
    save_path = manager.save()
    print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
    print("loss {:1.2f}".format(loss.numpy()))
# + [markdown] colab_type="text" id="dxJT9vV-2PnZ"
# The `tf.train.CheckpointManager` object deletes old checkpoints. Above it's configured to keep only the three most recent checkpoints.
# + colab={} colab_type="code" id="3zmM0a-F5XqC"
# `manager.checkpoints` holds the prefixes of the checkpoints still on
# disk — at most `max_to_keep` (three here); older ones were deleted.
print(manager.checkpoints) # List the three remaining checkpoints
# + [markdown] colab_type="text" id="qwlYDyjemY4P"
# These paths, e.g. `'./tf_ckpts/ckpt-10'`, are not files on disk. Instead they are prefixes for an `index` file and one or more data files which contain the variable values. These prefixes are grouped together in a single `checkpoint` file (`'./tf_ckpts/checkpoint'`) where the `CheckpointManager` saves its state.
# + colab={} colab_type="code" id="t1feej9JntV_"
# !ls ./tf_ckpts
# + [markdown] colab_type="text" id="DR2wQc9x6b3X"
# ## Loading mechanics
#
# TensorFlow matches variables to checkpointed values by traversing a directed graph with named edges, starting from the object being loaded. Edge names typically come from attribute names in objects, for example the `"l1"` in `self.l1 = tf.keras.layers.Dense(5)`. `tf.train.Checkpoint` uses its keyword argument names, as in the `"step"` in `tf.train.Checkpoint(step=...)`.
#
# The dependency graph from the example above looks like this:
#
# 
#
# With the optimizer in red, regular variables in blue, and optimizer slot variables in orange. The other nodes, for example representing the `tf.train.Checkpoint`, are black.
#
# Slot variables are part of the optimizer's state, but are created for a specific variable. For example the `'m'` edges above correspond to momentum, which the Adam optimizer tracks for each variable. Slot variables are only saved in a checkpoint if the variable and the optimizer would both be saved, thus the dashed edges.
# + [markdown] colab_type="text" id="VpY5IuanUEQ0"
# Calling `restore()` on a `tf.train.Checkpoint` object queues the requested restorations, restoring variable values as soon as there's a matching path from the `Checkpoint` object. For example we can load just the kernel from the model we defined above by reconstructing one path to it through the network and the layer.
# + colab={} colab_type="code" id="wmX2AuyH7TVt"
to_restore = tf.Variable(tf.zeros([5]))
print(to_restore.numpy()) # All zeros
# Rebuild just the path net -> l1 -> bias from the original dependency
# graph; matching edge names let restore() find the saved bias value.
fake_layer = tf.train.Checkpoint(bias=to_restore)
fake_net = tf.train.Checkpoint(l1=fake_layer)
new_root = tf.train.Checkpoint(net=fake_net)
status = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))
print(to_restore.numpy()) # We get the restored value now
# + [markdown] colab_type="text" id="GqEW-_pJDAnE"
# The dependency graph for these new objects is a much smaller subgraph of the larger checkpoint we wrote above. It includes only the bias and a save counter that `tf.train.Checkpoint` uses to number checkpoints.
#
# 
#
# `restore()` returns a status object, which has optional assertions. All of the objects we've created in our new `Checkpoint` have been restored, so `status.assert_existing_objects_matched()` passes.
# + colab={} colab_type="code" id="P9TQXl81Dq5r"
# Passes: every object created in the new Checkpoint graph was matched.
# (assert_consumed() would fail — the checkpoint has unmatched objects.)
status.assert_existing_objects_matched()
# + [markdown] colab_type="text" id="GoMwf8CFDu9r"
# There are many objects in the checkpoint which haven't matched, including the layer's kernel and the optimizer's variables. `status.assert_consumed()` only passes if the checkpoint and the program match exactly, and would throw an exception here.
# + [markdown] colab_type="text" id="KCcmJ-2j9RUP"
# ### Delayed restorations
#
# `Layer` objects in TensorFlow may delay the creation of variables to their first call, when input shapes are available. For example the shape of a `Dense` layer's kernel depends on both the layer's input and output shapes, and so the output shape required as a constructor argument is not enough information to create the variable on its own. Since calling a `Layer` also reads the variable's value, a restore must happen between the variable's creation and its first use.
#
# To support this idiom, `tf.train.Checkpoint` queues restores which don't yet have a matching variable.
# + colab={} colab_type="code" id="TXYUCO3v-I72"
delayed_restore = tf.Variable(tf.zeros([1, 5]))
print(delayed_restore.numpy()) # Not restored; still zeros
# Attaching the variable under the matching edge name ("kernel") triggers
# the queued restoration immediately.
fake_layer.kernel = delayed_restore
print(delayed_restore.numpy()) # Restored
# + [markdown] colab_type="text" id="-DWhJ3glyobN"
# ### Manually inspecting checkpoints
#
# `tf.train.list_variables` lists the checkpoint keys and shapes of variables in a checkpoint. Checkpoint keys are paths in the graph displayed above.
# + colab={} colab_type="code" id="RlRsADTezoBD"
# Inspect the checkpoint: returns (key, shape) pairs, where each key is a
# path through the object graph (e.g. "net/l1/kernel/...").
tf.train.list_variables(tf.train.latest_checkpoint('./tf_ckpts/'))
# + [markdown] colab_type="text" id="5fxk_BnZ4W1b"
# ### List and dictionary tracking
#
# As with direct attribute assignments like `self.l1 = tf.keras.layers.Dense(5)`, assigning lists and dictionaries to attributes will track their contents.
# + colab={} colab_type="code" id="rfaIbDtDHAr_"
# Lists and dicts assigned to Checkpoint attributes are tracked, including
# contents added after assignment.
save = tf.train.Checkpoint()
save.listed = [tf.Variable(1.)]
save.listed.append(tf.Variable(2.))
save.mapped = {'one': save.listed[0]}
save.mapped['two'] = save.listed[1]
save_path = save.save('./tf_list_example')
restore = tf.train.Checkpoint()
v2 = tf.Variable(0.)
assert 0. == v2.numpy() # Not restored yet
# Placing v2 under the same container path ("mapped"/"two") lets
# restore() find and load its saved value.
restore.mapped = {'two': v2}
restore.restore(save_path)
assert 2. == v2.numpy()
# + [markdown] colab_type="text" id="UTKvbxHcI3T2"
# You may notice wrapper objects for lists and dictionaries. These wrappers are checkpointable versions of the underlying data-structures. Just like the attribute based loading, these wrappers restore a variable's value as soon as it's added to the container.
# + colab={} colab_type="code" id="s0Uq1Hv5JCmm"
# Containers are wrapped in checkpointable versions; adding a variable to
# the wrapper restores its value as soon as it is inserted.
restore.listed = []
print(restore.listed) # ListWrapper([])
v1 = tf.Variable(0.)
restore.listed.append(v1) # Restores v1, from restore() in the previous cell
assert 1. == v1.numpy()
# + [markdown] colab_type="text" id="OxCIf2J6JyQ8"
# The same tracking is automatically applied to subclasses of `tf.keras.Model`, and may be used for example to track lists of layers.
# + [markdown] colab_type="text" id="zGG1tOM0L6iM"
# ## Saving object-based checkpoints with Estimator
#
# See the [guide to Estimator](https://www.tensorflow.org/guide/estimators).
#
# Estimators by default save checkpoints with variable names rather than the object graph described in the previous sections. `tf.train.Checkpoint` will accept name-based checkpoints, but variable names may change when moving parts of a model outside of the Estimator's `model_fn`. Saving object-based checkpoints makes it easier to train a model inside an Estimator and then use it outside of one.
# + colab={} colab_type="code" id="-8AMJeueNyoM"
import tensorflow.compat.v1 as tf_compat
# + colab={} colab_type="code" id="T6fQsBzJQN2y"
def model_fn(features, labels, mode):
  """Estimator model_fn that saves object-based (not name-based) checkpoints.

  Builds the same Net/Adam pair as the eager examples, wires the global
  step into a tf.train.Checkpoint, and hands that Checkpoint to the
  Estimator's Scaffold as the saver.
  """
  net = Net()
  opt = tf.keras.optimizers.Adam(0.1)
  ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),
                             optimizer=opt, net=net)
  with tf.GradientTape() as tape:
    output = net(features['x'])
    loss = tf.reduce_mean(tf.abs(output - features['y']))
  variables = net.trainable_variables
  gradients = tape.gradient(loss, variables)
  return tf.estimator.EstimatorSpec(
    mode,
    loss=loss,
    # Group the optimizer update with the step increment so both run per
    # training iteration.
    train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),
                      ckpt.step.assign_add(1)),
    # Tell the Estimator to save "ckpt" in an object-based format.
    scaffold=tf_compat.train.Scaffold(saver=ckpt))
tf.keras.backend.clear_session()
est = tf.estimator.Estimator(model_fn, './tf_estimator_example/')
est.train(toy_dataset, steps=10)
# + [markdown] colab_type="text" id="tObYHnrrb_mL"
# `tf.train.Checkpoint` can then load the Estimator's checkpoints from its `model_dir`.
# + colab={} colab_type="code" id="Q6IP3Y_wb-fs"
# Load the Estimator-written checkpoint with a plain tf.train.Checkpoint.
# step must be int64 here to match the Estimator's global step dtype.
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(
  step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)
ckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))
ckpt.step.numpy()  # From est.train(..., steps=10)
# + [markdown] colab_type="text" id="knyUFMrJg8y4"
# ## Summary
#
# TensorFlow objects provide an easy automatic mechanism for saving and restoring the values of variables they use.
#
| site/en/r2/guide/checkpoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multi-class (Nonlinear) SVM Example
# --------------------------------
#
# This function will illustrate how to implement the Gaussian kernel with multiple classes on the iris dataset.
#
# Gaussian Kernel:
#
# $$K(x_1, x_2) = e^{-\gamma \cdot (x_1 - x_2)^2}$$
#
# X : (Sepal Length, Petal Width)
#
# Y: (I. setosa, I. virginica, I. versicolor) (3 classes)
#
# Basic idea: introduce an extra dimension to do one vs all classification.
#
# The prediction of a point will be the category with the largest margin or distance to boundary.
#
# We start by loading the necessary libraries.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a computational graph session.
sess = tf.Session()
# Now we load the iris data.
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
# Use two features: column 0 (sepal length) and column 3 (petal width).
x_vals = np.array([[x[0], x[3]] for x in iris.data])
# One-vs-all labels for each of the three species: +1 for the target
# class, -1 for everything else.
y_vals1 = np.array([1 if y==0 else -1 for y in iris.target])
y_vals2 = np.array([1 if y==1 else -1 for y in iris.target])
y_vals3 = np.array([1 if y==2 else -1 for y in iris.target])
y_vals = np.array([y_vals1, y_vals2, y_vals3])
# Per-class coordinate lists, used only for plotting at the end.
class1_x = [x[0] for i,x in enumerate(x_vals) if iris.target[i]==0]
class1_y = [x[1] for i,x in enumerate(x_vals) if iris.target[i]==0]
class2_x = [x[0] for i,x in enumerate(x_vals) if iris.target[i]==1]
class2_y = [x[1] for i,x in enumerate(x_vals) if iris.target[i]==1]
class3_x = [x[0] for i,x in enumerate(x_vals) if iris.target[i]==2]
class3_y = [x[1] for i,x in enumerate(x_vals) if iris.target[i]==2]
# Declare the batch size
batch_size = 50
# Initialize placeholders and create the variables for multiclass SVM
# +
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
# y_target rows: one-vs-all labels for each of the 3 classes.
y_target = tf.placeholder(shape=[3, None], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
# Create variables for svm
# One row of dual coefficients per class (3 x batch_size).
b = tf.Variable(tf.random_normal(shape=[3,batch_size]))
# -
# Create the Gaussian Kernel
# Gaussian (RBF) kernel
gamma = tf.constant(-10.0)
dist = tf.reduce_sum(tf.square(x_data), 1)
dist = tf.reshape(dist, [-1,1])
# NOTE(review): `dist` is computed but never used, and `sq_dists` here is
# only the 2*x_i.x_j cross term — unlike the prediction kernel later,
# which includes the squared-norm terms. Looks like an oversight; confirm
# against the intended RBF formula before relying on this kernel.
sq_dists = tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
# Declare a function that will do reshaping and batch matrix multiplication
# Declare function to do reshape/batch multiplication
def reshape_matmul(mat, _size):
    """Batched outer products of the label rows.

    `mat` is (3, _size); the result is (3, _size, _size): for each class i
    the outer product of its label vector with itself.
    """
    expanded = tf.expand_dims(mat, 1)               # (3, 1, _size)
    columns = tf.reshape(expanded, [3, _size, 1])   # (3, _size, 1)
    return tf.matmul(columns, expanded)
# Now we can compute the SVM model
# +
# Compute SVM Model
# Dual SVM objective: maximize sum(b) - b'Kb terms per class, so we
# minimize the negation summed over the three one-vs-all problems.
first_term = tf.reduce_sum(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = reshape_matmul(y_target, batch_size)
second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)),[1,2])
loss = tf.reduce_sum(tf.negative(tf.subtract(first_term, second_term)))
# -
# Create the same RBF kernel for a set of prediction points (used on a grid of points at the end).
# +
# Gaussian (RBF) prediction kernel
# Full squared distances: ||a||^2 - 2 a.b + ||b||^2 between training
# points and prediction points.
rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
# Predicted class = argmax over the three per-class decision values
# (centered by their mean across prediction points).
prediction_output = tf.matmul(tf.multiply(y_target,b), pred_kernel)
prediction = tf.argmax(prediction_output-tf.expand_dims(tf.reduce_mean(prediction_output,1), 1), 0)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(y_target,0)), tf.float32))
# -
# Create the optimization and variable initializer operations.
# +
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# -
# -
# We now start the training loop for the multiclass SVM
# Training loop
loss_vec = []
batch_accuracy = []
for i in range(100):
    # Sample a random batch (with replacement) of size batch_size.
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = x_vals[rand_index]
    rand_y = y_vals[:,rand_index]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    # Accuracy evaluated on the same batch (training accuracy).
    acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x,
                                             y_target: rand_y,
                                             prediction_grid:rand_x})
    batch_accuracy.append(acc_temp)
    if (i+1)%25==0:
        print('Step #' + str(i+1))
        print('Loss = ' + str(temp_loss))
# For a pretty picture, to see the results, we create a fine grid of points to label/color for each class.
# Create a mesh to plot points in
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
# Classify every grid point using the last training batch's coefficients.
grid_predictions = sess.run(prediction, feed_dict={x_data: rand_x,
                                                   y_target: rand_y,
                                                   prediction_grid: grid_points})
grid_predictions = grid_predictions.reshape(xx.shape)
# Plot the results
# +
# Plot points and grid
plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='I. setosa')
plt.plot(class2_x, class2_y, 'kx', label='I. versicolor')
plt.plot(class3_x, class3_y, 'gv', label='I. virginica')
plt.title('Gaussian SVM Results on Iris Data')
# Fixed axis labels: the plotted features are iris.data columns 0 and 3,
# i.e. sepal length (x-axis) and petal width (y-axis); the original
# labels ('Pedal Length' / 'Sepal Width') were incorrect.
plt.xlabel('Sepal Length')
plt.ylabel('Petal Width')
plt.legend(loc='lower right')
plt.ylim([-0.5, 3.0])
plt.xlim([3.5, 8.5])
plt.show()
# Plot batch accuracy
plt.plot(batch_accuracy, 'k-', label='Accuracy')
plt.title('Batch Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
# -
# -
| 04_Support_Vector_Machines/06_Implementing_Multiclass_SVMs/06_multiclass_svm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import geopandas as gpd
# +
# read p10 spatial data
p10 = gpd.read_file(r'M:\Data\GIS layers\UrbanSim smelt\p10_EIR\p10_parcels.shp')
p10_df = p10[['PARCEL_ID', 'geometry']]
# Normalize PARCEL_ID from float to int for the merge below.
# NOTE(review): assigning into a column selection may raise pandas'
# SettingWithCopyWarning — consider .copy() on the selection; verify.
p10_df['PARCEL_ID'] = p10_df['PARCEL_ID'].apply(lambda x: int(round(x)))
print('read {} rows of p10 spatial data'.format(p10_df.shape[0]))
print(p10_df.dtypes)
# +
# read parcel_geographic data with 'fbpchcat' label for parcels
pz = pd.read_csv(r'C:\Users\ywang\Box\Modeling and Surveys\Urban Modeling\Bay Area UrbanSim\PBA50\Current PBA50 Large General Input Data\2021_02_25_parcels_geography.csv',
                 usecols = ['PARCEL_ID', 'fbp_gg_id', 'hra_id', 'fbp_tra_id'])
print('read {} rows of parcel_geography data'.format(pz.shape[0]))
print(pz.dtypes)
# join fbpchcat to parcel spatial data
p10_z = p10_df.merge(pz, on='PARCEL_ID', how='left')
# `display` is the IPython/notebook builtin (not imported here).
display(p10_z.head())
# consolidate categories
# Collapse the tra2a/tra2b/tra2c subcategories into a single 'tra2'.
tra_dict = {'tra3' : 'tra3',
            'tra2c': 'tra2',
            'tra2a': 'tra2',
            'tra1' : 'tra1',
            'tra2b': 'tra2'}
p10_z['fbp_tra_id_cat'] = p10_z['fbp_tra_id'].map(tra_dict)
# fill NAs and create growth geography categories
p10_z['fbp_gg_id'].fillna('NonGG', inplace=True)
p10_z['hra_id'].fillna('NonHRA', inplace=True)
p10_z['fbp_tra_id'].fillna('NonTRA', inplace=True)
p10_z['fbp_tra_id_cat'].fillna('NonTRA', inplace=True)
# Combined category labels: GG status / HRA status / TRA category.
p10_z['gg_cat'] = p10_z['fbp_gg_id'] + '/' + p10_z['hra_id'] + '/' + p10_z['fbp_tra_id_cat']
print('unique gg_cat:')
print(p10_z.gg_cat.unique())
p10_z['gg_cat_detail'] = p10_z['fbp_gg_id'] + '/' + p10_z['hra_id'] + '/' + p10_z['fbp_tra_id']
print('unique gg_cat_detail:')
print(p10_z.gg_cat_detail.unique())
# +
# select GG parcels and export
p10_gg = p10_z.loc[p10_z.fbp_gg_id == 'GG']
print('categories within GG: {}'.format(p10_gg.gg_cat.unique()))
print('detailed categories within GG: {}'.format(p10_gg.gg_cat_detail.unique()))
# -
print('export {} rows of p10_gg'.format(p10_gg.shape[0]))
display(p10_gg.dtypes)
# Write the GG-only parcels (with category labels) back out as a shapefile.
p10_gg.to_file(r'M:\Data\GIS layers\Blueprint Land Use Strategies\for_appeal_responses\p10_pba50_blueprint_gg_categories_tra_details.shp')
| policies/plu/PBA50_GG_shp_for_mapping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras import metrics, optimizers
from keras.models import Sequential
from keras.callbacks import TensorBoard
from keras.layers.core import Dense, Dropout, Activation
import numpy as np
import matplotlib.pyplot as plt
# XOR truth table: inputs and expected outputs.
x = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[0],[1],[1],[0]])
# 2 -> 16 (ReLU) -> 1 (sigmoid) feed-forward network.
model = Sequential()
model.add(Dense(16, input_dim=2))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# NOTE(review): `adadelta` is constructed but never used — only `sgd` is
# passed to compile().
adadelta = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
sgd = optimizers.SGD(lr=0.5)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
history = model.fit(x, y, batch_size=1, epochs=1000, shuffle=True)
print(history.history.keys())
# +
# NOTE(review): the history key is 'acc' in old standalone Keras but
# 'accuracy' in tf.keras — confirm against the installed Keras version.
plt.plot(history.history['acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
# NOTE(review): only the training curve is plotted; the 'Test' legend
# entry has no corresponding line.
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# -
# Evaluate on the same four XOR rows (reordered).
x_test = np.array([[0,1],[0,0],[1,0],[1,1]])
y_test = np.array([[1],[0],[1],[0]])
score = model.evaluate(x_test, y_test, verbose=0)
print(score)
realData = np.array([[1, 0], [0, 0], [1, 1], [1, 1], [0, 1]])
print(realData)
# NOTE(review): predict_proba was removed from newer Keras/tf.keras;
# predict() is the modern equivalent for this model.
for x in model.predict_proba(realData):
    print('{0: >#02.9f}'.format(float(x)))
| xor_neural_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import math
# +
# Class-conditional Gaussian parameters: class A ~ N(mean1, cov1),
# class B ~ N(mean2, cov2). Both covariances are diagonal.
mean1 = [1, 1]
cov1 = [[1, 0], [0, 10]]
mean2 = [2, 2]
cov2 = [[1, 0], [0, 20]]

cov1inv = np.linalg.inv(cov1)
cov2inv = np.linalg.inv(cov2)
print(cov1inv, '\n', cov2inv)

# Quadratic-term matrices W_i = -0.5 * Sigma_i^{-1} of the Gaussian
# discriminant g_i(x).
W1 = -0.5 * cov1inv
W2 = -0.5 * cov2inv

det_cov1 = np.linalg.det(cov1)
det_cov2 = np.linalg.det(cov2)
print(det_cov1, '\n', det_cov2)

# Natural log of the determinants, computed exactly instead of the
# original hard-coded approximations 2.3 (~ln 10) and 3.0 (~ln 20).
log_det_cov1 = np.log(det_cov1)
log_det_cov2 = np.log(det_cov2)
# +
# 50-point evaluation grids along each axis for the decision function.
X1 = np.linspace(0, 10, 50, endpoint=True)
X2 = np.linspace(0, 10, 50, endpoint=False)
#print(X1, '\t', X2)
print(len(X1), '\t', len(X2))
# g = -0.025*(x2**2) - x1 + 1.9, obtained from the mean and covariance
# above by substituting into the Gaussian discriminant g_i(x)
# (textbook reference garbled in the original notebook).
# Draw 50 samples from each class-conditional Gaussian for plotting.
sampleA = np.random.multivariate_normal(mean1,cov1,50)
sampleB = np.random.multivariate_normal(mean2,cov2,50)
def getg(x1,x2):
    """Evaluate the quadratic discriminant g(x1, x2) = -0.025*x2^2 - x1 + 1.9.

    The coefficients follow from the mean/covariance pair defined earlier
    in the notebook; g = 0 is the decision boundary.
    """
    return -0.025*(x2**2) - x1 + 1.9
# Evaluate the discriminant on the full 50x50 grid.
G = np.zeros((50,50))
for i in range(0,len(X1)):
    for j in range(0,len(X2)):
        G[i,j] = getg(X1[i],X2[j])
#print(G)
print(G.shape)
# combinedX = np.vstack((X1, X2))
# NOTE(review): this loop redraws the identical scatter/contour 50 times;
# a single set of plt calls would produce the same figure — looks
# unintentional, confirm before changing.
for i in range(0,50):
    plt.scatter(sampleA[:,0],sampleA[:,1],c = "m")
    plt.scatter(sampleB[:,0],sampleB[:,1],c = "c")
    plt.contour(X1,X2, G, 0)
# -
# Using different values of the mean and covariance, and the equation for g_i(x) from the textbook, we can build a better classifier.
| Homework8/Homework_8.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# <a id='geom-series'></a>
#
# <a id='index-0'></a>
# # Geometric Series for Elementary Economics
# ## Contents
#
# - [Geometric Series for Elementary Economics](#Geometric-Series-for-Elementary-Economics)
# - [Overview](#Overview)
# - [Key Formulas](#Key-Formulas)
# - [Example: The Money Multiplier in Fractional Reserve Banking](#Example:-The-Money-Multiplier-in-Fractional-Reserve-Banking)
# - [Example: The Keynesian Multiplier](#Example:-The-Keynesian-Multiplier)
# - [Example: Interest Rates and Present Values](#Example:-Interest-Rates-and-Present-Values)
# - [Back to the Keynesian Multiplier](#Back-to-the-Keynesian-Multiplier)
# ## Overview
#
# The lecture describes important ideas in economics that use the mathematics of geometric series.
#
# Among these are
#
# - the Keynesian **multiplier**
# - the money **multiplier** that prevails in fractional reserve banking
# systems
# - interest rates and present values of streams of payouts from assets
#
#
# (As we shall see below, the term **multiplier** comes down to meaning **sum of a convergent geometric series**)
#
# These and other applications prove the truth of the wise crack that
#
# > “in economics, a little knowledge of geometric series goes a long way “
#
#
# Below we’ll use the following imports:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (11, 5) #set default figure size
import numpy as np
import sympy as sym
from sympy import init_printing, latex
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# -
# ## Key Formulas
#
# To start, let $ c $ be a real number that lies strictly between
# $ -1 $ and $ 1 $.
#
# - We often write this as $ c \in (-1,1) $.
# - Here $ (-1,1) $ denotes the collection of all real numbers that
# are strictly less than $ 1 $ and strictly greater than $ -1 $.
# - The symbol $ \in $ means *in* or *belongs to the set after the symbol*.
#
#
# We want to evaluate geometric series of two types – infinite and finite.
# ### Infinite Geometric Series
#
# The first type of geometric series that interests us is the infinite series
#
# $$
# 1 + c + c^2 + c^3 + \cdots
# $$
#
# Where $ \cdots $ means that the series continues without end.
#
# The key formula is
#
#
# <a id='equation-infinite'></a>
# $$
# 1 + c + c^2 + c^3 + \cdots = \frac{1}{1 -c } \tag{1}
# $$
#
# To prove key formula [(1.1)](#equation-infinite), multiply both sides by $ (1-c) $ and verify
# that if $ c \in (-1,1) $, then the outcome is the
# equation $ 1 = 1 $.
# ### Finite Geometric Series
#
# The second series that interests us is the finite geometric series
#
# $$
# 1 + c + c^2 + c^3 + \cdots + c^T
# $$
#
# where $ T $ is a positive integer.
#
# The key formula here is
#
# $$
# 1 + c + c^2 + c^3 + \cdots + c^T = \frac{1 - c^{T+1}}{1-c}
# $$
#
# **Remark:** The above formula works for any value of the scalar
# $ c $. We don’t have to restrict $ c $ to be in the
# set $ (-1,1) $.
#
# We now move on to describe some famous economic applications of
# geometric series.
# ## Example: The Money Multiplier in Fractional Reserve Banking
#
# In a fractional reserve banking system, banks hold only a fraction
# $ r \in (0,1) $ of cash behind each **deposit receipt** that they
# issue
#
# - In recent times
# - cash consists of pieces of paper issued by the government and
# called dollars or pounds or $ \ldots $
# - a *deposit* is a balance in a checking or savings account that
# entitles the owner to ask the bank for immediate payment in cash
# - When the UK and France and the US were on either a gold or silver
# standard (before 1914, for example)
# - cash was a gold or silver coin
# - a *deposit receipt* was a *bank note* that the bank promised to
# convert into gold or silver on demand; (sometimes it was also a
# checking or savings account balance)
#
#
# Economists and financiers often define the **supply of money** as an
# economy-wide sum of **cash** plus **deposits**.
#
# In a **fractional reserve banking system** (one in which the reserve
# ratio $ r $ satisfies $ 0 < r < 1 $), **banks create money** by issuing deposits *backed* by fractional reserves plus loans that they make to their customers.
#
# A geometric series is a key tool for understanding how banks create
# money (i.e., deposits) in a fractional reserve system.
#
# The geometric series formula [(1.1)](#equation-infinite) is at the heart of the classic model of the money creation process – one that leads us to the celebrated
# **money multiplier**.
# ### A Simple Model
#
# There is a set of banks named $ i = 0, 1, 2, \ldots $.
#
# Bank $ i $’s loans $ L_i $, deposits $ D_i $, and
# reserves $ R_i $ must satisfy the balance sheet equation (because
# **balance sheets balance**):
#
#
# <a id='equation-balance'></a>
# $$
# L_i + R_i = D_i \tag{2}
# $$
#
# The left side of the above equation is the sum of the bank’s **assets**,
# namely, the loans $ L_i $ it has outstanding plus its reserves of
# cash $ R_i $.
#
# The right side records bank $ i $’s liabilities,
# namely, the deposits $ D_i $ held by its depositors; these are
# IOU’s from the bank to its depositors in the form of either checking
# accounts or savings accounts (or before 1914, bank notes issued by a
# bank stating promises to redeem note for gold or silver on demand).
#
# Each bank $ i $ sets its reserves to satisfy the equation
#
#
# <a id='equation-reserves'></a>
# $$
# R_i = r D_i \tag{3}
# $$
#
# where $ r \in (0,1) $ is its **reserve-deposit ratio** or **reserve
# ratio** for short
#
# - the reserve ratio is either set by a government or chosen by banks
# for precautionary reasons
#
#
# Next we add a theory stating that bank $ i+1 $’s deposits depend
# entirely on loans made by bank $ i $, namely
#
#
# <a id='equation-deposits'></a>
# $$
# D_{i+1} = L_i \tag{4}
# $$
#
# Thus, we can think of the banks as being arranged along a line with
# loans from bank $ i $ being immediately deposited in $ i+1 $
#
# - in this way, the debtors to bank $ i $ become creditors of
# bank $ i+1 $
#
#
# Finally, we add an *initial condition* about an exogenous level of bank
# $ 0 $’s deposits
#
# $$
# D_0 \ \text{ is given exogenously}
# $$
#
# We can think of $ D_0 $ as being the amount of cash that a first
# depositor put into the first bank in the system, bank number $ i=0 $.
#
# Now we do a little algebra.
#
# Combining equations [(1.2)](#equation-balance) and [(1.3)](#equation-reserves) tells us that
#
#
# <a id='equation-fraction'></a>
# $$
# L_i = (1-r) D_i \tag{5}
# $$
#
# This states that bank $ i $ loans a fraction $ (1-r) $ of its
# deposits and keeps a fraction $ r $ as cash reserves.
#
# Combining equation [(1.5)](#equation-fraction) with equation [(1.4)](#equation-deposits) tells us that
#
# $$
# D_{i+1} = (1-r) D_i \ \text{ for } i \geq 0
# $$
#
# which implies that
#
#
# <a id='equation-geomseries'></a>
# $$
# D_i = (1 - r)^i D_0 \ \text{ for } i \geq 0 \tag{6}
# $$
#
# Equation [(1.6)](#equation-geomseries) expresses $ D_i $ as the $ i $ th term in the
# product of $ D_0 $ and the geometric series
#
# $$
# 1, (1-r), (1-r)^2, \cdots
# $$
#
# Therefore, the sum of all deposits in our banking system
# $ i=0, 1, 2, \ldots $ is
#
#
# <a id='equation-sumdeposits'></a>
# $$
# \sum_{i=0}^\infty (1-r)^i D_0 = \frac{D_0}{1 - (1-r)} = \frac{D_0}{r} \tag{7}
# $$
# ### Money Multiplier
#
# The **money multiplier** is a number that tells the multiplicative
# factor by which an exogenous injection of cash into bank $ 0 $ leads
# to an increase in the total deposits in the banking system.
#
# Equation [(1.7)](#equation-sumdeposits) asserts that the **money multiplier** is
# $ \frac{1}{r} $
#
# - An initial deposit of cash of $ D_0 $ in bank $ 0 $ leads
# the banking system to create total deposits of $ \frac{D_0}{r} $.
# - The initial deposit $ D_0 $ is held as reserves, distributed
# throughout the banking system according to $ D_0 = \sum_{i=0}^\infty R_i $.
# ## Example: The Keynesian Multiplier
#
# The famous economist John Maynard Keynes and his followers created a
# simple model intended to determine national income $ y $ in
# circumstances in which
#
# - there are substantial unemployed resources, in particular **excess
# supply** of labor and capital
# - prices and interest rates fail to adjust to make aggregate **supply
# equal demand** (e.g., prices and interest rates are frozen)
# - national income is entirely determined by aggregate demand
# ### Static Version
#
# An elementary Keynesian model of national income determination consists
# of three equations that describe aggregate demand for $ y $ and its
# components.
#
# The first equation is a national income identity asserting that
# consumption $ c $ plus investment $ i $ equals national income
# $ y $:
#
# $$
# c+ i = y
# $$
#
# The second equation is a Keynesian consumption function asserting that
# people consume a fraction $ b \in (0,1) $ of their income:
#
# $$
# c = b y
# $$
#
# The fraction $ b \in (0,1) $ is called the **marginal propensity to
# consume**.
#
# The fraction $ 1-b \in (0,1) $ is called the **marginal propensity
# to save**.
#
# The third equation simply states that investment is exogenous at level
# $ i $.
#
# - *exogenous* means *determined outside this model*.
#
#
# Substituting the second equation into the first gives $ (1-b) y = i $.
#
# Solving this equation for $ y $ gives
#
# $$
# y = \frac{1}{1-b} i
# $$
#
# The quantity $ \frac{1}{1-b} $ is called the **investment
# multiplier** or simply the **multiplier**.
#
# Applying the formula for the sum of an infinite geometric series, we can
# write the above equation as
#
# $$
# y = i \sum_{t=0}^\infty b^t
# $$
#
# where $ t $ is a nonnegative integer.
#
# So we arrive at the following equivalent expressions for the multiplier:
#
# $$
# \frac{1}{1-b} = \sum_{t=0}^\infty b^t
# $$
#
# The expression $ \sum_{t=0}^\infty b^t $ motivates an interpretation
# of the multiplier as the outcome of a dynamic process that we describe
# next.
# ### Dynamic Version
#
# We arrive at a dynamic version by interpreting the nonnegative integer
# $ t $ as indexing time and changing our specification of the
# consumption function to take time into account
#
# - we add a one-period lag in how income affects consumption
#
#
# We let $ c_t $ be consumption at time $ t $ and $ i_t $ be
# investment at time $ t $.
#
# We modify our consumption function to assume the form
#
# $$
# c_t = b y_{t-1}
# $$
#
# so that $ b $ is the marginal propensity to consume (now) out of
# last period’s income.
#
# We begin with an initial condition stating that
#
# $$
# y_{-1} = 0
# $$
#
# We also assume that
#
# $$
# i_t = i \ \ \textrm {for all } t \geq 0
# $$
#
# so that investment is constant over time.
#
# It follows that
#
# $$
# y_0 = i + c_0 = i + b y_{-1} = i
# $$
#
# and
#
# $$
# y_1 = c_1 + i = b y_0 + i = (1 + b) i
# $$
#
# and
#
# $$
# y_2 = c_2 + i = b y_1 + i = (1 + b + b^2) i
# $$
#
# and more generally
#
# $$
# y_t = b y_{t-1} + i = (1+ b + b^2 + \cdots + b^t) i
# $$
#
# or
#
# $$
# y_t = \frac{1-b^{t+1}}{1 -b } i
# $$
#
# Evidently, as $ t \rightarrow + \infty $,
#
# $$
# y_t \rightarrow \frac{1}{1-b} i
# $$
#
# **Remark 1:** The above formula is often applied to assert that an
# exogenous increase in investment of $ \Delta i $ at time $ 0 $
# ignites a dynamic process of increases in national income by successive amounts
#
# $$
# \Delta i, (1 + b )\Delta i, (1+b + b^2) \Delta i , \cdots
# $$
#
# at times $ 0, 1, 2, \ldots $.
#
# **Remark 2** Let $ g_t $ be an exogenous sequence of government
# expenditures.
#
# If we generalize the model so that the national income identity
# becomes
#
# $$
# c_t + i_t + g_t = y_t
# $$
#
# then a version of the preceding argument shows that the **government
# expenditures multiplier** is also $ \frac{1}{1-b} $, so that a
# permanent increase in government expenditures ultimately leads to an
# increase in national income equal to the multiplier times the increase
# in government expenditures.
# ## Example: Interest Rates and Present Values
#
# We can apply our formula for geometric series to study how interest
# rates affect values of streams of dollar payments that extend over time.
#
# We work in discrete time and assume that $ t = 0, 1, 2, \ldots $
# indexes time.
#
# We let $ r \in (0,1) $ be a one-period **net nominal interest rate**
#
# - if the nominal interest rate is $ 5 $ percent,
# then $ r= .05 $
#
#
# A one-period **gross nominal interest rate** $ R $ is defined as
#
# $$
# R = 1 + r \in (1, 2)
# $$
#
# - if $ r=.05 $, then $ R = 1.05 $
#
#
# **Remark:** The gross nominal interest rate $ R $ is an **exchange
# rate** or **relative price** of dollars at between times $ t $ and
# $ t+1 $. The units of $ R $ are dollars at time $ t+1 $ per
# dollar at time $ t $.
#
# When people borrow and lend, they trade dollars now for dollars later or
# dollars later for dollars now.
#
# The price at which these exchanges occur is the gross nominal interest
# rate.
#
# - If I sell $ x $ dollars to you today, you pay me $ R x $
# dollars tomorrow.
# - This means that you borrowed $ x $ dollars from me at a gross
# interest rate $ R $ and a net interest rate $ r $.
#
#
# We assume that the net nominal interest rate $ r $ is fixed over
# time, so that $ R $ is the gross nominal interest rate at times
# $ t=0, 1, 2, \ldots $.
#
# Two important geometric sequences are
#
#
# <a id='equation-geom1'></a>
# $$
# 1, R, R^2, \cdots \tag{8}
# $$
#
# and
#
#
# <a id='equation-geom2'></a>
# $$
# 1, R^{-1}, R^{-2}, \cdots \tag{9}
# $$
#
# Sequence [(1.8)](#equation-geom1) tells us how dollar values of an investment **accumulate**
# through time.
#
# Sequence [(1.9)](#equation-geom2) tells us how to **discount** future dollars to get their
# values in terms of today’s dollars.
# ### Accumulation
#
# Geometric sequence [(1.8)](#equation-geom1) tells us how one dollar invested and re-invested
# in a project with gross one period nominal rate of return accumulates
#
# - here we assume that net interest payments are reinvested in the
# project
# - thus, $ 1 $ dollar invested at time $ 0 $ pays interest
# $ r $ dollars after one period, so we have $ r+1 = R $
#   dollars at time $ 1 $
# - at time $ 1 $ we reinvest $ 1+r =R $ dollars and receive interest
# of $ r R $ dollars at time $ 2 $ plus the *principal*
# $ R $ dollars, so we receive $ r R + R = (1+r)R = R^2 $
# dollars at the end of period $ 2 $
# - and so on
#
#
# Evidently, if we invest $ x $ dollars at time $ 0 $ and
# reinvest the proceeds, then the sequence
#
# $$
# x , xR , x R^2, \cdots
# $$
#
# tells how our account accumulates at dates $ t=0, 1, 2, \ldots $.
# ### Discounting
#
# Geometric sequence [(1.9)](#equation-geom2) tells us how much future dollars are worth in terms of today’s dollars.
#
# Remember that the units of $ R $ are dollars at $ t+1 $ per
# dollar at $ t $.
#
# It follows that
#
# - the units of $ R^{-1} $ are dollars at $ t $ per dollar at $ t+1 $
# - the units of $ R^{-2} $ are dollars at $ t $ per dollar at $ t+2 $
# - and so on; the units of $ R^{-j} $ are dollars at $ t $ per
# dollar at $ t+j $
#
#
# So if someone has a claim on $ x $ dollars at time $ t+j $, it
# is worth $ x R^{-j} $ dollars at time $ t $ (e.g., today).
# ### Application to Asset Pricing
#
# A **lease** requires a payments stream of $ x_t $ dollars at
# times $ t = 0, 1, 2, \ldots $ where
#
# $$
# x_t = G^t x_0
# $$
#
# where $ G = (1+g) $ and $ g \in (0,1) $.
#
# Thus, lease payments increase at $ g $ percent per period.
#
# For a reason soon to be revealed, we assume that $ G < R $.
#
# The **present value** of the lease is
#
# $$
# \begin{aligned} p_0 & = x_0 + x_1/R + x_2/(R^2) + \cdots \\
# & = x_0 (1 + G R^{-1} + G^2 R^{-2} + \cdots ) \\
# & = x_0 \frac{1}{1 - G R^{-1}} \end{aligned}
# $$
#
# where the last line uses the formula for an infinite geometric series.
#
# Recall that $ R = 1+r $ and $ G = 1+g $ and that $ R > G $
# and $ r > g $ and that $ r $ and $ g $ are typically small
# numbers, e.g., .05 or .03.
#
# Use the Taylor series of $ \frac{1}{1+r} $ about $ r=0 $,
# namely,
#
# $$
# \frac{1}{1+r} = 1 - r + r^2 - r^3 + \cdots
# $$
#
# and the fact that $ r $ is small to approximate
# $ \frac{1}{1+r} \approx 1 - r $.
#
# Use this approximation to write $ p_0 $ as
#
# $$
# \begin{aligned}
# p_0 &= x_0 \frac{1}{1 - G R^{-1}} \\
# &= x_0 \frac{1}{1 - (1+g) (1-r) } \\
# &= x_0 \frac{1}{1 - (1+g - r - rg)} \\
# & \approx x_0 \frac{1}{r -g }
# \end{aligned}
# $$
#
# where the last step uses the approximation $ r g \approx 0 $.
#
# The approximation
#
# $$
# p_0 = \frac{x_0 }{r -g }
# $$
#
# is known as the **Gordon formula** for the present value or current
# price of an infinite payment stream $ x_0 G^t $ when the nominal
# one-period interest rate is $ r $ and when $ r > g $.
#
# We can also extend the asset pricing formula so that it applies to finite leases.
#
# Let the payment stream on the lease now be $ x_t $ for $ t= 1,2, \dots,T $, where again
#
# $$
# x_t = G^t x_0
# $$
#
# The present value of this lease is:
#
# $$
# \begin{aligned} \begin{split}p_0&=x_0 + x_1/R + \dots +x_T/R^T \\ &= x_0(1+GR^{-1}+\dots +G^{T}R^{-T}) \\ &= \frac{x_0(1-G^{T+1}R^{-(T+1)})}{1-GR^{-1}} \end{split}\end{aligned}
# $$
#
# Applying the Taylor series to $ R^{-(T+1)} $ about $ r=0 $ we get:
#
# $$
# \frac{1}{(1+r)^{T+1}}= 1-r(T+1)+\frac{1}{2}r^2(T+1)(T+2)+\dots \approx 1-r(T+1)
# $$
#
# Similarly, applying the Taylor series to $ G^{T+1} $ about $ g=0 $:
#
# $$
# (1+g)^{T+1} = 1+(T+1)g(1+g)^T+(T+1)Tg^2(1+g)^{T-1}+\dots \approx 1+ (T+1)g
# $$
#
# Thus, we get the following approximation:
#
# $$
# p_0 =\frac{x_0(1-(1+(T+1)g)(1-r(T+1)))}{1-(1-r)(1+g) }
# $$
#
# Expanding:
#
# $$
# \begin{aligned} p_0 &=\frac{x_0(1-1+(T+1)^2 rg -r(T+1)+g(T+1))}{1-1+r-g+rg} \\&=\frac{x_0(T+1)((T+1)rg+r-g)}{r-g+rg} \\ &\approx \frac{x_0(T+1)(r-g)}{r-g}+\frac{x_0rg(T+1)}{r-g}\\ &= x_0(T+1) + \frac{x_0rg(T+1)}{r-g} \end{aligned}
# $$
#
# We could have also approximated by removing the second term
# $ rgx_0(T+1) $ when $ T $ is relatively small compared to
# $ 1/(rg) $ to get $ x_0(T+1) $ as in the finite stream
# approximation.
#
# We will plot the true finite stream present-value and the two
# approximations, under different values of $ T $, and $ g $ and $ r $ in Python.
#
# First we plot the true finite stream present-value after computing it
# below
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# True present value of a finite lease
def finite_lease_pv_true(T, g, r, x_0):
    """Exact present value of a lease paying x_0 * (1+g)**t at t = 0..T,
    discounted at net rate r (closed-form finite geometric sum)."""
    gross_growth = 1 + g
    gross_discount = 1 + r
    numerator = x_0 * (1 - gross_growth ** (T + 1) * gross_discount ** (-T - 1))
    denominator = 1 - gross_growth * gross_discount ** (-1)
    return numerator / denominator
# First approximation for our finite lease
def finite_lease_pv_approx_1(T, g, r, x_0):
    """First-order Taylor approximation of the finite-lease present value
    (valid for small r and g with r != g)."""
    undiscounted = x_0 * (T + 1)
    correction = x_0 * r * g * (T + 1) / (r - g)
    return undiscounted + correction
# Second approximation for our finite lease
def finite_lease_pv_approx_2(T, g, r, x_0):
    """Zeroth-order approximation: T + 1 undiscounted payments of x_0."""
    n_payments = T + 1
    return x_0 * n_payments
# Infinite lease
def infinite_lease(g, r, x_0):
    """Present value of a perpetual lease; finite only when (1+g)/(1+r) < 1."""
    ratio_term = (1 + g) * (1 + r) ** (-1)
    return x_0 / (1 - ratio_term)
# -
# Now that we have defined our functions, we can plot some outcomes.
#
# First we study the quality of our approximations
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def plot_function(axes, x_vals, func, args):
    """Evaluate func(*args) and plot it against x_vals on axes,
    using the function's own name as the legend label."""
    y_vals = func(*args)
    axes.plot(x_vals, y_vals, label=func.__name__)
# Grid of horizons 0..T_max at which each present-value formula is evaluated.
T_max = 50
T = np.arange(0, T_max+1)
g = 0.02   # growth rate of lease payments
r = 0.03   # interest rate (r > g so the approximations are well-defined)
x_0 = 1    # initial lease payment
our_args = (T, g, r, x_0)
funcs = [finite_lease_pv_true,
         finite_lease_pv_approx_1,
         finite_lease_pv_approx_2]
## the three functions we want to compare
fig, ax = plt.subplots()
ax.set_title('Finite Lease Present Value $T$ Periods Ahead')
# plot_function labels each curve with the function's __name__.
for f in funcs:
    plot_function(ax, T, f, our_args)
ax.legend()
ax.set_xlabel('$T$ Periods Ahead')
ax.set_ylabel('Present Value, $p_0$')
plt.show()
# -
# Evidently our approximations perform well for small values of $ T $.
#
# However, holding $ g $ and $ r $ fixed, our approximations deteriorate as $ T $ increases.
#
# Next we compare the infinite and finite duration lease present values
# over different lease lengths $ T $.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Convergence of infinite and finite
# As T grows, the finite-lease PV approaches the perpetual-lease PV.
T_max = 1000
T = np.arange(0, T_max+1)
fig, ax = plt.subplots()
ax.set_title('Infinite and Finite Lease Present Value $T$ Periods Ahead')
# g, r, x_0 carry over from the previous cell (g=0.02, r=0.03, x_0=1).
f_1 = finite_lease_pv_true(T, g, r, x_0)
# Constant series at the infinite-lease value, replicated across all horizons.
f_2 = np.ones(T_max+1)*infinite_lease(g, r, x_0)
ax.plot(T, f_1, label='T-period lease PV')
ax.plot(T, f_2, '--', label='Infinite lease PV')
ax.set_xlabel('$T$ Periods Ahead')
ax.set_ylabel('Present Value, $p_0$')
ax.legend()
plt.show()
# -
# The graph above shows how as duration $ T \rightarrow +\infty $,
# the value of a lease of duration $ T $ approaches the value of a
# perpetual lease.
#
# Now we consider two different views of what happens as $ r $ and
# $ g $ covary
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# First view
# Changing r and g
# Plot the finite-lease PV for four (r, g) pairs illustrating r >> g, r > g,
# r ~= g and r < g. Note: '$\gg$' previously used a non-raw string, making
# "\g" an invalid escape sequence (SyntaxWarning today, SyntaxError in future
# Python); all LaTeX labels are now raw strings.
fig, ax = plt.subplots()
ax.set_title('Value of lease of length $T$')
ax.set_ylabel('Present Value, $p_0$')
ax.set_xlabel('$T$ periods ahead')
T_max = 10
T = np.arange(0, T_max+1)
rs, gs = (0.9, 0.5, 0.4001, 0.4), (0.4, 0.4, 0.4, 0.5)
comparisons = (r'$\gg$', r'$>$', r'$\approx$', r'$<$')
for r, g, comp in zip(rs, gs, comparisons):
    ax.plot(finite_lease_pv_true(T, g, r, x_0), label=f'r(={r}) {comp} g(={g})')
ax.legend()
plt.show()
# -
# This graph gives a big hint for why the condition $ r > g $ is
# necessary if a lease of length $ T = +\infty $ is to have finite
# value.
#
# For fans of 3-d graphs the same point comes through in the following
# graph.
#
# If you aren’t enamored of 3-d graphs, feel free to skip the next
# visualization!
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Second view
# Surface of the 3-period lease PV over a grid of (r, g) values.
fig = plt.figure()
T = 3
# fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')
r = np.arange(0.01, 0.99, 0.005)
g = np.arange(0.011, 0.991, 0.005)
rr, gg = np.meshgrid(r, g)
z = finite_lease_pv_true(T, gg, rr, x_0)
# Removes points where undefined (the closed form divides by zero at r == g).
same = (rr == gg)
z[same] = np.nan
surf = ax.plot_surface(rr, gg, z, cmap=cm.coolwarm,
                       antialiased=True, clim=(0, 15))
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('$r$')
ax.set_ylabel('$g$')
ax.set_zlabel('Present Value, $p_0$')
ax.view_init(20, 10)
ax.set_title('Three Period Lease PV with Varying $g$ and $r$')
plt.show()
# -
# We can use a little calculus to study how the present value $ p_0 $
# of a lease varies with $ r $ and $ g $.
#
# We will use a library called [SymPy](https://www.sympy.org/).
#
# SymPy enables us to do symbolic math calculations including
# computing derivatives of algebraic equations.
#
# We will illustrate how it works by creating a symbolic expression that
# represents our present value formula for an infinite lease.
#
# After that, we’ll use SymPy to compute derivatives
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Creates algebraic symbols that can be used in an algebraic expression
g, r, x0 = sym.symbols('g, r, x0')
# Symbolic perpetual-lease present value p0 = x0 / (1 - (1+g)/(1+r)).
G = (1 + g)
R = (1 + r)
p0 = x0 / (1 - G * R**(-1))
init_printing(use_latex='mathjax')
print('Our formula is:')
p0
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Partial derivative of p0 with respect to the growth rate g.
print('dp0 / dg is:')
dp_dg = sym.diff(p0, g)
dp_dg
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Partial derivative of p0 with respect to the interest rate r.
print('dp0 / dr is:')
dp_dr = sym.diff(p0, r)
dp_dr
# -
# We can see that for $ \frac{\partial p_0}{\partial r}<0 $ as long as
# $ r>g $, $ r>0 $ and $ g>0 $ and $ x_0 $ is positive,
# so $ \frac{\partial p_0}{\partial r} $ will always be negative.
#
# Similarly, $ \frac{\partial p_0}{\partial g}>0 $ as long as $ r>g $, $ r>0 $ and $ g>0 $ and $ x_0 $ is positive, so $ \frac{\partial p_0}{\partial g} $
# will always be positive.
# ## Back to the Keynesian Multiplier
#
# We will now go back to the case of the Keynesian multiplier and plot the
# time path of $ y_t $, given that consumption is a constant fraction
# of national income, and investment is fixed.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Function that calculates a path of y
def calculate_y(i, b, g, T, y_init):
    """Iterate y_t = b * y_{t-1} + i + g for t = 0..T, seeded with y_{-1} = y_init.

    Returns the length-(T+1) array of national-income values.
    """
    path = np.zeros(T+1)
    path[0] = i + b * y_init + g
    for t in range(1, T+1):
        path[t] = b * path[t-1] + i + g
    return path
# Initial values
i_0 = 0.3    # exogenous investment
g_0 = 0.3    # exogenous government spending
# 2/3 of income goes towards consumption
b = 2/3
y_init = 0   # y_{-1}: income before the process starts
T = 100
fig, ax = plt.subplots()
ax.set_title('Path of Aggregate Output Over Time')
ax.set_xlabel('$t$')
ax.set_ylabel('$y_t$')
ax.plot(np.arange(0, T+1), calculate_y(i_0, b, g_0, T, y_init))
# Output predicted by geometric series
# i.e. the steady state (i_0 + g_0) / (1 - b), drawn as a dashed reference line.
ax.hlines(i_0 / (1 - b) + g_0 / (1 - b), xmin=-1, xmax=101, linestyles='--')
plt.show()
# -
# In this model, income grows over time, until it gradually converges to
# the infinite geometric series sum of income.
#
# We now examine what will
# happen if we vary the so-called **marginal propensity to consume**,
# i.e., the fraction of income that is consumed
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Re-plot the income path for several marginal propensities to consume.
# NOTE(review): the loop variable b leaks — after this cell b == 0.9,
# which later cells pick up; confirm that is intended.
bs = (1/3, 2/3, 5/6, 0.9)
fig,ax = plt.subplots()
ax.set_title('Changing Consumption as a Fraction of Income')
ax.set_ylabel('$y_t$')
ax.set_xlabel('$t$')
x = np.arange(0, T+1)
for b in bs:
    y = calculate_y(i_0, b, g_0, T, y_init)
    ax.plot(x, y, label=r'$b=$'+f"{b:.2f}")
ax.legend()
plt.show()
# -
# Increasing the marginal propensity to consume $ b $ increases the
# path of output over time.
#
# Now we will compare the effects on output of increases in investment and government spending.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Compare output paths from raising investment (top panel) versus raising
# government spending (bottom panel) by the same amount.
# NOTE(review): b here is whatever value the previous cell's loop left behind
# (0.9), not the baseline 2/3 — confirm this is intended.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 10))
fig.subplots_adjust(hspace=0.3)
x = np.arange(0, T+1)
values = [0.3, 0.4]
# Top panel: vary investment i, holding government spending at g_0.
for i in values:
    y = calculate_y(i, b, g_0, T, y_init)
    ax1.plot(x, y, label=f"i={i}")
# Bottom panel: vary government spending g, holding investment at i_0.
for g in values:
    y = calculate_y(i_0, b, g, T, y_init)
    ax2.plot(x, y, label=f"g={g}")
axes = ax1, ax2
param_labels = "Investment", "Government Spending"
for ax, param in zip(axes, param_labels):
    ax.set_title(f'An Increase in {param} on Output')
    ax.legend(loc ="lower right")
    ax.set_ylabel('$y_t$')
    ax.set_xlabel('$t$')
plt.show()
# -
# Notice here, whether government spending increases from 0.3 to 0.4 or
# investment increases from 0.3 to 0.4, the shifts in the graphs are
# identical.
| quantitative_economics_with_python/1_geometric_series_for_elementary_economics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="4r0f__a1wqJM" colab_type="text"
# # **Load Data**
# + id="RaZgXpyqYo-4" colab_type="code" outputId="a74e7983-b9b4-4c93-81f0-1110b5281566" executionInfo={"status": "ok", "timestamp": 1580598229285, "user_tz": 420, "elapsed": 22846, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 137}
#mount google drive home directory
# Colab-only: prompts for interactive authorization and exposes Drive at /gdrive.
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
# + id="dARtnRqDZLUI" colab_type="code" colab={}
#data analysis and visualization
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
from pandas.plotting import scatter_matrix
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
#Common Model helpers
from sklearn import preprocessing
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import metrics
import pickle
from collections import Counter
from imblearn.over_sampling import SMOTE
#Common Model Algorithms
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from xgboost import XGBClassifier
from sklearn.ensemble import AdaBoostClassifier
#Configure Visualization Defaults
pd.set_option('display.max_columns',None)#displaying long list of columns
pd.set_option('display.max_rows', None)#displaying long list of rows
pd.set_option('display.width', 1000)#width of window
#saving models
import pickle
# + id="r72CXl4RdMP3" colab_type="code" outputId="8e31a1aa-6535-45bc-da5d-60dac84567fa" executionInfo={"status": "ok", "timestamp": 1580598238101, "user_tz": 420, "elapsed": 4833, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# Load the training set from Drive. read_csv constructs the DataFrame
# directly, so the previous `data = pd.DataFrame()` initialisation was
# immediately overwritten and has been removed.
data = pd.read_csv("/gdrive/My Drive/CIS_508/Colab Notebooks/Projects/train.csv", index_col=None)
data.shape
# + id="uzdkXu8xREQI" colab_type="code" outputId="9438eacb-0290-40fb-c3a2-0a3909ee845e" executionInfo={"status": "ok", "timestamp": 1580598241871, "user_tz": 420, "elapsed": 3758, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# Load the test set from Drive. As with the training set, the redundant
# empty-DataFrame initialisation before read_csv has been removed.
test_data = pd.read_csv("/gdrive/My Drive/CIS_508/Colab Notebooks/Projects/test.csv")
test_data.shape
# + id="2SrUKtb7RUat" colab_type="code" outputId="f59c0230-0ae2-4f0e-ce90-d27adf9a90ed" executionInfo={"status": "ok", "timestamp": 1580598242178, "user_tz": 420, "elapsed": 4052, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# Stack train (without its last column, the label) on top of test so that the
# same preprocessing is applied to both; keys=[0,1] tag the origin of each row.
combined_data = pd.concat([data.iloc[:, :-1], test_data], keys=[0,1]) #drop the y_train
combined_data.shape
# + [markdown] id="TokpD5jowwYs" colab_type="text"
# # **Analysis of Data**
# + id="-NM3WHBtw1VD" colab_type="code" outputId="90c28040-1fc1-4689-ef5f-a4ca65d8a318" executionInfo={"status": "ok", "timestamp": 1580598257720, "user_tz": 420, "elapsed": 916, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 100}
# Structural overview: column count, dtypes and memory footprint.
data.info()
# + id="eAat7ULIzhjC" colab_type="code" outputId="8fa8ce07-6fb5-4c17-bc5d-5325db60748a" executionInfo={"status": "ok", "timestamp": 1580598259901, "user_tz": 420, "elapsed": 1540, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 216}
# Peek at the first five rows.
data.head()
# + id="bpZvxlzf0W-3" colab_type="code" outputId="6d1524b6-4cb2-492f-c35f-0e73fd6a79ee" executionInfo={"status": "ok", "timestamp": 1580598260761, "user_tz": 420, "elapsed": 585, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Per-column dtypes (long output — the dataset is wide).
data.dtypes
# + id="qM9MLPyG0eQl" colab_type="code" outputId="c1af473f-1319-4cb3-f001-6559d69f1eb6" executionInfo={"status": "ok", "timestamp": 1580598263383, "user_tz": 420, "elapsed": 503, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# (rows, columns) of the training set.
data.shape
# + id="LL5scHQNw_21" colab_type="code" outputId="5228407e-1b27-428a-fb3c-a248fada21ab" executionInfo={"status": "ok", "timestamp": 1580598267388, "user_tz": 420, "elapsed": 2495, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 322}
# Summary statistics for every numeric column.
data.describe()
# + [markdown] id="FagLUaDt2wVl" colab_type="text"
# # **Exploring Data**
# + id="Zo3X0PVM2zEg" colab_type="code" outputId="88c95df7-dabc-4df1-98be-73462590fec4" executionInfo={"status": "ok", "timestamp": 1580598308899, "user_tz": 420, "elapsed": 883, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 332}
# Distribution of the TARGET label — see the "Working with Imbalanced Data"
# section below for how the imbalance is handled.
plt.hist(data['TARGET'])
# + id="S1ppTLuO3GKq" colab_type="code" outputId="7c4ce791-4d08-4036-e04e-a4e1d65c20ff" executionInfo={"status": "ok", "timestamp": 1580598326436, "user_tz": 420, "elapsed": 1073, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 345}
# Normal probability plot of TARGET against theoretical quantiles.
stats.probplot(data['TARGET'], plot = plt)
# + id="ERewv9Ar4bfB" colab_type="code" colab={}
#scatterplot - total
#sns.set()
#sns.pairplot(data, size = 2.5)
#plt.show()
# + id="ED05F7ISG4_T" colab_type="code" colab={}
#correlation matrix - without numbers
corrmat = data.corr()
# Heatmap rendering left disabled — with this many columns it is slow and unreadable.
#f, ax = plt.subplots(figsize=(30, 30))
#sns.heatmap(corrmat, vmax=.8, square=True);
# + id="2jy5cT0nQNOG" colab_type="code" outputId="2285bfab-7770-44e5-d5a1-abe0f8e91f94" executionInfo={"status": "ok", "timestamp": 1580598353751, "user_tz": 420, "elapsed": 890, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
#correlation between independent variables and target
# Absolute correlation of every column with TARGET (includes TARGET itself, corr = 1).
cor_target = abs(corrmat["TARGET"])
cor_target.shape
# + [markdown] id="CxDeQaP-RZvs" colab_type="text"
# # **Pre-processing**
# + id="ElzChFt_HCku" colab_type="code" colab={}
def remove_constant_columns(data):
    """Return *data* restricted to columns whose std is > 0.1.

    Near-constant columns (std <= 0.1) carry almost no signal for the
    classifier and are dropped.

    Bug fix: the original iterated over ``columns`` while calling
    ``columns.remove(col)`` on the same list, which shifts indices and
    silently skips the column immediately following every removed one,
    leaving some constant columns in place.
    """
    # `not (std <= 0.1)` (rather than `std > 0.1`) preserves the original
    # behavior for NaN std (e.g. single-row frames): such columns are kept.
    keep = [col for col in data.columns if not (data[col].std() <= 0.1)]
    return data[keep]
# + id="StBRnI5pM4Tq" colab_type="code" outputId="3cefb3a3-3628-4090-f221-3dfdd652c874" executionInfo={"status": "ok", "timestamp": 1580598354300, "user_tz": 420, "elapsed": 1427, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# drop near-constant features from the combined train+test frame
combined_data = remove_constant_columns(combined_data)
combined_data.shape
# + id="AJOsow8DRUt5" colab_type="code" colab={}
if combined_data.isnull().values.any(): #if there are any missing values
    total = combined_data.isnull().sum()  # per-column null counts, only computed when needed
    total.shape
# + id="HEM5H6cqSgPB" colab_type="code" outputId="607ba16a-d509-4d28-a7b3-e66a60a7377f" executionInfo={"status": "ok", "timestamp": 1580598356594, "user_tz": 420, "elapsed": 3710, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# full missing-value / dtype report for the combined frame
print('Data columns with null values: \n', combined_data.isnull().sum())
print("-"*10)
print (combined_data.info())
print("-"*10)
combined_data.describe(include = 'all')
# + id="CfwXda73ANGz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 322} outputId="44664c02-5d6f-4f34-f8fe-1fdf5d39e368" executionInfo={"status": "ok", "timestamp": 1580598367191, "user_tz": 420, "elapsed": 2807, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
combined_data.describe()  # numeric summary after constant-column removal
# + [markdown] id="ajXn7tJFT9gy" colab_type="text"
# # **Working with Imbalanced Data**
#
# 1. Up-sample the minority class
# 2. Down-sample the majority class
# 3. Change your performance metric
# 4. Penalize algorithms (cost-sensitive training)
# 5. Use tree-based algorithms
# + id="BEP2QbaIVvId" colab_type="code" outputId="8ec2a64d-ce58-4551-8cde-070fd0885bcd" executionInfo={"status": "ok", "timestamp": 1580598450387, "user_tz": 420, "elapsed": 868, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
combined_data.shape  # sanity check: rows x surviving feature columns
# + id="_aGXBeIjZOXX" colab_type="code" outputId="841f0b8c-4eec-4009-83ee-6bbdece50309" executionInfo={"status": "ok", "timestamp": 1580598451276, "user_tz": 420, "elapsed": 623, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
# features: restrict train data to the columns that survived filtering
X = data[combined_data.columns]
X.shape
# deep copy so later resampling/mutation cannot alias the source frame
y = data["TARGET"].copy(deep = True)
y.shape
# apply the same column selection to the held-out test set
test_data = test_data[combined_data.columns]
test_data.shape
# + id="tOKUfcT_YJBI" colab_type="code" colab={}
#pd.concat([data[combined_data.columns], data["TARGET"]], axis = 1) #axis = 1 concatenates along the columns
#X.shape
# + id="7Tl7yxyUWKmQ" colab_type="code" outputId="75653467-3757-4811-b57c-0794fc587201" executionInfo={"status": "ok", "timestamp": 1580598453797, "user_tz": 420, "elapsed": 856, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 50}
# 80/20 split; fixed random_state for reproducibility across runs
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size = 0.20, random_state = 66)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# + id="cMEs7GTwXEt_" colab_type="code" outputId="bbe4cab6-d9f0-4af4-b971-f461da8780d3" executionInfo={"status": "ok", "timestamp": 1580598455461, "user_tz": 420, "elapsed": 1086, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 167}
y_train.describe()  # confirm class imbalance carried into the training split
# + id="eW8nQnGVXe9B" colab_type="code" outputId="9b98cf0e-661e-4e19-cfd1-04836ad79f78" executionInfo={"status": "ok", "timestamp": 1579718697604, "user_tz": 420, "elapsed": 29948, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 332}
plt.hist(y) #unbalanced data: positives are a small minority of the labels
# + [markdown] id="HsEQyUKMZOlF" colab_type="text"
#
# # **Tree Based Models**
#
# **1. Decision Tree Classifier**
# + id="Nkf19sBlZTev" colab_type="code" outputId="de5139df-f15a-48c3-c9d7-1c3a2c25d8be" executionInfo={"status": "ok", "timestamp": 1579718701449, "user_tz": 420, "elapsed": 33784, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 134}
#setup the model (baseline: a single decision tree, default depth)
model1 = tree.DecisionTreeClassifier(random_state=66)
#train the model
model1.fit(X_train, y_train)
print(model1, "\n")
# + id="Bb7OQgOmbPU0" colab_type="code" outputId="e30c10d4-6b29-48e6-e7f0-18d657cde118" executionInfo={"status": "ok", "timestamp": 1579718701452, "user_tz": 420, "elapsed": 33779, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics (an unpruned tree will look near-perfect here — watch the test gap)
print("Training Metrics.. \n")
ytrain_pred = model1.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="75UiNmFXaDiT" colab_type="code" outputId="9be95271-eca9-45b5-815b-1221ba690007" executionInfo={"status": "ok", "timestamp": 1579718701721, "user_tz": 420, "elapsed": 34042, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics — AUC on hard labels, not probabilities; see roc_auc_score docs
print("Test Metrics.. \n")
y_pred1 = model1.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="s8FYWwdYdSye" colab_type="text"
# **2. Random Forest Classifier**
# + id="p1iSSdbDdQ_h" colab_type="code" outputId="84967bb1-28ef-49bf-cd38-cc2a4b4fbf74" executionInfo={"status": "ok", "timestamp": 1579718728654, "user_tz": 420, "elapsed": 60969, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 167}
#setup the model — class_weight counters the imbalance by up-weighting the positive class
model2 = ensemble.RandomForestClassifier(random_state=66, class_weight = {0:0.1, 1:0.9}, n_estimators = 150, min_samples_split = 10)
#train the model
model2.fit(X_train, y_train)
print(model2, "\n")
# + id="LEwE7MfOdSEg" colab_type="code" outputId="3d5b510c-37be-4ffd-8de3-ebb8ad71ac39" executionInfo={"status": "ok", "timestamp": 1579718731246, "user_tz": 420, "elapsed": 63555, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics for the random forest
print("Training Metrics.. \n")
ytrain_pred = model2.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="3duf0AeAa-1r" colab_type="code" outputId="b63e8ec7-f349-4b1e-8605-b63cddb2bc13" executionInfo={"status": "ok", "timestamp": 1579718732162, "user_tz": 420, "elapsed": 64465, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics for the random forest
print("Test Metrics.. \n")
y_pred1 = model2.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + id="bk6lU2Gydya0" colab_type="code" colab={}
#hyperparameter tuning for randomforest classifier
from sklearn.model_selection import RandomizedSearchCV
# + id="5DIqekQVd7FU" colab_type="code" colab={}
# Hyperparameter search space for RandomizedSearchCV below.
# Number of trees in random forest
n_estimators = [100, 150, 200, 250, 300]
# Maximum number of levels in tree
max_depth = [10, 15, 20]
# Minimum number of samples required to split a node
min_samples_split = [15, 30, 45]
# Minimum number of samples required at each leaf node
min_samples_leaf = [5, 15, 20]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf
               }
# + id="xploKZCdeyeL" colab_type="code" colab={}
# 30 random draws x 3-fold CV, scored on ROC-AUC; n_jobs=-1 uses all cores
rf_model2 = RandomizedSearchCV(estimator = model2, param_distributions = random_grid, n_iter = 30, cv = 3, verbose=2, random_state=66, n_jobs = -1, scoring = 'roc_auc')
# Fit the random search model
rf_model2.fit(X_train, y_train)
# + id="8lqJu7Qh1lOk" colab_type="code" colab={}
# best hyperparameter combination found by the randomized search
params = rf_model2.best_params_
params
# + id="hCYxMALIomg_" colab_type="code" colab={}
# refit a fresh forest with the tuned hyperparameters on the full training split
# NOTE(review): random_state and class_weight from the earlier model2 are not
# carried over here — confirm that was intentional.
model2 = ensemble.RandomForestClassifier(**params)
model2
model2.fit(X_train,y_train)
# + id="kIRTwBrJHHD6" colab_type="code" colab={}
# serialize the fitted model to bytes and restore it (in-memory persistence check)
m2 = pickle.dumps(model2)
model2 = pickle.loads(m2)
m2
# + id="l3k1CbkKo4tI" colab_type="code" colab={}
#training metrics for the tuned random forest
print("Training Metrics.. \n")
ytrain_pred = model2.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="8xtEiXjCpEfP" colab_type="code" colab={}
#test metrics for the tuned random forest
print("Test Metrics.. \n")
y_pred1 = model2.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="OxpmkaWbPLxR" colab_type="text"
# **3. AdaBoost Classifier**
# + id="XTZP-144PKZr" colab_type="code" outputId="0e83c23f-876c-4f25-e339-7d7ba99264fd" executionInfo={"status": "ok", "timestamp": 1579719651425, "user_tz": 420, "elapsed": 15042, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 167}
# Train Adaboost Classifier (50 stumps by default, learning rate 1)
model3 = AdaBoostClassifier(n_estimators=50,
                            learning_rate=1)
model3.fit(X_train, y_train)
# fix: this cell previously printed model2 (the random forest) instead of
# the AdaBoost model it just trained
print(model3, "\n")
# + id="BVNtmxXXPKlR" colab_type="code" outputId="5245d767-05fe-490c-f24d-a5a8b8c94aa0" executionInfo={"status": "ok", "timestamp": 1579719653622, "user_tz": 420, "elapsed": 17231, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics for AdaBoost
print("Training Metrics.. \n")
ytrain_pred = model3.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="HiXpaFNvPKjd" colab_type="code" outputId="dd82028f-4430-4774-fae2-9f46fc40daea" executionInfo={"status": "ok", "timestamp": 1579719654346, "user_tz": 420, "elapsed": 17946, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics for AdaBoost
print("Test Metrics.. \n")
y_pred1 = model3.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="61hoGOi6774o" colab_type="text"
# # **SMOTE**
# + id="SkPp3XqsIAEn" colab_type="code" outputId="030efb0e-5995-4933-d024-0e7c68c725bc" executionInfo={"status": "ok", "timestamp": 1579836388526, "user_tz": 420, "elapsed": 3786, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 100}
#SMOTE: synthetically oversample the minority class on the TRAINING split only
print("\n SMOTE\n","="*40)
print('Original dataset shape %s' % Counter(y_train))
# fix: sampling_strategy takes the desired minority/majority ratio directly;
# the string 'float' is not a valid value, and the separate `ratio` keyword
# is deprecated (and removed in current imbalanced-learn releases).
sm = SMOTE(sampling_strategy=0.99)
X_train, y_train = sm.fit_resample(X_train, y_train)
print('Resampled dataset shape %s' % Counter(y_train))
# + id="iXHz_gBZMyK1" colab_type="code" outputId="898305bf-5e4f-4465-b3ab-9ce9cfc2d9eb" executionInfo={"status": "ok", "timestamp": 1579836388927, "user_tz": 420, "elapsed": 392, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
print(X_train.shape, y_train.shape)  # shapes after SMOTE resampling
# + [markdown] id="3ADUyEHbOz6g" colab_type="text"
# **2. Random Forest Classifier with SMOTE**
# + id="Mn3yJfqzNaBX" colab_type="code" outputId="e125936f-be27-4c29-875d-208f6298d154" executionInfo={"status": "ok", "timestamp": 1579719708769, "user_tz": 420, "elapsed": 71783, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 150}
#Random Forest Classifier retrained on the SMOTE-balanced training set
model2.fit(X_train, y_train)
# + id="mRxgUaykArIE" colab_type="code" outputId="807cd535-0bdd-487c-d6e6-9f79e9b32eed" executionInfo={"status": "ok", "timestamp": 1579645651017, "user_tz": 420, "elapsed": 7951, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics (on the resampled training data)
print("Training Metrics.. \n")
ytrain_pred = model2.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="Q0t2GIHUBYnw" colab_type="code" outputId="ab7f0dd9-0aee-4bdc-eff2-f6bb69c76a2b" executionInfo={"status": "ok", "timestamp": 1579645658691, "user_tz": 420, "elapsed": 1340, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics — the test split is untouched by SMOTE, as it should be
print("Test Metrics.. \n")
y_pred1 = model2.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="e5FjHSDnO6D8" colab_type="text"
# **3. AdaBoost Classifier with SMOTE**
# + id="DDNWNfH0NxR_" colab_type="code" outputId="95f872f2-2764-4a44-f071-512f635affd3" executionInfo={"status": "ok", "timestamp": 1579719833598, "user_tz": 420, "elapsed": 51847, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 50}
# Train Adaboost Classifier on the SMOTE-balanced training set
model3 = AdaBoostClassifier(n_estimators=50,
                            learning_rate=1)
model3.fit(X_train, y_train)
# fix: "/n" was a typo for the newline escape "\n"
print("\n", model3)
# + id="EUYQcbkAP9jy" colab_type="code" outputId="0e3580a7-5138-4d54-9b60-192ea900cdd7" executionInfo={"status": "ok", "timestamp": 1579719838499, "user_tz": 420, "elapsed": 55017, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics for AdaBoost on resampled data
print("Training Metrics.. \n")
ytrain_pred = model3.predict(X_train)
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="wW1UmTm9QFaD" colab_type="code" outputId="e20f49ad-72da-45cd-f99e-18298c933723" executionInfo={"status": "ok", "timestamp": 1579719839398, "user_tz": 420, "elapsed": 53964, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics for AdaBoost on the untouched test split
print("Test Metrics.. \n")
y_pred1 = model3.predict(X_test)
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="_WLSEwglQ-Al" colab_type="text"
# **4. Gradient Boosting Classifier with SMOTE**
# + id="Ebx_fNPrQ9Mm" colab_type="code" outputId="bde340d8-02a5-45fa-bfcc-51f4f2bc516a" executionInfo={"status": "ok", "timestamp": 1579720072176, "user_tz": 420, "elapsed": 282759, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 201}
#Gradient Boosting Classifier with sklearn defaults (100 trees, lr 0.1)
model4 = ensemble.GradientBoostingClassifier()
model4.fit(X_train, y_train)
print("\n", model4)
# + id="4CCF-rwJRb2V" colab_type="code" outputId="d3f0ffe7-f267-4db3-9371-31768ae61339" executionInfo={"status": "ok", "timestamp": 1579720074157, "user_tz": 420, "elapsed": 1952, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics for gradient boosting
print("Training Metrics.. \n")
pred = model4.predict(X_train)
# predict() already returns class labels; rounding is a defensive no-op here
ytrain_pred = [round(value) for value in pred]
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + id="7_9eMY5qRji4" colab_type="code" outputId="795944b4-7367-41f6-c5c2-c0c9acc71ebd" executionInfo={"status": "ok", "timestamp": 1579720074158, "user_tz": 420, "elapsed": 1940, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics for gradient boosting
print("Test Metrics.. \n")
pred = model4.predict(X_test)
# predict() already returns class labels; rounding is a defensive no-op here
y_pred1 = [round(value) for value in pred]
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="8u-gkXMQijtZ" colab_type="text"
# **5. XGBoost Classifier**
# + id="p48rYe5pmnc3" colab_type="code" colab={}
# XGBoost (this version) doesn't handle pandas frames well, so convert to ndarrays
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
test_data = np.array(test_data)
# + id="EldnrEXaimNt" colab_type="code" outputId="55009048-2582-4b7f-d276-73c8d8f37046" executionInfo={"status": "ok", "timestamp": 1579836922692, "user_tz": 420, "elapsed": 83956, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 150}
#XGBoost Classifier with library defaults
model5 = XGBClassifier()
model5.fit(X_train, y_train)
print("\n", model5)
# + id="sWeNkWSXiyK4" colab_type="code" outputId="33f32c42-a449-4207-ed03-2eb45add1cf0" executionInfo={"status": "ok", "timestamp": 1579836923887, "user_tz": 420, "elapsed": 80218, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
#training metrics for XGBoost
print("Training Metrics.. \n")
pred = model5.predict(X_train)
# round to hard 0/1 labels before computing classification metrics
ytrain_pred = [round(value) for value in pred]
print(metrics.classification_report(y_train, ytrain_pred), "\n")
#confusion matrix
print("Confusion Matrix.. \n ",metrics.confusion_matrix(y_train, ytrain_pred))
print("\n AUC score .. ", metrics.roc_auc_score(y_train, ytrain_pred))
# + colab_type="code" outputId="feffb337-b69b-4c62-fde3-5cb0732700b2" executionInfo={"status": "ok", "timestamp": 1579836924287, "user_tz": 420, "elapsed": 388, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} id="aSXak8Eoo3WK" colab={"base_uri": "https://localhost:8080/", "height": 284}
#test metrics for XGBoost
print("Test Metrics.. \n")
pred = model5.predict(X_test)
# round to hard 0/1 labels before computing classification metrics
y_pred1 = [round(value) for value in pred]
print(metrics.classification_report(y_test, y_pred1))
#confusion matrix and AUC score
print("Confusion matrix .. \n", metrics.confusion_matrix(y_test, y_pred1))
print("\n AUC score .. ", metrics.roc_auc_score(y_test, y_pred1))
# + [markdown] id="K6BmlDEZpNNC" colab_type="text"
# # **Validate the Test data**
# + id="G_bRfOJ_p98R" colab_type="code" outputId="985d6576-dece-4620-a37e-c7945de162f7" executionInfo={"status": "ok", "timestamp": 1579836924434, "user_tz": 420, "elapsed": 525, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
test_data.shape  # held-out competition test set (no labels)
# + id="beDh_rnbfZFV" colab_type="code" outputId="9b55242d-c6ab-436f-d423-233084a4a496" executionInfo={"status": "ok", "timestamp": 1579837474066, "user_tz": 420, "elapsed": 1012, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 50}
#final predictions on the unlabeled competition test set (XGBoost model)
print("Test Metrics.. \n")
pred = model5.predict(test_data)
y_pred1 = [round(value) for value in pred]
# + id="Zxm8X6z8ptQB" colab_type="code" outputId="e0c37bdf-9b83-43e2-8af8-3002ceb6deb7" executionInfo={"status": "ok", "timestamp": 1579837683027, "user_tz": 420, "elapsed": 374, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}} colab={"base_uri": "https://localhost:8080/", "height": 33}
len(y_pred1)  # should equal the number of test rows
# + id="dVyK3aJAqYeL" colab_type="code" colab={}
results = pd.DataFrame(data = y_pred1, columns = ["TARGET"])  # wrap predictions for submission
# + id="E5-GqTeaqJ6U" colab_type="code" colab={}
# write the submission file to Google Drive (index column included by default)
results.to_csv("/gdrive/My Drive/CIS_508/Colab Notebooks/Projects/SantanderResults.csv")
| Santander Customer Satisfaction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/YorgosPs/BrainSeq/blob/main/BrainSeq.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dtN5WrpH1Hz1"
# # BrainSeq (Brain Sequences)
#
# A demonstration of various algorithms from statistics, probability theory, graphs, and machine learning (coming soon) on a sequence of pseudo random numbers kindly generated by my best friend, <NAME>.
# My objective is to investigate whether Jenny's brain is a random number generator, or if there is an underlying pattern in the succession of numbers predicted. If there is a pattern, can I uncover it?
#
# + id="JDaFYSxUwNEA"
# %matplotlib inline
import numpy as np
from scipy import stats as stats
import matplotlib.pyplot as plt
import networkx as nx
from matplotlib import cm as cm
from sklearn.model_selection import train_test_split
from scipy.stats import kstest as kstest
import tensorflow as tf
from scipy.special import rel_entr
from tensorflow.keras.layers import Dense, Dropout, Activation
# + [markdown] id="DM2o7yAw14Oe"
# # Dataset
#
# The dataset consists of an array of 847 integers.
# + colab={"base_uri": "https://localhost:8080/"} id="WWcX4Ao_wZd4" outputId="5d863e1d-0fe5-44bd-c755-16027dd434c8"
# The raw dataset: 847 human-generated digits (0-9), entered verbatim.
jenny_responses= np.array([7,4,3,0,9,1,5,2,6,8,0,0,0,5,6,9,1,2,5,5,1,9,0,8,
                          8,6,6,6,5,0,1,9,8,8,9,5,8,2,1,0,5,3,5,5,9,4,3,2,1,
                          0,0,1,1,1,0,1,0,6,9,8,1,5,4,2,1,1,0,7,7,9,2,
                          3,3,8,2,0,0,0,0,9,6,2,7,9,5,3,0,8,8,0,5,0,7,9,5
                          ,3,6,4,5,4,4,2,1,2,1,0,0,1,0,6,3,9,0,2,5,4
                          ,6,2,1,0,0,1,2,9,9,9,9,9,5,6,7,6,5,4,3,2,1,0
                          ,0,1,5,4,7,3,2,9,8,7,7,3,2,8,7,6,5,9,1,2,0,
                          0,0,1,1,2,5,8,9,0,3,2,1,5,0,8,4,3,7,8,8,6,9,0,
                          0,0,0,9,1,3,3,4,4,5,5,6,6,9,8,8,9,2,0,0,0,1,9,9,6,
                          4,3,9,2,3,4,4,0,2,5,9,2,5,8,0,9,0,4,1,4,6,1,3,1,2,5,5,0,
                          7,8,7,0,1,2,3,8,9,1,1,2,7,0,0,0,5,1,1,1,6,5,8,6,9,0,1,0
                          ,1,2,4,3,8,8,5,5,2,2,2,0,2,6,6,1,0,2,0,2,1,1,9,4,4,2,6,2,0,2
                          ,1,2,0,2,0,1,0,6,1,2,2,6,1,8,8,4,7,2,0,3,2,4,1,4,2,3,6,7
                          ,1,9,1,9,0,4,1,9,0,4,2,0,0,5,1,9,0,5,2,6,0,7,2,1,0,7,3,6,
                          5,1,5,1,1,6,2,1,0,1,2,0,1,5,1,6,6,8,2,1,4,5,6,6,7,8,8,9,0,9,0,0,1
                          ,0,0,1,0,0,2,0,0,3,0,0,4,0,0,5,0,0,6,0,0,7,0,0,8,8,
                          9,0,0,8,0,0,7,0,0,6,0,0,5,0,0,4,0,0,3,0,0,2,0,0,1,0,0,0,0,0,0,5,5,1,
                          1,6,6,7,0,2,0,6,8,4,9,9,4,2,0,1,1,3,5,4,1,0,9,1,8,8,1,5,5,3,9,9,4,0,
                          4,3,2,4,1,7,5,0,1,0,0,1,1,2,4,4,1,0,1,4,8,0,0,6,6,0,0,0,1,5,0,0,0,1,
                          9,9,0,0,0,4,1,2,2,2,2,2,1,5,5,5,7,7,8,1,8,2,6,5,5,4,1,3,8,9,7,0,2,0,
                          1,0,4,0,8,9,2,2,7,5,4,2,2,3,3,3,8,3,3,4,3,4,1,4,4,2,4,4,9,4,4,6,4,4,
                          2,5,8,9,9,1,1,1,1,3,1,1,8,8,1,8,6,0,7,7,2,7,3,5,0,7,0,2,8,3,2,4,8,6,
                          4,2,4,1,2,6,8,5,5,5,5,8,2,0,1,9,1,6,1,6,1,6,0,5,1,2,1,4,3,2,3,8,8,7,
                          9,2,8,9,9,6,1,3,0,2,2,4,3,8,6,6,8,5,7,2,3,2,3,1,2,4,1,7,7,3,6,2,3,9,
                          9,2,4,4,4,4,4,8,4,8,3,3,3,3,4,5,3,1,4,6,8,6,2,3,8,6,6,2,6,5,0,9,9,0,
                          8,1,8,0,1,0,2,4,0,4,8,4,1,8,7,1,5,5,0,2,0,2,2,5,0,5,5,1,8,7,4,8,4,0,
                          0,7,5,1,1,1,4,8,6,1,5,9,9,9,6,3,0,2,9,8,9,5,2,0,0,1,2,0,5,8,6,2,1,5,
                          0,1,2,0,1,1,0,2,5,3,0,1,0,0,0,0,0,0,1,4,9,0,2,9,3,0,0,0,0,0,4,1,4,0,
                          2,5,6,2,7,2,7,4,8,3,0,4,2,4,0,4,0,2,1,2,0,2,4,7,9,8,4,1,0,0,0,0])
print( 'Number of samples: ', jenny_responses.shape[0])
# + [markdown] id="DeByMsEy2FvG"
# # Basic statistics
#
# Statistics such as the mean, (unbiased) variance, etc are the first level of analysis for such dataset.
# We can also examine the Sampling distribution.
# + colab={"base_uri": "https://localhost:8080/"} id="UuFHhx3ywZpA" outputId="8abdaa1e-55fd-4fb8-ba70-56ac908b982f"
stats.describe(jenny_responses)  # n, min/max, mean, unbiased variance, skew, kurtosis
# + colab={"base_uri": "https://localhost:8080/", "height": 530} id="gP1mjYgqwZsX" outputId="a6a3607b-97fb-43fd-fca8-4bb61441aa64"
# Plot the empirical (sampling) distribution of the digits.
fig, ax = plt.subplots(figsize=[8,8])
Ni, bins, patches = ax.hist(jenny_responses, bins=10, density=False)
# normalize bin counts to empirical probabilities; Prob is reused by the KL cell below
Prob = Ni/np.sum(Ni)
# center the tick labels under the 10 equal-width bins spanning [0, 9]
plt.xticks(0.45+0.9*np.linspace(0,9,10),['0','1', '2', '3', '4', '5', '6', '7', '8', '9'], fontsize =16);
plt.yticks(fontsize=16)
ax.set_xlim(0, 9)
for i in range(10):
    patches[i].set_facecolor(cm.get_cmap('magma')(i/10))
ax.set_ylabel('Occurence Distribution', fontweight='bold', fontsize=18)
ax.set_xlabel('Number', fontweight='bold', fontsize=18)
ax.set_title('Sampling Distribution', fontweight='bold', fontsize=18);
# + [markdown] id="QgojG1m16d4w"
# The statistics of our dataset differ from what we would expect from a random number generator (RNG). If it was indeed a RNG we would find that the sampling distribution resembles the uniform distribution ($U[0,9]$). If the dataset is denoted $\mathbf{s} \in \mathrm{\mathbb{R}}^{847}$ then:
# $\bar{s_i} =\frac{1}{N} \sum_{i=1}^N s_i \rightarrow \frac{9+0}{2} = 4.5, N \rightarrow \infty$
#
# and
#
# $Var(s_i)= \frac{1}{N-1} \sum_{i=1}^N (\bar{s}-s_i)^2 \rightarrow \frac{(9-0+1)^2-1}{12} = 8.25, N \rightarrow \infty$
#
# + [markdown] id="54VlTGrbEPYs"
# ### We can, in fact, calculate the distance from the uniform distribution
# + [markdown] id="7RhhqddnEeRd"
# Kullback-Leibler divergence:
# $D_{KL} = \sum_{x \in X} P(x) log(\frac{P(x)}{Q(x)}),$
# where $X$ is the state space (here, $X=\{0, 1, ..., 9\}$), $P(x)$ is the probabilities as given by the emperical distribution, and $Q(x)$ are the respective probabilities from the discrete uniform distribution $U([0,9])$.
# + colab={"base_uri": "https://localhost:8080/"} id="sbU4LBylxsVu" outputId="6d2c4580-de02-4639-8e02-2f88b589dd2d"
# Kullback-Leibler divergence of the empirical digit distribution vs Uniform[0,9].
# Robustness fix: skip empty bins — by convention 0*log(0) = 0, whereas
# np.log(0.0) would emit a warning and produce nan in the sum.
d_kl=[]
for p in Prob:
    if p > 0:
        d_kl.append(p*np.log(p/0.1))
D_KL = np.sum(d_kl)
#Confirmed with Python function
# print(np.sum(rel_entr(Prob, np.ones(10)*0.1)))
print('Kullback-Leibler Distance wrt to the Uniform is: ' , D_KL)
# + [markdown] id="UZVgIkmcEi__"
# Wasserstein Distance
#
# $W_1(\mu_1, \mu_2) = \int_{-∞}^{+∞}|M_1(x) -M_2(x)|dx ≈ \sum_{n=0}^{9}|M_1(x_n) -M_2(x_n)|$
# where $M_1, M_2$ are the respective cumulative distribution functions of $\mu_1 , \mu_2 $.
# + colab={"base_uri": "https://localhost:8080/"} id="D4g6mjRZxsYP" outputId="0d551268-2e7a-4619-9520-e8e20932344f"
# Wasserstein (earth mover's) distance between the responses and the integers
# 0..9 with equal weights, i.e. the discrete uniform reference.
D_W = stats.wasserstein_distance(jenny_responses, np.arange(0,10))
print('Wasserstein Distance wrt to the Uniform is: ' , D_W)
# # This has been confirmed with my code below
# CumDist = np.cumsum(Prob)
# print(np.sum(np.abs(CumDist - np.arange(1,11)*0.1)))
# + [markdown] id="M-j_sFclZIo_"
# ## Gap test
#
# Studying the distribution of gaps between recurrence for every number.
#
# If the occurrence of every number were equiprobable (uniformly distributed) then the gap distribution would be geometric with $p=1/10$.
# + id="pupelYozCGsN"
# For each digit, record the index gaps between its consecutive occurrences in
# the response sequence. gaps0 keeps the per-digit lists (used by the 3D plot
# below); gaps pools everything into one flat array.
gaps0 = [np.diff(np.where(jenny_responses == digit)[0]).tolist()
         for digit in range(10)]
pooled = []
for per_digit in gaps0:
    pooled.extend(per_digit)
gaps = np.array(pooled)
# + id="XbMfIaGQImWK"
# Histogram of the pooled gaps, with one bin per unit of the observed gap
# range (np.ptp = max - min). Probg is the empirical gap pmf, reused below.
Gaps_w = np.ptp(gaps)
Ngi, binss = np.histogram(gaps, bins=Gaps_w)
Probg = Ngi/np.sum(Ngi)
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="ViZ2FLdZEizc" outputId="46c52c1c-be3d-4fa0-ca8f-1bb17057aa56"
fig = plt.figure(figsize=[20,8])

# Left panel: per-digit gap histograms stacked along the y (digit) axis.
ax1 = plt.subplot(121, projection='3d')
s=0
clist= cm.get_cmap('magma')(np.arange(10)/10)
for sublist in gaps0:
    hist_temp, bins_temp = np.histogram(sublist, bins =60)
    xs = (bins_temp[:-1] + bins_temp[1:])/2
    # Each histogram is drawn three times at slightly offset depths to give
    # the bars some visual thickness in the 3D view.
    ax1.bar(xs, hist_temp, zs=s, zdir='y', linewidth=15, color=clist[s]) # ec=c, alpha=0.8)
    ax1.bar(xs, hist_temp, zs=s-0.05, zdir='y', linewidth=15, color=clist[s])# ec=c, alpha=0.8)
    ax1.bar(xs, hist_temp, zs=s+0.05, zdir='y', linewidth=15, color=clist[s])# ec=c, alpha=0.8)
    s+=1
ax1.set_ylim(9,0)
ax1.set_yticks(np.arange(10))
ax1.set_title('Gap Distribution for every number', fontsize=18)
ax1.set_xlabel('Gap', fontsize=13)
ax1.set_zlabel('Occurence', fontsize=13)
ax1.set_ylabel('Number', fontsize=13)

# Right panel: empirical cumulative gap distribution vs. the Geometric(0.1) CDF.
# NOTE(review): Gaps_ar is not defined in this chunk -- presumably an array of
# gap values (e.g. an arange over the gap range) from an earlier cell; confirm.
ax2 = plt.subplot(122)
ax2.plot(Gaps_ar, np.cumsum(Probg), linewidth=3,color='red', label='Obtained Distribution')
ax2.plot(Gaps_ar, stats.geom.cdf(Gaps_ar, 0.1),linewidth=3, color='green', label='Theoretical Distribution')
ax2.legend()
ax2.set_title('Cumulative Gap Distribution', fontsize=18)
ax2.set_xlabel('Gap', fontsize=13)
ax2.set_ylabel('Density', fontsize=13);
# + [markdown] id="FwcDtWR_nWlB"
# ### Basic Statistics of Gaps
# + colab={"base_uri": "https://localhost:8080/"} id="-dx49Sc2D8ml" outputId="908d339a-1290-446f-9f78-d4e62ee50eff"
# Summary statistics of the pooled gap lengths.
stats.describe(gaps)
# + colab={"base_uri": "https://localhost:8080/"} id="KYamsgR6ZIQL" outputId="af373f6c-b83e-4a64-f9d7-014a876a6df2"
# Mean gap between repeats of the same digit, interpreted as the RNG "period".
print('The period of Jennys brain as a RNG is: ', np.mean(gaps))
# + [markdown] id="i8VYbxbGyloh"
# The period is large, a good indication for a Random Number Generator.
# + [markdown] id="uRBHI2jbnbEw"
# Kullback-Leibler divergence
#
# + colab={"base_uri": "https://localhost:8080/"} id="hPIQqId_nqQU" outputId="e200afcb-e6b5-40e4-e04c-f6be33ca9eaf"
# KL divergence of the empirical gap distribution Probg against the reference
# model GM1 (defined in an earlier cell -- presumably the Geometric(0.1) pmf;
# TODO confirm). A tiny epsilon (1e-17) guards against log(0) in empty bins.
d_kl=[]
for i in range(70):  # NOTE(review): 70 is hard-coded; assumes Probg/GM1 have >= 70 entries
    # print(GM1[i], Probg[i])
    d_kl.append((Probg[i]+1E-17)*np.log((Probg[i]+1E-17)/GM1[i]))
D_KL = np.sum(d_kl)
# # #Confirmed with Python function
# print(np.sum(rel_entr(Probg, GM1)))
# Fixed message: the reference distribution here is the geometric, not the uniform.
print('Kullback-Leibler Distance of the gap distribution wrt to the Geometric is: ' , D_KL)
# + [markdown] id="_czhELRqsyZW"
# Wasserstein distance
# + colab={"base_uri": "https://localhost:8080/"} id="bWu0XuvJsvF7" outputId="d202b50d-1f39-4b72-a175-6f917358c077"
# Wasserstein distance between the observed gaps and samples drawn from a
# Geometric(p=0.1) distribution (100x oversampled to reduce Monte Carlo noise).
D_W = stats.wasserstein_distance(gaps, stats.geom(0.1).rvs(size=gaps.shape[0]*100))
# Fixed message: the reference distribution here is the geometric, not the uniform.
print('Wasserstein Distance wrt to the Geometric is: ' , D_W)
# # This has been confirmed with my code below
# print(np.sum(np.abs(np.cumsum(Probg) - stats.geom.cdf(Gaps_ar, 0.1))))
# + [markdown] id="Gv_hOc75vI_Z"
# **Observations:**
# * Both the distribution of numbers and the distribution of gaps indicate that there is a significant deviation from the theoretically expected results.
# * There is a larger preference for smaller numbers rather than larger numbers. This is reflected in the distribution of numbers, but also in the distribution of gaps: Larger numbers have larger gaps between their recurrence.
# * Most of the results seem to be skewed by the repetition of zeros, which is most likely caused by Jenny's boredom (!!).
# + [markdown] id="I5G_MRw84sE_"
# # Empirical Distribution Function and Inverse Transform Sampling
#
# Given the distribution of numbers, we demonstrate how we can sample a much larger dataset of numbers which follow the same distribution (obeying the law of large numbers)
#
# In other words, assuming that there is no pattern in the way Jenny generates numbers, only a probability for each of them, we can 'simulate' Jenny's brain to generate random numbers with the same distribution. Here we generate 30,000 more samples.
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="kHEBMpQlxlXl" outputId="bf000be3-7ef8-48a1-c1cd-9f139d143521"
# Empirical CDF of the responses; CumDist is reused by the inverse-transform
# sampler in the next cell.
CumDist = np.cumsum(Prob)
fig, ax = plt.subplots(figsize=[5,5])
ax.plot(np.arange(10), CumDist, '-o', c='b')
ax.set_ylim(0,1.01)
ax.set_xlim(-0.1, 9.1)
ax.set_xticks(np.arange(10));
ax.set_xlabel('$N_i$', fontsize=21)
ax.set_ylabel('$\Sigma_{i=1}p_i$', fontsize=21)
ax.set_title('Cumulative Distribution', fontweight='bold', fontsize=18)
plt.show()
# + id="WlhklLrJxldW"
# Inverse-transform sampling: draw u ~ U[0,1) and pick the first digit whose
# cumulative probability exceeds u; repeating this reproduces the empirical
# distribution in the limit of many samples.
edf_samples = [np.min(np.where((np.random.uniform() - CumDist) < 0)[0])
               for _ in range(30000)]
# + colab={"base_uri": "https://localhost:8080/", "height": 532} id="s4dd7BHWxlgk" outputId="b0efe393-b224-49a8-c24b-9b7c2b535f8c"
# Side-by-side histograms: EDF-generated samples (left) vs. the original
# responses (right), with matching per-digit colours.
fig, ax = plt.subplots(nrows=1, ncols=2, tight_layout=True, figsize=[10,6])
ax[0].set_xticks([])
ax[1].set_xticks([])
# plt.xticks(0.45+0.9*np.linspace(0,9,10),['0','1', '2', '3', '4', '5', '6', '7', '8', '9']);
Ni_edf, bins_edf, patches_edf = ax[0].hist(edf_samples, bins=10, density=False)
ax[0].set_xticks(0.45+0.9*np.linspace(0,9,10),['0','1', '2', '3', '4', '5', '6', '7', '8', '9']);
ax[1].set_xticks(0.45+0.9*np.linspace(0,9,10),['0','1', '2', '3', '4', '5', '6', '7', '8', '9']);
_,_, patches = ax[1].hist(jenny_responses, bins=10, density=False)
# plt.xticks(0.45+0.9*np.linspace(0,9,10),['0','1', '2', '3', '4', '5', '6', '7', '8', '9']);
ax[0].set_xlim(0, 9)
ax[1].set_xlim(0, 9)
# Same colormap as the earlier single histogram so digits match across panels.
for i in range(10):
    patches_edf[i].set_facecolor(cm.get_cmap('magma')(i/10))
    patches[i].set_facecolor(cm.get_cmap('magma')(i/10))
ax[1].set_ylabel('Occurence Distribution', fontweight='bold', fontsize=15)
ax[1].set_xlabel('Number', fontweight='bold', fontsize=15)
ax[1].set_title('Original Data', fontweight='bold', fontsize=15);
ax[0].set_ylabel('Occurence Distribution', fontweight='bold', fontsize=15)
ax[0].set_xlabel('Number', fontweight='bold', fontsize=15)
ax[0].set_title('EDF Generated Data', fontweight='bold', fontsize=15);
# + [markdown] id="pK-G-TbAE4kZ"
# # Thinking of our data as a Markov Chain
# Now, we introduce the assumption that the number generated next, depends only on the number last generated. In other words:
# $P(X_k =x | X_{k-1}, X_{k-2}, ...) = P(X_k =x| X_{k-1})$
# + colab={"base_uri": "https://localhost:8080/", "height": 335} id="YqRvzpKQxsa0" outputId="39ddd6b1-fac2-4f75-9338-b88be990e663"
# Occur[i, j] counts transitions digit i -> digit j (2D histogram of
# consecutive response pairs).
Occur, _,_ = np.histogram2d(jenny_responses[:-1], jenny_responses[1:])
# Occur, _,_ = np.histogram2d(edf_samples[:-1], edf_samples[1:])
# Trans: row-normalised counts  -> transition probabilities P(next | current).
# Orig: column-normalised counts -> origination probabilities P(current | next).
Trans = np.diag( np.sum(Occur, axis=1)**-1) @Occur
Orig = Occur @ np.diag( np.sum(Occur, axis=0)**-1)
fig, ax = plt.subplots(nrows=1, ncols=3, tight_layout=True, figsize=[15,6])
ax0=ax[0].imshow(Occur, origin='lower')
ax[0].set_xlabel('$u_n$', fontsize=18)
ax[0].set_ylabel('$u_{n+1}$', fontsize=18)
ax[0].set_xticks(np.linspace(0,9,10))
ax[0].set_yticks(np.linspace(0,9,10))
ax[0].set_title('Transition Occurence', fontweight='bold', fontsize=18)
fig.colorbar(ax0, ax=ax[0], shrink=0.5)
ax[0].text(9.8, 8.9, '$ N( x \mapsto x_i)$', fontsize=14)
ax1=ax[1].imshow(Trans, origin='lower')
ax[1].set_xlabel('$u_n$', fontsize=18)
ax[1].set_ylabel('$u_{n+1}$', fontsize=18)
ax[1].set_xticks(np.linspace(0,9,10))
ax[1].set_yticks(np.linspace(0,9,10))
ax[1].set_title('Transition Probability', fontweight='bold', fontsize=18)
fig.colorbar(ax1, ax=ax[1], shrink=0.5)
ax[1].text(9.9, 8.9, '$p(x |x_i)$', fontsize=14)
ax2=ax[2].imshow(Orig, origin='lower')
ax[2].set_xlabel('$u_n$', fontsize=18)
ax[2].set_ylabel('$u_{n+1}$', fontsize=18)
ax[2].set_xticks(np.linspace(0,9,10))
ax[2].set_yticks(np.linspace(0,9,10))
ax[2].set_title('Origination Probability', fontweight='bold', fontsize=18)
fig.colorbar(ax2, ax=ax[2], shrink=0.5);
ax[2].text(9.9, 8.9, '$p(x_i |x)$', fontsize=14);
# + [markdown] id="fbcYyFC365CP"
# Studying the transition probabilities, we can see that there is some pattern in our data. If there wasn't, we would see equal probability to transition from any number to a certain number (all rows would have a single color). This is confirmed with the EDF-generated data, which are truly random.
# + [markdown] id="MSSfHmqLy_r-"
# ## Stationary Distribution and Potential
#
# The stationary distribution is defined as the probability to find ourselves at a specific number after infinite steps of the Markov Chain. This can be found using the first left eigenvector of the transition probability matrix:
# $v_1 T = \lambda_1 v_1 $ , $\lambda_1 =1$ , $\pi = \frac{v_1}{\sum v_1}$
#
# The potential representation of the Markov chain can be found under the assumption of a Boltzmann distribution:
#
# $\pi_i = e^{-U(i)/T}$
#
# + colab={"base_uri": "https://localhost:8080/", "height": 369} id="b72_hpJzxseE" outputId="97e722d6-dbe9-4718-dc5f-53e61e27f780"
# Left eigenvectors of Trans = right eigenvectors of Trans.T; the stationary
# distribution is the (normalised) eigenvector for eigenvalue 1.
eigenvalues, eigenvectors = np.linalg.eig(Trans.T)
# NOTE(review): np.linalg.eig does not sort its output; column 0 is assumed
# here to correspond to the eigenvalue 1 -- confirm for this matrix.
stationary =np.real( eigenvectors[:,0]/ np.sum(eigenvectors[:,0]))
# Boltzmann inversion U = -log(pi), then rescaled to the range [0, 1].
Urec = -np.log(stationary)
Urec = (Urec-np.min(Urec))/(np.max(Urec)- np.min(Urec))
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=[13, 5], tight_layout=True)
ax[0].plot(np.arange(1, 11),np.real(eigenvalues), '-o', c='b', label='$Re(\lambda_i)$')
ax[0].plot(np.arange(1, 11),np.imag(eigenvalues), '-o', c='r', label='$Imag(\lambda_i)$')
ax[0].set_xticks(np.arange(1, 11))
ax[0].set_xlabel('Index', fontsize=15)
ax[0].set_ylabel('$\lambda_i$', fontsize=15)
ax[0].legend()
ax[0].set_title('Spectrum', fontweight='bold', fontsize=16)
ax[1].plot(np.arange(10),stationary, '-o', c='b')
ax[1].set_xticks(np.arange(10))
ax[1].set_xlabel('$N_i$', fontsize=15)
ax[1].set_ylabel('$\pi_i$', fontsize=15)
ax[1].set_title('Stationary Distribution', fontweight='bold', fontsize=16)
ax[2].plot(np.arange(10),Urec, '-o', c='b')
ax[2].set_xticks(np.arange(10))
ax[2].set_xlabel('$N_i$', fontsize=15)
ax[2].set_ylabel('$ U_i $', fontsize=15)
ax[2].set_title('Recovered Potential', fontweight='bold', fontsize=16);
# + [markdown] id="glh-rI93zPIK"
# One can see (as expected) that the stationary distribution matches the histogram of occurrences obtained in the previous section. This is expected because, in the context of stationarity (or after infinite time on the Markov chain), it doesn't matter what the preceding number is; equivalently, we have reduced a 2D probability density to a 1D probability density.
#
# Interestingly, we can derive an equivalent 'Potential' on the discrete state space. This informs us of the ease or difficulty of transitioning from one number to another.
# + [markdown] id="NZq_lG7W0KTG"
# # Graph
# Following our analysis of our dataset as a Markov chain, we can proceed and view our dataset as a graph
# + colab={"base_uri": "https://localhost:8080/", "height": 398} id="o3ntJBPm5FjI" outputId="c674cd9f-5628-4722-f8aa-7a95f0fb5ba8"
# Arrhenius-style distances d(i -> j) = -log p(j | i), symmetrised by
# averaging with the transpose. Dist_sym_tr keeps only entries below a
# 2.5 threshold (zeros elsewhere).
Dist = -np.log(Trans)
Dist_sym = 1/2*(Dist+Dist.T)
Dist_sym_tr = Dist_sym*0
Dist_sym_tr[Dist_sym<2.5] = Dist_sym[Dist_sym<2.5]
fig, ax = plt.subplots(nrows=1, ncols=2, tight_layout=True, figsize=[10,6])
ax[0].imshow(Dist, origin='lower')
ax[0].set_xlabel('$u_n$', fontsize=18)
ax[0].set_ylabel('$u_{n+1}$', fontsize=18)
ax[0].set_xticks(np.linspace(0,9,10))
ax[0].set_yticks(np.linspace(0,9,10))
ax[0].set_title('Asymmetric Distances', fontweight='bold', fontsize=18)
ax[1].imshow(Dist_sym, origin='lower')
ax[1].set_xlabel('$u_n$', fontsize=18)
ax[1].set_ylabel('$u_{n+1}$', fontsize=18)
ax[1].set_xticks(np.linspace(0,9,10))
ax[1].set_yticks(np.linspace(0,9,10))
ax[1].set_title('Symmetric Distances', fontweight='bold', fontsize=18);
# + [markdown] id="rX89h1nQ17Qx"
# Starting from the transition probabilities between pairs of numbers, we can construct a sense of distance between them. To do this, we use the Arrhenius formula :
# $d(i \rightarrow j) = - log(p(x_i |x_j))$
#
# These distances are not symmetric by default (it is not as easy to go from i to j as it is to go from j to i). We can construct a symmetric distance metric, by considering the average distance of going from i to j and from j to i.
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="ZWHJJnXN5FF7" outputId="7805d78f-bb9a-4ff1-a968-b7ada64a77cf"
# Build a directed graph whose edges connect digit pairs closer than the
# median symmetric distance (weighted by that distance), and lay it out
# with the Kamada-Kawai algorithm.
G1=nx.DiGraph()
graph_con=[]
for i in range(10):
    for j in range(10):
        if Dist_sym[i,j]<np.median(Dist_sym):
            graph_con.append((i,j, Dist_sym[i,j])) #Trans[i,j]))
        # else:
        #     graph_con.append((i,j, 300)) #Trans[i,j]))
G1.add_weighted_edges_from(graph_con)
fig, ax=plt.subplots(figsize=[10,10])
nx.draw_kamada_kawai(G1, with_labels = True)#,edge_labels='weight')
ax.set_title('Graph representation of number sequence', fontweight='bold', fontsize=15);
# + [markdown] id="jaaQmFDm6ERe"
# Some observations from the graph representation are that 7 and 3 are the most isolated nodes. That is consistent with the potential representation, where they were 'mountains'. It is also consistent with the distribution of numbers, where we found that 7 and 3 had the lowest occurrence.
| BrainSeq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # A Crash Course on ReflectDirect
# **Welcome — thanks for downloading!**
#
# `ReflectDirect` is designed for anyone interested in reflected light from directly-imaged planets.
#
# If you want an overview of what it can do, you're in the right place.
#
# Before getting started, make sure you're up to date with:
#
# - Jupyter notebook
# - scipy
# - matplotlib
# - ipywidgets
# ## A Little Setup
# At the simplest level, you need only two statements:
# +
import reflectdirect as refdir
# %matplotlib inline
# -
# **You should also** put the path to the package folder `'png_maps'` here. It will make importing brightness maps easier. For example:
#
# map_source = '/Users/Bozo/Some_random_place/ReflectDirect-main/png_maps/'
#
# I recommend putting any other png images you want to use into this folder, as well.
# Path to the package's 'png_maps' folder -- replace this placeholder with
# your local path, e.g. '/Users/you/ReflectDirect-main/png_maps/'.
# Fixed: the original line was missing the closing quote (SyntaxError).
map_source = '...../png_maps/'
# And in case you need them, here are some more imports from inside the module:
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.special import sph_harm
import exoplanetsubspots as exoss

# Shorthand used throughout the demo.
pi = np.pi
# -
# ## Things to Reference
# We use some conventions to define how planetary systems are aligned and oriented.
#
# Just run `Geometry_Reference` to get a diagram about them. You can save this diagram by passing `ref_save=True`.
# Show the geometry-conventions reference diagram (pass ref_save=True to save it).
refdir.Geometry_Reference()
# The heart of `ReflectDirect` is the class `DirectImaging_Planet`. It's how you create, interact with, and analyze your planets.
#
# All the public methods in our package have full docstrings, so use those "?" for help, such as:
#
# refdir.DirectImaging_Planet?
#
# Or better yet, browse the class details and more using [the online ReflectDirect API.](https://joelcolinschwartz.github.io/ReflectDirect/)
# ## Make a Planet
# Because `DirectImaging_Planet` is a class, most attributes are adjustable/persistent for each instance you call. That's handy, as you'll see.
#
# We will stick to a single instance in this demo, so choose a value $(1-6)$ for `ex_system_num` to create your planet.
#
# (FYI, there are more images in the `'png_maps'` folder than shown in this demo.)
# +
ex_system_num = 1
### 1: The default spherical harmonics
if ex_system_num == 1:
planet = refdir.DirectImaging_Planet()
### 2: Different spherical harmonics
elif ex_system_num == 2:
planet = refdir.DirectImaging_Planet(kind='ylm',mp_data=[[1,0,1.0],[5,-4,1.0],[4,0,1.0]],
primeD=45,alb_lims=[-1,1],name='YLM Ball')
planet.InvertFlipBlend_Amap(blend='EW')
### 3: A random checkerboard array (Try changing `kind` to 'aryI', too.)
elif ex_system_num == 3:
an_ary = np.random.random((10,20))
an_ary[an_ary > 0.5] = 1
an_ary[an_ary < 1] = 0
planet = refdir.DirectImaging_Planet(kind='aryA',mp_data=an_ary,name='Checkerboard')
### 4: An Exo-Earth image
elif ex_system_num == 4:
planet = refdir.DirectImaging_Planet(kind='pngI',mp_data=map_source+'bluemarble_2048.png',
alb_lims=[0.1,0.85],n_clat=91,n_long=181,ratRO=36,
name='Exo-Earth')
planet.Setup_ProRet_Degeneracy()
### 5: An image with a bright swath, like a glow worm.
elif ex_system_num == 5:
planet = refdir.DirectImaging_Planet(kind='pngA',mp_data=map_source+'glow_worm.png',
alb_lims=[0,10],name='Glow Worm')
planet.InvertFlipBlend_Amap(blend='NS')
### 6: Pacman!
elif ex_system_num == 6:
planet = refdir.DirectImaging_Planet(kind='pngI',mp_data=map_source+'pacman.png',
alb_lims=[0,5],n_clat=181,n_long=361,name='Pacman')
planet.Adjust_MotionTimes(rot_res=72)
planet.InvertFlipBlend_Amap(image='pri',into='alt',invert=False,flip='NS',blend='none')
# -
# Now use `EquiRect_Amap` to see how your planet looks. By default you get both the primary and alternate brightness maps.
# Equirectangular view of the planet's primary and alternate brightness maps.
planet.EquiRect_Amap()
# At first, your planet has a near-edge-on orbit with no tilt, and its `times` span $-0.5$ to $0.5$ orbits.
#
# One way to change this setup is by using `Adjust_Geometry` and `Adjust_MotionTimes`. Try setting `demo_adjust=True` if you want.
# +
demo_adjust = True

# Tilt the orbit (inclination 63 deg, obliquity 35 deg, solstice 285 deg) and
# set spin to 3 rotations per orbit over 0.3-3.7 orbits, for both maps.
if demo_adjust == True:
    planet.Adjust_Geometry(which='both',incD=63,oblD=35,solD=285)
    planet.Adjust_MotionTimes(which='both',ratRO=3.0,orb_min=0.3,orb_max=3.7)
# -
# Then, use `Info_Printout` to view your parameters. It's a quick way to check an instance.
# Print the instance's current geometry and motion parameters.
planet.Info_Printout()
# ## Analyze Your System
# Let's test more features of the class `DirectImaging_Planet`. You can run these things in any order.
#
# Also, if you check the API, you'll see that several methods store their figures. Those attributes are each `fig_****` — replace the stars with the first 4-5 letters of the method (all lowercase).
# #### Geometry
# Use `Geometry_Diagram` for an overhead view of the system (like the reference diagram):
# Overhead diagram of the system's orbital geometry.
planet.Geometry_Diagram()
# Or see things on the observer's line of sight with `Orthographic_Viewer`. Just pass your orbital phase in degrees.
#
# There are different viewing styles here — try `show='real'` for example.
# Observer's line-of-sight view at orbital phase 0 deg, showing the albedo
# maps (alt=True includes the alternate map).
planet.Orthographic_Viewer(0,show='amap',alt=True)
# #### Light Curve
# Graph some light curves with `LightCurve_Plot`, which uses the attribute `times`.
#
# There are different viewing styles here too — try `show='appar'` for example.
#
# (If you want to return actual data, use `Light_Curves` instead.)
# Light curves over the instance's `times` attribute, plotted as flux for
# both maps (alt=True); diff=False shows them separately rather than a difference.
planet.LightCurve_Plot(alt=True,diff=False,show='flux')
# #### Kernel
# The kernel represents spots on your planet that are visible *and* lit up.
#
# Want its details at a given phase? Use `Kernels_Plot`.
#
# Add a faint version of your map with `over_amap=True`.
# Kernel (visible-and-lit weighting) details at orbital phase 15 deg;
# over_amap=True would overlay a faint copy of the albedo map.
planet.Kernels_Plot(15,over_amap=False)
# Or, see how the kernel characteristics evolve over an orbit with `KChar_Evolve_Plot`.
#
# Try passing `'dom'` or `explode='obl'`, for example.
# Evolution of the kernel's width characteristic ('wid') over an orbit;
# see the method docstring for the explode/gap options.
planet.KChar_Evolve_Plot('wid',explode='none',gap=10)
# #### Spin Axis
# With `SpinAxis_Constraints`, you can predict how real observations might constrain your planet's spin axis. It's all based on the kernel.
#
# Also try `constraint='real'`, which takes longer to calculate but gives you $n$-sigma regions.
# Predict spin-axis constraints from kernel behavior at phases 50 deg,
# 290 deg, and the combined pair [50, 290]; 'perf' assumes perfect observations
# ('real' is slower but gives n-sigma regions).
planet.SpinAxis_Constraints(phaseD_list=[50,[50,290],290],constraint='perf')
# ## Play in the Sand(box)
# The primary and alternate parameters are useful. You can compare and contrast specific maps, spin rates, axis alignments, and much more with them.
#
# But sometimes you just want to mess around! That's what `Sandbox_Reflection` is for. It combines several methods into one and gives you a bunch of interactive controls.
#
# The markdown cell below is a sample image. **If you want to give it a go, change the image cell to code and write:**
#
# planet.Sandbox_Reflection()
#
# And protip, use the extra slots. You get up to 4 phases at a time that way.
# 
# ## Just One More Thing...
# This demo covers a lot of `ReflectDirect` options, but not everything.
#
# **[Check out the API](https://joelcolinschwartz.github.io/ReflectDirect/) and method docstrings for all the details.** Can't stress that enough.
#
# Enjoy!
# #### *Questions/Comments/Feedback?*
# *Email to:* <EMAIL>
#
# *And find more:* [joelcolinschwartz.com](http://joelcolinschwartz.com)
| RD_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **1**. (25 points)
#
# - Write a **recursive** function that returns the length of the hailstone sequence starting with a positive integer $n$. (15 points)
#
# The hailstone sequence is defined by the following rules:
# ```
# - If n is 1, stop
# - If n is even, divide by 2 and repeat
# - If n is odd, multiply by 3 and add 1 and repeat
# ```
# For example, the hailstone sequence starting with $n = 3$ has length 8:
# ```
# - 3, 10, 5, 16, 8, 4, 2, 1
# ```
#
# Use the `functools` package to avoid duplicate function calls.
#
# - Find the number that gives the longest sequence for starting numbers less than 100,000. Report the number and the length of the generated sequence. (10 points)
#
# +
# -
# **2**. (25 points)
#
# - Create a `pandas` DataFrame called `df` from the data set at https://bit.ly/2ksKr8f, taking care to only read in the `time` and `value` columns. (5 points)
# - Fill all rows with missing values with the value from the last non-missing value (i.e. forward fill) (5 points)
# - Convert to a `pandas` Series `s` using `time` as the index (5 points)
# - Create a new series `s1` with the rolling average using a shifting window of size 7 and a minimum period of 1 (5 points)
# - Report the `time` and value for the largest rolling average (5 points)
# +
# -
# **3**. (25 points)
#
# - Get information in JSON format about startship 23 from the Star Wars API https://swapi.co/api using the `requests` package (5 points)
# - Report the time interval between `created` and `edited` in minutes using the `pendulum` package. It is also ok if you prefer to do this using the standard `datetime` library (5 points)
# - Replace the URL values stored at the `films` key with the titles of the actual films (5 points)
# - Save the new JSON (with film titles and not URLs) to a file `ship.json` (5 points)
# - Read in the JSON file you have just saved as a Python dictionary (5 points)
# +
# -
# **4**. (25 points)
#
# Use SQL to answer the following questions using the SQLite3 database `anemia.db`:
#
# - Show the tables (not indexes) and their schema (in SQL) in the anemia database (5 points)
# - Count the number of male and female patients (5 points)
# - Find the average age of male and female patients (as of right now) (5 points)
# - Show the sex, hb and name of patients with severe anemia ordered by severity. Severe anemia is defined as
# - Hb < 7 if female
# - Hb < 8 if male
#
# (10 points)
#
# You many assume `pid` is the PRIMARY KEY and the FOREIGN KEY in the appropriate tables.
#
# Note: Hb is short for hemoglobin levels.
#
# Hint: In SQLite3, you can use `DATE('now')` to get today's date.
# %load_ext sql
# %sql sqlite:///anemia.db
# +
| exams/BIOS823_Mock_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rLsMb4vqY244"
# Note: You can run this example right now in a Jupyter-style notebook, no setup required! Just click "Run in Google Colab"
#
# <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/model_analysis/tfma_basic">
# <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a></td>
# <td><a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/model_analysis/tfma_basic.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td>
# <td><a target="_blank" href="https://github.com/tensorflow/tfx/blob/master/docs/tutorials/model_analysis/tfma_basic.ipynb">
# <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
# </table></div>
# + [markdown] id="YuSYVbwEYNHw"
# # TensorFlow Model Analysis
# ***An Example of a Key Component of TensorFlow Extended (TFX)***
# + [markdown] id="mPt5BHTwy_0F"
# [TensorFlow Model Analysis (TFMA)](https://www.tensorflow.org/tfx/guide/tfma) is a library for performing model evaluation across different slices of data. TFMA performs its computations in a distributed manner over large amounts of data using [Apache Beam](https://beam.apache.org/documentation/programming-guide/).
#
# This example colab notebook illustrates how TFMA can be used to investigate and visualize the performance of a model with respect to characteristics of the dataset. We'll use a model that we trained previously, and now you get to play with the results! The model we trained was for the [Chicago Taxi Example](https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline), which uses the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. Explore the full dataset in the [BigQuery UI](https://bigquery.cloud.google.com/dataset/bigquery-public-data:chicago_taxi_trips).
#
# As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about <a target='_blank' href='https://developers.google.com/machine-learning/fairness-overview/'>ML fairness</a>.
#
# Note: In order to understand TFMA and how it works with Apache Beam, you'll need to know a little bit about Apache Beam itself. The <a target='_blank' href='https://beam.apache.org/documentation/programming-guide/'>Beam Programming Guide</a> is a great place to start.
# + [markdown] id="Fnm6Mj3vTGLm"
# The columns in the dataset are:
# <table>
# <tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr>
#
# <tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr>
# <tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr>
# <tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr>
# <tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr>
# <tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr>
# </table>
# + [markdown] id="q7-ouHFnWAsu"
# ## Install Jupyter Extensions
# Note: If running in a local Jupyter notebook, then these Jupyter extensions must be installed in the environment before running Jupyter.
#
# ```bash
# jupyter nbextension enable --py widgetsnbextension --sys-prefix
# jupyter nbextension install --py --symlink tensorflow_model_analysis --sys-prefix
# jupyter nbextension enable --py tensorflow_model_analysis --sys-prefix
# ```
# + [markdown] id="LZj-impiAD_l"
# ## Install TensorFlow Model Analysis (TFMA)
#
# This will pull in all the dependencies, and will take a minute.
#
# **Note to ensure all the dependencies are installed properly, you may need to re-run this install step multiple times before there are no errors.**
# + id="SA2E343NAMRF"
# This setup was tested with TF 2.3 and TFMA 0.24 (using colab), but it should
# also work with the latest release.
# Import TFMA's dependency stack and report the versions in use; the pip
# install line is a notebook shell magic (a no-op when run as a plain script).
import sys

# Confirm that we're using Python 3
assert sys.version_info.major==3, 'This notebook must be run using Python 3.'

print('Installing TensorFlow')
import tensorflow as tf
print('TF version: {}'.format(tf.__version__))

print('Installing Tensorflow Model Analysis and Dependencies')
# !pip install -q tensorflow_model_analysis
import apache_beam as beam
print('Beam version: {}'.format(beam.__version__))
import tensorflow_model_analysis as tfma
print('TFMA version: {}'.format(tfma.__version__))
# + [markdown] id="_aD7n5eECydb"
# **NOTE: The output above should be clear of errors before proceeding. Re-run the install if you are still seeing errors. Also, make sure to restart the runtime/kernel before moving to the next step.**
# + [markdown] id="RptgLn2RYuK3"
# ## Load The Files
# We'll download a tar file that has everything we need. That includes:
#
# * Training and evaluation datasets
# * Data schema
# * Training and serving saved models (keras and estimator) and eval saved models (estimator).
# + id="K4QXVIM7iglN"
# Download the tar file from GCP and extract it
import io, os, tempfile

TAR_NAME = 'saved_models-2.2'
BASE_DIR = tempfile.mkdtemp()  # scratch dir for the extracted data/models/schema
DATA_DIR = os.path.join(BASE_DIR, TAR_NAME, 'data')
MODELS_DIR = os.path.join(BASE_DIR, TAR_NAME, 'models')
SCHEMA = os.path.join(BASE_DIR, TAR_NAME, 'schema.pbtxt')
OUTPUT_DIR = os.path.join(BASE_DIR, 'output')

# Fetch the archive, unpack it into BASE_DIR, then remove the tar (shell magics).
# !curl -O https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/{TAR_NAME}.tar
# !tar xf {TAR_NAME}.tar
# !mv {TAR_NAME} {BASE_DIR}
# !rm {TAR_NAME}.tar

print("Here's what we downloaded:")
# !ls -R {BASE_DIR}
# + [markdown] id="_xa7ZDV1MycO"
# ## Parse the Schema
#
# Among the things we downloaded was a schema for our data that was created by [TensorFlow Data Validation](https://www.tensorflow.org/tfx/data_validation/). Let's parse that now so that we can use it with TFMA.
# + id="uW5eB4TPcwFw"
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow.core.example import example_pb2

# Parse the downloaded schema.pbtxt into a Schema proto; text_format.Parse
# merges into the supplied message and returns it.
schema = schema_pb2.Schema()
contents = file_io.read_file_to_string(SCHEMA)
schema = text_format.Parse(contents, schema)
# + [markdown] id="UP3yuJxfNXRL"
# ## Use the Schema to Create TFRecords
#
# We need to give TFMA access to our dataset, so let's create a TFRecords file. We can use our schema to create it, since it gives us the correct type for each feature.
# + id="8-wud3fPczl6"
import csv

datafile = os.path.join(DATA_DIR, 'eval', 'data.csv')

# Convert each CSV row into a tf.train.Example, using the schema to choose the
# correct feature type (float / int64 / bytes) per column. Empty CSV cells
# become empty feature lists. Fixed: the CSV file handle is now closed via a
# context manager instead of being left open.
examples = []
with open(datafile, 'r') as csv_file:
    for line in csv.DictReader(csv_file):
        example = example_pb2.Example()
        for feature in schema.feature:
            key = feature.name
            if feature.type == schema_pb2.FLOAT:
                example.features.feature[key].float_list.value[:] = (
                    [float(line[key])] if len(line[key]) > 0 else [])
            elif feature.type == schema_pb2.INT:
                example.features.feature[key].int64_list.value[:] = (
                    [int(line[key])] if len(line[key]) > 0 else [])
            elif feature.type == schema_pb2.BYTES:
                example.features.feature[key].bytes_list.value[:] = (
                    [line[key].encode('utf8')] if len(line[key]) > 0 else [])
        # Add a new column 'big_tipper' that indicates if tips was > 20% of the fare.
        # TODO(b/157064428): Remove after label transformation is supported for Keras.
        big_tipper = float(line['tips']) > float(line['fare']) * 0.2
        example.features.feature['big_tipper'].float_list.value[:] = [big_tipper]
        examples.append(example)

# Serialize the examples into a TFRecord file for TFMA to consume.
tfrecord_file = os.path.join(BASE_DIR, 'train_data.rio')
with tf.io.TFRecordWriter(tfrecord_file) as writer:
    for example in examples:
        writer.write(example.SerializeToString())

# !ls {tfrecord_file}
# + [markdown] id="fp8Ub7GTXH3j"
# ## Setup and Run TFMA
#
# TFMA supports a number of different model types including TF keras models, models based on generic TF2 signature APIs, as well TF estimator based models. The [get_started](https://www.tensorflow.org/tfx/model_analysis/get_started) guide has the full list of model types supported and any restrictions. For this example we are going to show how to configure a keras based model as well as an estimator based model that was saved as an [`EvalSavedModel`](https://www.tensorflow.org/tfx/model_analysis/eval_saved_model). See the [FAQ](https://www.tensorflow.org/tfx/model_analysis/faq) for examples of other configurations.
#
# TFMA provides support for calculating metrics that were used at training time (i.e. built-in metrics) as well metrics defined after the model was saved as part of the TFMA configuration settings. For our keras [setup](https://www.tensorflow.org/tfx/model_analysis/setup) we will demonstrate adding our metrics and plots manually as part of our configuration (see the [metrics](https://www.tensorflow.org/tfx/model_analysis/metrics) guide for information on the metrics and plots that are supported). For the estimator setup we will use the built-in metrics that were saved with the model. Our setups also include a number of slicing specs which are discussed in more detail in the following sections.
#
# After creating a [`tfma.EvalConfig`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalConfig) and [`tfma.EvalSharedModel`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalSharedModel) we can then run TFMA using [`tfma.run_model_analysis`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/run_model_analysis). This will create a [`tfma.EvalResult`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult) which we can use later for rendering our metrics and plots.
# + [markdown] id="qgC7NdCatT8y"
# ### Keras
# + id="PLJxcjpjfwkx"
import tensorflow_model_analysis as tfma
# Setup tfma.EvalConfig settings.
# NOTE(review): `text_format` (google.protobuf), MODELS_DIR, OUTPUT_DIR and
# `tfrecord_file` are assumed to be defined in earlier notebook cells — confirm.
keras_eval_config = text_format.Parse("""
## Model information
model_specs {
# For keras (and serving models) we need to add a `label_key`.
label_key: "big_tipper"
}
## Post training metric information. These will be merged with any built-in
## metrics from training.
metrics_specs {
metrics { class_name: "ExampleCount" }
metrics { class_name: "BinaryAccuracy" }
metrics { class_name: "BinaryCrossentropy" }
metrics { class_name: "AUC" }
metrics { class_name: "AUCPrecisionRecall" }
metrics { class_name: "Precision" }
metrics { class_name: "Recall" }
metrics { class_name: "MeanLabel" }
metrics { class_name: "MeanPrediction" }
metrics { class_name: "Calibration" }
metrics { class_name: "CalibrationPlot" }
metrics { class_name: "ConfusionMatrixPlot" }
# ... add additional metrics and plots ...
}
## Slicing information
slicing_specs {} # overall slice
slicing_specs {
feature_keys: ["trip_start_hour"]
}
slicing_specs {
feature_keys: ["trip_start_day"]
}
slicing_specs {
feature_values: {
key: "trip_start_month"
value: "1"
}
}
slicing_specs {
feature_keys: ["trip_start_hour", "trip_start_day"]
}
""", tfma.EvalConfig())
# Create a tfma.EvalSharedModel that points at our keras model.
# '2' picks one of the saved runs 0..2 used later in this notebook —
# presumably the most recent one; confirm against the training cells.
keras_model_path = os.path.join(MODELS_DIR, 'keras', '2')
keras_eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=keras_model_path,
eval_config=keras_eval_config)
keras_output_path = os.path.join(OUTPUT_DIR, 'keras')
# Run TFMA
# Produces a tfma.EvalResult (also persisted under keras_output_path) that the
# visualization cells below consume.
keras_eval_result = tfma.run_model_analysis(
eval_shared_model=keras_eval_shared_model,
eval_config=keras_eval_config,
data_location=tfrecord_file,
output_path=keras_output_path)
# + [markdown] id="hMtoi_FpthQL"
# ### Estimator
# + id="6MJg42JVtjjj"
import tensorflow_model_analysis as tfma
# Setup tfma.EvalConfig settings.
# For the estimator path no post-training metrics are listed except the plot:
# built-in metrics saved inside the EvalSavedModel are used instead.
estimator_eval_config = text_format.Parse("""
## Model information
model_specs {
# To use EvalSavedModel set `signature_name` to "eval".
signature_name: "eval"
}
## Post training metric information. These will be merged with any built-in
## metrics from training.
metrics_specs {
metrics { class_name: "ConfusionMatrixPlot" }
# ... add additional metrics and plots ...
}
## Slicing information
slicing_specs {} # overall slice
slicing_specs {
feature_keys: ["trip_start_hour"]
}
slicing_specs {
feature_keys: ["trip_start_day"]
}
slicing_specs {
feature_values: {
key: "trip_start_month"
value: "1"
}
}
slicing_specs {
feature_keys: ["trip_start_hour", "trip_start_day"]
}
""", tfma.EvalConfig())
# Create a tfma.EvalSharedModel that points at our eval saved model.
estimator_base_model_path = os.path.join(
MODELS_DIR, 'estimator', 'eval_model_dir')
# The export lives in a (timestamped) subdirectory; take the first entry.
# NOTE(review): os.listdir order is arbitrary — fine only if there is exactly
# one export; confirm.
estimator_model_path = os.path.join(
estimator_base_model_path, os.listdir(estimator_base_model_path)[0])
estimator_eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=estimator_model_path,
eval_config=estimator_eval_config)
estimator_output_path = os.path.join(OUTPUT_DIR, 'estimator')
# Run TFMA
estimator_eval_result = tfma.run_model_analysis(
eval_shared_model=estimator_eval_shared_model,
eval_config=estimator_eval_config,
data_location=tfrecord_file,
output_path=estimator_output_path)
# + [markdown] id="A0khNBC9FlEO"
# ## Visualizing Metrics and Plots
#
# Now that we've run the evaluation, let's take a look at our visualizations using TFMA. For the following examples, we will visualize the results from running the evaluation on the keras model. To view the estimator based model update the `eval_result` to point at our `estimator_eval_result` variable.
# + id="XFY0BqGtGkJ0"
# Choose which evaluation the visualization cells below display; swap the
# comment to inspect the estimator-based results instead.
eval_result = keras_eval_result
# eval_result = estimator_eval_result
# + [markdown] id="cSl9qyTCbBKR"
# ### Rendering Metrics
#
# To view metrics you use [`tfma.view.render_slicing_metrics`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/view/render_slicing_metrics)
#
# By default the views will display the `Overall` slice. To view a particular slice you can either use the name of the column (by setting `slicing_column`) or provide a [`tfma.SlicingSpec`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/SlicingSpec).
#
# The metrics visualization supports the following interactions:
#
# * Click and drag to pan
# * Scroll to zoom
# * Right click to reset the view
# * Hover over the desired data point to see more details.
# * Select from four different types of views using the selections at the bottom.
#
# For example, we'll be setting `slicing_column` to look at the `trip_start_hour` feature from our previous `slicing_specs`.
# + id="hJ5_UMnWYmaE"
# Render metrics sliced by the `trip_start_hour` feature (one row per hour).
tfma.view.render_slicing_metrics(eval_result, slicing_column='trip_start_hour')
# + [markdown] id="LJuxvGCpn4yF"
# ### Slices Overview
#
# The default visualization is the **Slices Overview** when the number of slices is small. It shows the values of metrics for each slice. Since we've selected `trip_start_hour` above, it's showing us metrics like accuracy and AUC for each hour, which allows us to look for issues that are specific to some hours and not others.
#
# In the visualization above:
#
# * Try sorting the feature column, which is our `trip_start_hours` feature, by clicking on the column header
# * Try sorting by precision, and **notice that the precision for some of the hours with examples is 0, which may indicate a problem**
#
# The chart also allows us to select and display different metrics in our slices.
#
# * Try selecting different metrics from the "Show" menu
# * Try selecting recall in the "Show" menu, and **notice that the recall for some of the hours with examples is 0, which may indicate a problem**
#
# It is also possible to set a threshold to filter out slices with smaller numbers of examples, or "weights". You can type a minimum number of examples, or use the slider.
# + [markdown] id="cQT-1Ckcnd_7"
# ### Metrics Histogram
#
# This view also supports a **Metrics Histogram** as an alternative visualization, which is also the default view when the number of slices is large. The results will be divided into buckets and the number of slices / total weights / both can be visualized. Columns can be sorted by clicking on the column header. Slices with small weights can be filtered out by setting the threshold. Further filtering can be applied by dragging the grey band. To reset the range, double click the band. Filtering can also be used to remove outliers in the visualization and the metrics tables. Click the gear icon to switch to a logarithmic scale instead of a linear scale.
#
# * Try selecting "Metrics Histogram" in the Visualization menu
#
# + [markdown] id="hSnqI6Esb1XM"
# ### More Slices
#
# Our initial `tfma.EvalConfig` created a whole list of `slicing_specs`, which we can visualize by updating slice information passed to `tfma.view.render_slicing_metrics`. Here we'll select the `trip_start_day` slice (days of the week). **Try changing the `trip_start_day` to `trip_start_month` and rendering again to examine different slices.**
# + id="355wqvY3yBod"
# Same view as above, now sliced by day of the week.
tfma.view.render_slicing_metrics(eval_result, slicing_column='trip_start_day')
# + [markdown] id="PsXM0NYGeajg"
# TFMA also supports creating feature crosses to analyze combinations of features. Our original settings created a cross `trip_start_hour` and `trip_start_day`:
# + id="k7vbFS1Me1SH"
# Render the hour x day feature cross declared in the original slicing_specs.
tfma.view.render_slicing_metrics(
eval_result,
slicing_spec=tfma.SlicingSpec(
feature_keys=['trip_start_hour', 'trip_start_day']))
# + [markdown] id="GmeODqrUfJw2"
# Crossing the two columns creates a lot of combinations! Let's narrow down our cross to only look at **trips that start at noon**. Then let's select `binary_accuracy` from the visualization:
# + id="kdvBNfcHfRWg"
# Narrow the cross: slice by day, but only for trips starting at noon.
# Feature values are matched as strings, hence '12' rather than 12.
tfma.view.render_slicing_metrics(
eval_result,
slicing_spec=tfma.SlicingSpec(
feature_keys=['trip_start_day'], feature_values={'trip_start_hour': '12'}))
# + [markdown] id="f8acksU33KMm"
# ### Rendering Plots
#
# Any plots that were added to the `tfma.EvalConfig` as post training `metric_specs` can be displayed using [`tfma.view.render_plot`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/view/render_plot).
#
# As with metrics, plots can be viewed by slice. Unlike metrics, only plots for a particular slice value can be displayed so the `tfma.SlicingSpec` must be used and it must specify both a slice feature name and value. If no slice is provided then the plots for the `Overall` slice is used.
#
# In the example below we are displaying the `CalibrationPlot` and `ConfusionMatrixPlot` plots that were computed for the `trip_start_hour:1` slice.
# + id="X4TCKjGw3S-a"
# Plots are only rendered for a concrete slice value, so both feature name and
# value must be supplied; this shows the trip_start_hour=1 slice.
tfma.view.render_plot(
eval_result,
tfma.SlicingSpec(feature_values={'trip_start_hour': '1'}))
# + [markdown] id="meRvFkKcPbux"
# ## Tracking Model Performance Over Time
#
# Your training dataset will be used for training your model, and will hopefully be representative of your test dataset and the data that will be sent to your model in production. However, while the data in inference requests may remain the same as your training data, in many cases it will start to change enough so that the performance of your model will change.
#
# That means that you need to monitor and measure your model's performance on an ongoing basis, so that you can be aware of and react to changes. Let's take a look at how TFMA can help.
#
# Let's load 3 different model runs and use TFMA to see how they compare using [`render_time_series`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/view/render_time_series).
# + id="zJYUOjmFfuPy"
# Note this re-uses the EvalConfig from the keras setup.
# Evaluate each of the three saved model runs with the same config, writing
# every run's results into its own subdirectory of OUTPUT_DIR/time_series.
output_paths = []
for run in map(str, range(3)):
    run_model_path = os.path.join(MODELS_DIR, 'keras', run)
    run_output_path = os.path.join(OUTPUT_DIR, 'time_series', run)
    output_paths.append(run_output_path)
    # Point a shared model at this run's export and evaluate it.
    shared_model = tfma.default_eval_shared_model(
        eval_saved_model_path=run_model_path,
        eval_config=keras_eval_config)
    tfma.run_model_analysis(
        eval_shared_model=shared_model,
        eval_config=keras_eval_config,
        data_location=tfrecord_file,
        output_path=run_output_path)
# + [markdown] id="RsO-gqCRK0ar"
# First, we'll imagine that we've trained and deployed our model yesterday, and now we want to see how it's doing on the new data coming in today. The visualization will start by displaying AUC. From the UI you can:
#
# * Add other metrics using the "Add metric series" menu.
# * Close unwanted graphs by clicking on x
# * Hover over data points (the ends of line segments in the graph) to get more details
#
# Note: In the metric series charts the X axis is the model directory name of the model run that you're examining. These names themselves are not meaningful.
# + id="KjEws8T0cDm9"
# Load only the first two runs ("yesterday" and "today") from disk and render
# them as a metric time series.
eval_results_from_disk = tfma.load_eval_results(output_paths[:2])
tfma.view.render_time_series(eval_results_from_disk)
# + [markdown] id="EQ7kZxESN9Bx"
# Now we'll imagine that another day has passed and we want to see how it's doing on the new data coming in today, compared to the previous two days:
# + id="VjQmlXMmLwHf"
# Now load all three runs to compare across the full history.
eval_results_from_disk = tfma.load_eval_results(output_paths)
tfma.view.render_time_series(eval_results_from_disk)
# + [markdown] id="N1jpShgQxlVL"
# ## Model Validation
#
# TFMA can be configured to evaluate multiple models at the same time. Typically this is done to compare a new model against a baseline (such as the currently serving model) to determine what the performance differences in metrics (e.g. AUC, etc) are relative to the baseline. When [thresholds](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/MetricThreshold) are configured, TFMA will produce a [`tfma.ValidationResult`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/ValidationResult) record indicating whether the performance matches expectations.
#
# Let's re-configure our keras evaluation to compare two models: a candidate and a baseline. We will also validate the candidate's performance against the baseline by setting a [`tfma.MetricThreshold`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/MetricThreshold) on the AUC metric.
# + id="kkatdR6Y1-4G"
# Setup tfma.EvalConfig setting
# Two model_specs here: the new "candidate" and the "baseline" it is judged
# against (is_baseline: true). The AUC metric carries both an absolute
# value_threshold (must exceed 0.9) and a change_threshold relative to the
# baseline (must not drop by more than 1e-10).
eval_config_with_thresholds = text_format.Parse("""
## Model information
model_specs {
name: "candidate"
# For keras we need to add a `label_key`.
label_key: "big_tipper"
}
model_specs {
name: "baseline"
# For keras we need to add a `label_key`.
label_key: "big_tipper"
is_baseline: true
}
## Post training metric information
metrics_specs {
metrics { class_name: "ExampleCount" }
metrics { class_name: "BinaryAccuracy" }
metrics { class_name: "BinaryCrossentropy" }
metrics {
class_name: "AUC"
threshold {
# Ensure that AUC is always > 0.9
value_threshold {
lower_bound { value: 0.9 }
}
# Ensure that AUC does not drop by more than a small epsilon
# e.g. (candidate - baseline) > -1e-10 or candidate > baseline - 1e-10
change_threshold {
direction: HIGHER_IS_BETTER
absolute { value: -1e-10 }
}
}
}
metrics { class_name: "AUCPrecisionRecall" }
metrics { class_name: "Precision" }
metrics { class_name: "Recall" }
metrics { class_name: "MeanLabel" }
metrics { class_name: "MeanPrediction" }
metrics { class_name: "Calibration" }
metrics { class_name: "CalibrationPlot" }
metrics { class_name: "ConfusionMatrixPlot" }
# ... add additional metrics and plots ...
}
## Slicing information
slicing_specs {} # overall slice
slicing_specs {
feature_keys: ["trip_start_hour"]
}
slicing_specs {
feature_keys: ["trip_start_day"]
}
slicing_specs {
feature_keys: ["trip_start_month"]
}
slicing_specs {
feature_keys: ["trip_start_hour", "trip_start_day"]
}
""", tfma.EvalConfig())
# Create tfma.EvalSharedModels that point at our keras models.
# Version '2' is the candidate and '1' the baseline — presumably consecutive
# training runs from the earlier cells; confirm.
candidate_model_path = os.path.join(MODELS_DIR, 'keras', '2')
baseline_model_path = os.path.join(MODELS_DIR, 'keras', '1')
eval_shared_models = [
tfma.default_eval_shared_model(
model_name=tfma.CANDIDATE_KEY,
eval_saved_model_path=candidate_model_path,
eval_config=eval_config_with_thresholds),
tfma.default_eval_shared_model(
model_name=tfma.BASELINE_KEY,
eval_saved_model_path=baseline_model_path,
eval_config=eval_config_with_thresholds),
]
validation_output_path = os.path.join(OUTPUT_DIR, 'validation')
# Run TFMA
# NOTE(review): `schema` is assumed to come from an earlier cell (e.g. a TFDV
# schema for parsing the tf.Examples) — confirm.
eval_result_with_validation = tfma.run_model_analysis(
eval_shared_models,
eval_config=eval_config_with_thresholds,
data_location=tfrecord_file,
output_path=validation_output_path,
schema=schema)
# + [markdown] id="siF6npd3IfJq"
# When running evaluations with one or more models against a baseline, TFMA automatically adds diff metrics for all the metrics computed during the evaluation. These metrics are named after the corresponding metric but with `_diff` appended to the metric name.
#
# Let's take a look at the metrics produced by our run:
# + id="yGIw9TDuJ7wn"
# Render baseline vs. candidate, including the automatically added *_diff metrics.
tfma.view.render_time_series(eval_result_with_validation)
# + [markdown] id="JIsehm_V4oKU"
# Now let's look at the output from our validation checks. To view the validation results we use [`tfma.load_validation_result`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/load_validation_result). For our example, the validation fails because AUC is below the threshold.
# + id="48EdSTUW5eE1"
# Load the persisted validation record and print the overall pass/fail flag.
# Per the tutorial text this prints False: AUC is below the 0.9 threshold.
validation_result = tfma.load_validation_result(validation_output_path)
print(validation_result.validation_ok)
# + [markdown] id="tghWegsjhpkt"
# # Copyright © 2020 The TensorFlow Authors.
# + cellView="form" id="rSGJWC5biBiG"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="tvsmelXGasty"
# Note: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.
| site/en-snapshot/tfx/tutorials/model_analysis/tfma_basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kalz2q/mycolabnotebooks/blob/master/arabictable.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="gvDnC4VlEOyb" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="31030ee2-6f3d-48e5-db68-1cdb71dad106"
#@title
# %%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
font-size: 0.8rem;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
th {
background-color: rgb(235, 235, 235);
}
td {
text-align: center;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
caption {
padding: 10px;
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
tr td:nth-child(4) {
color: blue;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "a-s-salaamu",
"kana": "アッサラーム",
"arabic": "اَلسَّلَامُ",
"meaning": "peace"
},
{
"latin": "2alaykum",
"kana": "アライクム",
"arabic": "عَلَيْكُمْ",
"meaning": "on you"
},
{
"latin": "SabaaH",
"kana": "すぁバーふ",
"arabic": "صَبَاح",
"meaning": "morning"
},
{
"latin": "marHaban",
"kana": "マるはバン",
"arabic": "مَرْحَبًا",
"meaning": "Hello"
},
{
"latin": "2anaa bikhayr",
"kana": "アナー ビクハイる",
"arabic": "أَنا بِخَيْر",
"meaning": "I'm fine"
},
{
"latin": "kabir",
"kana": "カビーる",
"arabic": "كَبير",
"meaning": "large, big"
},
{
"latin": "kabiira",
"kana": "カビーら",
"arabic": "كبيرة",
"meaning": "large, big"
},
{
"latin": "siith",
"kana": "スィース",
"arabic": "سيث",
"meaning": "Seth (male name)"
},
{
"latin": "baluuza",
"kana": "バルーザ",
"arabic": "بَلوزة",
"meaning": "blouse"
},
{
"latin": "tii shiirt",
"kana": "ティー シイールト",
"arabic": "تي شيرْت",
"meaning": "T-shirt"
},
{
"latin": "ma3Taf",
"kana": "マあタフ",
"arabic": "معْطَف",
"meaning": "a coat"
},
{
"latin": "riim",
"kana": "リーム",
"arabic": "ريم",
"meaning": "Reem (male name)"
},
{
"latin": "tan-nuura",
"kana": "タンヌーら",
"arabic": "تَنّورة",
"meaning": "a skirt"
},
{
"latin": "jadiid",
"kana": "ジャディード",
"arabic": "جَديد",
"meaning": "new"
},
{
"latin": "wishshaaH",
"kana": "ウィシャーハ",
"arabic": "وِشَاح",
"meaning": "a scarf"
},
{
"latin": "juudii",
"kana": "ジューディー",
"arabic": "جودي",
"meaning": "Judy(name)"
},
{
"latin": "jamiil",
"kana": "ジャミール",
"arabic": "جَميل",
"meaning": "good, nice, pretty, beautiful"
},
{
"latin": "kalb",
"kana": "キャルブ",
"arabic": "كَلْب",
"meaning": "a dog"
},
{
"latin": "2abyaD",
"kana": "アビヤッド",
"arabic": "أَبْيَض",
"meaning": "white"
},
{
"latin": "qub-b3a",
"kana": "クッバあ",
"arabic": "قُبَّعة",
"meaning": "a hat"
},
{
"latin": "muruu2a",
"kana": "ムるーア",
"arabic": "مْروءة",
"meaning": "chivalry"
},
{
"latin": "Taawila",
"kana": "たーウィラ",
"arabic": "طاوِلة",
"meaning": "a table"
},
{
"latin": "haadhihi madiina qadiima",
"kana": "ハーじヒ マディーナ かディーマ",
"arabic": "هَذِهِ مَدينة قَديمة",
"meaning": "This is an ancient city"
},
{
"latin": "haadhihi binaaya jamiila",
"kana": "ハーじヒ ビナーヤ ジャミーラ",
"arabic": "هَذِهِ بِناية جَميلة",
"meaning": "This is a beautiful building"
},
{
"latin": "hadhaa muHammad",
"kana": "ハーざー ムはンマド",
"arabic": "هَذا مُحَمَّد",
"meaning": "This is Mohammed"
},
{
"latin": "haadhihi Hadiiqa jamiila",
"kana": "ハーじヒ はディーか ジャミーラ",
"arabic": "هَذِهِ حَديقة جَميلة",
"meaning": "This is a pretty garden"
},
{
"latin": "haadhihi Hadiiqa qadiima",
"kana": "ハーじヒ はディーか カディーマ",
"arabic": "هَذِهِ حَديقة قَديمة",
"meaning": "This is an old garden"
},
{
"latin": "al-Haa2iT",
"kana": "アルはーイと",
"arabic": "الْحائط",
"meaning": "the wall"
},
{
"latin": "Haa2iT",
"kana": "はーイと",
"arabic": "حائِط",
"meaning": "wall"
},
{
"latin": "hadhaa al-Haa2iT kabiir",
"kana": "ハーざ ル はーイと カビーる",
"arabic": "هَذا الْحائِط كَبير",
"meaning": "this wall is big"
},
{
"latin": "al-kalb",
"kana": "アル カルブ",
"arabic": "الْكَلْب",
"meaning": "the dog"
},
{
"latin": "haadhihi al-binaaya",
"kana": "ハーじヒ アル ビナーヤ",
"arabic": "هذِهِ الْبِناية",
"meaning": "this building"
},
{
"latin": "al-ghurfa",
"kana": "アル グるファ",
"arabic": "اَلْغُرفة",
"meaning": "the room"
},
{
"latin": "al-ghurfa kabiira",
"kana": "アルグるファ カビーら",
"arabic": "اَلْغرْفة كَبيرة",
"meaning": "Theroom is big"
},
{
"latin": "haadhihi alghurfa kabiira",
"kana": "ハーじヒ アルグるファ カビーら",
"arabic": "هَذِهِ الْغُرْفة كَبيرة",
"meaning": "this room is big"
},
{
"latin": "hadhaa alkalb kalbii",
"kana": "ハーざー アルカルブ カルビー",
"arabic": "هَذا الْكَلْب كَْلبي",
"meaning": "this dog is my dog"
},
{
"latin": "hadhaa alkalb jaw3aan",
"kana": "ハーざー アルカルブ ジャウあーン",
"arabic": "هَذا الْكَلْب جَوْعان",
"meaning": "this dog is hungry"
},
{
"latin": "haadhihi al-binaaya waasi3a",
"kana": "ハーじヒ アルビナーヤ ワースィあ",
"arabic": "هَذِهِ الْبناية واسِعة",
"meaning": "this building is spacious"
},
{
"latin": "al-kalb ghariib",
"kana": "アルカルブ ガりーブ",
"arabic": "اَلْكَلْب غَريب",
"meaning": "The dog is weird"
},
{
"latin": "alkalb kalbii",
"kana": "アルカルブ カルビー",
"arabic": "اَلْكَلْب كَلْبي",
"meaning": "The dog is my dog"
},
{
"latin": "hunaak",
"kana": "フゥナーク",
"arabic": "هُناك",
"meaning": "there"
},
{
"latin": "hunaak bayt",
"kana": "フゥナーク バイト",
"arabic": "هُناك بَيْت",
"meaning": "There is a house"
},
{
"latin": "al-bayt hunaak",
"kana": "アル バイト フゥナーク",
"arabic": "اَلْبَيْت هُناك",
"meaning": "The house is there"
},
{
"latin": "hunaak wishaaH 2abyaD",
"kana": "フゥナーク ウィシャーハ アビヤド",
"arabic": "هُناك وِشاح أبْيَض",
"meaning": "There is a white scarf"
},
{
"latin": "alkalb munaak",
"kana": "アルカルブ ムナーク",
"arabic": "اَلْكَلْب مُناك",
"meaning": "The dog is there"
},
{
"latin": "fii shanTatii",
"kana": "フィー シャンたティー",
"arabic": "في شَنْطَتي",
"meaning": "in my bag"
},
{
"latin": "hal 3indak maHfaDha yaa juurj",
"kana": "ハル あインダク マはファづぁ ヤー ジューるジ",
"arabic": "هَل عِنْدَك مَحْفَظة يا جورْج",
"meaning": "do you have a wallet , george"
},
{
"latin": "3indii shanTa ghaalya",
"kana": "あインディー シャンた ガーリヤ",
"arabic": "عِنْدي شَنْطة غالْية",
"meaning": "I have an expensive bag"
},
{
"latin": "shanTatii fii shanTatik ya raanyaa",
"kana": "シャンたティー フィー シャンたティク ヤー ラーニヤー",
"arabic": "شِنْطَتي في شَنْطتِك يا رانْيا",
"meaning": "my bag is in your bag rania"
},
{
"latin": "huunak maHfaDha Saghiira",
"kana": "フゥナーク マはファざ すぁギーら",
"arabic": "هُناك مَحْفَظة صَغيرة",
"meaning": "There is a small wallet"
},
{
"latin": "hunaak kitaab jadiid",
"kana": "フゥナーク キターブ ジャディード",
"arabic": "هَناك كِتاب جَديد",
"meaning": "There is a new book"
},
{
"latin": "hunaak kitaab Saghiir",
"kana": "フゥナーク キターブ すぁギーる",
"arabic": "هُناك كِتاب صَغير",
"meaning": "There is a small book"
},
{
"latin": "hunaak qubb3a fii shanTatak yaa buub",
"kana": "フゥナーク クッバあ フィー シャンたタク ヤー ブーブ",
"arabic": "هُناك قُبَّعة في شَنْطَتَك يا بوب",
"meaning": "There is a hat in your bag bob"
},
{
"latin": "hunaak shanTa Saghiira",
"kana": "フゥナーク シャンた すぁギーら",
"arabic": "هُناك شَنْطة صَغيرة",
"meaning": "There is a small bag"
},
{
"latin": "shanTatii hunaak",
"kana": "シャンたティー フゥナーク",
"arabic": "شَنْطَتي هُناك",
"meaning": "my bag is over there"
},
{
"latin": "hunaak kitaab Saghiir wa-wishaaH kabiir fii ShanTatii",
"kana": "フゥナーク キターブ すぁギーる ワ ウィシャーは カビーる フィー シャンたティー",
"arabic": "هُناك كِتاب صَغير وَوِشاح كَبير في شَنْطَتي",
"meaning": "There is a small book and a big scarf in my bag"
},
{
"latin": "hunaak maHfaDha Saghiir fii shanTatii",
"kana": "フゥナーク マはファざ すぁギーる フィー シャンたティー",
"arabic": "هُناك مَحْفَظة صَغيرة في شَنْطَتي",
"meaning": "There is a small wallet in my bag"
},
{
"latin": "aljaami3a hunaak",
"kana": "アルジャーミあ フゥナーク",
"arabic": "اَلْجامِعة هُناك",
"meaning": "The university is there"
},
{
"latin": "hunaak kitaab",
"kana": "フゥナーク キターブ",
"arabic": "هُناك كِتاب",
"meaning": "There is a book"
},
{
"latin": "al-madiina hunaak",
"kana": "アルマディーナ フゥナーク",
"arabic": "اَلْمَدينة هُناك",
"meaning": "Thecity is there"
},
{
"latin": "hal 3indik shanTa ghaaliya yaa Riim",
"kana": "ハル あインディク シャンた ガーリヤ ヤー りーム",
"arabic": "هَل عِندِك شَنْطة غالْية يا ريم",
"meaning": "do you have an expensive bag Reem"
},
{
"latin": "hal 3indik mashruub yaa saamya",
"kana": "ハル あインディク マシュるーブ ヤー サーミヤ",
"arabic": "هَل عِنْدِك مَشْروب يا سامْية",
"meaning": "do you have a drink samia"
},
{
"latin": "hunaak daftar rakhiiS",
"kana": "フゥナーク ダフタる らクヒーすぅ",
"arabic": "هُناك دَفْتَر رَخيص",
"meaning": "There is a cheep notebook"
},
{
"latin": "laysa 3indii daftar",
"kana": "ライサ あインディー ダフタる",
"arabic": "لَيْسَ عِنْدي دَفْتَر",
"meaning": "I do not have a notebook"
},
{
"latin": "laysa hunaak masharuub fii shanTatii",
"kana": "ライサ フゥナーク マシャるーブ フィー シャンたティー",
"arabic": "لَيْسَ هُناك مَشْروب في شَنْطَتي",
"meaning": "There is no drink in my bag"
},
{
"latin": "laysa hunaak kitaab qaSiir fii baytii",
"kana": "ライサ フゥナーク キターブ カスィール フィー バイティ",
"arabic": "لَيْسَ هُناك كِتاب قَصير في بَيْتي",
"meaning": "There is no short book in my house"
},
{
"latin": "laysa hunaak daftar rakhiiS",
"kana": "ライサ フゥナーク ダフタる らクヒース",
"arabic": "لَيْسَ هُناك دَفْتَر رَخيص",
"meaning": "There is no cheap notebook"
},
{
"latin": "laysa 3indii sii dii",
"kana": "ライサ あインディー スィー ヂー",
"arabic": "لَيْسَ عَنْدي سي دي",
"meaning": "I do not have a CD"
},
{
"latin": "laysa hunaak qalam fii shanTatii",
"kana": "ライサ フゥナーク かラム フィー シャンたティー",
"arabic": "لَيْسَ هُناك قَلَم في شَنْطَتي",
"meaning": "There is no pen in my bag"
},
{
"latin": "laysa hunaak kitaab qaSiir fii shanTatii",
"kana": "ライサ フゥナーク キターブ かすぃーる フィー シャンたティー",
"arabic": "لَيْسَ هُناك كِتاب قَصير في شَنْطَتي",
"meaning": "There is no short book in my bag"
},
{
"latin": "laysa hunaak daftar 2abyaD",
"kana": "ライサ フゥナーク ダフタる アビヤど",
"arabic": "لَيْسَ هُناك دَفْتَر أَبْيَض",
"meaning": "There is no white notebook."
},
{
"latin": "maTbakh",
"kana": "マとバクフ",
"arabic": "مَطْبَخ",
"meaning": "a kitchen"
},
{
"latin": "3ilka",
"kana": "アイルカ",
"arabic": "عِلْكة",
"meaning": "chewing gum"
},
{
"latin": "miftaaH",
"kana": "ミフターフ",
"arabic": "مفْتاح",
"meaning": "a key"
},
{
"latin": "tuub",
"kana": "トゥーブ",
"arabic": "توب",
"meaning": "top"
},
{
"latin": "nuquud",
"kana": "ヌクード",
"arabic": "نُقود",
"meaning": "money, cash"
},
{
"latin": "aljazeera",
"kana": "アルジャズィーラ",
"arabic": "الجزيرة",
"meaning": "Al Jazeera"
},
{
"latin": "kursii",
"kana": "クるスィー",
"arabic": "كَرْسي",
"meaning": "a chair"
},
{
"latin": "sari3",
"kana": "サリア",
"arabic": "سَريع",
"meaning": "fast"
},
{
"latin": "Haasuub",
"kana": "はースーブ",
"arabic": "حاسوب",
"meaning": "a computer"
},
{
"latin": "maktab",
"kana": "マクタブ",
"arabic": "مَكْتَب",
"meaning": "office, desk"
},
{
"latin": "hadhaa maktab kabiir",
"kana": "ハーざー マクタブ カビーる",
"arabic": "هَذا مَِكْتَب كَبير",
"meaning": "This is a big office"
},
{
"latin": "kursii alqiTTa",
"kana": "クるスィー アルキッた",
"arabic": "كُرْسي الْقِطّة",
"meaning": "the cat's chair"
},
{
"latin": "Haasuub al-2ustaadha",
"kana": "はースーブ アル ウスターざ",
"arabic": "حاسوب اَلْأُسْتاذة",
"meaning": "professor's computer"
},
{
"latin": "kursii jadiid",
"kana": "クるスィー ジャディード",
"arabic": "كُرْسي جَديد",
"meaning": "a new chair"
},
{
"latin": "SaHiifa",
"kana": "すぁひーファ",
"arabic": "صَحيفة",
"meaning": "newspaper"
},
{
"latin": "haatif",
"kana": "ハーティフ",
"arabic": "هاتِف",
"meaning": "phone"
},
{
"latin": "2amriikiyy",
"kana": "アムりーキーイ",
"arabic": "أمْريكِي",
"meaning": "American"
}
];
</script>
<script>
// table要素を生成
var table = document.createElement("table");
// ヘッダーを作成
var tr = document.createElement("tr");
for (key in json[0]) {
// th要素を生成
var th = document.createElement("th");
// th要素内にテキストを追加
th.textContent = key;
// th要素をtr要素の子要素に追加
tr.appendChild(th);
}
// tr要素をtable要素の子要素に追加
table.appendChild(tr);
// テーブル本体を作成
for (var i = 0; i < json.length; i++) {
// tr要素を生成
var tr = document.createElement("tr");
// th・td部分のループ
for (key in json[0]) {
// td要素を生成
var td = document.createElement("td");
// td要素内にテキストを追加
td.textContent = json[i][key];
// td要素をtr要素の子要素に追加
tr.appendChild(td);
}
// tr要素をtable要素の子要素に追加
table.appendChild(tr);
}
// 生成したtable要素を追加する
document.getElementById("maintable").appendChild(table);
</script>
| arabictable.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Session 4: Data Structuring 3
#
# *<NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# ## Agenda
#
# In this session, we will continue our work with `pandas` and data structuring:
#
# - Missings and Duplicated Data
# - Combining Data Sets
# - Split-Apply-Combine
# - Reshaping Data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recap
#
# - Series & DataFrames
# - Boolean Data
# - Numeric Operations and Methods
# - String Operations
# - Categorical Data
# - Time Series Data
# + [markdown] slideshow={"slide_type": "fragment"}
# *But I'm nowhere near an expert in data structuring 1 and 2 :( ...*
# + [markdown] slideshow={"slide_type": "fragment"}
# **Don't worry**
# - You have lots of time to catch up!
# - Solutions will help you along the way
# - We teach you all the tools, but you only need some of them in your exam
# + [markdown] slideshow={"slide_type": "slide"}
# ## Questions past week
# - Dictionaries in multiple layers
# - Naming index keys
# - Method chaining
# + [markdown] slideshow={"slide_type": "slide"}
# ### Dictionaries in Multiple Layers
#
# In the exercises, you encountered a problem with navigating in dictionaries with multiple layers. Why are these interesting?
# - It clearly illustrates how dictionaries are (not) related in structure
# - DataFrames have a matrix-like (2D) structure
# - Dictionaries have a non-sequential, 1D structure
# - In order to represent a df-like object with dictionaries, we must combine them in multiple layers
# - Data that come in the form of JSON will often have this structure
# - Potentially in many more layers
# + slideshow={"slide_type": "fragment"}
# Three "column"-like dicts keyed by row position 0..4, then a dict of dicts
# that mimics a DataFrame: outer keys act as column names, inner keys as rows.
my_dict1 = dict(enumerate([432, 654, 776, 234, 765]))
my_dict2 = dict(enumerate([131, 432, 876, 432, 174]))
my_dict3 = dict(enumerate([222, 654, 23, 862, 175]))
my_d = dict(first=my_dict1, second=my_dict2, third=my_dict3)
# -
my_d  # the full nested structure: dict of column-dicts
my_d['first']  # one "column"
my_d['first'][2]  # a single cell: column 'first', row 2 -> 776
# + [markdown] slideshow={"slide_type": "slide"}
# ### Naming Columns and Rows
#
# We can name columns. And we can name rows. Both are easy:
# + slideshow={"slide_type": "fragment"}
# A 3x3 frame filled with 0..8 (np.arange(9) reshaped to 3 rows x 3 columns).
# NOTE(review): pd/np are imported in a later cell of this notebook — confirm
# the intended execution order.
my_df = pd.DataFrame(np.arange(9).reshape(3,3))
my_df
# +
# Build the header labels directly with list comprehensions. The original
# abused comprehensions for their .append() side effect, building and
# discarding a throwaway list of None values — an anti-pattern.
rows = [f'row{i + 1}' for i in range(3)]  # you can also write 'row{a}'.format(a=i+1)
cols = [f'column{i + 1}' for i in range(3)]
# Attach the generated labels: `columns` names the columns, `index` the rows.
my_df.columns, my_df.index = cols, rows
my_df
# + [markdown] slideshow={"slide_type": "slide"}
# ### Method Chaining
#
# Make multiple operations in one line! Be careful that your code remains readable...
# + slideshow={"slide_type": "-"}
# Method chaining demo: rename the columns, derive `my_sum` via a lambda that
# receives the intermediate frame, sort descending by it, and renumber the
# index — all in one pipeline (backslashes continue the statement).
my_df_new = my_df\
.rename(columns = {'column1': 'COL100', 'column2': 'COL200', 'column3': 'COL300'})\
.assign(my_sum = lambda my_df: my_df['COL100']*100+my_df['COL200']*10+my_df['COL300'])\
.sort_values(by='my_sum', ascending = False)\
.reset_index(drop=True)
my_df_new
# + [markdown] slideshow={"slide_type": "fragment"}
# What did that lambda function just do? Essentially, the idea is that it is a function that takes $\textbf{x}$ as input:
# -
# Prefer `def` over assigning a lambda to a name (PEP 8, E731):
# the function gets a real __name__ and a docstring, and is easier
# to debug -- behavior is identical.
def func(x1, x2, x3):
    """Return the sum of the three arguments."""
    return x1 + x2 + x3
func(1,2,3)
# + slideshow={"slide_type": "-"}
# Loading packages
import numpy as np
import pandas as pd
import seaborn as sns
tips = sns.load_dataset('tips')
titanic = sns.load_dataset('titanic')
#import matplotlib.pyplot as plt
#import requests
# + [markdown] slideshow={"slide_type": "slide"}
# # Today: More Structuring :-)
#
# In practice, you are rarely handed some data that are just ready for analysis. In particular, it may suffer from:
# + [markdown] slideshow={"slide_type": "fragment"}
# - Presence of missings and/or duplicated values:
# - missing: Should we ignore? delete? replace (impute)? collect new data?
# - Depends on the context!
# - duplicated: Are they present? Should they be removed?
# - Depends on the context!
# + [markdown] slideshow={"slide_type": "fragment"}
# - Your data comes in separate portions that must be combined:
# - Vertical merge? Horizontal merge?
# - Inner or outer merge? On which keys?
# + [markdown] slideshow={"slide_type": "fragment"}
# - Your data may have a different shape:
# - What is a long format and a wide format?
# - How to shift between the two?
# + [markdown] slideshow={"slide_type": "fragment"}
# - You may need some aggregate pieces of information on different subgroups?
#     - How to compute group-specific means, medians, variances, etc.?
# + [markdown] slideshow={"slide_type": "slide"}
# # Missing Data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Representation of Missing Data
# *How can we think about missing data?*
#
# Missing data, i.e. empty observations:
# - In Python: `None`
# - In pandas: numpy's 'not a number' abbreviated with `NaN` or simply `nan`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Missing Data in Pandas
# *What does a DataFrame with missing data look like*
# + slideshow={"slide_type": "-"}
# A small frame containing both kinds of missing markers, np.nan and
# None; pandas normalises both to NaN on construction.
nan_data = [
    [0, np.nan, 1],
    [2, 3, None],
    [4, 5, 6],
]
nan_df = pd.DataFrame(nan_data, columns=['A', 'B', 'C'])
nan_df
# + slideshow={"slide_type": "-"}
nan_df.isnull()  # boolean mask: True wherever a value is missing
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating Missings (1:2)
#
# *Would we ever want to create missings?*
# + [markdown] slideshow={"slide_type": "fragment"}
# In the ideal world, no. In practice, yes:
# - Sometimes, missings are coded to a numerical value.
#     - These can be very influential in your data analysis if not uncovered.
#
# Example:
# + slideshow={"slide_type": "-"}
# Missings coded as a numeric sentinel (999999) -- easy to overlook
# and very influential if treated as real data.
nan_data2 = [
    [0, 999999, 1],
    [2, 3, 999999],
    [4, 5, 6],
]
pd.DataFrame(nan_data2, columns=['A', 'B', 'C'])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating Missings (2:2)
#
# The solution is simple:
# + slideshow={"slide_type": "-"}
pd.DataFrame(nan_data2, columns = ['A', 'B', 'C']).replace(999999,np.NaN)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Imputing Missings
# *Are there ways to replace missings with values with pandas?*
# + [markdown] slideshow={"slide_type": "fragment"}
# As mentioned earlier, pandas does handle missings directly. See table 7-2 in PDA. Examples:
# + slideshow={"slide_type": "-"}
# fillna(method=...) is deprecated since pandas 2.1 (removed in 3.0);
# the dedicated .bfill() / .ffill() methods give the same result.
nan_df.bfill() # can also use .ffill()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dropping Missings
# *Are there also ways to simply drop missings with pandas?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Yes, this is fortunately easy. Use the `.dropna()` feature in pandas:
# - Remember to drop along the right axis.
# + slideshow={"slide_type": "-"}
nan_df.dropna(axis=0) # , thresh=2
# + slideshow={"slide_type": "-"}
nan_df.dropna(axis=1)
# + [markdown] slideshow={"slide_type": "fragment"}
# *Note:* For practical purposes, you risk dropping too many observations here!
# + [markdown] slideshow={"slide_type": "slide"}
# # Duplicated Data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Duplicates: A Definition
# *What does it mean that there are duplicates in the data?*
#
# Slightly tricky to define:
# - More than one entry where there should in fact be only one.
# - If for a certain set of variables, a combination is repeated.
# + [markdown] slideshow={"slide_type": "fragment"}
# In practice, it requires some understanding of your data:
# - Two observations are identical: Is this a duplicate, or is it truly because 'occurrence happened twice'?
# - In income registries from DK: One individual should not show up more than once per year in the data.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Handling Duplicates (1:2)
# *In practice, what could a duplicate look like?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's revisit our categorical education data:
# +
# Four observations -- one of them ('High School') duplicated --
# repeated 100,000 times to get a sizeable Series.
edu_list = ['BSc Political Science', 'Secondary School', 'High School', 'High School']
edu_cats = ['Secondary School', 'High School', 'BSc Political Science']
str_ser = pd.Series(edu_list * 10**5)
str_ser.head(6)
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, let's see what is duplicated:
# -
str_ser.duplicated().head(6) # can also specify: ", keep='last'"
# + [markdown] slideshow={"slide_type": "slide"}
# ## Handling Duplicates (2:2)
# *How do we drop duplicates?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Simply use the `.drop_duplicates()` method.
# -
str_ser.drop_duplicates()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Multiple Variables (1:2)
# *How do we drop duplicates when we have multiple variables (e.g. persons and years)?*
# + [markdown] slideshow={"slide_type": "fragment"}
# Simulate an additional binary variable for purposes of illustration:
# -
# Pair each education label with a random 0/1 indicator.
# NOTE(review): np.random.rand is not seeded here, so the exact
# duplicate pattern differs between runs of the notebook.
edu_df = pd.DataFrame({'edu': edu_list*10**5, 'num': np.round(np.random.rand(4*10**5),0)})
edu_df.head(10)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Multiple Variables (2:2)
# Now, specify the combination of columns for which we require unique values when dropping:
# -
edu_df.drop_duplicates(['edu', 'num'], keep='first')
# + [markdown] slideshow={"slide_type": "slide"}
# # Joining DataFrames
# + [markdown] slideshow={"slide_type": "slide"}
# ## Joining DataFrames
#
# Until now, we've worked with one DataFrame at a time:
# - with the exception of `concat` in Assignment 0
#
# We will now learn to put them together.
# + [markdown] slideshow={"slide_type": "fragment"}
# For the following, we use the multi column display from Jake VanderPlas [here](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html).
# + slideshow={"slide_type": "-"}
class display(object):
    """Render several named objects side by side as HTML.

    Each constructor argument is the *name* (a string) of a variable
    in the defining namespace; the name is evaluated lazily whenever
    the object is shown, via `eval`.
    """
    template = """<div style="float: left; padding: 10px;">
<p style='font-family:"Courier New", Courier, monospace'>{0}</p>{1}
</div>"""

    def __init__(self, *args):
        self.args = args

    def _repr_html_(self):
        # One <div> per named object, labelled with its name.
        blocks = [self.template.format(name, eval(name)._repr_html_())
                  for name in self.args]
        return '\n'.join(blocks)

    def __repr__(self):
        # Plain-text fallback: name, newline, then repr of the value.
        parts = [name + '\n' + repr(eval(name)) for name in self.args]
        return '\n\n'.join(parts)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Some DataFrames
# Let's make some data to play with. Here, use `dict` approach:
# + slideshow={"slide_type": "-"}
left = pd.DataFrame({'key': ['A', 'B', 'C', 'D'], 'value_left': range(4)})
right = pd.DataFrame({'key': ['C', 'D', 'E', 'F'], 'value_right': range(4,8)})
display('left', 'right')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Merging Data
# The forthcoming figures all follow this convention:
#
# - <font color="blue">blue</font>: rows in merge output
# - <font color="red">red</font>: rows excluded from output (i.e., removed)
# - <font color="green">green</font>: missing values replaced with NaNs
#
# We use `merge` which is pandas function and a method for dataframes.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Inner Merge (default)
# This merge uses only *shared* keys
# + slideshow={"slide_type": "-"}
inner_merge = pd.merge(left, right, on='key', how='inner')
inner_merge
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://i.stack.imgur.com/YvuOa.png' alt="Drawing" style="width: 400px;"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Left Merge
# This merge uses only *left* keys
# + slideshow={"slide_type": "-"}
left_merge = pd.merge(left, right, on='key', how='left')
left_merge
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://i.stack.imgur.com/BECid.png' alt="Drawing" style="width: 400px;"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Right Merge
# This merge uses only *right* keys (pretty redundant...)
# + slideshow={"slide_type": "-"}
right_merge = pd.merge(left, right, on='key', how='right')
right_merge
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://i.stack.imgur.com/8w1US.png' alt="Drawing" style="width: 400px;"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Outer Merge
# This merge uses *all* keys
# + slideshow={"slide_type": "-"}
outer_merge = pd.merge(left, right, on='key', how='outer')
outer_merge
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://i.stack.imgur.com/euLoe.png' alt="Drawing" style="width: 400px;"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Overview of Merge Types
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://www.dofactory.com/Images/sql/sql-joins.png' alt="Drawing" style="width: 450px;"/></center>
#
# More merge type exists, see [this post](https://stackoverflow.com/questions/53645882/pandas-merging-101) for details.
# -
# + [markdown] slideshow={"slide_type": "slide"}
# ## Joining DataFrames
#
# We can also join by keys in the index. This is possible with `join` or `concat`.
#
# Additional properties of `concat`:
# - works vertically and horizontally.
# - works with multiple DataFrames at once;
#
# Requirement: overlapping index keys or column names.
# -
df0 = left.set_index('key')
df1 = right.set_index('key')
df0
# + [markdown] slideshow={"slide_type": "slide"}
# ## Horizontal Join
#
# Works like `merge` where _key_ is now the index!
# + slideshow={"slide_type": "-"}
inner_data = df0.join(df1, how='inner')
inner_data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Horizontal Join, Axis = 1
#
# `concat` on axis=1 acts similarly to an outer join.
# + slideshow={"slide_type": "-"}
dfs = [df0, df1]
pd.concat(dfs, axis=1, sort=False)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Vertical Join, Axis = 0
#
# On the other hand, `concat` on axis=0 stacks the dataframes on top of each other!
# + slideshow={"slide_type": "-"}
pd.concat([df0, df1], join='outer', axis=0, sort=False)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Vertical and Horizontal
#
# An overview of `concat` operations (left: horizontal, right: vertical)
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://i.stack.imgur.com/1rb1R.jpg' alt="Drawing" style="width: 450px;"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example of Using Concat (1:3)
#
# Let's load some stock data...
# + slideshow={"slide_type": "-"}
import yfinance as yf
import matplotlib.pyplot as plt # For plotting
# Tickers to fetch. NOTE(review): 'fb' is Facebook's pre-2022 ticker
# (now META) -- may need updating to download current data.
stocks = ['aapl', 'goog', 'msft', 'amzn', 'fb', 'tsla']
def load_stock(s):
    # Download full daily history since 2000 and keep only the
    # dividend/split-adjusted closing price (a Series per ticker).
    return yf.download(s, data_source='yahoo', start='2000-01-01')['Adj Close']
stock_dfs = {s:load_stock(s) for s in stocks} # dictionary of all stock price
stock_df = pd.concat(stock_dfs, axis=1) # horizontal join
# -
stock_dfs
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example of Using Concat (2:3)
#
# What do the data look like?
# + slideshow={"slide_type": "-"}
stock_df.tail(5)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example of Using Concat (3:3)
#
# Now, let's try and visualize it (more about this in next session)!
# + slideshow={"slide_type": "-"}
ax = stock_df.plot(logy=True, figsize=(10,3))
ax.legend(["Apple", "Google","Microsoft", "Amazon","Facebook", "Tesla"], loc='best', ncol=2)
# + [markdown] slideshow={"slide_type": "slide"}
# # Split-Apply-Combine
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Definition
# *What is the split-apply-combine framework?*
# -
# A procedure to...
# 1. **split** a DataFrame into subsets of data,
# 2. **apply** certain functions (sorting, mean, other custom stuff), and
# 3. **combine** it back into a DataFrame
#
# Application example: compute mean personal income.
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Process $-$ an Overview
# -
# How do we *split* observations by x and *apply* the calculation mean of y?*
# + [markdown] slideshow={"slide_type": "-"}
# <center><img src='https://raw.githubusercontent.com/abjer/sds2017/master/slides/figures/split-apply-combine.png'></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Split-apply-combine in Pandas
#
# A powerful tool in DataFrames is the `groupby` method. Example:
# -
tips.head(5)
# + slideshow={"slide_type": "-"}
split_var = 'sex' # like x in figure
apply_var = 'total_bill' # like y in figure
tips.groupby(split_var)[apply_var].mean()
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is groupby?
# `groupby` creates a collection of sub-dataframes we can process.
#
# We can iterate over a groupby object. Example:
# + slideshow={"slide_type": "-"}
# Iterating a groupby object yields (group key, sub-DataFrame) pairs;
# here we collect the mean bill per 'sex' category by hand.
results = {}
for group, group_df in tips.groupby('sex'):
    results[group] = group_df.total_bill.mean()
results
# -
pd.Series(results)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Other Methods and Functions in groupby
#
# Can other methods be applied?
# + [markdown] slideshow={"slide_type": "fragment"}
# - Yes: `mean`, `std`, `min`, `max` all work.
# - Using `.apply()` method and inserting your ***homemade*** function works too.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Additional variables groupby
# Does `groupby` work for multiple variables, functions?
# +
split_vars = ['sex', 'time']
apply_vars = ['total_bill', 'tip']
apply_fcts = ['median', 'mean', 'std']
combined = tips.groupby(split_vars)[apply_vars].agg(apply_fcts)
print(combined.reset_index())
# + [markdown] slideshow={"slide_type": "-"}
# Note grouping with multiple variables uses a [MultiIndex](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.MultiIndex.html) which we do not cover.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Maintaining the Structure
# *How do we get our `groupby` output into the original dataframe?*
#
# - Option 1: you merge it (be careful!)
#
# - Option 2: you use `transform`.
# -
# Broadcast the group means back onto the original rows via transform.
# NOTE(review): despite the name 'mu_sex', this groups by
# split_vars = ['sex', 'time'] (set in an earlier cell), not by sex
# alone -- confirm the intended grouping.
tips['mu_sex'] = tips.groupby(split_vars)[apply_var].transform('mean')
tips.head(5)
tips['dev_sex'] = (tips.total_bill - tips.mu_sex)
tips.head(5)
# + [markdown] slideshow={"slide_type": "slide"}
# # Reshaping Data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Stacking Data (1:2)
# + [markdown] slideshow={"slide_type": "-"}
# A DataFrame can be collapsed into a Series with the **stack** command.
#
# Let's generate a simple example:
# + slideshow={"slide_type": "fragment"}
# A tiny wide-format frame: regions as columns, years as rows.
df = pd.DataFrame(
    [[1, 2], [3, 4]],
    columns=['EU', 'US'],
    index=[2000, 2010],
)
df
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, stack the data:
# + slideshow={"slide_type": "fragment"}
# Collapse the columns into an extra index level: wide -> long.
stacked = df.stack()
stacked
# + [markdown] slideshow={"slide_type": "slide"}
# ## Stacking Data (2:2)
# + [markdown] slideshow={"slide_type": "-"}
# Clearly, the data now comes with **hierarchical indexing**; first year, then region.
#
# If we want, we can just reset the indexing:
# + slideshow={"slide_type": "fragment"}
stacked = stacked.reset_index()
stacked
# + [markdown] slideshow={"slide_type": "-"}
# And then do the renaming...
# -
stacked.columns = ['year', 'place', 'some_val']
stacked
# + [markdown] slideshow={"slide_type": "slide"}
# ## To Wide Format
# + [markdown] slideshow={"slide_type": "-"}
# We can easily transform a *long* DataFrame to *wide* with `unstack()`. Consider the example:
# -
stacked_new = df.stack() # going from wide to long format
stacked_new
# + [markdown] slideshow={"slide_type": "fragment"}
# And transform to wide using either the first or the second index:
# + slideshow={"slide_type": "-"}
stacked_new.unstack(level=0)
# + slideshow={"slide_type": "-"}
stacked_new.unstack(level=1)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Associated Readings
# + [markdown] slideshow={"slide_type": "fragment"}
# \[PDA, chapter 7: Revisited... \]
# + [markdown] slideshow={"slide_type": "fragment"}
# PDA, chapter 8:
# - Hierarchical indexing
# - Combining and merging data
# - Reshaping your data
# + [markdown] slideshow={"slide_type": "fragment"}
# PDA, chapter 10:
# - Introducing `groupby` for data aggregation
# - Different ways of undertaking data aggregation
# - Introducing the *split-apply-combine*-method
# + [markdown] slideshow={"slide_type": "fragment"}
# Wickham (2011) \[optional\]:
# - Another perspective on the *split-apply-combine*-method for data analysis
# - Technical, _R_-based.
# -
# ## session_4_exercises.ipynb
# Can be found on github today 16:00.
# - Method 1: sync your cloned repo
# - Method 2: download from git repo
#
# `Remember` to create a local copy of the notebook
| teaching_material/session_4/session_4_slides.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data model
#
# Compared to what is provided in most general-purpose programming languages, only a small set of abstract data types are needed for analysis (or high-level programming in general). Numbers, booleans, and fixed-size (rectangular) arrays of numbers and booleans are sufficient for data analysis in most fields of study. Particle physicists demonstrably need variable-length lists of numbers and booleans as well: interest in awkward-array has overwhelmingly focused on the `JaggedArray` class, which only provides this capability. For the intuitive notion of a "particle object," some sort of record type is needed as well. To manipulate sets without restriction, we also need heterogeneity, which can be expressed by a union type.
# ## Data types
#
# The following four type generators ("PLUR") provide a system of surprising generality:
#
# * **P**rimitive integers, floating-point numbers, booleans, and any fixed byte-width value (e.g. UUIDs, IP addresses, ...),
# * **L**ists of variable length but homogeneous type,
# * **U**nions of heterogeneous types, such as "electrons and muons" (with different fields in each), and
# * **R**ecords of named, typed fields (a.k.a. objects, structs, composites, classes...).
#
# For instance, JSON (with a schema) is a PLUR system, in which numbers, boolean, and `null` are the primitives, and strings are regarded as a special case of "lists of 1-byte characters." Protobuf, Avro, Thrift, Parquet, and Arrow are statically typed PLUR systems. Unions and records are the sum types and product types of [algebraic type theory](https://en.wikipedia.org/wiki/Algebraic_data_type), respectively. Only one thing from general-purpose programming might be missed by physicists:
#
# * **P**ointers between objects.
#
# However, we can add this as a fifth type generator ("PLURP") by allowing cross-references and circular references. In a PLUR system, data structures are trees with a maximum depth, limited by the type schema, but in a PLURP system, data structures may be arbitrarily deep trees or even graphs. Awkward-array is PLURP with extra features beyond just representing types.
#
# Data types in a general-purpose programming language can be constructed from the above if interpreted through the appropriate interfaces. For instance, an open file object is an integer that makes system calls, a linked list is a tree of records, and a hash-table is a list with hash-collision handling. PLURP provides a layer of abstraction between raw, serialized memory (e.g. the arrays and structs of the C programming language) and rarified types of a high-level language (e.g. classes with hidden implementations).
# ## Multi-paradigm columnar processing
#
# Awkward-array has been useful as a Numpy extension for particle physics, and I expect its role to increase. However, I don't think the array-programming paradigm is good for all problems. In fact, I'd like to provide three ways to perform computations on these same data structures:
#
# * array programming, in which the columnar nature of the arrays is visible and there is no index,
# * imperative programming in Numba, in which the columnar nature of the arrays is hidden—the user works with "lists" and "records" in compiled Python—and there is no index, and
# * declarative programming, in which the columnar nature of the arrays is hidden and there is an index to define identity for set operations.
#
# My goal is for the three paradigms to be usable on the same data structures without translation. For example, a physicist-user might apply a first transformation of their data as an array, then do something more complex in a Numba-compiled block, treating the records as Python objects (though Numba compiles their actions into array manipulations under the hood), then do something even more complex, relying on the identity of records in a way not possible in imperative programming, using a declarative language like PartiQL.
# ## Mini-awkward-array
#
# Rather than using awkward-array in this demo, I reimplemented a simple version of it so that relationships between the types and the index is more clear.
#
# This implementation has only four classes: `PrimitiveArray`, `ListArray`, `UnionArray`, and `RecordArray`. As in awkward-array, the data are stored in columns but may be thought of as nested objects.
# +
import data
# Semi-realistic columnar event data: 4 events, each holding a
# variable-length list of muons and jets plus a scalar `met`.
# The two integer lists passed to ListArray are per-event start and
# stop offsets into the flat record arrays (e.g. event 0 owns muons
# [0, 3), event 1 owns none, event 2 owns [3, 5), event 3 owns [5, 9)).
events = data.RecordArray({
    "muons": data.ListArray([0, 3, 3, 5], [3, 3, 5, 9], data.RecordArray({
        "pt": data.PrimitiveArray([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]),
        "iso": data.PrimitiveArray([0, 0, 100, 50, 30, 1, 2, 3, 4])
    })),
    "jets": data.ListArray([0, 5, 6, 8], [5, 6, 8, 12], data.RecordArray({
        "pt": data.PrimitiveArray([1, 2, 3, 4, 5, 100, 30, 50, 1, 2, 3, 4]),
        "mass": data.PrimitiveArray([10, 10, 10, 10, 10, 5, 15, 15, 9, 8, 7, 6])
    })),
    "met": data.PrimitiveArray([100, 200, 300, 400])
})
# -
# The semi-realistic event structure above is defined in its columnar representation—Python lists are stand-ins for the arrays we would use in an efficient system. We can extract data as jagged arrays:
events["muons"]["pt"]
# Or as the nested objects the arrays represent.
events[0]["muons"].tolist()
# In awkward-array, we would describe the high-level type of this structure as follows:
#
# ```
# [0, 4) -> "muons" -> [0, ?) -> "pt" -> float64
# "iso" -> float64
# "jets" -> [0, ?) -> "pt" -> float64
# "mass" -> float64
# "met" -> float64
# ```
#
# because an array is like a function that takes an integer or string in square brackets and returns something else—an array or a scalar primitive. The first argument after `events` can be any non-negative integer less than `4`, and this returns an array/function that takes `"muons"`, `"jets"`, or `"met"`. The return type of the next argument depends on which string was passed. The variable-length lists take a non-negative integer `[0, ?)` because its limits are too complex to encode in the type description. (The size of the type description should not be allowed to scale with the size of the array, and this limits its expressiveness for variable-length lists.) If awkward-array is based on Numpy, the leaves of its type terminate on Numpy dtypes, such as `float64`. If the array includes a cross-reference or circular reference, the type description would be a graph with interconnections, not a tree.
#
# In awkward-array, it's possible to change the order of these arguments:
# [0, 4) before "muons"/"jets"/"met"
events[0]["muons"].tolist()
# "muons"/"jets"/"met" before [0, 4)
events["muons"][0].tolist()
# Passing a string argument to a `ListArray`, as though it were a `RecordArray`, creates a `ListArray` of one of the nested `RecordArray`'s fields. It is a projection through the nested records.
events["muons"]["pt"]
events["muons"]["iso"]
# Formally, we find that string arguments can commute leftward through integer arguments, though string arguments do not commute with string arguments (you can't reverse the order of nested records) and integer arguments do not commute with integer arguments (you can't reverse the order of nested lists). A string argument cannot commute to the right in its placement in the data type: you can't say
events[0][2]["muons"]["pt"]
# because the three options `"muons"`, `"jets"`, `"met"` do not lead to the same types: `"met"` yields a scalar `float64`, which cannot take any array arguments. Because of this technicality, we have to say, "string indexes commute with integer indexes *up to their rightmost position,*" where the "rightmost position" is the position of the string in the data type.
#
# Notwithstanding that technicality, the commutation relation is an important feature and will be used to define awkward indexes.
# ## Indexes and keys
#
# The concept that an SQL-like query language would add to awkward-array is indexing—giving each element of the arrays (at all levels) a unique identifier ("key").
#
# Just as the data types are determined by what types of arguments can be placed in square brackets and what types that returns, the index is determined by a specific sequence of arguments that lead to a given data element. For example:
events[0]["muons"][2]["pt"]
# The sequence `0`, `"muons"`, `2`, `"pt"` leads to 3.3, so this value of 3.3 could be indexed by `[0, "muons", 2, "pt"]`. There may be other values of 3.3 in the dataset, but they are different entities with different index keys.
#
# In relational databases, this is called a [surrogate key](https://en.wikipedia.org/wiki/Surrogate_key), a declaration that this entity is unique by fiat, as opposed to a [natural key](https://en.wikipedia.org/wiki/Natural_key) that determines uniqueness through the value of the measured data. For instance, if we had decided that all muons with a pT of 3.3 are the same muon, those would be natural keys. Natural keys are meaningful for time measurements in time series or latitude/longitude coordinates in geographical data, but no values in a particle physics dataset would make sense as a natural key. Even if all low-level signals in a particle physics collision were perfectly repeated by a second collision, we would want to consider that second instance as a distinct event, and all of the particles it contains as distinct from the particles of the first event. Thus, we generate surrogate keys based on their location in the array structure.
#
# Because of the commutativity of integers and strings, the following are equivalent:
#
# ```
# [0, "muons", 2, "pt"]
# [0, "muons", "pt", 2]
# ["muons", 0, 2, "pt"]
# ["muons", 0, "pt", 2]
# ["muons", "pt", 0, 2]
# (but not [0, 2, "muons", "pt"])
# ```
#
# Rather than choosing one arbitrarily, we can express this commutativity by separating the index into a row index and a column index:
#
# ```
# row=[0, 2], col=["muons", "pt"]
# ```
#
# Note that we can only perform this assignment in data without cross-references—PLUR, not PLURP. To accommodate indexing, future versions of awkward-array will need to demote cross-references to a second-class status: a non-cross-referenced structure may be built from direct references, but interconnections would have to be called out as "soft links" or "weak references" so that they can be ignored while assigning indexes. (Alternatively, we could do a depth-first walk with breadcrumbs to avoid walking over the same structure twice, but indexes derived that way would depend on the order in which record fields are walked. It would be safer to call out "direct references" from "cross-references" explicitly.)
# ### Row and column indexes in Pandas
#
# This is the principle that Pandas uses to express arbitrary lists of records as a two-dimensional table. To see this in a few examples, install awkward-array and Pandas:
# !pip install awkward pandas
import awkward
awkward.topandas(awkward.fromiter(events)["muons"], flatten=True)
awkward.topandas(awkward.fromiter(events)["jets"], flatten=True)
# deeply nested records → nested column keys
awkward.topandas(awkward.fromiter([
{"left": {"x": 1, "y": {"z": 1}}, "right": {"a": 1, "b": 1, "c": 1}},
{"left": {"x": 2, "y": {"z": 2}}, "right": {"a": 2, "b": 2, "c": 2}},
{"left": {"x": 3, "y": {"z": 3}}, "right": {"a": 3, "b": 3, "c": 3}}
]), flatten=True)
# deeply nested lists → nested row keys
awkward.topandas(awkward.fromiter([
[[{"x": 100}, {"x": 100}], [{"x": 100}, {"x": 100}, {"x": 100}]],
[],
[[{"x": 300}], [], [{"x": 300}, {"x": 300}], [{"x": 300}]]
]), flatten=True)
# In Pandas, rows and columns can be assigned with a `MultiIndex`. Whenever Pandas `DataFrames` are merged, rows and columns with matching `MultiIndex` tuples are considered equivalent and matched, even if they are out of order. We will do the same with data in PartiQL, which frees it from list order and possible duplication, acting at the level of sets.
# ### Index visibility
#
# Since surrogate keys are artificial, there are two schools of thought on their visibility:
#
# * <NAME>, and Todd (1976) describe surrogate keys as an integer field in a table;
# * Wieringa and <NAME> (1991) describe them as an internal implementation detail.
#
# If surrogate keys are visible, users can express sampling without replacement through an inequality on the surrogate key:
#
# ```sql
# ... FROM muons m1 JOIN muons m2 ON m1.id < m2.id
# ```
#
# If they are not visible, constructs like this have to be provided by the language—they can't be built manually. For PartiQL, I have chosen to hide them as an implementation detail and provide syntactic constructs for sampling without replacement:
#
# ```
# muons as (m1, m2)
# ```
#
# and sampling with replacement:
#
# ```
# muons as m1 cross muons as m2
# ```
#
# This puts more separation between *what* a user wants and *how* it is implemented. (Sampling without replacement in SQL could use `m1.id < m2.id`, `m1.id > m2.id`, `(m1 + n) % COUNT(muons) < (m2 + n) % COUNT(muons)`, or `permutation(m1.id) < permutation(m2.id)` for any permutation. Detecting the intent can be hard.)
# ### Order visibility
#
# With hidden surrogate indexes, it would also be possible to entirely hide the order of lists. This is theoretically appealing because then the language would truly be a language of sets. Introducing a single function that reveals the order of a list would make it necessary to provide options to control the order in all other operations (such as SQL's `ORDER BY`).
#
# For simplicity (and to see how far we can take it), PartiQL does not reveal the order of lists. A "list" in awkward-array is a "set" in PartiQL. (Order is revealed if we apply awkward-array operations *after* a PartiQL operation, but that's beyond the PartiQL language.)
# ### Joins and index compatibility
#
# Many mathematical operations apply to scalars, such as addition, square root, exponentiation, etc. In array programming, they can be applied to all members of two or more arrays if the arrays have the same length (or n-dimensional shape) by lining up the arrays and performing the operation on each scalar element. Awkward-array extends this to arrays of variable-length lists, as long as the lengths in each input are the same when aligned. The same can be applied to sets if they have the same index.
#
# Aligning two or more sets is called a "join" ("merge" in Pandas). The internal representations of the sets might have different list orders, so "alignment" for sets includes reordering.
#
# What if a user tries to join `muons` and `jets` to add their `pt`? Such an operation should be illegal because we can never guarantee that there's the same number of `muons` and `jets` in each event and there is no natural mapping from one set to the other set. Awkward-array *usually* forbids such an operation because there is *usually* at least one event in a large dataset with a different number of `muons` than `jets`. With indexes, we can *always* forbid such an operation because they have non-overlapping indexes (no keys in common).
#
# What if a user tries to join `filteredMuons` or `muonsWithCorrections` to `muons`? Such an operation should be legal and it would often be useful. If there is a different number of `filteredMuons` or `muonsWithCorrections` than `muons`, even in one event, awkward-array cannot combine them. With indexes, we can *always* perform such operations because the filtered or transformed muons carry over their indexes—their identities—through the filtering or transformation.
#
# In particular, we can even perform computations in Directed Acyclic Graphs (DAGs) that change the number or order of collections. For example:
#
# ```python
# B = f(A)
# C = g(A)
# D = h(B, C)
# ```
#
# where `f` and `g` might change the number or order of values in `A`, `h` is possible because elements of `B` can be matched to unique elements of `C` (and vice-versa) through the index keys they both inherited from `A` (passed through `f` and `g`).
#
# To ensure that `B` and `C` are both transformed versions of the same particles, keys must be compared by reference, not by value. The second muon in the first event has row index `[0, 1]` and the second jet in the first event also has row index `[0, 1]`, but they are not the same key because they don't derive from the same index. We can't disambiguate them on their column indexes, `["muons", "pt"]` and `["jets", "pt"]`, because we should be able to join different columns, such as `["muons", "pt"]` and `["muons", "iso"]`. When indexes are created, each index should have a globally unique identifier that gets passed through operations so that we can distinguish, e.g. `#0[0, 1]` from `#1[0, 1]`.
# ## Demonstration of awkward indexes
#
# The demo code includes a `setindex` method that assigns index keys to all reachable elements in an awkward-array.
events.setindex()
events.contents["muons"].content.contents["pt"]
events.contents["muons"].content.contents["pt"].row
# Each primitive array, such as `events.contents["muons"].content.contents["pt"]`, has a `RowIndex` with a `RowKey` for every element and a `Ref` for referential identity.
#
# If you reach down to individual elements, you'll see that the `Ref` is attached to each `RowKey`.
events.contents["muons"].content.contents["pt"].row[3]
# A different primitive array in the same object, for which indexes must be compatible, has the same `Ref`.
events.contents["muons"].content.contents["pt"].row
events.contents["muons"].content.contents["iso"].row[3]
# For different objects, like muons and jets, the indexes are incompatible, even if they happened to have the same number of elements per event.
events.contents["jets"].content.contents["pt"].row
events.contents["muons"].content.contents["pt"].row.ref, events.contents["jets"].content.contents["pt"].row.ref
# There is only one `ColIndex` associated with the whole primitive array because the whole array represents a column.
#
# Columns do not need to be compared for referential equality because they are always compatible. (It is assumed that the user would be combining data from different fields of the same objects.)
events.contents["muons"].content.contents["pt"].col
# In fact, in the whole implementation of PartiQL, column indexes are never needed. They seem to be superfluous: a non-toy implementation might not even need such a concept.
# ### Unreachable elements
#
# Awkward arrays can have unreachable elements—data that are inaccessible because we wish to avoid propagating slices through all layers of the hierarchy, to make multi-step append operations effectively atomic, because TLorentzVectors from ROOT include TObject bytes that aren't relevant for analysis, or any other reason.
#
# Here is an example:
example = data.ListArray([3, 999, 0], [6, 999, 2],
data.PrimitiveArray([4.4, 5.5, 12345, 1.1, 2.2, 3.3, 98765]))
example.tolist()
# In this example, not only are the inner list boundaries out of order, but there is no way to reach `12345` or `98765`.
#
# These elements do not get indexes.
example.setindex()
example.content
example.content.row
# In a non-toy implementation, in which the index is an array, these values may be filled with `-1`. (Reachable indexes are all non-negative.)
# ### Heterogeneous data
#
# Although datasets produced by reconstruction are rarely heterogeneous, intermediate steps in PartiQL frequently involve unions. Index keys have a sensible definition for heterogenous types.
egamma = data.UnionArray([0, 0, 1, 0, 1, 1, 1, 0, 0], [0, 1, 0, 2, 1, 2, 3, 3, 4], [
data.RecordArray({
"q": data.PrimitiveArray([1, -1, -1, 1, 1]),
"pt": data.PrimitiveArray([10, 20, 30, 40, 50])
}),
data.RecordArray({
"pt": data.PrimitiveArray([1.1, 2.2, 3.3, 4.4])
})
])
egamma.tolist()
egamma.setindex()
egamma.contents[0].contents["pt"], egamma.contents[1].contents["pt"]
egamma.contents[0].contents["pt"].row, egamma.contents[1].contents["pt"].row
# Note that the key values for data in different branches of the union are both non-overlapping and have different references. An electron will never be confused with a photon.
# ## Intermediate objects
#
# Objects at all levels of the tree have indexes, not just leaves.
events.contents["muons"].row
egamma.row
# ## Rowwise vs columnar calculations
#
# In a non-toy implementation, all operations should be computed on columns because it is more efficient in many ways (fewer branches in the code, less data to swap into CPU cache, and possibly even less data to load from disk). However, it is also more complicated. For simplicity in this toy implementation, the columnar data will be decomposed into rowwise objects with index keys attached.
data.instantiate(events)
data.instantiate(egamma)
# It should be clear, however, that all operations implemented on rowwise objects can be translated to columnar data for faster processing.
# # Implications for awkward 1.0
#
# Awkward-array will soon be rewritten for a variety of reasons, and that gives us opportunity to revisit fundamental questions. Knowing now what is required for set operations—a `d×N` matrix of integers where `d` is the depth and `N` is the length of an internal array, with no need for a column index—awkward 1.0 can be written to accommodate this data structure. In fact, if an index is present, it can be passed through the array programming interface, to ensure that array programming and declarative set operations can be interleaved.
| binder/02-data-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice Problem 4
# ### Problem: Find and compare the GDP of the 10 most populous countries in 2007.
# Use plt.hist() and plt.subplots().
#
# NOTE: We saw an example of plt.subplots() in ***Finding Distribution of Data with Histograms.***
| Intro_Data_Visualization_Pluralsight/07/demos/demos/Before/Practice Problem 4 - GDP of 10 Most Populous Countries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 5-Clustering with HDBSCAN
#
# In this notebook we calculate the areas where the trips most commonly start and end. We call these the _trip endpoints_. Usually you also find these named as "_stay points_" in the literature but, due to the nature of the data that we are handling, where vehicles do not actually stop at these locations, we will keep the name "_endpoint_".
#
# **Requirements:**
#
# - Please run the `02-import-data.ipynb` notebook first, in case you need to build the supporting SQLite database.
# - Required install: [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/user_install.html). Enable using `jupyter nbextension enable --py widgetsnbextension --sys-prefix` for Jupyter Notebook and `jupyter labextension install @jupyter-widgets/jupyterlab-manager` for Jupyter Lab.
# +
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import folium
import hdbscan
import utm
import ipywidgets as widgets
from db.api import VedDb
from h3 import h3
from tqdm.notebook import tqdm
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
from folium.vector_layers import CircleMarker
from colour import Color
from ipywidgets import interact, interactive, fixed, interact_manual
# %matplotlib inline
# -
# Let's start by instantiating the `VedDb` object to interface with the SQLite database created and prepared in previous notebooks.
db = VedDb()
# Now, we collect the start and end locations of all the moves in the database as latitude and longitude pairs. Note that we have to join the `move` table to the `signal` table twice in order to get both start and end locations. The result is downloaded into a DataFrame for in-memory manipulation.
sql = """
select m.move_id
, si.latitude as lat_ini
, si.longitude as lon_ini
, se.latitude as lat_end
, se.longitude as lon_end
from move m
inner join signal si on si.day_num = m.day_num and
si.vehicle_id = m.vehicle_id and
si.time_stamp = m.ts_ini
inner join signal se on se.day_num = m.day_num and
se.vehicle_id = m.vehicle_id and
se.time_stamp = m.ts_end
"""
df_pt = db.query_df(sql)
df_pt.head()
# As you can see, for each move we collect the start location `(lat_ini, lon_ini)` and destination location `(lat_end, lon_end)`. Using this data, we can determine the implied geographical clusters using [HDBSCAN](https://hdbscan.readthedocs.io/en/latest/). Before running that algorithm, we must first collect all locations into a single array, keeping the original ordering. The first half of the array contains the start locations while the second part contains the destination locations. This is important in order to guarantee that we can assign the detected cluster identifiers back to the `move` table.
loc_ini = df_pt[['lat_ini', 'lon_ini']].to_numpy()
loc_end = df_pt[['lat_end', 'lon_end']].to_numpy()
locations = np.vstack((loc_ini, loc_end))
# We can now cluster all the points. Here I am providing two functions to fit an HDBSCAN clusterer object. The `fit_utm_clusterer` function fits an HDBSCAN model using [UTM coordinates](https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system).
def fit_utm_clusterer(locations, min_cluster_size=20, min_samples=20):
    """Fit an HDBSCAN model to (lat, lon) pairs projected to UTM.

    Each location is projected to planar (easting, northing) coordinates,
    so plain Euclidean distance is a good proxy for ground distance within
    a small area.

    Returns the fitted ``hdbscan.HDBSCAN`` clusterer object.
    """
    # utm.from_latlon returns (easting, northing, zone_number, zone_letter);
    # only the two planar coordinates are needed for clustering.
    projected = []
    for latlon in locations:
        easting, northing = utm.from_latlon(latlon[0], latlon[1])[:2]
        projected.append([easting, northing])
    model = hdbscan.HDBSCAN(
        min_cluster_size=min_cluster_size,
        min_samples=min_samples,
        metric='euclidean',
    )
    model.fit(projected)
    return model
# The `fit_latlon_clusterer` uses the location coordinates directly and calculates distances using the haversine function. Both functions should produce equivalent cluster assignments, but this one works better for larger areas, especially for longitude ranges of over 6 degrees. For a small area such as this one, the UTM version is adequate.
def fit_latlon_clusterer(locations, min_cluster_size=20, min_samples=20):
    """Fit an HDBSCAN model directly on (lat, lon) pairs with the
    haversine metric.

    NOTE(review): unlike ``fit_utm_clusterer``, which returns the fitted
    clusterer object, this function returns the ``(labels_,
    outlier_scores_)`` tuple — the two are not drop-in replacements even
    though the surrounding text describes them as equivalent; confirm
    which contract callers expect before unifying them.
    """
    # The haversine metric expects angles in radians, not degrees.
    pts = np.radians(locations)
    clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,
                                min_samples=min_samples,
                                metric='haversine')
    clusterer.fit(pts)
    return clusterer.labels_, clusterer.outlier_scores_
# Now, we run the code against the location array and collect the cluster identifiers in the same order as that of the inputs.
clusterer = fit_utm_clusterer(locations)
# The list of unique cluster identifiers is calculated below by removing the first unique value (-1), the noise indicator.
unique_clusters = np.unique(clusterer.labels_)[1:]
print("The initial number of clusters is: {0}".format(unique_clusters.shape[0]))
# ## Detecting Outliers
# We are not done yet. The HDBSCAN algorithm can generate some odd-looking clusters with points that are seemingly misplaced, too far away from the main cluster. You will be able to judge that by yourself below while browsing through the map. Fortunately, HDBSCAN also provides a means for us to weed out such points using the concept of [outliers](https://hdbscan.readthedocs.io/en/latest/outlier_detection.html). For each point, the algorithm calculates an outlier score, where higher values mean higher likelihood of the point being an outlier.
#
# Outlier scores range from zero to one, and we can use such information to filter out the most glaring outliers. In this case, through visual inspection, I decided to filter out points with outlier scores above 0.8. Below, I try to provide a more quantitative support for this decision.
#
# Let's start by plotting a histogram of the outlier score distribution.
hh = plt.hist(clusterer.outlier_scores_, bins=50)
# As you can see, there are very few outlying points beyond _0.8_. The proportion of points with outlier scores below _0.8_ is quite large indeed:
scores = clusterer.outlier_scores_
print("Below 0.8 you can find {:.2f}% of the cluster points.".format(100.0 * scores[scores < 0.8].shape[0] / scores.shape[0]))
# Let's have another look at this distribution. In the following plot, the _x_ axis represents the outlier score and the _y_ axis represents the proportion of cluster points with up to the given outlier score.
plt.rcParams['figure.figsize'] = [8, 8]
score_shape = [scores[scores < x].shape[0] / scores.shape[0] for x in np.arange(0.0, 1.0, 0.01)]
plt.plot(np.arange(0.0, 1.0, 0.01), score_shape)
plt.xlabel("Outlier Score")
plt.ylabel("Point Proportion")
plt.title("Outlier Score Cumulative Distribution")
plt.grid(True)
pd.Series(scores).quantile(0.95)
# Let's see some visuals!
#
# ## Interactive Cluster Exploration
#
# Here you can explore all the generated clusters through the interactive widget below. The `show_cluster_map` displays a cluster map with all the points colored according to their outlier score. The more a point color shifts to red, the higher its outlier score. For your convenience, each point has a tooltip with its score.
def show_cluster_map(cluster_id):
    """Display the points of one HDBSCAN cluster on a folium map.

    Each point is colored along a blue-to-red gradient by its outlier
    score (redder means more outlying) and carries a tooltip showing the
    score with two decimals.

    Parameters
    ----------
    cluster_id : int
        Cluster identifier (a value from ``clusterer.labels_``).

    Returns
    -------
    folium.Map
        Map fitted to the bounding box of the cluster's points.
    """
    blue = Color("blue")
    red = Color("red")
    color_range = list(blue.range_to(red, 10))
    # Renamed from `map` to avoid shadowing the builtin.
    cluster_map = folium.Map(prefer_canvas=True, tiles='CartoDB positron')
    clusters = clusterer.labels_
    outlier_scores = clusterer.outlier_scores_
    points = locations[clusters == cluster_id]
    scores = outlier_scores[clusters == cluster_id]
    for i in range(points.shape[0]):
        point = points[i]
        # Clamp to the last bucket: a score of exactly 1.0 would otherwise
        # index position 10 of a 10-element color list (IndexError).
        color = color_range[min(int(scores[i] * 10), 9)]
        CircleMarker(point, radius=1, color=color.hex,
                     tooltip="{:.2f}".format(scores[i])).add_to(cluster_map)
    min_lat, max_lat = points[:, 0].min(), points[:, 0].max()
    min_lon, max_lon = points[:, 1].min(), points[:, 1].max()
    cluster_map.fit_bounds([[min_lat, min_lon], [max_lat, max_lon]])
    return cluster_map
ii = interact(show_cluster_map, cluster_id=widgets.IntSlider(min=0, max=clusterer.labels_.max(), step=1, value=0))
# ### Outlier Filtering
#
# We now must make good on our promise and filter the offending outliers. We do this by marking as noise all the points with an outlier score above _0.8_.
filtered_clusters = np.copy(clusterer.labels_)
filtered_clusters[scores >= 0.8] = -1
# ## Serialize to the Database
#
# For the sake of future convenience, we will now create and fill in a table with all the cluster points. This table will make our life much easier in the future, when handling clusters, their locations and shapes.
#
# We start by making sure that the table exists and is empty.
# Ensure the cluster_point table exists and is empty before inserting:
# create it (plus an index on cluster_id for fast per-cluster lookups) on
# the first run, or wipe rows left over from a previous run otherwise.
if not db.table_exists("cluster_point"):
    sql = """
    CREATE TABLE cluster_point (
        pt_id        INTEGER PRIMARY KEY ASC,
        cluster_id   INT NOT NULL,
        latitude     FLOAT NOT NULL,
        longitude    FLOAT NOT NULL,
        h3           TEXT
    )
    """
    db.execute_sql(sql)
    db.execute_sql("CREATE INDEX idx_cluster_point_cluster ON cluster_point (cluster_id)")
else:
    db.execute_sql("DELETE FROM cluster_point")
# Now, we can insert the locations for each cluster, along with the [H3](https://eng.uber.com/h3/) hexagon codes at [resolution level 12](https://uber.github.io/h3/#/documentation/core-library/resolution-table). In the next notebook, we will display the clusters using the outline of the collated shapes of all cluster hexagons.
#
# To prepare the insert statement, we now collect all the input data into a list:
# H3 resolution level used to tag each point with a hexagon code; level 12
# is fine-grained enough to outline cluster shapes in the next notebook.
h3_level = 12
cluster_points = []
# Keep only points that were assigned to a cluster (-1 marks noise and the
# outliers filtered above) and pair each with its H3 hexagon code. Tuple
# order matches the cluster_point insert: (cluster_id, lat, lon, h3).
for i in tqdm(range(filtered_clusters.shape[0])):
    if filtered_clusters[i] >= 0:
        pt = (int(filtered_clusters[i]), locations[i, 0], locations[i, 1], h3.geo_to_h3(locations[i, 0], locations[i, 1], h3_level))
        cluster_points.append(pt)
# We can now insert the cluster points into the table.
db.insert_cluster_points(cluster_points)
# The DataFrame can also be updated with the cluster identifiers like so:
n = filtered_clusters.shape[0] // 2
df_pt['cluster_ini'] = filtered_clusters[:n]
df_pt['cluster_end'] = filtered_clusters[n:]
# This is what the DataFrame looks like after cluster identifier assignment. Note that the constant `-1` means that the point was not assigned to any cluster, and was considered as noise instead. We will exclude these from the future trajectory analyses.
df_pt.head()
# ## Modifying the `move` Table
#
# Now that we have the clusters identified, we can assign them back to the `move` table. Unfortunately, this table has no columns where to store the clusters identifiers, so we must first handle that. For convenience, we also create an extra index on the new columns for more convenient search on them.
#
# **Notes**:
# - The function `table_has_column` tests whether a given table has a named column. Here we only test against one column as it is enough.
# - Depending on the time you use this code, the database may already have been created with these columns and indexes. If that is the case, the code below does nothing.
# Add the cluster assignment columns (defaulting to -1 = noise) and a
# supporting index to the `move` table, but only once: on re-runs the
# columns already exist and this whole cell is a no-op.
if not db.table_has_column('move', 'cluster_ini'):
    db.execute_sql("alter table move add cluster_ini INT not null default -1")
    db.execute_sql("alter table move add cluster_end INT not null default -1")
    db.execute_sql("create index idx_move_clusters on move (cluster_ini, cluster_end)")
# We are now ready to update the `move` table using the recently calculated cluster identifiers. To do so, we use the data stored in the DataFrame to feed an update query. Note that we retrieve the data as a list of tuples with the exact order for consumption in the query.
clids = list(df_pt[['cluster_ini', 'cluster_end', 'move_id']].itertuples(index=False))
db.update_move_clusters(clids)
# We have now associated each move with a pair of endpoint clusters, and this allows us to perform more powerful analyses to the data, such as determine how many trips occur between two general endpoints, how many different trajectories there are and what vehicles have used them and at what time, consuming how much fuel or energy.
#
# Now, we turn to the issue of generating a geo-fence for the clusters so they can be easily displayed on a map.
| 05-clustering-hdbscan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Set WD
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
import os
os.chdir("/home/sgf2/DBMI_server/adversarial_attacks/melanoma/")
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
import scipy.stats as st
from tensorflow import keras
from sklearn import metrics
from sklearn.metrics import auc
def mean_ci(x):
    """Return ``(mean, ci_low, ci_high)`` for the sample *x*.

    The bounds are the 95% confidence interval of the mean under a
    Student's t distribution with ``len(x) - 1`` degrees of freedom,
    using the standard error of the mean as the scale.
    """
    sample_mean = np.mean(x)
    lower, upper = st.t.interval(0.95, len(x) - 1,
                                 loc=sample_mean, scale=st.sem(x))
    return (sample_mean, lower, upper)
def printResults(model_preds, y_test):
    """Print accuracy, ROC AUC, and mean confidence (with 95% CI).

    model_preds : (n, 2) array of predicted class probabilities.
    y_test      : (n, 2) one-hot encoded ground-truth labels.
    """
    # Accuracy: rounding column 0 turns probabilities into hard 0/1 calls.
    correct = np.round(model_preds)[:, 0] == y_test[:, 0]
    print('Test accuracy: %0.4f' % np.mean(correct))
    # AUC computed from the ROC curve of the positive-class scores.
    fpr, tpr, thresholds = metrics.roc_curve(y_test[:, 1], model_preds[:, 1])
    print('AUC: %0.4f' % auc(fpr, tpr))
    # Confidence = probability of the winning class, summarized with its CI.
    conf = mean_ci(np.max(model_preds, axis=1))
    print('Avg. Confidence: ' + '{0:.6f} '.format(conf[0]) +
          '({0:.6f}'.format(conf[1]) + ' - {0:.6f})'.format(conf[2]))
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Set up
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from craft_attack_patch import *
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# # Adversarial Patch Attack
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Train Patches
# +
# Hyperparameters
epochs = 7
learning_rate = 5.0
# Load the models
resnet1 = ModelContainer('resnet1')
resnet2 = ModelContainer('resnet2')
# White Box
model = resnet1
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Target 0
train(model, target_label=0, epochs=epochs, learning_rate=learning_rate)
file_name = './patches/resnet1_patch_target0_epoch' + str(epochs) + '_wb.npy'
np.save(file_name, model.patch())
# Target 1
train(model, target_label=1, epochs=epochs, learning_rate=learning_rate)
file_name = './patches/resnet1_patch_target1_epoch' + str(epochs) + '_wb.npy'
np.save(file_name, model.patch())
# Black Box
model = resnet2
# Target 0
train(model, target_label=0, epochs=epochs, learning_rate=learning_rate)
file_name = './patches/resnet1_patch_target0_epoch' + str(epochs) + '_bb.npy'
np.save(file_name, model.patch())
# Target 1
train(model, target_label=1, epochs=epochs, learning_rate=learning_rate)
file_name = './patches/resnet1_patch_target1_epoch' + str(epochs) + '_bb.npy'
np.save(file_name, model.patch())
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# ## Display Results
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
################################# THIS IS THE ONLY CELL THAT WAS ADDED #############
# Pick the images to show (attack). Here I randomly chose 2 from each class
show(image_loader.X_test[3]) # label 0
# show(image_loader.X_test[200]) # label 0
# show(image_loader.X_test[401]) # label 1
# show(image_loader.X_test[453]) # label 1
# +
################################# THIS IS THE ONLY CELL THAT WAS MODIFIED #############
epochs = 7
# Load the models
resnet1 = ModelContainer('resnet1')
resnet2 = ModelContainer('resnet2')
# Loading the patch file
resnet1_patch_target1_wb = np.load('./patches/resnet1_patch_target1_epoch' + str(epochs) + '_wb.npy')
resnet1_patch_target0_wb = np.load('./patches/resnet1_patch_target0_epoch' + str(epochs) + '_wb.npy')
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Indices of images to show (use n_show=0 when using show_indices feature)
show_indices0 = [8] # attack with target label 0
show_indices1 = [0]
# Apply
scale = 0.4
probs_patched_images, probs_original_images,indices, true_labels, winp = attack_combined(resnet1,patch_for_0=resnet1_patch_target0_wb,
patch_for_1=resnet1_patch_target1_wb,
n_show=0, scale=scale,
show_indices0=show_indices0,
show_indices1=show_indices1,
predict_original=False
)
# Print
print("White Box:")
printResults(probs_patched_images, keras.utils.to_categorical(true_labels))
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Loading the patch file
resnet1_patch_target1_bb = np.load('./patches/resnet1_patch_target1_epoch7_bb.npy')
resnet1_patch_target0_bb = np.load('./patches/resnet1_patch_target0_epoch7_bb.npy')
# Apply
probs_patched_images, probs_original_images, indices, true_labels, winp = attack_combined(resnet1,patch_for_0=resnet1_patch_target0_bb,
patch_for_1=resnet1_patch_target1_bb,
n_show=0, scale=scale,
show_indices0=show_indices0,
show_indices1=show_indices1,
predict_original=False
)
# Print
print("\nBlack Box:")
printResults(probs_patched_images, keras.utils.to_categorical(true_labels))
# + [markdown] ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# # Photoshop Patch Attacks
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# Combined attack with mole-patches
file_name0 = './patches/natural_patch_min_p.npy'
file_name1 = './patches/natural_patch_max_p.npy'
mole_patch0 = np.load(file_name0)
mole_patch1 = np.load(file_name1)
scale = 0.4
probs_patched_images, probs_original_images, indices, true_labels, winp = attack_combined(resnet1,patch_for_0=mole_patch0,
patch_for_1=mole_patch1,
n_show=0, scale=scale,
show_indices0=show_indices0,
show_indices1=show_indices1,
predict_original=False
)
printResults(probs_patched_images, keras.utils.to_categorical(true_labels))
# -
# ## Display just patch
# + ein.hycell=false ein.tags="worksheet-0" slideshow={"slide_type": "-"}
import numpy as np
resnet1_patch_target1_bb = np.load('./patches/resnet1_patch_target1_epoch7_bb.npy')
resnet1_patch_target0_bb = np.load('./patches/resnet1_patch_target0_epoch7_bb.npy')
nat_min = './patches/natural_patch_min_p.npy'
nat_max = './patches/natural_patch_max_p.npy'
# +
from copy import copy
def deprocess_inception(y):
    """Map an Inception-style preprocessed image from [-1, 1] back to [0, 1].

    Operates on a copy, so the input array is left unmodified.

    Parameters
    ----------
    y : array-like
        Image data scaled to the [-1, 1] range.

    Returns
    -------
    numpy.ndarray
        Float array rescaled to [0, 1].
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float (float64) is the direct replacement.
    x = copy(y).astype(float)
    x += 1.
    x /= 2.
    #x *= 255.  # uncomment to scale to [0, 255] instead
    return x
import matplotlib.pyplot as plt
img = plt.imshow(deprocess_inception(resnet1_patch_target1_bb))
img.set_cmap('hot')
plt.axis('off')
plt.show()
img = plt.imshow(deprocess_inception(resnet1_patch_target0_bb))
img.set_cmap('hot')
plt.axis('off')
plt.show()
# img = plt.imshow(deprocess_inception(np.load(nat_min)))
# img.set_cmap('hot')
# plt.axis('off')
# plt.show()
# img = plt.imshow(deprocess_inception(np.load(nat_max)))
# img.set_cmap('hot')
# plt.axis('off')
# plt.show()
# -
| patch_attacks/0_generate_patch_results_derm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# <img style='float: left' width="150px" src="http://bostonlightswim.org/wp/wp-content/uploads/2011/08/BLS-front_4-color.jpg">
# <br><br>
#
# ## [The Boston Light Swim](http://bostonlightswim.org/)
#
# ### Sea Surface Temperature time-series maps
# ### Load configuration
# +
import os
import sys
ioos_tools_path = os.path.join(os.path.pardir, os.path.pardir)
sys.path.append(ioos_tools_path)
# +
from ioos_tools.ioos import parse_config, load_ncs
config = parse_config('config.yaml')
save_dir = os.path.join(os.path.abspath(config['run_name']))
# -
# ### Load skill_score
# +
import json
fname = os.path.join(config['run_name'], 'skill_score.json')
with open(fname, 'r') as f:
skill_score = json.loads(f.read())
# +
import pandas as pd
mean_bias = pd.DataFrame.from_dict(skill_score['mean_bias'])
mean_bias = mean_bias.applymap('{:.2f}'.format).replace('nan', '--')
skill_score = pd.DataFrame.from_dict(skill_score['rmse'])
skill_score = skill_score.applymap('{:.2f}'.format).replace('nan', '--')
# +
from ioos_tools.ioos import make_map
bbox = config['region']['bbox']
units = config['units']
run_name = config['run_name']
kw = dict(zoom_start=11, line=True, states=False,
secoora_stations=False, layers=False)
mapa = make_map(bbox, **kw)
# -
# ### Clusters
# +
from ioos_tools.ioos import stations_keys
all_obs = stations_keys(config)
# +
from glob import glob
from operator import itemgetter
import iris
import folium
from folium.plugins import MarkerCluster
iris.FUTURE.netcdf_promote = True
# Gather one (model, lon, lat, station) entry per station in every model
# result file, then render them all as a single folium MarkerCluster layer.
big_list = []
for fname in glob(os.path.join(save_dir, "*.nc")):
    # Skip the observation files; only model outputs are mapped here.
    if 'OBS_DATA' in fname:
        continue
    cube = iris.load_cube(fname)
    # The model name is encoded in the filename as "...-<model>.nc".
    model = fname.split('-')[-1].split('.')[0]
    lons = cube.coord(axis='X').points
    lats = cube.coord(axis='Y').points
    stations = cube.coord('station_code').points
    # Repeat the model name so each station row carries its model label.
    models = [model]*lons.size
    lista = zip(models, lons.tolist(), lats.tolist(), stations.tolist())
    big_list.extend(lista)
# Sort by station code (tuple position 3) so the DataFrame groups cleanly.
big_list.sort(key=itemgetter(3))
df = pd.DataFrame(big_list, columns=['name', 'lon', 'lat', 'station'])
df.set_index('station', drop=True, inplace=True)
groups = df.groupby(df.index)
# One marker per (model, station) pair; the popup shows "[model]: station".
locations, popups = [], []
for station, info in groups:
    sta_name = all_obs[station]
    for lat, lon, name in zip(info.lat, info.lon, info.name):
        locations.append([lat, lon])
        popups.append('[{}]: {}'.format(name, sta_name))
MarkerCluster(locations=locations, popups=popups).add_to(mapa)
# -
# ### Model and observations plots
# +
import warnings
# Suppresing warnings for a "pretty output."
# Remove this line to debug any possible issues.
warnings.simplefilter("ignore")
# +
# Legend dictionary. If any new model is found we will use its filename as legend.
# Here we only provide some nice names for the models we expect to find.
titles = {
'coawst_4_use_best': 'COAWST_4',
'global': 'HYCOM',
'NECOFS_GOM3_FORECAST': 'NECOFS_GOM3',
'NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST': 'NECOFS_MassBay',
'OBS_DATA': 'Observations'
}
# +
from bokeh.resources import CDN
from bokeh.plotting import figure
from bokeh.embed import file_html
from bokeh.models import HoverTool
from itertools import cycle
from bokeh.palettes import Spectral6
from folium.element import IFrame
# Plot defaults.
colors = Spectral6
colorcycler = cycle(colors)
tools = "pan,box_zoom,reset"
width, height = 750, 250
def make_plot(df, station):
    """Build a Bokeh time-series figure for one station.

    Each column of ``df`` is one time series (model columns plus, possibly,
    an 'OBS_DATA' observations column).  A hover tool per line shows the
    pre-computed bias and skill taken from the module-level ``mean_bias``
    and ``skill_score`` tables; observation lines get 'NA' for both.
    """
    p = figure(toolbar_location="above",
               x_axis_type="datetime",
               width=width,
               height=height,
               tools=tools,
               title=str(station))
    for column, series in df.iteritems():
        # NOTE(review): in-place dropna mutates the caller's DataFrame column.
        series.dropna(inplace=True)
        if not series.empty:
            line = p.line(
                x=series.index,
                y=series.values,
                legend="%s" % titles.get(column, column),
                # colorcycler is a module-level cycle, so colors continue
                # from wherever the previous call left off.
                line_color=next(colorcycler),
                line_width=5,
                line_cap='round',
                line_join='round'
            )
            # Models carry bias/skill metrics; observations do not.
            if 'OBS_DATA' not in column:
                bias = mean_bias[str(station)][column]
                skill = skill_score[str(station)][column]
            else:
                skill = bias = 'NA'
            p.add_tools(HoverTool(tooltips=[("Name", "%s" % column),
                                            ("Bias", bias),
                                            ("Skill", skill)],
                                  renderers=[line]))
    return p
def make_marker(p, station):
    """Wrap the Bokeh plot ``p`` in a folium marker placed at *station*."""
    from ioos_tools.ioos import stations_keys
    # Look up the station coordinates from the run configuration.
    lon = stations_keys(config, key='lon')[station]
    lat = stations_keys(config, key='lat')[station]
    # Render the plot as a standalone HTML document and embed it in a popup.
    rendered = file_html(p, CDN, station)
    frame = IFrame(rendered, width=width+40, height=height+80)
    return folium.Marker(
        location=[lat, lon],
        popup=folium.Popup(frame, max_width=2650),
        icon=folium.Icon(color='green', icon='stats'),
    )
# +
# Build one interactive time-series plot per station and pin it on the map.
dfs = load_ncs(config)
for station in dfs:
    sta_name = all_obs[station]
    df = dfs[station]
    # Skip stations with no model/observation data at all.
    if df.empty:
        continue
    p = make_plot(df, station)
    maker = make_marker(p, station)
    maker.add_to(mapa)
# -
# Display the final folium map inline.
mapa
| notebooks/boston_light_swim/02-create_map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/1carlosd1/daa_2021_1/blob/master/pilas2tare.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="4YrDnwfc0y62"
class Stack:
    """Simple LIFO stack backed by a Python list (top = end of the list)."""

    def __init__(self):
        # Internal storage; name-mangled to keep it private.
        self.__datos = []

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return not self.__datos

    def get_top(self):
        """Return the top element without removing it (IndexError if empty)."""
        return self.__datos[-1]

    def pop(self):
        """Remove and return the top element (IndexError if empty)."""
        return self.__datos.pop()

    def push(self, valor):
        """Place *valor* on top of the stack."""
        self.__datos.append(valor)

    def get_length(self):
        """Return the number of stored elements."""
        return len(self.__datos)

    def to_string(self):
        """Print the stack contents, top element first, framed by rules."""
        print("|------------------|")
        for item in reversed(self.__datos):
            print(item)
        print("|------------------|\n")
# + id="j6VCVfrf06sd" outputId="b03a526b-669d-482b-f94f-07b2d3357864" colab={"base_uri": "https://localhost:8080/"}
# Check that the brackets in the code snippet below are balanced by pushing a
# token for every opener and popping one for every closer.
pila2 = Stack()
bandera = 0  # set to 1 as soon as an unmatched closer is found
prueba = """arr=[]
palindromos=[]
contador=0
for i in range(len(h)):
  for j in range(len(m)):
    arr.append(h[i]+":"+m[j])
print(arr)
for k in range(0,len(arr)):
  if (arr[k][0]==arr[k][4]) and (arr[k][1]==arr[k][3]):
    print(arr[k])
    contador+=1
print("los palindromos encontrados fueron:")
print(contador)"""
for i in range(0, len(prueba)):
    # BUG FIX: the original test ``prueba[i]==("(" or "{" or "[")`` compared
    # only against "(" because ``("(" or "{" or "[")`` evaluates to "(".
    # (Likewise only ")" was recognised as a closer.)  Membership tests
    # cover all three bracket kinds.
    if prueba[i] in "({[":
        pila2.push('@')
    elif prueba[i] in ")}]":
        try:
            pila2.pop()
        except IndexError:  # more closers than openers seen so far
            bandera = 1
            print("la ecuacion esta desbalanceada")
x = pila2.get_length()
if x == 0 and bandera == 0:
    print("la ecuacion esta balanceada")
elif bandera == 0:
    # Leftover openers without matching closers.
    print("la ecuacion esta desbalanceada")
| pilas2tare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Exercise
#
# In the folder "Working with Images" (same folder this notebook is located in) there are two images we will be working with:
# * word_matrix.png
# * mask.png
#
# The word_matrix is a .png image that contains a spreadsheet of words with a hidden message in it.
#
# Your task is to use the mask.png image to reveal the hidden message inside the word_matrix.png. Keep in mind, you may need to make changes to the mask.png in order for this to work. That is all we'll say for now, since we really want you to discover this on your own!
from PIL import Image
# Load the word grid and the mask that will reveal the hidden message.
words = Image.open('word_matrix.png')
mask = Image.open('mask.png')
words
mask
words.size
# Resize the mask to match the word-matrix dimensions (1015 x 559).
mask = mask.resize((1015, 559))
mask
# Add partial transparency (alpha=100) so the words show through the mask.
mask.putalpha(100)
mask
# Paste the translucent mask over the words, using it as its own alpha mask.
words.paste(mask, (0,0), mask)
words
| docs/Notebooks/20.image-exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8nxGXrQtq1rt" executionInfo={"status": "ok", "timestamp": 1614973852977, "user_tz": 180, "elapsed": 1871, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "", "userId": "16114882800029312634"}}
class Animal():
    """Base class for the inheritance demo; every method just prints."""

    def __init__(self):
        # Announce construction so subclass constructor chaining is visible.
        print("Animal created!")

    def what_is_it(self):
        """Identify the object on stdout."""
        print("This is a animal")

    def eat(self):
        """Report the eating action on stdout."""
        print("It´s eating...")
# + id="NDenkmFVre4P" executionInfo={"status": "ok", "timestamp": 1614973854586, "user_tz": 180, "elapsed": 1033, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "", "userId": "16114882800029312634"}}
class Dog(Animal):
    """Animal subclass; its constructor chains to the base, then reports."""

    def __init__(self):
        # Run the base-class constructor first (prints "Animal created!").
        super().__init__()
        print("DOG created!!")
# + colab={"base_uri": "https://localhost:8080/"} id="gSGLzcjS8QQy" executionInfo={"status": "ok", "timestamp": 1614973856528, "user_tz": 180, "elapsed": 1171, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "", "userId": "16114882800029312634"}} outputId="7c69410b-3613-4b70-ab6d-f99f0cbfa8a6"
mydog = Dog()
| Python Notebooks/OOP2 Inheritance - Bootcamp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ada] *
# language: python
# name: conda-env-ada-py
# ---
# **Reducing the Swiss "Foodprint": How an Individual can Readjust their Carbon Output**
# This notebook contains detailed data analysis and visualizations of the [Swiss Foodprint](https://valentinoli.github.io/swiss-foodprint/) project. Please view the [`README`](https://github.com/valentinoli/swiss-foodprint/blob/master/README.md) for overall project goals and background information. The project is hosted on [GitHub](https://github.com/valentinoli/swiss-foodprint).
# +
# import external libraries
# %matplotlib inline
import collections
import inspect
import pickle
import re
import pandas as pd
import numpy as np
# prevent internal problem with networkx from showing error
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# %load_ext autoreload
# %autoreload 2
# +
# import local dependencies
import sys
sys.path.insert(1, "scripts")
from plots import *
from impex_data_manipulation import *
from impex_countries import *
from fao_data_manipulation import *
from emissions_data_manipulation import *
# -
# The first step is to calculate, for each type of food, how much of what is consumed by the Swiss population is produced within Switzerland versus imported. To do this, we will use data on imports, exports, and domestic production. The imports and exports data are sourced from [Swiss Impex](https://www.gate.ezv.admin.ch/swissimpex/index.xhtml), a website hosted by the Swiss Federal Customs Administration which provides data on Switzerland's global trade activity. Domestic production data comes from [FAO](http://www.fao.org/faostat/en/#data), the Food and Agriculture Organization of the United Nations which offers a variety of agricultural-related data. We fetched fishery data from [here](http://www.fao.org/fishery/statistics/global-commodities-production/query/en).
#
# In theory, the total amount of food consumed in Switzerland (including food waste) can be calculated from these figures:
#
# consumption = domestic production + imports - exports
# ***
# **Data loading and manipulation**
# ***
# In this section, we load and manipulate data on Swiss imports, exports, and domestic production of various food types.
#
# **Note**: All quantities are measured in kilograms, unless otherwise stated.
# *Imports and Exports*
#
# First, we load imports-exports data from the [Swiss Impex](https://www.gate.ezv.admin.ch/swissimpex/index.xhtml) into the `impex` dataframe.
impex = load_impex()
impex.head()
# To ensure that our dataframe is properly processed and handled as it goes through the changes we are about to expose it to, we will pull out one value from it (quantity of kiwi fruits imported from New Zealand) and check at the end to make sure its value is still properly matched with the country and fruit.
impex.loc["New Zealand"].fruits.kiwi_fruit.imports
# We then create a dataframe `impex_total` for storing total quantities of indicator variables (such as imports and exports):
# 1. Select only first row (total), creates a series
# 2. Unstack first level (indicator) to create a dataframe
impex_total = impex.iloc[0].unstack("indicator")
impex_total.head()
# We can safely drop the total row from `impex`:
impex = impex.drop("total")
# Let's now further manipulate the `impex` dataframe and add index levels for continents and sub-continents that the countries belong to:
# +
# We load the country-continent info
continents = load_countries_continents()
# We compute the new index
countries_mindex = pd.MultiIndex.from_arrays(continents.values.T, names=continents.columns)
impex_mindex = countries_mindex[countries_mindex.get_level_values(2).isin(impex.index)]
# Reindex such that it matches the incoming multi-index
impex = impex.reindex(impex_mindex.get_level_values(2))
# Finally, apply the new multi-index
impex.set_index(impex_mindex, inplace=True)
impex.index[:5]
# -
# We check that the numbers stay with their indices:
impex.xs("New Zealand", level="country", drop_level=False).fruits.kiwi_fruit.imports
# The imports of kiwi fruits from New Zealand is correct and we conclude that applying the new index was successful.
#
# <p style="font-size: 12px"><strong>Note:</strong> The impex dataframe should only contain information about food destined for humans. During the data collection phase, we were careful to ensure this was true: all items which can also be imported for the use of animal feed were selected by their subcategory of being specifically for human consumption. </p>
# *Domestic Production*
#
# Now, let's load the data on Swiss domestic `production` from [FAO](http://www.fao.org/faostat/en/#data):
production = load_fao()
production.head()
# <p style="font-size: 12px"><strong>Note</strong>: We were careful that only crops produced for human consumption are measured. This is specified in the FAO metadata.</p>
# *Combining imports, exports and domestic production*
#
# Next, we perform an outer join of `impex_total` and `production` to create a meta-dataframe, `suisse` with all total macroeconomic indicators for each commodity subtype:
suisse = impex_total.join(production, how="outer")
suisse.head()
# We have three possibilities for each commodity
# 1. Neither production or import/export values are given or they all sum up to 0
# 2. Either only production values are given or only import/export values are given
# 3. Production, import and export values are all given (and are non-zero)
#
# Commodities fulfilling the first condition will be removed and others will be kept.
#
# <p style="font-size: 12px"><strong>Note</strong>: Missing or 0-valued data may be due to either the values actually being 0 or the data not being collected on these items. Since there is no way of knowing which is the case, we will assume that the values are indeed truly 0 to enable their utilization in the analysis.</p>
# Remove subtypes for which all given quantities sum up to 0
subtypes_no_info = suisse.index[suisse.sum(axis=1) == 0]
suisse.drop(subtypes_no_info, inplace=True)
# *Computing Estimated Consumption*
#
# Next, we shall add columns for `domestic_consumption`, `imported_consumption` and total `consumption`.
#
# Definitions:
# * **domestic consumption**: goods and services consumed in the country where they are produced
# * **imported consumption**: goods and services consumed in the country to which they are imported
#
# **Note**: For the calculation of the above quantities we make the following assumptions:
#
# * food waste is not taken into account and is included as being "consumed"
# * exported quantity is first satisfied by available produced quantity, and then imported quantity
# +
# If exports > production, then we say all produced quantity
# is exported and the rest of exported quantity is satisfied by imports:
# 1. Set domestic_consumption to zero
# 2. Set imported_consumption to imports-(exports-production)
# If exports <= production, we say all exports
# is satisfied by domestic production:
# 1. Set domestic_consumption to production-exports
# 2. Set imported_consumption to imports
suisse["domestic_consumption"] = np.where(
suisse.exports > suisse.production,
0,
suisse.production - suisse.exports
)
suisse["imported_consumption"] = np.where(
suisse.exports > suisse.production,
suisse.imports - suisse.exports + suisse.production,
suisse.imports
)
suisse["consumption"] = suisse.domestic_consumption + suisse.imported_consumption
suisse.head()
# -
# Let's make sure no consumption values are negative:
suisse[suisse.consumption < 0]
# Apparently, recorded *whey* exports exceed the combined imports and production. We handle this special case by setting the consumption value to zero:
# BUG FIX: the original chained assignment
# ``suisse.loc[key]["imported_consumption", "consumption"] = 0`` indexed a
# temporary row object (pandas chained indexing), so the write could land on
# a copy and the DataFrame would remain unchanged.  A single .loc call with
# both the row key and the column list assigns in place.
suisse.loc[("animal_products", "whey"), ["imported_consumption", "consumption"]] = 0
# *Carbon Emissions*
# Now that we have information on Switzerland's imports, exports, production and consumption, we want to have a look at how this translates in terms of equivalent CO$_2$ production. To do this, we could look at Swiss-specific values for the greenhouse gas production for different food types. However, data from other countries is very sparse and reliable data for many food types is only available for certain countries.
#
# Comprehensive data is available on meat and cereal production emissions worldwide, however fruit and vegetable figures are much harder to obtain and only a limited number of studies have been carried out. These studies have been accumulated in a [systematic review](https://www.sciencedirect.com/science/article/pii/S0959652616303584) and these values have been used to calculate averages for a number of different kinds of produce. To ensure our results are consistent, we will use this generalization of global average values for all food types.
#
# We load the emission data:
emissions = load_emissions()
emissions.head()
# Then we integrate the median emissions values into the `suisse` dataframe:
suisse = add_emissions_data(suisse, emissions)
suisse.head()
# <p style="font-size: 12px"><strong>Note:</strong> The emissions categories available were less comprehensive than the FAO/Impex categories. The items lacking emissions values were estimated using the closest available emissions category. For a full list, please see the last 15 lines of <a href="https://drive.google.com/open?id=1KDBiuJ4la_vW3X2_KnD18IQFb00Qoh6B">this file</a>.</p>
#
# So, assuming that domestic transport is negligible, we can estimate the equivalent CO$_2$ emissions for each product type, using the global average values.
# calculate the emissions (resulting from production of the products)
suisse = production_emissions(suisse)
suisse.head()
# The final column, `emissions_sans_transport`, shows the total CO$_2$ equivalent (kg) that would be produced if everything that was consumed in Switzerland was domestically produced, i.e. no transport emissions were considered and Swiss-specific CO$_2$ emissions were used for meat and cereal production. Evidently, it is not possible to produce everything that a current consumer buys locally, so in the following analysis we will consider the effect that these imported products and 'food miles' have on the CO$_2$ emissions resulting from Swiss consumption.
# *Transportation*
#
# Let's load the distances between Switzerland and other countries. To prepare for an augmentation of our analysis later on, we will also load distances to the Netherlands.
# +
# Load the cached country-distance table if present; otherwise recompute it.
try:
    countries = pd.read_pickle("countries.pkl")
except Exception:
    # Cache miss or unreadable pickle: rebuild the distance table from
    # scratch.  (Was a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit.)
    countries = country_distances()
countries.head()
# -
# Next, we load data about transports of imported products from the Impex:
transport = load_impex_transport()
transport.tail(10)
# We remove the totals for each country and store in a separate dataframe:
transport_total = transport.xs("total", level=1)
transport = transport.drop(index="total", level=1)
# Next, we calculate the fractions for each food group transported by each method of transport:
# +
np.seterr("ignore") # ignore divisions by 0 (when the total imports is 0)
transport[
[
"cereals",
"potatoes",
"other_fresh_fruits_vegetables",
"fish",
"meat",
"dairy_products",
]
] = transport.apply(calculate_percent_by_method, args=(transport_total,), axis=1)
transport.head(10)
# -
# The following image provides carbon emissions by transport method, which we use for the analysis:
# <img width="400" height="400" src="https://icmattermost.epfl.ch/files/5zr1jyriupfsfgmr4dtg155ssw/public?h=_GPk0xYK1I16gWsY3GuIsrFC5bTb3Ioh4_W3h3oYDs8">
# divide all the values by 1000 to convert to kg CO2e / kg km
transportCO2 = {
"Air traffic": 0.000733,
"Rail traffic": 0.000037,
"Road traffic": 0.000303,
"Inland waterways": 0.000019,
}
# *Combining Emissions and Transportation Data*
#
# We will now use the country-specific and food item-specific data to calculate carbon costs. For each country and food item pair, we will first calculate how much of that food item imported from that country is consumed by Swiss consumers. This relies upon assumptions that we made previously (mentioned here). The two categories this data processing can fall into are:
#
# * If the exported amount of a given food item is less than what is domestically produced, we assume that all of the Swiss produce is exported first before any of the imports start getting exported. That means there will still be some leftover Swiss-produced food available for Swiss consumption, in addition to all of the imported food from all other countries.
#
# * If, however, there is more of a given food item exported than domestically produced, we must transition to the situation where all of the Swiss-produced goods are exported and some of the imported goods are also exported. In this case, we will assume that each country's imports are exported in an equal percentage (e.g. if Switzerland imports 100 kg of bananas from Country X and 50 kg of bananas from Country Y, and the deficit between Switzerland's production and its exports is 10 kg, a fixed percentage of bananas from each country will be assumed to be exported while the remainder is consumed in Switzerland). Hence, the fraction of that food item from that country consumed in Switzerland is (the total amount of that food item imported from that country) / (the total imports across all countries for that food item). This fraction is multiplied by the total amount of that food item consumed in Switzerland to get results by country.
# First, manipulate the data to prepare for using it
impex_countries = (
impex.stack(["type", "subtype"])
.drop(columns=["exports"])
.reset_index()
)
impex_countries["product"] = impex_countries["subtype"]
impex_countries = impex_countries.set_index(
["type", "subtype", "continent", "country"]
).drop(columns="subcontinent")
# Then, we calculate, for each country and food item pair, how much of
# that food item imported from that country is consumed by Swiss consumers
impex_countries['swiss_consumption'] = impex_countries.apply(
find_consumption, args=(suisse,), axis=1
)
# Below, we can see that there are no rows of data from Libya which have a non-zero `swiss_consumption`. This means there is nothing imported from Libya which is consumed in Switzerland (it is either all exported or there are no imports).
libya = impex_countries.xs("Libya", level="country")
libya = libya[libya.swiss_consumption != 0]
len(libya)
# This is also true for the following countries. In addition, evidently none of them have any transport data because nothing is imported, therefore we'll drop them from the impex_countries dataframe:
# +
countries_no_transport_data = [
"Libya",
"Angola",
"Eritrea",
"Sudan",
"St Lucia",
"Seychelles",
"Amer. Virgin",
"Curaçao",
"Greenland",
"Guiana, French",
"Faeroe Islands",
]
impex_countries.reset_index(inplace=True)
impex_countries = impex_countries[~impex_countries.country.isin(countries_no_transport_data)]
# -
# The next step is to convert the `impex_countries` dataframe, the amount by country and by food item which is eaten in Switzerland, to the carbon costs associated with each country/food item pair.
# map the categories used by the transport data to the categories used in
# our dataframe of imports per country and food
colmap = {
"animal_products": "dairy_products",
"meat": "meat",
"fruits": "other_fresh_fruits_vegetables",
"vegetables": "other_fresh_fruits_vegetables",
"cereals": "cereals",
"seafood": "fish",
}
# We will then apply functions which calculate the overall carbon emissions for a given food product in Switzerland, taking into account the origins of the products and the proportion which was transported by each method of transport.
# +
pd.options.mode.chained_assignment = None
impex_countries["kg_CO2e_transport"] = impex_countries.apply(
swiss_consumption_transport,
axis=1,
args=(transport, transportCO2, countries, colmap),
)
impex_countries["kg_CO2e_transport_via_nl"] = impex_countries.apply(
swiss_consumption_transport,
axis=1,
args=(transport, transportCO2, countries, colmap),
NL=True,
)
# create the index again to get back to a multiindex dataframe
impex_countries.set_index(["type", "subtype", "continent", "country"], inplace=True)
# -
# now add a column which is carbon cost of transport + inherent carbon cost of producing the item
food_list = np.array(impex_countries.index.get_level_values(level=1))
impex_countries["total_kg_CO2e"] = (
impex_countries["kg_CO2e_transport"]
+ suisse.iloc[suisse.index.get_level_values(1).isin(food_list)].median_emissions
* impex_countries["swiss_consumption"]
)
# ***
# **Data analysis and visualization**
# ***
# *Imports and Exports*
# 1. Group by meta-type
# 2. Sum the totals
# 3. Unstack the columns to create a Series
impex_total_metatype = impex_total.groupby("type").sum().unstack()
plot_total_impex(impex_total_metatype)
# Switzerland imports many more fruits, vegetables and meats than it exports. The `animal_products` category is interesting, because import and export quantities are about equal. What would make the most sense is if Switzerland imports different animal products than it exports (rather than the same products being both imported and exported). Let's test this theory by looking at imports and exports for the subcategories of `animal_products`:
impex_total_animal_prods = (
impex_total.loc["animal_products"]
.groupby("subtype")
.sum()
.unstack()
)
plot_impex_animal_prods(impex_total_animal_prods)
# This graph brings more light to the topic. Cheese, a category of which there are many different flavors and consumers like variety, is both heavily imported and exported, likely giving consumers access to a wider variety of cheese types. Eggs, on the other hand, are nearly exclusively imported, while whey is mostly exported.
#
# The butter category of this graph is a good transition to the continuation of the analysis; just because butter is hardly imported or exported does not mean the Swiss do not eat butter! Rather, imports and exports are only part of the broader picture since domestic production is another important consideration. One possible hypothesis for why butter is neither imported nor exported in large quantities is because domestic production is nearly equal to domestic consumption.
# *Swiss Consumption*
# Here we visualize the difference between the ratio of domestic vs. imported consumption
consumption = suisse.copy()
consumption = consumption[["domestic_consumption", "imported_consumption", "consumption"]]
# Compute ratio of domestic vs. imported consumption per metatype
consumption_by_type = consumption.groupby("type").sum()
consumption_by_type["domestic_consumption"] = 100 * (
consumption_by_type.domestic_consumption / consumption_by_type.consumption
)
consumption_by_type["imported_consumption"] = 100 * (
consumption_by_type.imported_consumption / consumption_by_type.consumption
)
consumption_by_type.drop("consumption", axis=1, inplace=True)
consumption_by_type = consumption_by_type.reindex(
["fruits", "vegetables", "cereals", "animal_products", "meat", "seafood"]
)
consumption_by_type
plot_consumption_per_type(consumption_by_type)
# *Bipartite graph between continents and meta food groups, weighted by amounts of food imported from those continents*
bipartite_food_continent(impex)
# **[View bipartite graph here](../docs/_includes/sankey_diagram.html)**
# ***
percentage = glimpse()
print(str(round(percentage)) +
"% of Switzerland's total imports come from countries within a 1000km radius.")
# ***
meta_food_CO2 = impex_countries.total_kg_CO2e.groupby("type").sum()
meta_food_consumption = impex_countries.swiss_consumption.groupby('type').sum()
meta_normalized_CO2 = meta_food_CO2 / meta_food_consumption
plot_carbon_cost_norm_by_consumption(meta_normalized_CO2)
# Let's now look at a plot which compares the inherent carbon cost of a food item (that is, its global average emission value for production multiplied by how much of that item is consumed in Switzerland) with its Swiss-specific carbon cost (the inherent cost plus the transport carbon emissions).
meat = impex_countries.xs("meat")
vegetables = impex_countries.xs("vegetables")
meat_emissions = preprocess_for_stacked_plot(meat, emissions)
vegetables_emissions = preprocess_for_stacked_plot(vegetables, emissions)
plot_stacked(meat_emissions)
plot_stacked(vegetables_emissions)
veggies_transport, veggies_inherent = preprocess_for_double_stacked_plot(vegetables_emissions)
plot_stacked_double(veggies_transport, veggies_inherent)
# *Fruits analysis*
#
# Let's look at fruits in detail
# import amounts, amounts consumed in Switzerland, and kg of CO2e by each fruit
fruits_co2 = (
impex_countries.iloc[impex_countries.index.get_level_values(0) == "fruits"]
.groupby("subtype")
.sum()
)
fruits_co2.head()
plot_fruits_co2(fruits_co2)
# We observe that exotic fruits have slightly higher carbon output, which is not surprising as they're usually imported from distant countries.
# *Considering seasonality...*
# Let's compare tomatoes grown by different methods. For example, tomatoes grown outside of a greenhouse have a lower carbon impact than tomatoes grown in greenhouses. But how does the carbon cost of transport compare to the carbon cost of these higher intensity methods? We can quantify the comparison by looking at the carbon costs of the amount of tomatoes consumed in Switzerland if they were grown in Switzerland or grown in other countries, then transported. For each country, we will compare three growing methods. The countries included in this analysis are the warmer neighboring countries of Spain and Morocco, the biggest tomato trading partners of Switzerland within and outside of Europe, respectively. (We chose to pick one country outside of Europe even though it is the third biggest tomato trading partner, to account for a higher distance of transport.)
# +
# first quantify how many tomatoes come from these two countries, just to show how large of tomato trading partners they are
spain_produced_swiss_consumed_tomatoes = int(
impex_countries.loc["vegetables"]
.loc["tomatoes"]
.loc["Europe"]
.loc["Spain"]
.swiss_consumption
)
morocco_produced_swiss_consumed_tomatoes = int(
impex_countries.loc["vegetables"]
.loc["tomatoes"]
.loc["Africa"]
.loc["Morocco"]
.swiss_consumption
)
total_tomatoes = int(
impex_countries.loc["vegetables"].loc["tomatoes"].sum().swiss_consumption
)
spain_tomatoes_perc = round(
spain_produced_swiss_consumed_tomatoes * 100 / total_tomatoes, 1
)
morocco_tomatoes_perc = round(
morocco_produced_swiss_consumed_tomatoes * 100 / total_tomatoes, 1
)
print(
"Spain produces {}% of swiss-consumed tomatoes and Morocco produces {}%, out of a total of {} kg of tomatoes.".format(
spain_tomatoes_perc, morocco_tomatoes_perc, total_tomatoes
)
)
# -
tomatoes = load_tomatoes(suisse, transport, transportCO2, countries)
tomatoes
# In this dataframe, the columns are:
# * `Median`: the inherent median carbon emission value for tomatoes grown by a given production method (in kg CO2e)
# * `produced_in_CH_month`: kg of CO2e produced if the amount of tomatoes consumed in Switzerland were all produced within Switzerland by that method of production
# * `imported_from_ES_month`: kg of CO2e produced if the amount of tomatoes consumed in Switzerland were all produced in Spain by that method of production and then imported
# * `imported_from_MO_month`: same as `imported_from_ES_month` except produced in and imported from Morocco
# * `transport_percent`: the percentage of the CO2e which results from the transport cost from Spain, in comparison to the inherent production cost
tomatoes_for_graph = tomatoes.drop(columns=["transport_percent", "Median"])
tomatoes_for_graph = tomatoes_for_graph.rename(
columns={
"produced_in_CH_month": "Switzerland",
"imported_from_ES_month": "Spain",
"imported_from_MO_month": "Morocco",
}
)
tomatoes_for_graph = tomatoes_for_graph.stack()
plot_tomatoes(tomatoes_for_graph)
# *Carbon emissions by food group*
histogram_df = (
impex_countries.groupby(["type", "subtype"])
.sum()
.drop(columns=["imports", "kg_CO2e_transport", "kg_CO2e_transport_via_nl"])
)
histogram_df["kg_CO2_per_kg_food"] = (
histogram_df["total_kg_CO2e"] / histogram_df["swiss_consumption"]
)
histogram_df.drop(columns=["swiss_consumption", "total_kg_CO2e"], inplace=True)
histogram_df.head()
animal_prods = np.array(histogram_df.loc["animal_products"])
meat_seafood = np.array(histogram_df.loc[["seafood", "meat"]])
vegan_sourced = np.array(histogram_df.loc[["fruits", "vegetables", "cereals"]])
plot_emmissions_foodgroup(animal_prods, meat_seafood, vegan_sourced)
| src/project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cesar-claros/curso_ciencia_datos/blob/main/test2_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="O6TDYdItCleV" outputId="24bffadd-684c-4fdc-829c-100fb3cee8c4"
# !git clone https://github.com/cesar-claros/curso_ciencia_datos.git
# %cd curso_ciencia_datos/
# + [markdown] id="0_azc99djZP7"
# # 1. Import Libraries
# + id="8_Jf0tsVqI4t"
import numpy as np
import pandas as pd
from sklearn import linear_model
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from matplotlib.cm import get_cmap
# + [markdown] id="b4Am7wKPjh4c"
# # 2. Load dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="MM42czesqS5f" outputId="4c1c8558-e79d-4e1f-be20-e7fb7bc88f0e"
diabetes_data = pd.read_csv("DiabetesData1.csv", sep=",")
diabetes_data
# + [markdown] id="76UUy5W2rGUU"
# ## Set all numeric data types to float32
# + id="_ki7sejbqqza"
df = diabetes_data.select_dtypes(exclude=object).astype("float32")
X = df.drop(['Resultado'], axis=1)
Y = df[['Resultado']]
# + [markdown] id="S2VySM-pjwKq"
# # 3. Exploration of raw data
# + [markdown] id="oUuGE-5kkChk"
# ## 3.1. Correlation of numerical atributes
# + colab={"base_uri": "https://localhost:8080/", "height": 354} id="2Z4OAkf_A8nm" outputId="8857fbb5-98eb-4adf-f1e6-b3f429643760"
ax = sns.heatmap(df.corr(), annot=True, lw=1)
# + [markdown] id="pfy0yKVhkOyH"
# ## 3.3. Countplot
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="lvhr1d5TkQz_" outputId="434d6508-f271-4957-be58-ac412dbe4ff4"
ax = sns.countplot(x="Resultado", data=df)
# + [markdown] id="Zq7ZmCZbkYXE"
# ### This dataset is unbalanced. Therefore, the feature selection and training processes have to be carried out keeping the proportions given by the class label.
# + [markdown] id="nwRAjV7P-Uhy"
# # 4. Feature selection
#
# + [markdown] id="Qfai8bZXE2AD"
# ## 4.1. Stratified K-fold Cross-validated Recursive Feature Elimination
#
# + id="ex6E5fWDFAZI"
# Number of features to keep after recursive feature elimination.
n_features = 3
estimator = linear_model.LogisticRegression(max_iter=500)
selector = RFE(estimator=estimator, n_features_to_select=n_features)
skf = StratifiedKFold(n_splits=10)
ranking_list = []
# Fit RFE on the training portion of every stratified fold and record which
# features it ranks first. NOTE(review): test_index is intentionally unused —
# only the training folds are needed to observe selection stability.
for train_index, test_index in skf.split(X, Y):
    X_train, Y_train = X.iloc[train_index], Y.iloc[train_index]
    selector = selector.fit(X_train, Y_train.values.ravel())
    ranking_list.append(selector.ranking_)
ranking_mat = np.array(ranking_list)
# Keep only rank-1 entries (selected features); zero out the rest.
ranking_mat = np.where(ranking_mat==1, ranking_mat, 0)
# + colab={"base_uri": "https://localhost:8080/", "height": 383} id="Lu-uIeZmQMSQ" outputId="012f3468-6768-4be8-8711-ad49ba464ac5"
ranking_df = pd.DataFrame(ranking_mat, columns=X.columns.values)
# Positional indices of the n_features most frequently selected columns
# (argsort of the negated selection counts = descending order).
mask = (-ranking_df.sum(axis=0)).argsort()[:n_features]
ax = ranking_df.sum(axis=0).plot.bar()
ax.set_ylabel('counts')
ax.set_title('Number of times a feature get selected')
# + colab={"base_uri": "https://localhost:8080/"} id="xf1v1UEPvycK" outputId="61aabbb8-5276-4161-c41c-235cfaa1f0ba"
print('Selected features: ', X.columns[mask.values].values)
# + [markdown] id="NBuH4NYjq5Ym"
# # Question a)
# Crea un modelo de regresión logística a 3 variables para predecir la diabetes. Justifica el porqué de tu elección.
# + [markdown] id="7IuS6vGmnE_T"
# We chose 'Embarazos', 'BMI', and 'Funcion_predictora' because they are the ones that get selected more consistently across splits of a stratified K-fold cross validation procedure using RFE as feature selection mechanism.
# + [markdown] id="qb9OU6iqvgd5"
# # 5. Training the model
# + colab={"base_uri": "https://localhost:8080/"} id="HUDS1VCX21ZK" outputId="fe213b1c-92e4-41b9-ce73-2b72801e2589"
# Restrict the design matrix to the RFE-selected features and fit the final model.
X_fs = X[X.columns[mask.values].values]
model_lr = linear_model.LogisticRegression(max_iter=500)
model_lr.fit(X_fs,Y.values.ravel())
# + id="J-h2ae6Ivjpw"
# cross-validation procedure
# A fresh (unfitted) estimator is cloned by cross_val_score for each fold.
model = linear_model.LogisticRegression(max_iter=500)
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
n_scores = cross_val_score(model, X_fs, Y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
# + [markdown] id="Qza4ad8EyLPY"
# # Question b)
#
# Realiza una validacion cruzada para justificar la certitud de tu
# trabajo. ¿Cuál es el promedio del score de tu validación?
# + colab={"base_uri": "https://localhost:8080/"} id="6y4yM_3NwVF9" outputId="10d14d07-dc9b-48da-d572-de600af8bf7d"
# Mean accuracy over the 5x3 repeated stratified folds.
print('Mean accuracy: %.3f:' % (np.mean(n_scores)))
# + [markdown] id="b4B3j_xU3O0J"
# # 6. Evaluate the model
# + [markdown] id="m9NPgZd43mNm"
# ## 6.1. Load test instances
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="ThbSTCfK3o8T" outputId="bb0e7892-1d81-4dc7-bf5e-8f6022a07bb0"
# Unlabeled patients for which predictions are requested.
diabetes_pred = pd.read_csv("DiabetesPredicciones.csv", sep=",")
diabetes_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="dQMJlDWK3uaR" outputId="bdaf1eee-e32d-4cda-db19-f5f43b41a87e"
# Pick the features that were selected previously
X_pred = diabetes_pred[X.columns[mask.values].values]
Y_pred = model_lr.predict(X_pred)
Y_pred_df = pd.DataFrame(Y_pred, columns=['Resultado'])
# Bar plot of predicted class counts, annotated with the count on each bar.
ax = sns.countplot(x="Resultado", data=Y_pred_df)
for p in ax.patches:
    ax.annotate(f'\n{p.get_height()}', (p.get_x()+0.4, p.get_height()), ha='center', va='top', color='white', size=18)
plt.show()
# + [markdown] id="VthVivFR6Inv"
# # Question c)
#
# En el documento "DiabetesPredicciones.csv" estan una lista de
# pacientes que potencialmente pueden tener esta enfermedad, haz
# una predicción con este conjunto. ¿Cuántos de ellos dan un
# resultado positivo?
# + colab={"base_uri": "https://localhost:8080/"} id="OgZ9mKUN6HlN" outputId="372a2fc2-72b3-4887-9f34-f8d6798dcc80"
print(Y_pred_df.value_counts())
# value_counts()[1] is the number of rows predicted positive (class label 1).
print('\n',Y_pred_df.value_counts()[1].values[0], 'personas dan un resultado positivo')
# + [markdown] id="I-6Ocit97lAJ"
# ## 6.2. Load test labels
# + colab={"base_uri": "https://localhost:8080/"} id="ZvHdx25t7VhU" outputId="bbc09a88-1332-4ba4-b9f7-927cb3f1e342"
# Ground-truth labels for the patients predicted above.
diabetes_res = pd.read_csv("DiabetesResultados.csv", sep=",")
diabetes_res.value_counts()
# + [markdown] id="NyYHUVD5_QDl"
# # Question d)
#
# Si los resultados de los pacientes anteriores se encuentran en el dataset "DiabetesResultados.csv". ¿Cuál es la exactitud, la precisión y la tasa de error de la predicción?
# + colab={"base_uri": "https://localhost:8080/"} id="6RYBOqGo-x_T" outputId="b0d23a3a-544b-41d2-f579-4925cc20d415"
# Compare predictions against the ground truth.
# sklearn's confusion_matrix.ravel() yields (tn, fp, fn, tp) for binary labels.
tn, fp, fn, tp = confusion_matrix(diabetes_res,Y_pred_df ).ravel()
# BUG FIX: the original printed precision tp/(tp+fp) as "Exactitud" (accuracy)
# and recall tp/(tp+fn) as "Precisión". Accuracy is the fraction of ALL
# predictions that are correct; precision is tp over predicted positives.
print('Exactitud=',((tp+tn)/(tp+tn+fp+fn)))
print('Precisión=',(tp/(tp+fp)))
print('Taza de error= %.3f%%' % (100*(1-(tp+tn)/(tp+tn+fp+fn))))
| test2_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import logging
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# # Introduction
# this is a tutorial for how to implement an sklearn compatible gaussian naive bayes classifier from scratch. Above, we've imported the libraries we'll need to implement the classifier as well as the built in sklearn GaussianNB classifier we'll use as a reference for the performance of our classifier. If you haven't already, make sure you run the cell above to import the libraries.
#
# # Getting Some Data
# We'll use data from the ECE 523 github page for this demo. The datafiles can be found [here](https://github.com/gditzler/UA-ECE-523-Sp2018/tree/master/data). Note that in the function below we use the path to the raw data on github for downloading.
#
def load_course_data(dataset_name = "abalone.csv"):
    """
    Download one of the ECE 523 course datasets from GitHub.

    :param dataset_name: filename of the dataset to load (default is "abalone.csv")
    :return: numpy array containing the csv data
    """
    # Raw-file URL on the course repository; the filename is appended directly.
    base_url = "https://raw.githubusercontent.com/gditzler/UA-ECE-523-Sp2018/master/data/"
    frame = pd.read_csv(base_url + dataset_name)
    return frame.values
# ## Try it Out!
# let's just load the default abalone.csv dataset and take a look!
# Fetch the default abalone dataset (network call) and inspect it.
myData = load_course_data()
print(myData.shape)
print(myData)
# So we can see we get a numpy array containing 4176 rows of data in 9 columns. The last column contains the labels. Typically SKLearn Classifiers take the features and labels as separate arguments, so let's write a function to break the data down.
def split_data(data):
    """
    Split an n-by-m array holding n datapoints with m - 1 feature columns and a
    final label column into features (X) and labels (Y).

    :param data: the numpy array to split up
    :return: tuple (X, Y) of feature matrix and label vector
    """
    # Negative indexing: everything except the last column vs. the last column.
    features = data[:, :-1]
    targets = data[:, -1]
    return features, targets
# Now let's see how it works...
# Separate features from labels, then hold out half the data for testing.
data, labels = split_data(myData)
# let's split the data into training and testing data
X, X_t, Y, Y_t = train_test_split(
    data,
    labels,
    test_size=.5,
    random_state=42
)
# Sanity-check shapes and contents of the raw and training splits.
print(data.shape)
print(data)
print(X.shape)
print(X)
print(Y.shape)
print(Y)
# # Getting Started with Naive Bayes
# for a Naive Bayes Classifier $P(Y_k | X) = \frac{P(Y_k) \times P(X | Y_k)}{P(X)}$ or the $Posterior = \frac{prior \times likelihood}{evidence}$ now $evidence$ can be hard to find, but fortunately, it's also the least important, since it's the same for all $Y_i$, and therefore scales all of the posterior probabilities proportionally. So we can simply leave it out. That leaves us with $P(Y_k | X) \propto P(Y_k) \times P(X | Y_k)$ as the fundamental equation of our naive bayes classifier.
#
# ## Calculating the prior probability
# to create our classifier, it's easiest to start by calculating the prior probabilities for all of the classes in the dataset. $P(Y_k) = \frac{number\ of\ occurences\ of\ Y_k\ in\ the\ dataset}{total\ size\ of\ the\ data\ set}$
#
# ## Calculating the likelihood or class conditional probability
# This is where the "Gaussian" and "Naive" from the name of the classifier come into play. First, we're going to assume the class conditional probabilities are normally distributed, second we're going to assume that they're independent from one another. This makes our posterior probability equation $P(Y_k | X) \propto P(Y_k)\prod_{i=1}^{n-features}P(X_i | Y_k)$. For model training purposes this means for each class we're going to want the mean and variance for each feature that is part of a data point in that class.
def fit_model(X,Y):
    """
    Estimate Gaussian Naive Bayes parameters from training data.

    Assumes labels are integers 0..n_classes-1. Returns the class count, the
    prior probabilities, and the per-class per-feature mean and variance.

    :param X: (n_samples, n_features) feature matrix
    :param Y: (n_samples,) integer label vector
    :return: (n_classes, class_count, prior, n_features, mean, variance)
    """
    n_classes = np.unique(Y).size
    print("n_classes:{0:d}".format(n_classes))
    # Occurrences of each class label in the training set.
    class_count = np.array([float(Y[Y == label].size) for label in range(n_classes)])
    print(("class_count:" + np.array_str(class_count)))
    # Prior P(Y_k) = class frequency; vectorised division over all classes.
    prior = class_count / np.sum(class_count)
    print("prior:" + np.array_str(prior))
    print("sum or prior = {0:f}".format(np.sum(prior)))
    n_features = X.shape[1]
    print("n_features:{0:d}".format(n_features))
    # Per-class mean and variance of every feature.
    mean = np.zeros((n_classes, n_features))
    variance = np.zeros(mean.shape)
    for cls in range(n_classes):
        members = X[Y == cls, :]
        mean[cls] = members.mean(axis=0)
        variance[cls] = members.var(axis=0)
    print("mean:\n" + np.array_str(mean))
    print("variance:\n" + np.array_str(variance))
    return n_classes, class_count, prior, n_features, mean, variance
# Once again, let's give it a try. I've thrown some print statements into the function so you can see what's going on.
n_classes, class_count, prior, n_features, mean, variance = fit_model(X,Y)
# # Making Predictions
# to make predictions with this model, we'll be finding the label with the maximum posterior probability as produced by the equation above. However, we'll be making one small tweak. We'll be using the log probabilities. This is useful for preventing floating point underflows, because otherwise we could potentially be multiplying a lot of numbers that are $<1$ together. Note that this makes the equation for the posterior: $log(P(Y_k | x)) \propto log(P(Y_k)) + \sum_{i=1}^{n-features}log(P(X_i | Y_k))$. To start with we'll need a function to compute $\sum log(P(X_i|Y_k))$ from the gaussian distribution.
# +
def log_liklihood(X_test, n_classes, mean, variance):
    """
    Per-class Gaussian log-likelihood sum_i log P(x_i | Y_k) for every row of X_test.

    :param X_test: (n_samples, n_features) points to evaluate
    :param n_classes: number of classes
    :param mean: (n_classes, n_features) per-class feature means
    :param variance: (n_classes, n_features) per-class feature variances
    :return: (n_samples, n_classes) array of log-likelihoods
    """
    class_log_liklihoods = []
    for idx in np.arange(0, n_classes):
        # log of the Gaussian normalisation constant, summed over features
        liklihood = - 0.5 * np.sum(np.log(2 * np.pi * variance[idx,:]))
        # BUG FIX: the original referenced the module-level training matrix `X`
        # here instead of the `X_test` parameter, so the argument was ignored.
        liklihood += -0.5 * np.sum(np.square((X_test - mean[idx, :])) / ( variance[idx,:]) , 1)
        class_log_liklihoods.append(liklihood)
    class_log_liklihoods = np.array(class_log_liklihoods).T
    print("log_liklihood:class_log_liklihood:\n" + np.array_str(class_log_liklihoods))
    return class_log_liklihoods
l_liklihood = log_liklihood(X_t, n_classes, mean, variance)
# -
# finally, we'll combine these with the posterior to get our prediction probabilities
# +
def predict_log_proba(prior, log_liklihood):
    """
    Combine class priors with log-likelihoods into unnormalised log posteriors.

    :param prior: (n_classes,) prior probabilities
    :param log_liklihood: (n_samples, n_classes) log-likelihood matrix
    :return: (n_samples, n_classes) array of log P(Y_k) + log P(X | Y_k)
    """
    # Broadcasting adds the per-class log prior to every row at once,
    # replacing the original explicit per-row loop.
    log_probas = log_liklihood + np.log(prior)
    print("log_probas\n" + np.array_str(log_probas))
    return log_probas
log_probas = predict_log_proba(prior, l_liklihood)
# -
# ## Final Prediction:
# the last thing to do is select the column corresponding to the label with the maximum posterior probability...
# +
def predict(log_probas):
    """
    Pick, for every row, the class index with the highest log posterior.

    :param log_probas: (n_samples, n_classes) log-posterior matrix
    :return: (n_samples,) float array of predicted class indices
    """
    # Vectorised argmax over the class axis; cast to float to match the
    # original implementation's preallocated float array.
    predictions = np.argmax(log_probas, axis=1).astype(float)
    print("predictions: " + np.array_str(predictions))
    return predictions
predict(log_probas)
# -
# # An SKLearn Classifier Comparison
# Below I've wrapped the functions from the examples above into an sklearn compatible classifier, complete with "score" function that can be used in k-fold validation.
# +
from sklearn.base import BaseEstimator, ClassifierMixin
class GaussianNaiveBayesFromScratch(BaseEstimator, ClassifierMixin):
    """sklearn-compatible Gaussian Naive Bayes classifier built from scratch.

    Wraps the step-by-step functions demonstrated above into a single
    estimator. Assumes integer labels 0..n_classes-1.
    """
    def __init__(self):
        # All parameters are learned in fit(); None until then.
        self.n_classes = None
        self.prior = None
        self.n_features = None
        self.class_count = None
        self.mean = None
        self.variance = None
    def fit(self, X, Y):
        """Learn priors and per-class Gaussian parameters from (X, Y)."""
        # store the number of classes
        self.n_classes = np.unique(Y).size
        #logging.debug("GaussianNaiveBayesFromScratch:n_classes:{0:d}".format(self.n_classes))
        # store the number of occurences of each class in the training data
        self.class_count = np.zeros(self.n_classes)
        for label in range(0,self.n_classes):
            self.class_count[label] = Y[Y == label].size
        #logging.debug(("GaussianNaiveBayesFromScratch:class_count:" + np.array_str(self.class_count)))
        # compute the prior probability of each class from the training data
        self.prior = np.zeros(self.n_classes)
        for idx, value in enumerate(self.class_count):
            self.prior[idx] = value / np.sum(self.class_count)
        #logging.debug("GaussianNaiveBayesFromScratch:prior:" + np.array_str(self.prior))
        #logging.debug("GaussianNaiveBayesFromScratch:sum or prior = {0:f}".format(np.sum(self.prior)))
        self.n_features = X.shape[1]
        #logging.debug("GaussianNaiveBayesFromScratch:n_features:{0:d}".format(self.n_features))
        # compute the mean and variance of each feature per class
        self.mean = np.zeros((self.n_classes, self.n_features))
        self.variance = np.zeros(self.mean.shape)
        for lbl in np.arange(0, self.n_classes):
            # group training data by label
            X_lbl = X[Y == lbl, :]
            mean = np.mean(X_lbl, axis = 0)
            variance = np.var(X_lbl, axis = 0)
            self.mean[lbl] = mean
            self.variance[lbl] = variance
        #logging.debug("GaussianNaiveBayesFromScratch:mean:\n" + np.array_str(self.mean))
        #logging.debug("GaussianNaiveBayesFromScratch:variance:\n" + np.array_str(self.variance))
        return self
    def _log_liklihood(self, X):
        """Per-class sum of Gaussian log-likelihoods for each row of X."""
        class_log_liklihoods = []
        for idx in np.arange(0, self.n_classes):
            # log normalisation constant plus the quadratic exponent term
            liklihood = - 0.5 * np.sum(np.log(2 * np.pi * self.variance[idx,:]))
            liklihood += -0.5 * np.sum(np.square((X - self.mean[idx, :])) / ( self.variance[idx,:]) , 1)
            class_log_liklihoods.append(liklihood)
        class_log_liklihoods = np.array(class_log_liklihoods).T
        #logging.debug("GaussianNaiveBayesFromScratch:_log_liklihood:class_log_liklihood:\n" + np.array_str(class_log_liklihoods))
        return class_log_liklihoods
    @property
    def _log_prior(self):
        # Log of the class priors, recomputed on access.
        log_prior = np.log(self.prior)
        #logging.debug("GaussianNaiveBayesFromScratch:_log_prior:log_prior\n" + np.array_str(log_prior))
        return log_prior
    def predict_log_proba(self, X):
        """Unnormalised log posteriors: log prior + log likelihood per class."""
        log_liklihood = self._log_liklihood(X)
        log_probas = np.zeros(log_liklihood.shape)
        for idx, row in enumerate(log_liklihood):
            # this is the sum log(p(Y)) + log(p(X | Y))
            log_probas[idx] = np.add(row , self._log_prior)
        #logging.debug("GaussinaNaiveBayesFromScratch:predict_log_probas:log_probas:\n" + np.array_str(log_probas))
        return log_probas
    def predict(self, X):
        """Return the class index with the highest posterior for each row."""
        prob = self.predict_log_proba(X)
        predictions = np.zeros(prob.shape[0])
        for idx, row in enumerate(prob):
            predictions[idx] = np.argmax(row)
        return predictions
    def score(self, X, Y):
        """Mean accuracy of predictions on X against true labels Y."""
        predicted = self.predict(X)
        correct = predicted[predicted == Y]
        return correct.shape[0] / Y.shape[0]
# -
# let's see how this compares to the built in SKLearn Naive Bayes Classifier.
# +
from sklearn.model_selection import cross_val_score
def compare_classifiers(data_set = "abalone.csv"):
    """
    Compare the from-scratch classifier against sklearn's GaussianNB using
    5-fold cross-validation on one of the course datasets (downloaded live).

    :param data_set: filename of the course dataset to evaluate on
    """
    data = load_course_data(data_set)
    data, labels = split_data(data)
    GNB_from_scratch = GaussianNaiveBayesFromScratch()
    GNB_fs_scores = cross_val_score(GNB_from_scratch, data, labels, cv=5)
    print("GaussianNaiveBayesFromScratch 5-fold cross validation score: {0:f}".format(np.mean(GNB_fs_scores)))
    sk_GNB = GaussianNB()
    sk_scores = cross_val_score(sk_GNB, data, labels, cv=5)
    print("GaussianNB 5-fold cross validation score: {0:f}".format(np.mean(sk_scores)))
# -
# Try out some of the different data-sets on the github page by changing out the filename in the command below
compare_classifiers("adult_test.csv")
| code/Bayes/Naive Bayes Classifier from Scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# # Classificador utilizando Redes Neurais Convolucionais
# ***
# ## Instalação dos pacotes
# ***
# pacote de inteligência artificial
import tensorflow as tf
# pacote de gerenciamento de conjuntos de dados
import pandas as pd
# pacote de procedimentos numéricos
import numpy as np
# pacote de visualização gráfica
import matplotlib.pyplot as plt
import seaborn as sn
# pacote com funções para auxiliar o aprendizado de máquina
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# ## Organizando o Dataframe
# ***
# Training, validation and test datasets (CSV index files with image paths and labels).
dataframe1 = pd.read_csv('../csv_data/braille_character1.csv', delimiter = ',',
                        header = 0, usecols = ['Path', 'Labels'])
dataframe2 = pd.read_csv('../csv_data/braille_character2.csv', delimiter = ',',
                        header = 0, usecols = ['Path', 'Labels'])
dataframe3 = pd.read_csv('../csv_data/braille_character3.csv', delimiter = ',',
                        header = 0, usecols = ['Path', 'Labels'])
# Prefix each filename in the first dataframe with its image directory.
# NOTE(review): mutating the column Series in place like this triggers pandas'
# SettingWithCopy warning path; a vectorised string concat would be cleaner.
dataframe1_files = dataframe1['Path']
for i in range(0, len(dataframe1_files)):
    dataframe1_files[i] = '../files/images1/'+dataframe1_files[i]
dataframe1['Path'] = dataframe1_files
# Prefix each filename in the second dataframe with its image directory.
dataframe2_files = dataframe2['Path']
for i in range(0, len(dataframe2_files)):
    dataframe2_files[i] = '../files/images2/'+dataframe2_files[i]
dataframe2['Path'] = dataframe2_files
# Prefix each filename in the third dataframe with its image directory.
dataframe3_files = dataframe3['Path']
for i in range(0, len(dataframe3_files)):
    dataframe3_files[i] = '../files/images3/'+dataframe3_files[i]
dataframe3['Path'] = dataframe3_files
# Concatenate the three indexes into a single dataframe.
dataframe = pd.concat((dataframe1, dataframe2, dataframe3))
# One-hot encode the image labels; keep Path and Labels alongside the dummies.
df = pd.get_dummies(dataframe['Labels'])
df['Path'] = dataframe['Path']
df['Labels'] = dataframe['Labels']
# ## Separando as Imagens de Treinamento, Validação e Teste
#
# Stratified split: 80% train, then the remaining 20% halved into validation and test.
train_df, validation_df = train_test_split(df, train_size = 0.8, random_state = 42,
                                stratify = df['Labels'], shuffle = True)
validation_df, test_df = train_test_split(validation_df, test_size = 0.5, random_state = 42,
                                stratify = validation_df['Labels'], shuffle = True)
# Inspect the resulting split sizes.
print('Dimensão dos Dados de Treinamento: ', train_df.shape)
print('Dimensão dos Dados de Validação:', validation_df.shape)
print('Dimensão dos Dados de Teste:', test_df.shape)
# x-axis tick labels for the per-character bar charts (one per letter a-z).
xticks = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
xticks = np.array(xticks)
# +
# Bar chart of image counts per character in the training split.
# NOTE(review): `labels[i]-1` implies labels are integers 1..26 — confirm in the CSVs.
labels = train_df['Labels']
labels = np.array(labels)
count = np.zeros((26,1))
for i in range(0, len(labels)):
    count[labels[i]-1] += 1
plt.figure(figsize=(15, 10))
plt.bar(xticks, np.squeeze(count), color ='green', width = 0.7)
plt.title('Quantidade de imagens em cada um dos caracteres (Treinamento)')
plt.xlabel('Caracteres')
plt.ylabel('Quantidade de imagens')
# +
# Bar chart of image counts per character in the validation split.
labels = validation_df['Labels']
labels = np.array(labels)
count = np.zeros((26,1))
for i in range(0, len(labels)):
    count[labels[i]-1] += 1
plt.figure(figsize=(15, 10))
plt.bar(xticks, np.squeeze(count), color ='green', width = 0.7)
plt.title('Quantidade de imagens em cada um dos caracteres (Validação)')
plt.xlabel('Caracteres')
plt.ylabel('Quantidade de imagens')
# +
# Bar chart of image counts per character in the test split.
labels = test_df['Labels']
labels = np.array(labels)
count = np.zeros((26,1))
for i in range(0, len(labels)):
    count[labels[i]-1] += 1
plt.figure(figsize=(15, 10))
plt.bar(xticks, np.squeeze(count), color ='green', width = 0.7)
plt.title('Quantidade de imagens em cada um dos caracteres (Teste)')
plt.xlabel('Caracteres')
plt.ylabel('Quantidade de imagens')
# -
# -
# ## Organizando o Gerador de Imagens
#
def binarize(image):
    '''
    Rescale an image into the [0, 1] range by dividing by its maximum value.

    Despite the name this is a normalisation, not a thresholding step — it
    assumes the source images are already (close to) binary.

    :param image: numpy array of non-negative pixel values
    :return: the image scaled so its maximum value is 1
    '''
    peak = np.max(image)
    # Guard against an all-zero image, which would otherwise divide by zero
    # and propagate NaNs into the generator pipeline.
    if peak == 0:
        return image
    return image / peak
# Build the list of one-hot label column names (integers 1..26).
labels = []
for i in range(0, 26):
    labels.append(i+1)
# Image generator for the training data: grayscale, resized to 33x24,
# normalised by the `binarize` preprocessing function, batches of 128.
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function = binarize)
train_generator = train_gen.flow_from_dataframe(dataframe = train_df,
                                directory='',
                                x_col = 'Path',
                                y_col = labels,
                                target_size = (33,24),
                                color_mode = 'grayscale',
                                class_mode = 'raw',
                                batch_size = 128,
                                shuffle = True, seed = 42,
                                interpolation = 'constant')
# +
# Preview a grid of training images from the first batch.
# NOTE(review): `x[i]` only varies with the row index, so each row repeats the
# same image 8 times — likely `x[i*8+j]` was intended; confirm before changing.
x, y = train_generator[0]
fig, axs = plt.subplots(nrows = 4, ncols = 8, figsize = (15, 10))
for i in range (0, 4):
    for j in range(0, 8):
        axs[i][j].imshow(x[i], vmin = np.min(x[i]), vmax = np.max(x[i]), cmap = 'gray')
# -
# Image generator for the validation data (same settings as training).
validation_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function = binarize)
validation_generator = validation_gen.flow_from_dataframe(dataframe = validation_df,
                                directory='',
                                x_col = 'Path',
                                y_col = labels,
                                target_size = (33,24),
                                color_mode = 'grayscale',
                                class_mode = 'raw',
                                batch_size = 128,
                                shuffle = True, seed = 42,
                                interpolation = 'constant')
# +
# Preview a grid of validation images from the first batch.
x, y = validation_generator[0]
fig, axs = plt.subplots(nrows = 4, ncols = 8, figsize = (15, 10))
for i in range (0, 4):
    for j in range(0, 8):
        axs[i][j].imshow(x[i], vmin = np.min(x[i]), vmax = np.max(x[i]), cmap = 'gray')
# -
# Image generator for the test data (same settings as training).
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function = binarize)
test_generator = test_gen.flow_from_dataframe(dataframe = test_df,
                                directory='',
                                x_col = 'Path',
                                y_col = labels,
                                target_size = (33,24),
                                color_mode = 'grayscale',
                                class_mode = 'raw',
                                batch_size = 128,
                                shuffle = True, seed = 42,
                                interpolation = 'constant')
# +
# Preview a grid of test images from the first batch.
x, y = test_generator[0]
fig, axs = plt.subplots(nrows = 4, ncols = 8, figsize = (15, 10))
for i in range (0, 4):
    for j in range(0, 8):
        axs[i][j].imshow(x[i], vmin = np.min(x[i]), vmax = np.max(x[i]), cmap = 'gray')
# -
# -
# ## Organizando a Arquitetura da Rede
# +
# CNN architecture: two Conv+MaxPool stages, then two dense layers and a
# 26-way softmax output (one unit per braille character).
# NOTE(review): the name `input` shadows the Python builtin; harmless here
# but worth renaming if this cell grows.
input = tf.keras.layers.Input(shape = (33, 24, 1))
x = tf.keras.layers.Conv2D(filters = 32, kernel_size = (2,2), strides = (1,1),
                           activation = tf.nn.relu, name = 'Conv2D-1')(input)
x = tf.keras.layers.MaxPooling2D(pool_size = (2,2), name = 'Pooling-1')(x)
x = tf.keras.layers.Conv2D(filters = 64, kernel_size = (2,2), strides = (1, 1),
                           activation = tf.nn.relu, name = 'Conv2D-2')(x)
x = tf.keras.layers.MaxPooling2D(pool_size = (2, 2), name = 'Pooling-2')(x)
x = tf.keras.layers.Flatten(name = 'Flatten-1')(x)
x = tf.keras.layers.Dense(units = 128, activation = tf.nn.relu, name = 'Dense-1')(x)
x = tf.keras.layers.Dense(units = 64, activation = tf.nn.relu, name = 'Dense-2')(x)
output = tf.keras.layers.Dense(units = 26, activation = tf.nn.softmax)(x)
model = tf.keras.models.Model(inputs = input, outputs = output)
# -
# Summary of the network architecture defined above.
model.summary()
# Compile with Adam and categorical cross-entropy (matches the one-hot labels).
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
# ## Definição dos Callbacks
# Save only the best weights seen so far, judged by validation accuracy.
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath = 'weights.h5', monitor = 'val_acc',
                                verbose = 1, save_best_only = True, save_weights_only = True,
                                mode = 'max')
# Append the per-epoch training history to a CSV file (overwritten each run).
csv_logger = tf.keras.callbacks.CSVLogger(filename = 'history.csv', separator = ",",
                                append=False)
# ## Treinamento da Rede Neural Convolucional
# Number of training epochs.
epochs = 15
# Steps per epoch over the training data (batch size 128).
stes_per_epochs = len(train_df) // 128
# Steps per epoch over the validation data (+1 to cover the partial last batch).
validation_steps_per_epoch = (len(validation_df) // 128) + 1
# Callbacks applied during training.
callbacks = [checkpoint, csv_logger]
# Train with the training generator, validating each epoch on the validation generator.
model.fit(train_generator, steps_per_epoch = stes_per_epochs, epochs = epochs,
          verbose = 1, callbacks = callbacks, validation_data = validation_generator,
          validation_steps = validation_steps_per_epoch, )
# ## Avaliando o Desempenho da Rede com os dados de teste
# Restore the best checkpointed weights before evaluating.
model.load_weights('weights.h5')
# +
# Evaluate on the held-out test generator; evaluate() returns [loss, acc].
evaluate = model.evaluate(test_generator)
print('Acurácia com os Dados de Teste:', evaluate[1] * 100)
print('Função de Perda com os Dados de Teste:', evaluate[0] * 100)
# +
# Pull three batches from the test generator and stack them for prediction.
x1, y1 = test_generator[0]
x2, y2 = test_generator[1]
x3, y3 = test_generator[2]
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
# -
# Predicted class probabilities for the stacked test images.
predict_y = model.predict(x)
# Confusion matrix of true vs. predicted letters, rendered as a heatmap.
confusion_ma = confusion_matrix(np.argmax(y, axis = 1), np.argmax(predict_y, axis=1))
df_cm = pd.DataFrame(confusion_ma, index = [i for i in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"],
                  columns = [i for i in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"])
plt.figure(figsize = (20,14))
sn.set(font_scale=1.4)
sn.heatmap(df_cm, annot=True, annot_kws={"size": 20})
plt.title('Matriz de Confusão', size = 30)
# Reload the per-epoch history written by the CSVLogger callback.
history = pd.read_csv('history.csv', delimiter = ',')
# Plot accuracy and loss curves for training and validation.
# NOTE(review): the loss curves are also multiplied by 100 so they share the
# accuracy scale — they are not percentages; confirm this is intentional.
plt.figure(figsize = (15, 10))
plt.plot(np.arange(0, epochs), history['acc'] * 100, label = 'Acurácia - Treinamento')
plt.plot(np.arange(0, epochs), history['val_acc'] * 100, label = 'Acurácia - Validação')
plt.plot(np.arange(0, epochs), history['loss'] * 100, label = 'Erro - Treinamento')
plt.plot(np.arange(0, epochs), history['val_loss'] * 100, label = 'Erro - Validação')
plt.legend()
plt.title('Histórico de Treinamento/Validação', size = 25)
plt.xlabel('Épocas')
# ## Salvando as informações obtidas
# Persist raw confusion-matrix data for nicer plotting elsewhere.
np.savetxt('confusion_ma.txt', confusion_ma)
np.savetxt('y_true.txt', np.argmax(y, axis = 1))
np.savetxt('y_predict.txt', np.argmax(predict_y, axis=1))
# Save the full trained model.
model.save('obr-classifier')
| classifier-cnn/cnn_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fitting
# +
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import glob
from GNN import BinarizationGNN
# %matplotlib inline
# %load_ext autoreload
# %autoreload 1
# %aimport GNN
# -
def readdata(trainlabelpath):
    """Read paired graph/label files.

    Each ``*_label.txt`` file holds one integer label on its first line;
    the companion ``*_graph.txt`` file (same name, ``_graph`` suffix)
    holds a node count on its first line followed by the rows of the
    adjacency matrix as whitespace-separated ints.

    Returns a tuple ``(graphs, labels)``: ``graphs`` is a numpy array of
    adjacency matrices, ``labels`` a plain list of ints.
    """
    labels = []
    graphs = []
    for label_path in trainlabelpath:
        with open(label_path, 'r') as label_file:
            labels.append(int(label_file.readline()))
        # Derive the companion graph file from the label file name.
        graph_path = label_path.replace('_label', '_graph')
        with open(graph_path, 'r') as graph_file:
            rows = graph_file.readlines()
        # Skip the first line (node count) and parse each matrix row.
        graphs.append([[int(tok) for tok in row.split()] for row in rows[1:]])
    return np.array(graphs), labels
# Glob all label files and shuffle their order so the split below is random.
TRAINLABELPATH = "../datasets/train/*_label.txt"
trainlabelpath = np.random.permutation(glob.glob(TRAINLABELPATH))
print(len(trainlabelpath))
# ### Use 1500 samples for training and 500 for validation
traingraph, trainlabel = readdata(trainlabelpath)
learngraph = traingraph[:1500]
learnlabel = trainlabel[:1500]
validgraph = traingraph[1500:]
validlabel = trainlabel[1500:]
# ### Check decay
# +
# %%time
# Baseline GNN run (default aggregation activation). The model is built
# with epoch=1 so each .fit() call below advances exactly one epoch and
# loss/accuracy can be recorded after every epoch.
gnn = BinarizationGNN(epoch=1, batch_size=10)
learn_loss = list()
valid_loss = list()
learn_prob = list()
valid_prob = list()
epoch = 200
# Record the untrained (epoch-0) metrics first so index 0 is the baseline.
# NOTE(review): predict_prob presumably returns the fraction of correct
# predictions — confirm against the GNN implementation.
learn_loss.append(gnn.loss(learngraph, learnlabel))
valid_loss.append(gnn.loss(validgraph, validlabel))
learn_prob.append(gnn.predict_prob(learngraph, learnlabel))
valid_prob.append(gnn.predict_prob(validgraph, validlabel))
for i in range(epoch):
    print(f'epoch {i+1} start')
    gnn.fit(learngraph, learnlabel)
    learn_loss.append(gnn.loss(learngraph, learnlabel))
    valid_loss.append(gnn.loss(validgraph, validlabel))
    learn_prob.append(gnn.predict_prob(learngraph, learnlabel))
    valid_prob.append(gnn.predict_prob(validgraph, validlabel))
# +
# Three panels: full loss curve, loss without the epoch-0 baseline point
# (so the trained epochs are readable), and accuracy per epoch.
plt.style.use('ggplot')
plt.figure(figsize=(18, 5))
plt.subplot(1, 3, 1)
plt.plot(learn_loss, label='learning data')
plt.plot(valid_loss, label ='validation data')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss')
plt.legend()
plt.subplot(1, 3, 2)
plt.plot(learn_loss[1:], label='learning data')
plt.plot(valid_loss[1:], label ='validation data')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss')
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(learn_prob, label='learning data')
plt.plot(valid_prob, label='validation data')
plt.xlabel('epoch')
plt.ylabel('correct rate')
plt.title('Correct answer rate')
plt.legend()
plt.show()
# Final (last-epoch) metrics of the ReLU run.
print(f'learn loss: {learn_loss[-1]}')
print(f'valid loss: {valid_loss[-1]}')
print(f'learn prob: {learn_prob[-1] * 100} %')
print(f'valid prob: {valid_prob[-1] * 100} %')
# -
# ### ReLU / Swish
# +
# %%time
# Same protocol as the ReLU run above, but with Swish as the aggregation
# activation; the s_ prefix keeps the two runs' histories separate.
s_gnn = BinarizationGNN(epoch=1, batch_size=10, aggregate_activate_func='swish')
s_learn_loss = list()
s_valid_loss = list()
s_learn_prob = list()
s_valid_prob = list()
epoch = 200
# Epoch-0 (untrained) baseline metrics.
s_learn_loss.append(s_gnn.loss(learngraph, learnlabel))
s_valid_loss.append(s_gnn.loss(validgraph, validlabel))
s_learn_prob.append(s_gnn.predict_prob(learngraph, learnlabel))
s_valid_prob.append(s_gnn.predict_prob(validgraph, validlabel))
for i in range(epoch):
    print(f'epoch {i+1} start')
    s_gnn.fit(learngraph, learnlabel)
    s_learn_loss.append(s_gnn.loss(learngraph, learnlabel))
    s_valid_loss.append(s_gnn.loss(validgraph, validlabel))
    s_learn_prob.append(s_gnn.predict_prob(learngraph, learnlabel))
    s_valid_prob.append(s_gnn.predict_prob(validgraph, validlabel))
# +
# Three panels for the Swish run: full loss curve, loss without the
# epoch-0 point, and accuracy ("correct rate") per epoch.
plt.style.use('ggplot')
plt.figure(figsize=(18, 5))
plt.subplot(1, 3, 1)
plt.plot(s_learn_loss, label='learning data')
plt.plot(s_valid_loss, label='validation data')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss by Swish')
plt.legend()
plt.subplot(1, 3, 2)
# Drop the first (untrained) point so the trained epochs are readable.
plt.plot(s_learn_loss[1:], label='learning data')
plt.plot(s_valid_loss[1:], label='validation data')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss by Swish')
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(s_learn_prob, label='learning data')
plt.plot(s_valid_prob, label='validation data')
plt.xlabel('epoch')
# Fix: the "by Swish" qualifier was attached to the y-axis label instead
# of the title, inconsistent with the two loss panels and the ReLU figure.
plt.ylabel('correct rate')
plt.title('Correct answer rate by Swish')
plt.legend()
plt.show()
# Final (last-epoch) metrics of the Swish run.
print(f'learn loss: {s_learn_loss[-1]}')
print(f'valid loss: {s_valid_loss[-1]}')
print(f'learn prob: {s_learn_prob[-1] * 100} %')
print(f'valid prob: {s_valid_prob[-1] * 100} %')
# -
# ### Compare both
# +
# Overlay the ReLU and Swish runs: loss (full and without the epoch-0
# point) plus accuracy, then print the final value of every curve.
plt.style.use('ggplot')
plt.figure(figsize=(18, 5))
plt.subplot(1, 3, 1)
plt.plot(learn_loss, label='ReLU learning')
plt.plot(valid_loss, label='ReLU validation')
plt.plot(s_learn_loss, label='Swish learning')
plt.plot(s_valid_loss, label ='Swish validation')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss')
plt.legend()
plt.subplot(1, 3, 2)
plt.plot(learn_loss[1:], label='ReLU learning')
plt.plot(valid_loss[1:], label='ReLU validation')
plt.plot(s_learn_loss[1:], label='Swish learning')
plt.plot(s_valid_loss[1:], label ='Swish validation')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Average loss')
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(learn_prob, label='ReLU learning')
plt.plot(valid_prob, label='ReLU validation')
plt.plot(s_learn_prob, label='Swish learning')
plt.plot(s_valid_prob, label='Swish validation')
plt.xlabel('epoch')
plt.ylabel('correct rate')
plt.title('Correct answer rate')
plt.legend()
plt.show()
print(f'ReLU learn loss: {learn_loss[-1]}')
print(f'ReLU valid loss: {valid_loss[-1]}')
print(f'ReLU learn prob: {learn_prob[-1] * 100} %')
print(f'ReLU valid prob: {valid_prob[-1] * 100} %')
print(f'Swish learn loss: {s_learn_loss[-1]}')
print(f'Swish valid loss: {s_valid_loss[-1]}')
print(f'Swish learn prob: {s_learn_prob[-1] * 100} %')
print(f'Swish valid prob: {s_valid_prob[-1] * 100} %')
| src/fitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cluster Analysis
#
# The Iris dataset was used in Ronald Fisher's classic 1936 paper, The Use of Multiple Measurements in Taxonomic Problems, and can also be found on the [UCI](https://archive.ics.uci.edu/ml/datasets/iris) Machine Learning Repository.
#
# It includes three iris species with 50 samples each as well as some properties about each flower. One flower species is linearly separable from the other two, but the other two are not linearly separable from each other.
#
# The columns in this dataset are:
#
# - Id
# - SepalLengthCm
# - SepalWidthCm
# - PetalLengthCm
# - PetalWidthCm
# - Species
#
#
# ## Load and Inspect Data
#
# 1. Load the file for this week's analysis:
# ```
# iris.csv
# ```
# 2. Measure the correlation coefficient between the features
# - plot a correlation heatmap and/or a scatter matrix
#
#
# ## K-Means Cluster Analysis
#
# 1. Use K-Means clustering with a cluster count of k=3
# 2. Compare the results of the clustering to the actual labels
# 3. Evaluate the results using the following metrics:
# - Homogeneity
# - Completeness
# - V Measure
# - Silhouette
# 3. Use the elbow method to decide the optimal k (time permitting)
#
# ## Hierarchical Clustering
#
# Use 3 clusters to perform a Hierarchical clustering analysis. Run the cluster analysis using the following Linkage techniques:
# - **Ward**: minimize variance within clusters
# - **Complete**: minimize the maximum distances between pairs
# - **Average**: minimize average distances between points
# - **Single**: minimize distance between closest points from a pair of clusters
#
# ## Summarize Results
#
# Compare the results of all techniques.
import os
import pandas as pd
import numpy as np
import statsmodels.api as sm
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import prettytable
# %matplotlib inline
# ## Custom Evaluation Function
#
# You can use the following function to evaluate the accuracy of the model:
from sklearn import metrics
def evaluate(model, s, labels_true):
    """Score a fitted clustering model against ground-truth labels.

    Prints a table of the four scores and returns them as a dict with
    keys 'homogeneity', 'completeness', 'v_measure' and 'silhouette'.

    model       -- fitted clusterer exposing ``labels_``
    s           -- feature matrix (used for the silhouette score)
    labels_true -- ground-truth class labels
    """
    labels_pred = model.labels_
    scores = {
        'homogeneity': metrics.homogeneity_score(labels_true, labels_pred),
        'completeness': metrics.completeness_score(labels_true, labels_pred),
        'v_measure': metrics.v_measure_score(labels_true, labels_pred),
        'silhouette': metrics.silhouette_score(s, labels_pred),
    }
    table = prettytable.PrettyTable(['metric', 'value'])
    for name, value in scores.items():
        # 'v_measure' -> 'V Measure', 'homogeneity' -> 'Homogeneity', etc.
        table.add_row([name.replace('_', ' ').title(), value])
    print(table)
    return scores
location = '../../data/'
files = os.listdir(location)
files
# ## Load and Inspect Data
#
# 1. Load the file for this week's analysis:
# ```
# iris.csv
# ```
# 2. Measure the correlation coefficient between the features
# - plot a correlation heatmap and/or a scatter matrix
# Load the iris data and inspect the first rows.
df = pd.read_csv(location + 'iris.csv')
df.head()
# Treat the species column as categorical (used later for plot coloring
# and for integer codes via .cat.codes).
df['class'] = df['class'].astype('category')
# ### Compute Correlation Matrix
#
# Take the following steps:
#
# 1. Create a dataframe ```s``` containing the raw data without a class label
# 2. Compute the correlation matrix rounded to 2 digits to produce a nicely annotated correlation plot
s = df[['sepal_length','sepal_width','petal_length','petal_width']]
C = round(s.corr(),2)
C
labels = list(C.columns.values)
labels
# +
import plotly.figure_factory as ff
# Annotated heatmap of the pairwise feature correlations.
fig = ff.create_annotated_heatmap(C.values, x=labels, y=labels,
                                  colorscale=["blue", "white", "red"])
fig.show()
# -
# ### Scatter Matrix
#
# Produce a scatter matrix with color separation by class.
fig = px.scatter_matrix(df,
color='class',
title='Scatter Matrix of Iris Features')
fig.update_layout(
dragmode='select',
width=800,
height=800,
hovermode='closest',
)
fig.show()
# ## K-Means Cluster Analysis
#
# 1. Use K-Means clustering with a cluster count of k=3
# 2. Compare the results of the clustering to the actual labels
# 3. Use the elbow method to decide the optimal k
from sklearn.cluster import KMeans
k=3
model = KMeans(n_clusters=k).fit(s)
model.cluster_centers_
# ### Compare The Results to Actual Labels
#
# We'll place the centroids into a dataframe. Append that DataFrame to the data and plot a new scatter plot.
# Put the k-means centroids into a DataFrame with the same feature columns.
k_means_df = pd.DataFrame(model.cluster_centers_, columns=s.columns)
k_means_df
# ### Note
#
# The centroids appear quite distinct for each class.
# Label each centroid so it is distinguishable in the scatter matrix.
k_means_df['class'] = ['k_center_0','k_center_1','k_center_2']
k_means_df
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported way to stack the centroids under the data.
tmp = pd.concat([df, k_means_df])
tmp.tail()
fig = px.scatter_matrix(tmp,
color='class',
title='Scatter Matrix of Iris Features')
fig.update_layout(
dragmode='select',
width=800,
height=800,
hovermode='closest',
)
fig.show()
# ### Interpretation
#
# It looks like the following labels were inferred by the K-Means algorithm:
#
# - 0: <NAME>
# - 1: <NAME>
# - 2: <NAME>
# # Evaluate The Results
#
# 1. Homogeneity -- expect 0 to 1
# 2. Completeness -- expect 0 to 1
# 3. V Measure -- expect 0 to 1
# 4. Silhouette -- expect -1 to 1
labels_pred = model.predict(s)
labels_true = df['class'].values
# ### Homogeneity
#
# Each cluster contains only members of a single class:
metrics.homogeneity_score(labels_true, labels_pred)
# ### Completeness
metrics.completeness_score(labels_true, labels_pred)
# ### V Measure
#
# Balance between homogeneity and completeness
#
# $$
# v = 2 \cdot \frac{\text{homogeneity} \times \text{completeness}}{\text{homogeneity} + \text{completeness}}
# $$
metrics.v_measure_score(labels_true, labels_pred, beta=1.0)
# ### Silhouette
#
# The silhouette looks at the ratio of
#
# $$
# s_i = \frac{\bar{b}_i - \bar{a}_i}{max(a_i,b_i)}
# $$
# Integer codes for the true classes.
# NOTE(review): labels_true_code is displayed but not used by the
# silhouette score below, which uses the predicted labels.
labels_true_code = df['class'].cat.codes.values
labels_true_code
metrics.silhouette_score(s,labels_pred)
# +
from yellowbrick.cluster import SilhouetteVisualizer
# Specify the features to use for clustering
features = list(s.columns)
X = s.values
# Instantiate the clustering model and visualizer
#model = KMeans(5, random_state=42)
# Per-sample silhouette plot for the already-fitted k-means model.
visualizer = SilhouetteVisualizer(model, colors='yellowbrick')
visualizer.fit(X)        # Fit the data to the visualizer
visualizer.show()        # Finalize and render the figure
# -
# # Interpretation
#
# If we recall the labels inferred above:
#
# - 0: <NAME>
# - 1: <NAME>
# - 2: <NAME>
#
# It looks like the Iris Setosa is the most clearly delineated category.
#
#
# ### Use the Evaluate Function
k_means_eval = evaluate(model, s, labels_true)
# # Elbow method
# # Inertia
#
# Distance within cluster sum of squares
#
# $$
# \sum_{i=0}^n min(||x_i - \mu_j||^2)
# $$
#
# +
# Candidate cluster counts for the elbow method.
K = list(range(2,10))
I = []   # inertia (within-cluster sum of squares) per k
CH = []  # Calinski-Harabasz score per k
# Fix: CH was appended to below without ever being initialized, which
# raised a NameError on the first iteration. Also dropped the unused
# labels_pred = kmeans.predict(s) call (its result was never read).
for k in K:
    kmeans = KMeans(n_clusters=k).fit(s)
    I += [kmeans.inertia_]
    ch = metrics.calinski_harabasz_score(s, kmeans.labels_)
    CH += [ch]
# -
fig = px.line(x=K,y=I,
labels={'x':'Cluster Count (k)',
'y':'Inertia'},
title='Elbow Method Using Inertia')
fig.show()
# # Hierarchical Clustering
#
# Use 3 clusters to perform a Hierarchical clustering analysis. Run the cluster analysis using the following Linkage techniques:
#
# - **Ward**: minimize variance within clusters
# - **Complete**: minimize the maximum distances between pairs
# - **Average**: minimize average distances between points
# - **Single**: minimize distance between closest points from a pair of clusters
#
from sklearn.cluster import AgglomerativeClustering
# +
# Fit agglomerative clustering once per linkage strategy, plot each
# result, and collect the evaluation metrics for the summary below.
results = []
k=3
techniques = ['ward','single','average','complete']
for technique in techniques:
    model = AgglomerativeClustering(n_clusters=k,
                                    linkage=technique).fit(s)
    # Cluster sizes: degenerate linkages show up as very uneven bins.
    print('-------\n%s\nbin count: %s' % (technique, np.bincount(model.labels_)))
    fig = px.scatter_matrix(s,
                            color=model.labels_,
                            color_continuous_scale=['red','green'],
                            title='Scatter Matrix of Iris Features<br>Linkage: %s' % technique)
    fig.update_layout(
        dragmode='select',
        width=600,
        height=600,
        hovermode='closest',
    )
    fig.show()
    print('------\nresult\n%s' % technique)
    # evaluate() prints the metric table and returns the scores as a dict.
    r = evaluate(model,s, df['class'].values)
    r['technique'] = technique
    results += [r]
    print('======')
# -
# # Summarize
#
# Let's summarize the performance of the various models:
k_means_eval['technique'] = 'K-Means'
results += [k_means_eval]
res = pd.DataFrame(results)
res
fig = px.bar(res, x='technique',y='v_measure')
fig.show()
| 2_post_class/week_7/7_0_cluster_done.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the FIFA 21 player dataset; low_memory=False reads the whole file
# in one pass so mixed-type columns get a single inferred dtype.
fifa = pd.read_csv("D:/Work/Project FIFA/fifaset1.csv",low_memory=False)
fifa.head()
fifa.shape
fifa.info()
# Players whose card version is "Icon".
fifa.loc[fifa['VER'] == 'Icon']
# NOTE(review): RAT is compared as the string '90', so the column is
# presumably read as text — confirm; if numeric, this matches nothing.
fifa.loc[fifa['RAT'] == '90']
# NOTE(review): 'H \ H' contains a literal backslash+space; verify the
# dataset really stores work rates in that exact format.
fifa.loc[fifa['WR'] == 'H \ H']
| Personal Projects/FIFA21/FIFA Player Lookout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
import matplotlib.patches as mpatches
import lightgbm as lgb
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
df_train = pd.read_csv("statistics_10_train.csv" , sep = ',')
df_test = pd.read_csv("statistics_10_test.csv" , sep = ',')
X_train = df_train[['Kurtosis', 'Impulse factor', 'RMS', 'Margin factor', 'Skewness',
'Shape factor', 'Peak to peak', 'Crest factor']].values
y_train = df_train['Tipo'].values
X_test = df_test[['Kurtosis', 'Impulse factor', 'RMS', 'Margin factor', 'Skewness',
'Shape factor', 'Peak to peak', 'Crest factor']].values
y_test = df_test['Tipo'].values
from hyperopt import fmin, atpe, tpe, STATUS_OK, STATUS_FAIL, Trials
from hyperopt import hp
from hyperopt import space_eval
class HPOpt(object):
    """Hyperparameter-optimization helper around hyperopt's fmin.

    Holds a fixed train/test split and exposes objective methods
    (currently ``lgb_clas``) that fmin minimizes by name.
    """

    def __init__(self, x_train, x_test, y_train, y_test):
        # Keep references to the split used by every objective evaluation.
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test

    def process(self, fn_name, space, trials, algo, max_evals):
        """Run fmin over `space` using the objective method named `fn_name`.

        Returns (best_params, trials) on success, or a status dict with
        the exception text if the search raises.
        """
        objective = getattr(self, fn_name)
        try:
            best = fmin(fn=objective, space=space, algo=algo,
                        max_evals=max_evals, trials=trials)
        except Exception as e:
            return {'status': STATUS_FAIL,
                    'exception': str(e)}
        return best, trials

    def lgb_clas(self, para):
        """Objective: build a LightGBM classifier from sampled params."""
        classifier = lgb.LGBMClassifier(**para['clas_params'])
        return self.train_clf(classifier, para)

    def train_clf(self, clf, para):
        """Fit `clf` with early stopping and score it on the test split."""
        clf.fit(self.x_train, self.y_train,
                eval_set=[(self.x_train, self.y_train),
                          (self.x_test, self.y_test)],
                verbose = False, early_stopping_rounds = 20)
        predictions = clf.predict(self.x_test)
        return {'loss': para['loss_func'](self.y_test, predictions),
                'status': STATUS_OK}
# +
from sklearn.metrics import accuracy_score
lgb_clas_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.001, 0.5, 0.001)),
'max_depth': hp.choice('max_depth', np.arange(5, 10, 1, dtype=int)),
'min_child_weight': hp.choice('min_child_weight', np.arange(0, 10, 1)),
'min_data_in_leaf': hp.choice('min_data_in_leaf', np.arange(0, 10, 1)),
'subsample': hp.choice('subsample', np.arange(0.1, 1, 0.05)),
'n_estimators': hp.choice('n_estimators', np.arange(10, 200, 10, dtype=int)),
'num_leaves': hp.choice('num_leaves', np.arange(5, 51, 1, dtype=int)),
}
lgb_para = dict()
lgb_para['clas_params'] = lgb_clas_params
lgb_para['loss_func' ] = lambda y, pred: accuracy_score(y, pred)# squared = False)
lgb_para["max_evals"] = 100
# +
# Optimización
obj = HPOpt(X_train, X_test, y_train, y_test)
lgb_opt = obj.process(fn_name='lgb_clas', space=lgb_para, trials=Trials(), algo=tpe.suggest, max_evals=lgb_para["max_evals"])
parametros = space_eval(lgb_clas_params, lgb_opt[0])
# -
# Train a classifier with the parameters found by the hyperopt search.
clf = lgb.LGBMClassifier()
clf.set_params(**parametros)
clf.fit(X_train, y_train)
# (These two imports duplicate the ones at the top of the notebook.)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
pred = clf.predict(X_test)
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred))
# NOTE(review): this second model discards the tuned 'parametros' and
# uses hand-picked values — presumably a manual baseline; confirm intent.
clf = lgb.LGBMClassifier(n_estimators = 100, learning_rate = 0.01, min_data_in_leaf = 0)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
target_names = ['Inner', 'Outer', 'Healthy']
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred, target_names = target_names))
# Same report on the training split, to gauge overfitting.
pred_train = clf.predict(X_train)
print(confusion_matrix(y_train, pred_train))
print(classification_report(y_train, pred_train, target_names = target_names))
| FailurePrediction/VariableRotationalSpeed/MachineLearningModels/lightgbm_hyperopt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mariajessica-garcia/CPEN-21A-ECE-2-1/blob/main/Demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="3LoaNUuw2nDL"
# ##Intro to Python Programming
# + colab={"base_uri": "https://localhost:8080/"} id="8fCuvjho2q7L" outputId="6d53841a-e3fe-4439-b42a-25e7b4145efc"
#Python Indentation
if 5>2:
print("five is greater than two")
# + [markdown] id="_euQCBPK76TA"
# ##Python Variable
#
# + colab={"base_uri": "https://localhost:8080/"} id="UaMfMFO27-HA" outputId="aa473e1f-6c75-4dd5-8e37-0ad8d774ca5c"
x=1
a, b=0, 1
a,b,c="zero","one","two"
print(x)
print(a)
print(b)
print(c)
# + colab={"base_uri": "https://localhost:8080/"} id="PeRUG_VL3Qa3" outputId="4a52f727-1d80-4eee-90bb-eb7a761df50d"
d="Maria" #This is a string
D="Jessica"
print(d)
e="Meri"
print(e)
print(D)
# + colab={"base_uri": "https://localhost:8080/"} id="SVKnAwfK4bUx" outputId="d1b76d27-a97f-4642-f3fa-df9483509d58"
print(type(d)) #This is a type function
print(type(x))
# + [markdown] id="v5vabO6U4mc9"
# ##Casting
# + colab={"base_uri": "https://localhost:8080/"} id="OgiSjq0T4u9s" outputId="882dc075-b244-4fea-f951-76f3cbae2893"
f=float(4)
print(f)
g=float(5)
g=int(5)
print(g)
# + [markdown] id="FfI9sY9k47gy"
# ##Multiple Variables in One Value
# + colab={"base_uri": "https://localhost:8080/"} id="8mRIG0Qz4-rJ" outputId="74a6d238-bcac-477f-e3d7-e592cf1aeebc"
x=y=z="four"
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="QZzb4PwM5L5J" outputId="ae2f6f82-2a3e-4cfb-a73e-33b98ddc3477"
x="enjoying"
print("Python Programming is" " " + x)
# + [markdown] id="8Y1zg5Qe5LYw"
# ##Operation in Python
# + colab={"base_uri": "https://localhost:8080/"} id="7mC-sSaN5Vvw" outputId="d46af8a4-abc5-4345-99d5-6ebab6418e15"
x=5
y=7
x+= y #This is the same as x = x + y
print(x+y)
print(x*y)
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="7lTXvvhG5k5Y" outputId="517a16c9-0b63-40bd-fac3-e01f7989c3f0"
x=5
y=7
not(x>y or y==x)
# + colab={"base_uri": "https://localhost:8080/"} id="VLp48HZ25vcj" outputId="828c1f64-1f60-4542-d46d-4c62425aec37"
x is not y
# + colab={"base_uri": "https://localhost:8080/"} id="I67ftVA-5sY3" outputId="8ef8b0e4-0a37-4d17-ea3b-22ecd0a5437c"
x is y
| Demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import datasets
from sklearn.datasets import make_classification
from dpm.models import SoftmaxRegression
from dpm.distributions import Categorical
from dpm.visualize import plot_stats
import torch
# Fit the project's SoftmaxRegression on the iris data (4 features, 3 classes).
iris = datasets.load_iris()
X = iris.data
y = iris.target
X.shape, y.shape
model = SoftmaxRegression(input_dim=4, output_shape=3)
stats = model.fit(X, y, epochs=5000)
plot_stats(stats)
model(X)[0].shape
# Training accuracy on the same data it was fitted on.
y_pred = model.predict(X)
(y_pred.numpy() == y).mean()
y_pred
# Repeat on a harder synthetic problem: 50 features, 3 classes.
x, y = make_classification(n_samples=500, n_features=50, n_informative=45, n_classes=3)
x = torch.tensor(x).float()
y = torch.tensor(y).view(-1).float()
x.shape, y.shape
model = SoftmaxRegression(input_dim=x.size(1), output_shape=[3])
stats = model.fit(x, y, epochs=5000)
plot_stats(stats)
y_pred = model.predict(x)
# Training accuracy on the synthetic data.
(y_pred.float() == y.float()).float().mean()
y_pred
y
| Notebooks/Models/SoftmaxRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (AnomalyCableDetection)
# language: python
# name: anomalycabledetection
# ---
# # Time Series Analysis
# +
from pathlib import Path
import os
os.chdir(Path(os.getcwd()).parent)
# -
from AnomalyCableDetection.load import Loader, Preprocessor
from AnomalyCableDetection.stl import CableSTL
from AnomalyCableDetection.plot import *
# ### Load dataset
# Load the raw cable-tension dataset, one DataFrame per date.
loader = Loader(iqr=True, pre_version=-1)
dates = loader.get_dates()
tension_dict = loader.load_raw_dataset()
first_df = tension_dict[dates[0]]
cable_names = first_df.columns.tolist()
length = len(first_df.index.to_list())
# ### STL
# NOTE(review): CableSTL(3600, 0, length, 'raw') — 3600 presumably is the
# seasonal period in samples (one hour?); confirm against CableSTL's API.
worker = CableSTL(3600, 0, length, 'raw')
# Restrict the analysis to these four consecutive days.
dates = ['2006-05-13', '2006-05-14', '2006-05-15', '2006-05-16']
# +
# STL decomposition for cable SJS08, one run per day.
cable = 'SJS08'
for date in dates:
    tension_df = tension_dict[date]
    stl_df = worker.get_cable_stl(tension_df, cable, date)
# +
# Same decomposition for the companion cable SJX08.
cable = 'SJX08'
for date in dates:
    tension_df = tension_dict[date]
    stl_df = worker.get_cable_stl(tension_df, cable, date)
# -
| example/STL/2. stl decomposition - SJ08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Basic Tensorflow
#
# This notebook will familiarize you with the **basic concepts**
# of Tensorflow. Each of these concepts could be extended into
# its own notebook(s) but because we want to do some actual
# machine learning later on, we only briefly touch on each of
# the concepts.
#
# Table of Contents:
#
# - [ 1 The Graph](#1-The-Graph)
# - [ 2 The Session](#2-The-Session)
# - [ 3 The Shapes](#3-The-Shapes)
# - [ 4 Variables – bonus!](#4-Variables-%E2%80%93-bonus!)
import tensorflow as tf
# Always make sure you are using running the expected version.
# There are considerable differences between versions...
# We tested this with version 1.4.X
tf.__version__
# # 1 The Graph
#
# Most important concept with Tensorflow : There is a Graph to which
# tensors are attached. This graph is never specified explicitly but
# has important consequences for the tensors that are attached to it
# (e.g. you cannot connect two tensors that are in different graphs).
#
# The python variable "tensor" is simply a reference to the actual
# tensor in the Graph. More precisely, it is a reference to an operation
# that will produce a tensor (in the Tensorflow Graph, the nodes are
# actually operations and the tensors "flow" on the edges between
# the nodes...)
#
# Important note : There is a new simplification of the execution theme
# presented in this notebook :
# [Tensorflow Eager](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/1_basics.ipynb)
# -- But since this feature is currently in alpha state and most code
# still uses the graph/session paradigm, we won't use the new execution
# style (anyways, if you can use the old style the new simplified style
# should be a welcome and straight forward simplification...)
# There is always a "graph" even if you haven't defined one.
tf.get_default_graph()
# Store the default graph in a variable for exploration.
graph = tf.get_default_graph()
# Ok let's try to get all "operations" that are currently defined in this
# default graph.
# Remember : Placing the caret at the end of the line and typing <tab> will
# show an auto-completed list of methods...
graph.get_
# +
# Let's create a separate graph:
graph2 = tf.Graph()
# Try to predict what these statements will output.
print tf.get_default_graph() == graph
print tf.get_default_graph() == graph2
with graph2.as_default():
print tf.get_default_graph() == graph
print tf.get_default_graph() == graph2
# +
# We define our first TENSOR. Fill in your favourite numbers
# You can find documentation to this function here:
# https://www.tensorflow.org/versions/master/api_docs/python/tf/constant
# Try to change data type and shape of the tensor...
favorite_numbers = tf.constant([13, 22, 83])
print favorite_numbers
# (Note that this only prints the "properties" of the tensor
# and not its actual value -- more about this strange behavior
# in the section "The Session".)
# +
# Remember that graph that is always in the background? All the
# tensors that you defined above have been duefully attached to the
# graph by Tensorflow -- check this out:
# (Also note how the operations are named by default)
graph.get_operations() # Show graph operations.
# -
# Note that above are the OPERATIONS that are the nodes in the
# graph (in our the case the "Const" operation creates a constant
# tensor). The tensors themselves are the EDGES between the nodes,
# and their name is usually the operation's name + ":0".
favorite_numbers.name
# +
# Let's say we want to clean up our experimental mess...
# Search on Tensorflow homepage for a command to "reset" the graph:
# https://www.tensorflow.org/api_docs/
# YOUR ACTION REQUIRED:
# Find the right Tensorflow command to reset the graph.
tf.
tf.get_default_graph().get_operations()
# -
# Important note: "resetting" didn't clear our original graph but
# rather replace it with a new graph:
tf.get_default_graph() == graph
# +
# Because we cannot define operations across graphs, we need to
# redefine our favorite numbers in the context of the new
# graph:
favorite_numbers = tf.constant([13, 22, 83])
# +
# Now let's do some computations. Actually we don't really execute
# any computation yet (see next section "The Session" for that), but
# rather define how we intend to do computation later on...
# We first multiply our favorite numbers with our favorite multiplier:
favorite_multiplier = tf.constant(7)
# Do you have an idea how to write below multiplication more succinctly?
# Try it! (Hint: operator overloading)
favorite_products = tf.multiply(favorite_multiplier, favorite_numbers)
print 'favorite_products.shape=', favorite_products.shape
# Now we want to add up all the favorite numbers to a single scalar
# (0-dim tensor).
# There is a Tensorflow function for this. It starts with "reduce"...
# (Use <tab> auto-completion and/or tensorflow documentation)
# YOUR ACTION REQUIRED:
# Find the correct Tensorflow command to sum up the numbers.
favorite_sum = tf.
print 'favorite_sum.shape=', favorite_sum.shape
# +
# Because we really like our "first" favorite number we add this number
# again to the sum:
favorite_sum_enhanced = favorite_sum + favorite_numbers[0]
# See how we used Python's overloaded "+" and "[]" operators?
# You could also define the same computation using Tensorflow
# functions only:
# favorite_sum_enhanced = tf.add(favorite_sum, tf.slice(favorite_numbers, [0], [1]))
# +
# Of course, it's good practice to avoid a global invisible graph, and
# you can use a Python "with" block to explicitly specify the graph for
# a codeblock:
with tf.Graph().as_default():
within_with = tf.constant([1, 2, 3], name='within_with')
print 'within with:'
print tf.get_default_graph()
print within_with
print tf.get_default_graph().get_operations()
print '\noutside with:'
print tf.get_default_graph()
print within_with
print tf.get_default_graph().get_operations()
# You can execute this cell multiple times without messing up any graph.
# Note that you won't be able to connect the tensor to other tensors
# because we didn't store a reference to the graph of the with statement.
# +
# %%writefile _derived/2_visualize_graph.py
# (Written into separate file for sharing between notebooks.)
# Let's visualize our graph!
# Tip: to make your graph more readable you can add a
# name="..." parameter to the individual Ops.
# src: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb
import numpy as np
import tensorflow as tf
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
    """Return a copy of graph_def with large constant payloads removed.

    Const nodes whose tensor content exceeds max_const_size bytes have
    that content replaced by a short placeholder string, so the
    serialized graph stays small enough to embed in a notebook.
    """
    stripped = tf.GraphDef()
    for node in graph_def.node:
        copy = stripped.node.add()
        copy.MergeFrom(node)
        if copy.op == 'Const':
            tensor = copy.attr['value'].tensor
            nbytes = len(tensor.tensor_content)
            if nbytes > max_const_size:
                tensor.tensor_content = "<stripped %d bytes>"%nbytes
    return stripped
def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline via TensorBoard's renderer.

    graph_def: a tf.Graph or a GraphDef proto.
    max_const_size: constants larger than this many bytes are stripped
        before serialization so the embedded pbtxt stays small.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # Fix: the quote escaping had degraded to the no-op replace('"', '"'),
    # leaving raw quotes that would terminate the srcdoc attribute early.
    # HTML-escape them as &quot; (as in the upstream deepdream notebook).
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# +
# (Load code from previous cell -- make sure to have executed above cell first.)
# %run -i _derived/2_visualize_graph.py
show_graph(tf.get_default_graph())
# -
# # 2 The Session
#
# So far we have only setup our computational Graph -- If you want to actually
# *do* any computations, you need to attach the graph to a Session.
# The only difference to a "normal" session is that the interactive
# session registers itself as default so .eval() and .run() methods
# know which session to use...
interactive_session = tf.InteractiveSession()
# Hooray -- try printing other tensors of above to see the intermediate
# steps. What is their type and shape ?
print favorite_sum.eval()
# +
# Note that the session is also connected to a Graph, and if no Graph
# is specified then it will connect to the default Graph. Try to fix
# the following code snippet:
graph2 = tf.Graph()
with graph2.as_default():
graph2_tensor = tf.constant([1])
with tf.Session() as sess:
print graph2_tensor.eval()
# -
# Providing input to the graph: The value of any tensor can be overwritten
# by the "feed_dict" parameter provided to Session's run() method:
a = tf.constant(1)
b = tf.constant(2)
a_plus_b = tf.add(a, b)
print interactive_session.run(a_plus_b)
print interactive_session.run(a_plus_b, feed_dict={a: 123000, b:456})
# It's good practice not to override just any tensor in the graph, but to
# rather use "tf.placeholder" that indicates that this tensor must be
# provided through the feed_dict:
placeholder = tf.placeholder(tf.int32)
placeholder_double = 2 * placeholder
# YOUR ACTION REQUIRED:
# Modify below command to make it work.
print placeholder_double.eval()
# # 3 The Shapes
#
# Another basic skill with Tensorflow is the handling of shapes. This
# sounds pretty simple but you will be surprised by how much time of
# your Tensorflow coding you will spend on massaging Tensors in the
# right form...
#
# Here we go with a couple of exercises with increasing difficulty...
#
# Please refer to the Tensorflow documentation
# [Tensor Transformations](https://www.tensorflow.org/versions/master/api_guides/python/array_ops#Shapes_and_Shaping)
# for useful functions.
tensor12 = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
print tensor12
batch = tf.placeholder(tf.int32, shape=[None, 3])
print batch
# Tensor must be of same datatype. Try to change the datatype
# of one of the tensors to fix the ValueError...
multiplier = tf.constant(1.5)
# YOUR ACTION REQUIRED:
# Fix error below.
tensor12 * multiplier
# What does tf.squeeze() do? Try it out on tensor12_3!
tensor12_3 = tf.reshape(tensor12, [3, 2, 2, 1])
# YOUR ACTION REQUIRED:
# Checkout the effects of tf.squeeze()
print tensor12_3.shape
# +
# This cell is about accessing individual elements of a 2D tensor:
batch = tf.constant([[1, 2, 3, 0, 0],
[2, 4, 6, 8, 0],
[3, 6, 0, 0, 0]])
# Note that individual elements have lengths < batch.shape[1] but
# are zero padded.
lengths = tf.constant([3, 4, 2])
# The FIRST elements can be accessed by using Python's
# overloaded bracket indexing OR the related tf.slice():
print 'first elements:'
print batch[:, 0].eval()
print tf.slice(batch, [0, 0], [3, 1]).eval()
# -
# Accessing the LAST (non-padded) element within every sequence is
# somewhat more involved -- you need to specify both the indices in
# the first and the second dimension and then use tf.gather_nd():
# YOUR ACTION REQUIRED:
# Provide the correct expressions for indices_0 and indices_1.
indices_0 =
indices_1 =
print 'last elements:'
print tf.gather_nd(batch, tf.transpose([indices_0, indices_1])).eval()
# Below you have an integer tensor and then an expression that is set True
# for all elements that are odd. Try to print those elements using the
# operations tf.where() and tf.gather()
numbers = tf.range(1, 11)
odd_condition = tf.logical_not(tf.equal(0, tf.mod(numbers, 2)))
# YOUR ACTION REQUIRED:
# Provide the correct expressions for odd_indices and odd_numbers.
odd_indices =
odd_numbers =
print odd_numbers.eval()
# +
# "Dynamic shapes" : This feature is mainly used for variable size batches.
# "Dynamic" means that one (or multiple) dimensions are not specified
# before graph execution time (when running the graph with a session).
batch_of_pairs = tf.placeholder(dtype=tf.int32, shape=(None, 2))
# Note how the "unknown" dimension displays as a "?".
print batch_of_pairs
# +
# So we want to reshape the batch of pairs into a batch of quadruples.
# Since we don't know the batch size at runtime we will use the special
# value "-1" (meaning "as many as needed") for the first dimension.
# (Note that this wouldn't work for batch_of_triplets.)
# YOUR ACTION REQUIRED:
# Complete next line.
batch_of_quadruples = tf.reshape(batch_of_pairs,
# Test run our batch of quadruples:
print batch_of_quadruples.eval(feed_dict={
batch_of_pairs: [[1,2], [3,4], [5,6], [7,8]]})
# +
# Dynamic shapes cannot be accessed at graph construction time;
# accessing the ".shape" attribute (which is equivalent to the
# .get_shape() method) will return a "TensorShape" with "Dimension(None)".
batch_of_pairs.shape
# i.e. .shape is a property of every tensor that can contain
# values that are not specified -- Dimension(None)
# -
# i.e. first dimension is dynamic and only known at runtime
batch_of_pairs.shape[0].value == None
# +
# The actual dimensions can only be determined at runtime
# by calling tf.shape() -- the output of the tf.shape() Op
# is a tensor like any other tensor whose value is only known
# at runtime (when also all dynamic shapes are known).
batch_of_pairs_shape = tf.shape(batch_of_pairs)
batch_of_pairs_shape.eval(feed_dict={
batch_of_pairs: [[1, 2]]
})
# i.e. tf.shape() is an Op that takes a tensor (that might have
# a dynamic shape or not) as input and outputs another tensor
# that fully specifies the shape of the input tensor.
# +
# So you think shapes are easy, right?
# Well... Then here we go with a real-world shape challenge!
#
# (You probably won't have time to finish this challenge during
# the workshop; come back to this later and don't feel bad about
# consulting the solution...)
#
# Imagine you have a recurrent neural network that outputs a "sequence"
# tensor with dimension [?, max_len, ?], where
# - the first (dynamic) dimension is the number of elements in the batch
# - the second dimension is the maximum sequence length
# - the third (dynamic) dimension is the number of numbers per element
#
# The actual length of every sequence in the batch (<= max_len) is also
# specified in the tensor "lens" (length=number of elements in batch).
#
# The task at hand is to extract the last (i.e. the lens[i]-th) element of every sequence.
# The resulting tensor "last_elements" should have the shape [?, ?],
# matching the first and third dimension of tensor "sequence".
#
# Hint: The idea is to reshape the "sequence" to "partially_flattened"
# and then construct a "idxs" tensor (within this partially flattened
# tensor) that returns the requested elements.
#
# Handy functions:
# tf.gather()
# tf.range()
# tf.reshape()
# tf.shape()
lens = tf.placeholder(dtype=tf.int32, shape=(None,))
max_len = 5
sequences = tf.placeholder(dtype=tf.int32, shape=(None, max_len, None))
# YOUR ACTION REQUIRED:
# Find the correct expressions for the tensors below.
batch_size =
hidden_state_size =
idxs =
partially_flattened =
last_elements =
sequences_data = [
[[1,1], [1,1], [2,2], [0,0], [0,0]],
[[1,1], [1,1], [1,1], [3,3], [0,0]],
[[1,1], [1,1], [1,1], [1,1], [4,4]],
]
lens_data = [3, 4, 5]
# Should output [[2,2], [3,3], [4,4]]
last_elements.eval(feed_dict={sequences: sequences_data, lens: lens_data})
# -
# # 4 Variables – bonus!
#
# So far all our computations have been purely stateless. Obviously,
# programming becomes much more fun once we add some state to our code...
# Tensorflow's **variables** encode state that persists between calls to
# `Session.run()`.
#
# The confusion with Tensorflow and variables comes from the fact that we
# usually "execute" the graph from within Python by running some nodes of
# the graph -- via `Session.run()` -- and that variable assignments are also
# encoded through nodes in the graph that only get executed if we ask the
# value of one of its descendants (see explanatory code below).
#
# Tensorflow's overview of
# [variable related functions](https://www.tensorflow.org/versions/r1.0/api_guides/python/state_ops#Variables),
# the
# [variable HOWTO](https://www.tensorflow.org/versions/r1.0/programmers_guide/variables),
# and the
# [variable guide](https://www.tensorflow.org/programmers_guide/variables).
#
# And finally some notes on [sharing variables](https://www.tensorflow.org/api_guides/python/state_ops#Sharing_Variables).
counter = tf.Variable(0)
increment_counter = tf.assign_add(counter, 1)
with tf.Session() as sess:
# Something is missing here...
# -> Search the world wide web for the error message...
# YOUR ACTION REQUIRED:
# Add a statement that fixes the error.
print increment_counter.eval()
print increment_counter.eval()
print increment_counter.eval()
# Same conditions apply when we use our global interactive session...
interactive_session.run([tf.global_variables_initializer()])
print increment_counter.eval()
# Execute this cell multiple times and note how our global interactive
# sessions keeps state between cell executions.
print increment_counter.eval()
# +
# Usually you would create variables with tf.get_variable() which makes
# it possible to "look up" variables later on.
# For a change let's not try to fix a code snippet but rather to make it
# fail:
# 1. What happens if the block is not wrapped in a tf.Graph()?
# 2. What happens if reuse= is not set?
# 3. What happens if dtype= is not set?
with tf.Graph().as_default():
with tf.variable_scope('counters'):
counter1 = tf.get_variable('counter1', initializer=1)
counter2 = tf.get_variable('counter2', initializer=2)
counter3 = tf.get_variable('counter3', initializer=3)
with tf.Session() as sess:
sess.run([tf.global_variables_initializer()])
print counter1.eval()
with tf.variable_scope('counters', reuse=True):
print tf.get_variable('counter2', dtype=tf.int32).eval()
| 60_Advanced_TensorFlow/workshops/extras/amld/notebooks/exercises/2_tf_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="EyNkbpW7ouEf"
#
# <td>
# <a target="_blank" href="https://labelbox.com" ><img src="https://labelbox.com/blog/content/images/2021/02/logo-v4.svg" width=256/></a>
# </td>
#
#
#
# -
# ----
# # Model Diagnostics Guide
#
#
# Throughout the process of training your machine learning (ML) model, you may want to investigate your model's failures in order to understand which areas need improvement. Looking at an error analysis after each training iteration can help you understand whether you need to revise your annotations, make your ontology more clear, or create more training data that targets a specific area.
# Labelbox now offers a Model Diagnostics tool that analyzes the performance of your model's predictions in a single interface.
# With Model Diagnostics, you can:
# * Inspect model behavior across experiments
# * Adjust model hyperparameters and visualize model failures
# * Use the Python SDK to create the analysis pipeline
#
# ## How it works
#
# Configuring Model Diagnostics is all done via the SDK. We have created a Google colab notebook to demonstrate this process. The notebook also includes a section that leverages MAL in order to quickly create ground truth annotations.
# An Experiment is a specific instance of a model generating output in the form of predictions.
# In Labelbox, the `Model` object represents your ML model and it is what you'll be performing experiments on. It references a set of annotations specified by an ontology.
# The `Model Run` object represents the experiment itself. It is a specific instance of a `Model` with preconfigured hyperparameters (training data). You can upload inferences across each `Model Run`, filter by IoU score, and compare your model's predictions against the annotations from your training data.
# + [markdown] id="1cPUzODPjf0r"
# ## Not ready to try with your own model?
#
# For an end-to-end example with an existing dataset check out this [notebook](https://colab.research.google.com/drive/1ZHCd0rWqsX4_sNaOq_ZQkdrHKEWAsrnU)
#
#
#
# + [markdown] id="stupid-court"
# # Steps
# 1. Make sure you are signed up for the beta. If not navigate here https://labelbox.com/product/model-diagnostics
# 2. Select a project
# 3. Export labels
# 4. Upload labels and predictions for Diagnostics
# ------
# + [markdown] id="subsequent-magic"
# ## Environment Setup
# * Install dependencies
# + id="voluntary-minister"
# !pip install "labelbox[data]"
# + id="wooden-worship"
# Run these if running in a colab notebook
COLAB = "google.colab" in str(get_ipython())
# + [markdown] id="latter-leone"
# * Import libraries
# + id="committed-richards"
import uuid
import numpy as np
from tqdm import notebook
import csv
import ndjson
import os
from labelbox import Client, LabelingFrontend, MALPredictionImport, OntologyBuilder
from labelbox.data.metrics.iou import data_row_miou
from labelbox.data.serialization import NDJsonConverter
from labelbox.data.annotation_types import (
ScalarMetric,
LabelList,
Label,
ImageData,
MaskData,
Mask,
Polygon,
Line,
Point,
Rectangle,
ObjectAnnotation
)
# + [markdown] id="alternate-promotion"
# * Configure client
# + id="economic-chase"
API_KEY = None
client = Client(api_key = API_KEY)
# + [markdown] id="blessed-venture"
# ## Select a project
# + id="SzMRhPV4J5Bb"
project_id = None
# + id="qF_pOaoDhINR"
project = client.get_project(project_id)
ontology = project.ontology()
# + [markdown] id="hwtnJRmMJlPO"
# ## Export Labels
# + id="1kbmVPSxJaHw"
MAX_LABELS = 2000
# we have a limit of 2000 labels
labels = [l for idx, l in enumerate(project.label_generator()) if idx < MAX_LABELS]
# + [markdown] id="dated-burden"
# ## Create Predictions
# * Loop over data_rows, make predictions, and annotation types
# + id="asian-savings"
predictions = LabelList()
# Build one Label per exported ground-truth label, holding the model's
# predicted annotations for the same underlying image.
for label in notebook.tqdm(labels):
    annotations = []
    image = label.data
    ### --- replace (start) --- ###
    # Build a list of annotation objects from your model inferences
    # image.value is just the numpy array representation of the image
    # NOTE(review): `model` is a placeholder -- this template only runs once
    # it is replaced with a real model object.
    prediction = model.predict(image.value)
    # Iterate of segmentation channels or instances depending on your model architecture ( or both )
    for instance, (xmin, ymin, xmax, ymax), seg, class_idx in prediction:
        # NOTE(review): `class_names` and `grayscale_color` are also
        # placeholders the user must supply.
        class_name = class_names.get(class_idx)
        # Construct the right annotation value (pick one of the following) and append to list of annotations
        # See annotation types notebooks for more on how to construct these objects
        # https://github.com/Labelbox/labelbox-python/tree/develop/examples/annotation_types
        # NOTE(review): as written, each assignment below overwrites the
        # previous one and only the Mask value survives -- keep exactly the
        # one line matching your model's output type.
        value = Polygon(points = [Point(x = x, y = y) for x,y in instance])
        value = Rectangle(start = Point(x = xmin, y = ymin), end = Point(x=xmax, y=ymax))
        value = Point(x=x, y =y)
        value = Line(points = [Point(x = x, y = y) for x,y in instance])
        value = Mask(mask = MaskData.from_2D_arr(seg * grayscale_color), color = (grayscale_color,)* 3)
        annotations.append(ObjectAnnotation(name =class_name, value = value))
    ### --- replace (end) --- ###
    predictions.append(Label(data = image, annotations = annotations))
# -
# ## Prepare for upload
# * Add any missing URLs or references to Labelbox (data row ids)
# Sign locally-generated assets (e.g. mask PNGs) by uploading them to
# Labelbox, then resolve feature schema ids from the project ontology and
# attach the predictions to a dataset.
signer = lambda _bytes: client.upload_data(content=_bytes, sign=True)
# NOTE(review): `dataset` is never defined anywhere in this notebook --
# confirm which Dataset object the predictions should be added to.
predictions.add_url_to_masks(signer) \
           .add_url_to_data(signer) \
           .assign_feature_schema_ids(OntologyBuilder.from_project(project)) \
           .add_to_dataset(dataset, client.upload_data)
# + [markdown] id="smoking-catering"
# ## MEA
#
# Throughout the process of training your machine learning (ML) model, you may want to investigate your model’s failures in order to understand which areas need improvement. Looking at an error analysis after each training iteration can help you understand whether you need to revise your annotations, make your ontology more clear, or create more training data that targets a specific area.
#
#
#
#
#
# 1. Create a model
# * Think of this as a model that you want to perform experiments on
# 2. Create a model run
# * Think of this as a single experiment for a particular model.
# * E.g. this model run is for an instance of a model with particular hyperparameters
# 3. Select the ground truth annotations for analysis
# 4. Compute metrics
# 4. Upload model predictions and metrics to labelbox
# + id="mental-minnesota"
lb_model = client.create_model(name = f"{project.name}-model", ontology_id = project.ontology().uid)
lb_model_run = lb_model.create_model_run("0.0.0")
lb_model_run.upsert_labels([label.uid for label in labels])
# + id="committed-fairy"
# Pair each prediction with its ground-truth label via the data row uid,
# compute the mean IoU, and attach it to the prediction as a ScalarMetric
# so it can be filtered/sorted in the Model Diagnostics UI.
label_lookup = {label.data.uid : label for label in labels}
for pred in predictions:
    label = label_lookup.get(pred.data.uid)
    if label is None:
        # No label for the prediction..
        continue
    # data_row_miou returns None when there is nothing comparable between
    # the pair; such predictions get no metric attached.
    score = data_row_miou(label, pred)
    if score is None:
        continue
    pred.annotations.append(
        ScalarMetric(value = score)
    )
# + id="anonymous-addition"
upload_task = lb_model_run.add_predictions(f'mea-import-{uuid.uuid4()}', NDJsonConverter.serialize(predictions))
# + id="imAIXxCV_fG_"
upload_task.wait_until_done()
upload_task.state
# -
# ### Open Model Run
for idx, annotation_group in enumerate(lb_model_run.annotation_groups()):
if idx == 5:
break
print(annotation_group.url)
| examples/model_diagnostics/model_diagnostics_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Essential Operations of pandas
import pandas as pd
# #### 1. Read External Data
# +
# Reading a comma-separated file
df_gpus = pd.read_csv('All_GPUs.csv')
df_gpus.head()
# +
# Reading an Excel spreadsheet
df_sales = pd.read_excel('RegionSales.xlsx')
df_sales.head()
# -
# #### 2. Create Series
# +
# A Series built straight from an iterable gets a default RangeIndex.
series_integers = pd.Series(range(10))

# A Series built from a dict uses the keys as its index; the `name`
# argument labels the Series object itself.
squares = {n: n * n for n in range(1, 5)}
series_squares = pd.Series(squares, name='Squares')
series_squares
# -
# #### 3. Construct DataFrame
# +
# A dict of equal-length lists: each key becomes a column name and each
# list becomes that column's values.
dict_data = dict(a=[1, 2, 3], b=[4, 5, 6], c=[7, 8, 9])
df_data = pd.DataFrame(dict_data)
df_data
# +
# A list of row-lists: each inner list becomes one row of the DataFrame;
# columns get default integer labels (0, 1, 2).
list_data = [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
df_data = pd.DataFrame(list_data)
df_data
# -
# #### 4. Overview of a DataFrame
# How many rows and columns
df_sales.shape
# Display first 5 rows
df_sales.head()
# Display last 5 rows
df_sales.tail()
# Display random 5 rows
df_sales.sample(5)
# Get information on column data types and item counts
df_sales.info()
# Get descriptive stats of numeric values in dataset
df_sales.describe()
# #### 5. Rename Columns
# Rename columns using a dictionary
df_sales.rename({'OrderDate': 'Order_Date', 'UnitPrice': 'Unit_Price', 'TotalPrice': 'Total_Price'}, axis=1)
# - `axis=1` specifies that you are renaming columns
#
#
# - You can assign the dictionary to the `columns` argument alternatively.
#
#
# - The `rename` function creates a new DataFrame by default. Use `inplace=True` argument to rename the DataFrame inplace.
# +
df_sales.rename(columns={'Order_Date': 'OrderDate', 'Unit_Price': 'UnitPrice', 'Total_Price': 'TotalPrice'}, inplace=True)
df_sales.head()
# -
# #### 6. Sort Data
df_sales.sort_values(by=['Product','TotalPrice']).head(20)
# - The `sort_values` function sorts by rows (`axis=0`) by default.
#
#
# - The `sort_values` function creates a new sorted DataFrame object. For sorting to persist in the original DataFrame, use the `inplace=True` argument.
#
#
# - By default, sort order is ascending for all sorting keys. Use the `ascending=False` argument to switch to descending sort order. To specify mixed sort orders (i.e. some sort keys are ascending, and some are descending), you need to supply a list of boolean values to match the number of sorting keys (`by=['col1', 'col2', 'col3'], ascending=[True, False, True]`
# #### 7. Duplicates
# Check for duplicates using all columns in DataFrame
df_sales.duplicated().any()
# Check for duplicates using particular columns
df_sales.duplicated(['Product', 'Quantity'])
# Get the number of duplicates
df_sales.duplicated(['Product', 'Quantity']).sum()
# - When the argument `keep=False`, every duplicate is marked as `True`. Suppose there are 3 identical rows: with `keep=False`, all three are marked `True`. With `keep="first"` the first occurrence is kept (marked `False`) and the second and third are marked `True`; with `keep="last"` the last occurrence is kept and the first and second are marked `True`.
df_sales.duplicated(['Product', 'Quantity'], keep=False).sum()
df_sales.duplicated(['Product', 'Quantity'], keep='first').sum()
# +
duplicated_indices = df_sales.duplicated(['Product', 'Quantity'], keep=False)
df_sales.loc[duplicated_indices, :].sort_values(by=['Product', 'Quantity'])
# -
duplicated_indices.index
| essential_operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Text Normalization
# +
# Input text
paragraph="""<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
# Converting paragraph in lowercase
print(paragraph.lower())
# -
# ## Tokenization
# +
# Loading NLTK module
import nltk
# downloading punkt
nltk.download('punkt')
# downloading stopwords
nltk.download('stopwords')
# downloading wordnet
nltk.download('wordnet')
# downloading average_perception_tagger
nltk.download('averaged_perceptron_tagger')
# +
# Sentence Tokenization
from nltk.tokenize import sent_tokenize
paragraph="""<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
tokenized_sentences=sent_tokenize(paragraph)
print(tokenized_sentences)
# +
# Import spacy
import spacy
# Loading english language model
nlp = spacy.load("en_core_web_sm")
# Build the nlp pipe using 'sentencizer'
#sent_pipe = nlp.create_pipe('sentencizer')
# Append the sentencizer pipe to the nlp pipeline
#nlp.add_pipe(sent_pipe)
nlp.add_pipe('sentencizer')
paragraph = """<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
# Create nlp Object to handle linguistic annotations in a documents.
nlp_doc = nlp(paragraph)
# Generate list of tokenized sentence
tokenized_sentences = []
for sentence in nlp_doc.sents:
tokenized_sentences.append(sentence.text)
print(tokenized_sentences)
# +
# Import nltk word_tokenize method
from nltk.tokenize import word_tokenize
# Split paragraph into words
tokenized_words=word_tokenize(paragraph)
print(tokenized_words)
# +
# Import spacy
import spacy
# Loading english language model
nlp = spacy.load("en_core_web_sm")
paragraph = """<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
# Create nlp Object to handle linguistic annotations in a documents.
my_doc = nlp(paragraph)
# tokenize paragraph into words
tokenized_words = []
for token in my_doc:
tokenized_words.append(token.text)
print(tokenized_words)
# +
# Import frequency distribution
from nltk.probability import FreqDist
# Find frequency distribution of paragraph
fdist = FreqDist(tokenized_words)
# Check top 5 common words
fdist.most_common(5)
# +
# Import matplotlib
import matplotlib.pyplot as plt
# Plot Frequency Distribution
fdist.plot(20, cumulative=False)
plt.show()
# -
# ## Stopwords
# +
# Remove English stopwords from the previously tokenized paragraph.
# Requires `tokenized_words` from the tokenization section above and a
# prior nltk.download('stopwords').
# import the nltk stopwords
from nltk.corpus import stopwords
# Load english stopwords list
stopwords_set = set(stopwords.words("english"))
# Removing stopwords from text -- explicit-loop version
filtered_word_list = []
for word in tokenized_words:
    # keep only non-stopword tokens (comparison is case-sensitive)
    if word not in stopwords_set:
        filtered_word_list.append(word)
# print tokenized words
print("Tokenized Word List:", tokenized_words)
# print filtered words
print("Filtered Word List:", filtered_word_list)
# Same filtering expressed as a list comprehension; results are identical.
filtered_word_list2 = [word for word in tokenized_words if word not in stopwords_set]
# Bug fix: corrected the typo "Lis2" in the printed label.
print("Filtered Word List2:", filtered_word_list2)
# + pycharm={"name": "#%%\n"}
# Import spacy
import spacy
# Loading english language model
nlp = spacy.load("en_core_web_sm")
# text paragraph
paragraph = """<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
# Create nlp Object to handle linguistic annotations in a documents.
my_doc = nlp(paragraph)
# Removing stopwords from text
filtered_token_list=[]
for token in my_doc:
# filter stopwords
if token.is_stop==False:
filtered_token_list.append(token)
print("Filtered Word List:",filtered_token_list)
# -
# ## Stemming and Lemmatization
# +
# Import Lemmatizer
from nltk.stem.wordnet import WordNetLemmatizer
# Create lemmatizer object
lemmatizer = WordNetLemmatizer()
# Import Porter Stemmer
from nltk.stem.porter import PorterStemmer
# Create stemmer object
stemmer = PorterStemmer()
# take a sample word
sample_word = "crying"
print("Lemmatized Sample Word:", lemmatizer.lemmatize(sample_word,"v"))
print("Stemmed Sample Word:", stemmer.stem(sample_word))
# +
# Import english language model
import spacy
# Loading english language model
nlp = spacy.load("en_core_web_sm")
# Create nlp Object to handle linguistic annotations in documents.
words = nlp("cry cries crying")
# Find lemmatized word
for w in words:
print('Original Word: ', w.text)
print('Lemmatized Word: ',w.lemma_)
# -
# ## PoS Tagging
# +
# import Word Tokenizer and Pos Tagger
from nltk.tokenize import word_tokenize
from nltk import pos_tag
# Sample sentence
sentence = "<NAME> is one of the beautiful monument."
# Tokenize the sentence
sent_tokens = word_tokenize(sentence)
# Create PoS tags
sent_pos = pos_tag(sent_tokens)
# Print tokens with PoS
print(sent_pos)
# +
# Import english language model
import spacy
# Loading small english language model
nlp = spacy.load("en_core_web_sm")
# Create nlp Object to handle linguistic annotations in a documents.
sentence = nlp(u"<NAME> is one of the beautiful monument.")
for token in sentence:
print(token.text, token.pos_)
# -
# ## Entity Recognition
# +
# Import spacy
import spacy
# Load English model for tokenizer, tagger, parser, and NER
nlp = spacy.load('en_core_web_sm')
# Sample paragraph
paragraph = """<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
# Create nlp Object to handle linguistic annotations in documents.
docs=nlp(paragraph)
entities=[(i.text, i.label_) for i in docs.ents]
print(entities)
# -
# ## Dependency Parsing
# +
# Import display for visualizing the Entities
from spacy import displacy
# Visualize the entities using render function
displacy.render(docs, style = "ent",jupyter = True)
# +
# Import spacy
import spacy
# Load English model for tokenizer, tagger, parser, and NER
nlp = spacy.load('en_core_web_sm')
# Sample sentence
sentence="<NAME> is one of the beautiful monument."
# Create nlp Object to handle linguistic annotations in a documents.
docs=nlp(sentence)
# Visualize the using render function
displacy.render(docs, style="dep", jupyter= True, options={'distance': 150})
# -
# !pip install wordcloud
# ## WordCloud
# +
# importing all necessary modules
from wordcloud import WordCloud
from wordcloud import STOPWORDS
import matplotlib.pyplot as plt
stopword_list = set(STOPWORDS)
paragraph="""<NAME> is one of the beautiful monuments. It is one of the wonders of the world. It was built by <NAME> in 1631 in memory of his third beloved wife <NAME>."""
word_cloud = WordCloud(width = 550, height = 550,
background_color ='white',
stopwords = stopword_list,
min_font_size = 10).generate(paragraph)
# Visualize the WordCloud Plot
# Set wordcloud figure size
plt.figure(figsize = (8, 6))
# Show image
plt.imshow(word_cloud)
# Remove Axis
plt.axis("off")
# show plot
plt.show()
# -
# ## Sentiment Analysis using Text Classification
# ### Classification using Bag of Words
# #### Load the Dataset
# +
# Import libraries
import pandas as pd
# read the dataset
df=pd.read_csv('amazon_alexa.tsv', sep='\t')
# Show top 5-records
df.head()
# -
# #### Explore the dataset
# +
# Import seaborn
import seaborn as sns
import matplotlib.pyplot as plt
# Count plot
sns.countplot(x='feedback', data=df)
# Set X-axis and Y-axis labels
plt.xlabel('Sentiment Score')
plt.ylabel('Number of Records')
# Show the plot using show() function
plt.show()
# -
# #### Feature Generation using CountVectorizer
# +
# Import CountVectorizer and RegexTokenizer
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
# Create Regex tokenizer for removing special symbols and numeric values
# (keeps only runs of ASCII letters)
regex_tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
# Initialize CountVectorizer object
# NOTE(review): scikit-learn warns when a custom `tokenizer` is combined
# with `stop_words` -- stopword filtering is applied to the custom tokens;
# confirm this interaction is intended.
count_vectorizer = CountVectorizer(lowercase=True,
                                   stop_words='english',
                                   ngram_range = (1,1),
                                   tokenizer = regex_tokenizer.tokenize)
# Fit and transform the dataset: rows = reviews, columns = token counts
count_vectors = count_vectorizer.fit_transform(df['verified_reviews'])
# -
# #### Split train and test set
# +
# Import train_test_split
from sklearn.model_selection import train_test_split
# Partition data into training and testing set
from sklearn.model_selection import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(
count_vectors, df['feedback'], test_size=0.3, random_state=1)
# Mike scratch
import numpy as np
import seaborn as sns
bar = count_vectors[count_vectors != 0]
sns.displot(bar.transpose())
plt.show()
# -
# #### Classification Model Building using Logistic Regression
# +
# import logistic regression scikit-learn model
from sklearn.linear_model import LogisticRegression
# instantiate the model
logreg = LogisticRegression(solver='lbfgs')
# fit the model with data
logreg.fit(feature_train,target_train)
# Forecast the target variable for given test dataset
predictions = logreg.predict(feature_test)
# -
# #### Evaluate the Classification Model
# +
# Import metrics module for performance evaluation
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# Assess model performance using accuracy measure
print("Logistic Regression Model Accuracy:",accuracy_score(target_test, predictions))
# Calculate model precision
print("Logistic Regression Model Precision:",precision_score(target_test, predictions))
# Calculate model recall
print("Logistic Regression Model Recall:",recall_score(target_test, predictions))
# Calculate model f1 score
print("Logistic Regression Model F1-Score:",f1_score(target_test, predictions))
# -
# ### Classification using TF-IDF
# +
# Import TfidfVectorizer and RegexTokenizer
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
# Create Regex tokenizer for removing special symbols and numeric values
regex_tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
# Initialize TfidfVectorizer object
tfidf = TfidfVectorizer(lowercase=True,stop_words='english',ngram_range = (1,1),tokenizer = regex_tokenizer.tokenize)
# Fit and transform the dataset
text_tfidf= tfidf.fit_transform(df['verified_reviews'])
# Mike scratch
import numpy as np
import seaborn as sns
bar = text_tfidf[text_tfidf != 0].transpose()
#bar = text_tfidf.toarray().ravel()
sns.displot(bar)
plt.show()
# +
# Import train_test_split (duplicate import removed)
from sklearn.model_selection import train_test_split
# Partition the TF-IDF features and labels into a 70/30 train/test split,
# mirroring the split used for the count-vector features above.
feature_train, feature_test, target_train, target_test = train_test_split(
    text_tfidf, df['feedback'], test_size=0.3, random_state=1)
# +
# import logistic regression scikit-learn model
from sklearn.linear_model import LogisticRegression
# instantiate the model
logreg = LogisticRegression(solver='lbfgs')
# fit the model with data
logreg.fit(feature_train,target_train)
# Forecast the target variable for given test dataset
predictions = logreg.predict(feature_test)
# +
# Import metrics module for performance evaluation
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# Assess model performance using accuracy measure
print("Logistic Regression Model Accuracy:",accuracy_score(target_test, predictions))
# Calculate model precision
print("Logistic Regression Model Precision:",precision_score(target_test, predictions))
# Calculate model recall
print("Logistic Regression Model Recall:",recall_score(target_test, predictions))
# Calculate model f1 score
print("Logistic Regression Model F1-Score:",f1_score(target_test, predictions))
# -
# ## Text Similarity
# +
# Import spacy
import spacy
# Load English model for tokenizer, tagger, parser, and NER
nlp = spacy.load('en_core_web_sm')
# Create documents
doc1 = nlp(u'I love pets.')
doc2 = nlp(u'I hate pets')
# Find similarity
print(doc1.similarity(doc2))
# +
def jaccard_similarity(sent1, sent2):
    """Return the Jaccard similarity of two sentences.

    Each sentence is tokenized on whitespace and the score is the size of
    the intersection of the two token sets divided by the size of their
    union (a value in [0, 1]).
    """
    tokens_a = set(sent1.split())
    tokens_b = set(sent2.split())
    shared = tokens_a & tokens_b
    combined = tokens_a | tokens_b
    # Jaccard index: |A intersect B| / |A union B|
    # (the original comment mislabeled this as cosine similarity)
    return len(shared) / len(combined)
# Call function
jaccard_similarity('I love pets.','I hate pets.')
# +
# Let's import text feature extraction TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
docs=['I love pets.','I hate pets.']
# Initialize TfidfVectorizer object
tfidf= TfidfVectorizer()
# Fit and transform the given data
tfidf_vector = tfidf.fit_transform(docs)
# Import cosine_similarity metrics
from sklearn.metrics.pairwise import cosine_similarity
# compute similarity using cosine similarity
cos_sim=cosine_similarity(tfidf_vector, tfidf_vector)
print(cos_sim)
# -
| Chapter12/Ch-12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seasonal Autoregressive Integrated Moving Average (SARIMA)
#
# The <a href="https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average">ARIMA</a> model is a generalisation of an ARMA model that can be applied to non-stationary time series.
#
# The SARIMA model is a modified version of ARIMA that accounts for seasonality in the time series.
# +
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
# -
matplotlib.rcParams['figure.figsize'] = (16, 9)
pd.options.display.max_columns = 999
# ## Load Dataset
df = pd.read_csv('../datasets/hourly-weather-wind_direction.csv', parse_dates=[0], index_col='DateTime')
print(df.shape)
df.head()
# ## Define Parameters
#
# Make predictions for 24-hour period using a training period of four weeks.
dataset_name = 'Hourly Weather Wind Direction'
dataset_abbr = 'HWD'
model_name = 'SARIMA'
context_length = 24*7*4 # Four weeks
prediction_length = 24
# ## Define Error Metric
#
# The seasonal variant of the mean absolute scaled error (MASE) will be used to evaluate the forecasts.
def calc_sMASE(training_series, testing_series, prediction_series, seasonality=prediction_length):
    """Seasonal Mean Absolute Scaled Error (sMASE).

    Scales the mean absolute forecast error by the mean absolute error of a
    naive seasonal forecast (each value predicted by the value one season
    earlier) computed on the training series.
    """
    shifted = training_series.iloc[seasonality:].values
    baseline = training_series.iloc[:-seasonality].values
    # Mean absolute error of the naive seasonal forecast on the training data
    naive_mae = np.sum(np.abs(shifted - baseline)) / len(shifted)
    forecast_errors = np.abs(testing_series - prediction_series)
    return np.mean(forecast_errors) / naive_mae
# ## Example SARIMA Model
#
# Exploration of how SARIMA models work using a single example time series.
# +
ts_ex = 'ts10'
df_ex = df.loc[:, ts_ex]
# Plot data from first five days
df_ex.iloc[:24*5].plot();
# -
# ### Time Series Decomposition
#
# Decompose the example time series into trend, seasonal, and residual components.
fig = seasonal_decompose(df_ex.iloc[-500:], model='additive').plot()
# There doesn't appear to be a consistent trend. We can run a Dicky-Fuller test to confirm the stationarity.
dftest = adfuller(df_ex.iloc[-500:], autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
dfoutput
# The very low p-value confirms that the data is stationary. We can see that there is daily seasonality which we will capture in our SARIMA model.
# ### Plot ACF and PACF
#
# The <a href="https://en.wikipedia.org/wiki/Autocorrelation">Autocorrelation Function</a> (ACF) is the correlation of a signal with a delayed copy of itself as a function of delay.
#
# The <a href="https://en.wikipedia.org/wiki/Partial_autocorrelation_function">Partial Autocorrelation Function</a> (PACF) is the partial correlation of a signal with a delayed copy of itself, controlling for the values of the time series at all shorter delays, as a function of delay.
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex, lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex, lags=50, ax=ax[1])
# There is clearly daily seasonality. A seasonality of 24 hours will be used for the SARIMA model. Differencing by 24 hours helps remove the seasonality:
fig, ax = plt.subplots(2)
ax[0] = sm.graphics.tsa.plot_acf(df_ex.diff(24).dropna(), lags=50, ax=ax[0])
ax[1] = sm.graphics.tsa.plot_pacf(df_ex.diff(24).dropna(), lags=50, ax=ax[1])
fig = seasonal_decompose(df_ex.diff(24).dropna(), model='additive').plot()
# ### Build Model
#
# As SARIMA models can be slow to train, a SARIMA(1,1,1)(1,1,1)24 model will be used, as this should provide reasonable performance across the time series. Optimised forecasts could be obtained by using a grid search methodology to derive the best performing parameters, as demonstrated in the ARIMA and ARIMAX notebooks, but this would be at the expense of much greater training times.
def runSARIMA(time_series, test_length=prediction_length, train_length=context_length):
    """Fit a SARIMA(1,1,1)(1,1,1,24) model on the trailing window of a series.

    The last ``train_length + test_length`` points are taken; the model is fit
    on the first ``train_length`` of them and predictions are produced over the
    whole window. Returns ``(forecast DataFrame, fit summary)``.
    """
    window = time_series.iloc[-(test_length + train_length):]
    train = window.iloc[:-test_length]
    test = window.iloc[-test_length:]
    fitted = sm.tsa.SARIMAX(
        train,
        order=(1, 1, 1),
        seasonal_order=(1, 1, 1, 24),
        enforce_stationarity=False,
        enforce_invertibility=False,
    ).fit()
    fit_summary = fitted.summary()
    # In-sample + out-of-sample predictions, starting at the third training
    # timestamp because differencing leaves no predictions for the first lags.
    preds = fitted.predict(start=train.index[2], end=test.index[-1])
    # Pad the two missing leading values with the first available prediction
    # so the forecast aligns with the full window index.
    padded = np.concatenate([np.repeat(preds.iloc[0], 2), preds])
    fcst = pd.DataFrame(data=padded, index=window.index, columns=['pred%s' % window.name[2:]])
    return fcst, fit_summary
import warnings
warnings.filterwarnings('ignore')
# %%time
fcst, summary = runSARIMA(df_ex)
df_ex = pd.concat([df_ex, fcst], axis=1)
print(summary)
# +
# Example forecast
fcst0 = df_ex.copy()
fcst0['pred%s' % ts_ex[2:]][fcst0['pred%s' % ts_ex[2:]] < 0] = 0
fcst0.iloc[-4*prediction_length:, 0].plot(label='Actual', c='k', alpha=0.5)
fcst0.iloc[-4*prediction_length:, 1].plot(label='SARIMA(1,1,1)(1,1,1)24', c='b', alpha=0.5)
plt.axvline(x=fcst0.index[-prediction_length], linestyle=':', linewidth=2, color='r', label='Start of test data')
plt.legend()
plt.title(ts_ex);
# -
# ## Evaluating SARIMA
#
# To evaluate SARIMA, forecasts will be generated for each time series using the SARIMA(1,1,1)(1,1,1)24 approach shown above (with subsequent zeroing of the negative values). sMASE will be calculated for each individual time series, and the mean of all these scores will be used as the overall accuracy metric for SARIMA on this dataset.
# +
results = df.iloc[-(prediction_length+context_length):].copy()
tic = time()
for i, col in enumerate(df.columns):
if i % 10 == 0:
toc = time()
print("Running predictions for {}. Cumulative time: {:.1f} minutes.".format(col, (toc-tic)/60))
# Prepare DataFrame for selected column
dft = df.loc[:, col]
# Find best model
fcst, summary = runSARIMA(dft)
# Add predictions to results DataFrame
results['pred%s' % col[2:]] = fcst.values
toc = time()
print("Finished! Total run time: {:.1f} minutes.".format((toc-tic)/60))
# -
results0 = results.copy()
results0[results0 < 0] = 0
results0.head()
sMASEs = []
for i, col in enumerate(df.columns):
sMASEs.append(calc_sMASE(results0[col].iloc[-(context_length + prediction_length):-prediction_length],
results0[col].iloc[-prediction_length:],
results0['pred%s' % str(i+1)].iloc[-prediction_length:]))
fig, ax = plt.subplots()
ax.hist(sMASEs, bins=20)
ax.set_title('Distributions of sMASEs for {} dataset'.format(dataset_name))
ax.set_xlabel('sMASE')
ax.set_ylabel('Count');
sMASE = np.mean(sMASEs)
print("Overall sMASE: {:.4f}".format(sMASE))
# Show some example forecasts.
# +
fig, ax = plt.subplots(5, 2, sharex=True)
ax = ax.ravel()
for col in range(1, 11):
ax[col-1].plot(results0.index[-prediction_length:], results0['ts%s' % col].iloc[-prediction_length:],
label='Actual', c='k', linestyle='--', linewidth=1)
ax[col-1].plot(results0.index[-prediction_length:], results0['pred%s' % col].iloc[-prediction_length:],
label='SARIMA(1,1,1)(1,1,1)24', c='b')
ax[9].legend()
fig.suptitle('{} Predictions'.format(dataset_name));
# -
# Store the predictions and accuracy score for the SARIMA models.
# +
import pickle
with open('{}-sMASE.pkl'.format(dataset_abbr), 'wb') as f:
pickle.dump(sMASE, f)
with open('../_results/{}/{}-results.pkl'.format(model_name, dataset_abbr), 'wb') as f:
pickle.dump(results.iloc[-prediction_length:], f)
# -
| SARIMA/hourly-weather-wind_direction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # data review
import os
import pandas as pd
app_class = pd.read_csv('/home/opendata/mobile/raw_data/app_class.csv',header=None)
app_class
app_class_catelog = pd.read_csv('/home/opendata/mobile/raw_data/app_class_catelog.csv',header=None)
app_class_catelog
day01 = pd.read_csv('/home/opendata/mobile/raw_data/day01.txt',header=None)
day01
columns = ['uid','appid','app_type','start_day','start_time','end_day','end_time','duration','up_flow','down_flow']
day01.columns = columns
day01.head()
day02 = pd.read_csv('/home/opendata/mobile/raw_data/day02.txt',header=None)
day02.head()
columns = ['uid','appid','app_type','start_day','start_time','end_day','end_time','duration','up_flow','down_flow']
day02.columns = columns
day02.head()
day_list = [day01,day02]
columns = ['uid','appid','app_type','start_day','start_time','end_day','end_time','duration','up_flow','down_flow']
for i in range(3,8):
tmp = pd.read_csv('/home/opendata/mobile/raw_data/day0%s.txt'%i,header=None)
tmp.columns = columns
day_list.append(tmp)
flow1 = pd.read_csv('../data/flow1.csv')
flow1
flow2 = pd.read_csv('../data/flow2.csv')
flow2
freq = pd.read_csv('../data/freq1_7.csv')
freq
mone1 = pd.read_csv('../data/mone1.csv')
mone1
mone2 = pd.read_csv('../data/mone2.csv')
mone2
# # linear regression
import statsmodels.api as sm
import numpy as np
reg_df = freq.copy(deep=True)
for df in [flow1,flow2,mone1,mone2]:
reg_df = pd.merge(reg_df,df,on='app_class')
reg_df
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
x_names = ['flow1','flow2','mone1','mone2']
y = reg_df['frequency']
X = reg_df[x_names]
X = sm.add_constant(X)
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# # 方差分析
day01.describe()
day01.boxplot()
app_count = app_class.groupby(1).count()[0]
app_class_names = app_count.index
app_count_dict = dict(zip(app_class_names,list(app_count)))
app_count_dict
appid2class = dict(zip(list(app_class[0]),list(app_class[1])))
appid2class
def find_user2(df):
    """Return the app class of the app this user's single longest session was on.

    Picks the appid of the maximum-duration row and maps it to its class via
    the module-level ``appid2class`` dict; unmapped apps yield 'unknown'.
    """
    # idxmax returns the row label of the first maximum (skipping NaN),
    # replacing the original O(n) list(...).index(...) round-trip.
    top_appid = df.loc[df['duration'].idxmax(), 'appid']
    # dict.get replaces the non-idiomatic `in appid2class.keys()` check.
    return appid2class.get(top_appid, 'unknown')
def find_user3(df):
    """Return the hour of day (int) at which this user's longest session started."""
    durations = list(df['duration'])
    peak_pos = durations.index(df['duration'].max())
    start = list(df['start_time'])[peak_pos]
    hour, _, _ = start.partition(':')
    return int(hour)
def find_app1(df):
    """Total number of (non-null) usage records for this app across all users."""
    per_user_counts = df.groupby('uid')['duration'].count()
    return per_user_counts.sum()
def find_app8(df):
    """Hour of day (0-23) when this app's single longest session started."""
    peak = df['duration'].max()
    position = list(df['duration']).index(peak)
    start_time = df['start_time'].iloc[position]
    return int(start_time.split(':', 1)[0])
def id_2_count(id_):
    """Return the number of apps that share app ``id_``'s class.

    Resolves the app's class through the module-level ``appid2class`` mapping
    and reads the class size from ``app_count_dict``; apps whose class is
    unknown get the fallback value 2000.
    """
    # Membership test on the dict itself, not `.keys()` (same semantics,
    # idiomatic and avoids building a view object).
    if id_ in appid2class:
        return app_count_dict[appid2class[id_]]
    return 2000
# Build one feature dict per day for users and for apps; each dict later
# becomes a DataFrame and the seven days are merged on uid/appid.
user_database = []
app_database = []
for day,df in enumerate(day_list):
    # user1: number of distinct apps the user opened that day
    user1 = df.groupby(['uid']).apply(lambda x:len(set(list(x['appid']))))
    user_one_day ={
        'uid':user1.index,
        'user1_%d'%day:list(user1),
        # class of the app with the user's longest single session
        'user2_%d'%day:list(df.groupby('uid').apply(find_user2)),
        # start hour of the user's longest session
        'user3_%d'%day:list(df.groupby('uid').apply(find_user3)),
        # number of usage records for the user that day
        'user4_%d'%day:list(df.groupby('uid')['appid'].count()),
        # mean session duration for the user
        'user5_%d'%day:list(df.groupby('uid')['duration'].mean()),
    }
    user_database.append(user_one_day)
    # app2: mean session duration per app (also provides the appid index
    # reused by the other per-app features below)
    app2 = df.groupby('appid')['duration'].mean()
    app_one_day ={
        'appid':app2.index,
        'app1_%d'%day:list(df.groupby('appid').apply(find_app1)),
        'app2_%d'%day:list(app2),
        # mean downstream traffic per app
        'app4_%d'%day:list(df.groupby('appid')['down_flow'].mean()),
        # number of distinct users of the app
        'app6_%d'%day:list(df.groupby('appid')['uid'].apply(lambda x:len(set(list(x))))),
        # size of the app's class (2000 for apps with unknown class)
        'app7_%d'%day:[id_2_count(id_) for id_ in app2.index],
        # start hour of the app's longest session
        'app8_%d'%day:list(df.groupby('appid').apply(find_app8)),
        # NOTE(review): app9 duplicates app2 (both are mean duration) -- confirm intended
        'app9_%d'%day:list(df.groupby('appid')['duration'].mean()),
        'app10_%d'%day:list(df.groupby('appid')['duration'].count()),
    }
    app_database.append(app_one_day)
# Merge the seven per-day tables into one table per entity type
# ('uid' and 'appid') and add week-average columns for each numeric feature.
# (user2 is categorical and therefore excluded from averaging.)
res = {}
feature_dict = {'uid':['user1','user3','user4','user5'],
                'appid':['app%s' % i for i in [1,2,4,6,7,8,9,10]]}
for key,database in {'uid':user_database,'appid':app_database}.items():
    for day,df in enumerate(database):
        df = pd.DataFrame(df)
        if day == 0:
            tmp = df.copy(deep = True)
        else:
            # inner merge on uid/appid keeps only entities present every day
            tmp = pd.merge(tmp,df,on=key)
        for feature in feature_dict[key]:
            if day == 0:
                tmp[feature] = tmp[feature+'_0']
            else:
                # running sum of the daily values...
                tmp[feature] += tmp[feature+'_%d' % day]
            if day == 6:
                # ...divided by 7 on the last day to get the weekly mean
                # NOTE(review): assumes exactly 7 days (day indices 0-6)
                tmp[feature] /= 7
    res[key] = tmp
res
res['uid'][['user1','user3','user4','user5']].describe()
res['uid'][['user1','user3']].boxplot()
res['appid'][['app%s' % i for i in [1,2,4,6,7,8,9,10]]].describe()
for j in [1,2,4,6,7,8]:
res['appid'][['app%s' % i for i in [j]]].boxplot()
plt.show()
map_d = ['a','b','c','n','r','d','e', 'f','g','h','i', 'j', 'k', 'l', 'm', 'o', 'p', 'q', 's', 't', 'unknown']
res['uid']['user2'] = res['uid']['user2_0'].apply(lambda x:map_d.index(x))
res['uid']['user2']
# # 画图及处理
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
import seaborn as sns
# 初始化参数
sns.set()
corr_data = np.corrcoef(res['uid'][['user1','user2','user3','user4','user5']],rowvar=0)
heatmap = sns.heatmap(corr_data)
plt.show()
corr_data = np.corrcoef(res['appid'][['app%s' % i for i in [1,2,4,6,7,8]]],rowvar=0)
heatmap = sns.heatmap(corr_data,)
plt.show()
# # 回归
y = res['uid']['user5']
X = res['uid'][['user2','user3','user4']]
X = sm.add_constant(X)
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# # Clustering (聚类)
# +
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
import numpy as np
# Partition the users into 4 clusters (the original comment said "2 classes",
# but n_clusters=4 is what is actually fitted); fixed random_state for
# reproducibility.
km = KMeans(n_clusters=4, random_state = 666)
X = res['uid'][['user1','user2','user3','user4','user5']]
# Standardize features to zero mean / unit variance before clustering
x_scale=scale(X=X,with_mean=True,with_std=True,copy=True)
y_pre = km.fit_predict(x_scale)
# -
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# c= colors the points by cluster label (y_pre/2 only rescales the labels).
# NOTE(review): the third positional argument of plt.scatter binds to the
# marker-size parameter, not a z coordinate -- for a true 3D scatter this
# should be ax.scatter(xs, ys, zs, ...); confirm intent.
plt.scatter(x_scale[:,0], x_scale[:,2], x_scale[:,3],c= y_pre/2)
plt.show()
# Attach cluster labels (the 'cluster_lable' spelling is kept because
# downstream cells use it) and count users per cluster.
res['uid']['cluster_lable'] = y_pre
res['uid'].groupby('cluster_lable')['uid'].count()
res['uid'].groupby('cluster_lable')[['user1','user3','user4','user5']].std()
for feature in ['user1','user3','user4','user5']:
data_list = [[] for i in range(4)]
for i in [1,2,3,4,5,6,7]:
for j in range(4):
data_list[j].append(res['uid'].groupby('cluster_lable')[feature+'_%s'%(i-1)].mean()[j])
for j in range(4):
plt.plot([1,2,3,4,5,6,7],data_list[j],label=j)
plt.legend(loc='best')
plt.title(feature)
plt.show()
for feature in app_class_names+['unknown']:
data_list = [[] for i in range(4)]
for i in [1,2,3,4,5,6,7]:
for j in range(4):
data_list[j].append(res['appid'].groupby('cluster_lable')[feature+'_%s'%(i-1)].mean()[j])
for j in range(4):
plt.plot([1,2,3,4,5,6,7],data_list[j],label=j)
plt.legend(loc='best')
plt.title(feature)
plt.show()
data_list
pd.DataFrame(km.cluster_centers_)
# Per-user spread and level of session durations on day 1.
# (The original statement ended with a dangling ``index=`` keyword, a syntax
# error; the two groupby results already share the uid index, so no explicit
# index argument is needed.)
test = pd.DataFrame({'std':day01.groupby('uid')['duration'].std(),
                     'mean':day01.groupby('uid')['duration'].mean()})
def find_app8(df):
    """Return the hour of day at which this app's longest session started."""
    idx = list(df['duration']).index(df['duration'].max())
    first_field = list(df['start_time'])[idx].split(':')[0]
    return int(first_field)
res =day01.groupby('appid').apply(find_app8)
res
day01.head()
set(list(res))
test = day01.groupby(['uid','appid'])['duration'].sum()
test['0000348F61881026FE0036840F25309F'][2548]
day01['start_time'].apply(lambda x:x[:2])
# +
test['0000348F61881026FE0036840F25309F']
# -
x = test.index
x.levels[0][1]
import pickle
with open('../data/res.pk','wb') as f:
pickle.dump(res,f)
cluster_res = res['uid'][['uid','cluster_lable']]
cluster_res.columns = ['uid','cluster_label']
cluster_res.to_csv('../data/cluster_label.csv',index=False)
cluster_dict = dict(zip(cluster_res['uid'],cluster_res['cluster_label']))
cluster_dict
for_interact = res['uid'][['uid','user2']]
for_interact['cluster_label'] = for_interact['uid'].apply(lambda x:cluster_dict[x])
for_interact['app_type'] = for_interact['user2'].apply(lambda x:map_d[x])
for_interact
for_interact.groupby(['cluster_label','app_type'])[['uid']].count().to_csv('../data/interact_count.csv')
import pickle
with open('../data/appid2class.pk','wb') as f:
pickle.dump(appid2class,f)
| data_analysis/HW2/data_preprocess_cluster_regression_hotmap_boxplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="bOChJSNXtC9g"
# # NumPy
# + [markdown] colab_type="text" id="OLIxEDq6VhvZ"
# In this lesson we will learn the basics of numerical analysis using the NumPy package.
#
# <img src="figures/numpy.png" width=300>
#
#
#
# + [markdown] colab_type="text" id="VoMq0eFRvugb"
# # NumPy basics
# + colab={} colab_type="code" id="0-dXQiLlTIgz"
# Let's make sure the libraries are installed
# #!pip install numpy
# Now import the libraries
import numpy as np
# + colab={} colab_type="code" id="bhaOPJV7WA0m"
# Set seed for reproducibility
np.random.seed(seed=1234)
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="23tSlin9aWZ8" outputId="4df1dbc0-77a1-4776-87d2-326b0bb0f79c"
# Scalars
x = np.array(6) # scalar
print ("x: ", x)
# Number of dimensions
print ("x ndim: ", x.ndim)
# Dimensions
print ("x shape:", x.shape)
# Size of elements
print ("x size: ", x.size)
# Data type
print ("x dtype: ", x.dtype)
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="ugIZprdIabFF" outputId="485b9e5e-176a-4ac3-b5ac-71a951470bf1"
# 1-D Array
x = np.array([1.3 , 2.2 , 1.7])
print ("x: ", x)
print ("x ndim: ", x.ndim)
print ("x shape:", x.shape)
print ("x size: ", x.size)
print ("x dtype: ", x.dtype) # notice the float datatype
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="SQI-T_4MbE9J" outputId="4eede496-01c0-4f83-d0d9-ffe073de8f9f"
# 2-D array (matrix)
x = np.array([[1,2,3], [4,5,6], [7,8,9]])
print ("x:\n", x)
print ("x ndim: ", x.ndim)
print ("x shape:", x.shape)
print ("x size: ", x.size)
print ("x dtype: ", x.dtype)
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="z2Qf8EKZln9j" outputId="45b92bcd-42a4-457b-ae34-494a1f214ffe"
# Functions
print ("np.zeros((2,2)):\n", np.zeros((2,2)))
print ("np.ones((2,2)):\n", np.ones((2,2)))
print ("np.eye((2)):\n", np.eye((2)))
print ("np.random.random((2,2)):\n", np.random.random((2,2)))
# + [markdown] colab_type="text" id="qVD-MCiCdcV9"
# # Indexing
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="vyt36kFOcVDX" outputId="d65f99dd-97ba-4df5-afa1-fc4305c0dc69"
# Indexing
x = np.array([1, 2, 3])
print ("x[0]: ", x[0])
x[0] = 0
print ("x: ", x)
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="qxHww0didni6" outputId="0cccf1f5-3372-4f75-baf9-095b546e9ced"
# Slicing
x = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print (x)
print ("x column 1: ", x[:, 1])
print ("x row 0: ", x[0, :])
print ("x rows 0,1,2 & cols 1,2: \n", x[:3, 1:3])
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="A52pzB9idyDE" outputId="c9cd9a42-8cbb-4459-b878-8969aaec1e95"
# Integer array indexing
print (x)
rows_to_get = np.arange(len(x))
print ("rows_to_get: ", rows_to_get)
cols_to_get = np.array([0, 2, 1])
print ("cols_to_get: ", cols_to_get)
print ("indexed values: ", x[rows_to_get, cols_to_get])
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="_R7O5WsVfDij" outputId="ab346ca7-5959-4bd2-cf02-9d2f9b253e72"
# Boolean array indexing
x = np.array([[1,2], [3, 4], [5, 6]])
print ("x:\n", x)
print ("x > 2:\n", x > 2)
print ("x[x > 2]:\n", x[x > 2])
# + [markdown] colab_type="text" id="77RCjrQ8gvYW"
# # Array math
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="1UJVcNCLfFrV" outputId="4284fa79-10e7-428a-f30e-7a59a16cf510"
# Basic math
x = np.array([[1,2], [3,4]], dtype=np.float64)
y = np.array([[1,2], [3,4]], dtype=np.float64)
print ("x + y:\n", np.add(x, y)) # or x + y
print ("x - y:\n", np.subtract(x, y)) # or x - y
print ("x * y:\n", np.multiply(x, y)) # or x * y
# + [markdown] colab_type="text" id="1BV0nSIliMC6"
# <img src="figures/matrix.png" width=400>
#
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="XyZVF6gXhTWd" outputId="8cbc328a-b5f3-416e-cf6f-a140aa208398"
# Dot product
a = np.array([[1,2,3], [4,5,6]], dtype=np.float64) # we can specify dtype
b = np.array([[7,8], [9,10], [11, 12]], dtype=np.float64)
print (a.dot(b))
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="7pB-H-7phsku" outputId="96a21872-164a-4b47-cd35-bf031a7421d3"
# Sum across a dimension
x = np.array([[1,2],[3,4]])
print (x)
print ("sum all: ", np.sum(x)) # adds all elements
print ("sum by col: ", np.sum(x, axis=0)) # add numbers in each column
print ("sum by row: ", np.sum(x, axis=1)) # add numbers in each row
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="pLDG49LrijgA" outputId="9fa3a3e1-6a33-4052-baea-3dd91649f193"
# Transposing
print ("x:\n", x)
print ("x.T:\n", x.T)
# + [markdown] colab_type="text" id="KdPKVKtwkWnw"
# # Advanced
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="U_j2fCcjkEyo" outputId="7df2d80f-7e31-4c01-e8e7-81c6986f6bd7"
# Tile
x = np.array([[1,2], [3,4]])
y = np.array([5, 6])
addent = np.tile(y, (len(x), 1))
print ("addent: \n", addent)
z = x + addent
print ("z:\n", z)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="1NsoFVo0mfQ4" outputId="3b2a830e-abcf-4e5d-e824-2b9ce142f166"
# Broadcasting
x = np.array([[1,2], [3,4]])
y = np.array([5, 6])
z = x + y
print ("z:\n", z)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="RdEHrnMTnO6k" outputId="881c6cf7-bfc7-4c41-ad70-68cd01b20656"
# Reshaping
x = np.array([[1,2], [3,4], [5,6]])
print (x)
print ("x.shape: ", x.shape)
y = np.reshape(x, (2, 3))
print ("y.shape: ", y.shape)
print ("y: \n", y)
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="tE1BmoJuns70" outputId="719b9e01-4428-4013-b413-d755d46a2e58"
# Removing dimensions
x = np.array([[[1,2,1]],[[2,2,3]]])
print ("x.shape: ", x.shape)
y = np.squeeze(x, 1) # squeeze dim 1
print ("y.shape: ", y.shape)
print ("y: \n", y)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="LNYJRMF4qvXN" outputId="be32eb32-2222-4178-fb52-4fc86d5f8df1"
# Adding dimensions
x = np.array([[1,2,1],[2,2,3]])
print ("x.shape: ", x.shape)
y = np.expand_dims(x, 1) # expand dim 1
print ("y.shape: ", y.shape)
print ("y: \n", y)
# + [markdown] colab_type="text" id="XthM4y7SotAH"
# # Additional Resources
# + [markdown] colab_type="text" id="3KmESFstrbFS"
# You don't have to memorize anything here and we will be taking a closer look at NumPy in the later lessons. If you are curious about more checkout the [NumPy reference manual](https://docs.scipy.org/doc/numpy-1.15.1/reference/).
| notebooks/10-steps-to-DS/03_NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LFPCA through all conditions of anesthetized monkey
# to run the monkey data through the analysis pipeline <br>
# eyes open, eyes closed, and anesthesized <br>
# write the code such that running through the notebook ONCE will reproduce all the results for all 3 datasets
# visualize the results and compare the different conditions
# +
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append('/Users/ldliao/Research/Projects/spectralCA/')
sys.path.append('/Users/ldliao/Research/Projects/spectralCA/notebooks_visualization/')
# sys.path.append('/Users/Lauren/Voytek/spectralCV')
# sys.path.append('/Users/rdgao/Documents/code/research/spectralCV')
# sys.path.append('/Users/rdgao/Documents/code/research/neurodsp/')
# sys.path.append('/Users/rdgao/Documents/code/research/fooof/')
# +
# %matplotlib inline
# imports
from sca_funcs import access_nt as asc
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import neurodsp as ndsp
# from fooof import FOOOFGroup
from sca_funcs import sca
# -
data_path ="/Users/ldliao/Research/Data/NeuroTycho/anesthesia/20120730PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128/Session%d/"
# data_path ="/Users/rdgao/Documents/data/NeuroTycho/Propofol/20120730PF_Anesthesia+and+Sleep_Chibi_Toru+Yanagawa_mat_ECoG128/Session%d/"
saveout_path = "../results/nt/"
# fooof wrapper
def fooof_lfpca(lfpca, background_mode, freq_range=[3,100]):
    """Fit a FOOOFGroup over the LFPCA PSDs and summarize the background fit.

    Parameters
    ----------
    lfpca : object exposing ``f_axis`` (frequencies) and ``psd`` (channel x freq)
    background_mode : 'fixed' or 'knee' -- FOOOF background model
    freq_range : frequency range (Hz) over which to fit

    Returns
    -------
    ndarray of shape (n_channels, 3) with columns slope, knee (all ones when
    ``background_mode`` is 'fixed'), and fit r-squared.
    """
    fg = FOOOFGroup(background_mode=background_mode, verbose=False)
    fg.fit(lfpca.f_axis, lfpca.psd, freq_range)
    results = fg.get_results()  # fetch once instead of three times
    slopes = np.array([r.background_params[-1] for r in results])
    # BUG FIX: compare strings with '==', not 'is' (identity); the original
    # only worked because CPython happens to intern short string literals.
    if background_mode == 'knee':
        knees = np.array([r.background_params[1] for r in results])
    else:
        knees = np.ones_like(slopes)
    r2s = np.array([r.r_squared for r in results])
    return np.array([slopes, knees, r2s]).T
# +
# example of using LFPCA
fs = 1000
analysis_param = {'nperseg': 1000,
'noverlap': 0,
'spg_outlierpct': 2.,
'max_freq':200}
session_indices = [(1,0,1), (1,2,3), (2,1,2), (2,3,4), (3,0,1)]
session_labels = ['EyesOpen', 'EyesClosed', 'Anes', 'RecEyesClosed', 'RecEyesOpen']
# fooof_conds = [('fixed',[3,100]),('knee',[3,100]),('fixed',[40,70])]
for i in range(0,len(session_indices)):
session_ind = session_indices[i]
session = session_ind[0]
start_ind = session_ind[1]
end_ind = session_ind[2]
chan = range(1,129)
#chan = [1,2]
# with specified indices
indices = asc.get_cond(data_path, session, start_ind, end_ind)
data = asc.get_ECoG(data_path, session, chan, indices)
# initialize object with data and parameters
nt_sca = sca.SCA(analysis_param)
nt_sca.populate_ts_data(data, fs)
# compute all spectral attributes
nt_sca.compute_all_spectral()
# compute KS-test fit against null exponential hypothesis
nt_sca.compute_KS_expfit() # this takes a few minutes
# save out
nt_sca.save_spec_vars('../results/nt/nt_sca%i_'%i+session_labels[i], save_spg=True)
# nt_fooof = np.zeros((nt_lfpca.numchan, 3, len(fooof_conds)))
# for fc in range(len(fooof_conds)):
# nt_fooof[:,:,fc] = fooof_lfpca(nt_lfpca, fooof_conds[fc][0], freq_range=fooof_conds[fc][1])
# np.savez('../results/nt/nt_fooof%i_'%i+session_labels[i],
# fooof_res=nt_fooof,
# fooof_bg_param=np.array([fooof_conds[fc][0] for fc in range(len(fooof_conds))]),
# fooof_freq_range=np.array([fooof_conds[fc][1] for fc in range(len(fooof_conds))])
# )
# -
pwd
# fooof lfpca
def fooof_monkey(nt_lfpca_monkey):
    # Run three FOOOF background fits per session: linear ('fixed') over the
    # default range, Lorentzian ('knee'), and 'fixed' restricted to 30-70 Hz.
    # NOTE(review): this unpacks two return values from fooof_lfpca, but the
    # fooof_lfpca defined above returns a single (n_chan, 3) array -- this
    # function appears to target an older fooof_lfpca API; confirm before use.
    all_bg_params = []
    all_r2 = []
    for i in range(5):  # the five recording conditions/sessions
        linear_bg_params, linear_r2 = fooof_lfpca(nt_lfpca_monkey[i],'fixed')
        lz_bg_params, lz_r2 = fooof_lfpca(nt_lfpca_monkey[i],'knee')
        noise_bg_params, noise_r2 = fooof_lfpca(nt_lfpca_monkey[i],'fixed', freq_range=[30,70])
        bg_params = (linear_bg_params, lz_bg_params, noise_bg_params)
        r2 = (linear_r2, lz_r2, noise_r2)
        all_bg_params.append(bg_params)
        all_r2.append(r2)
    return all_bg_params, all_r2
# NOTE(review): nt_lfpca_monkey is never defined in this notebook; as written
# this call raises NameError.
all_bg_params, all_r2 = fooof_monkey(nt_lfpca_monkey=nt_lfpca_monkey)
| notebooks_analysis/sca_anesthetized_monkey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Document Bootstrapping Examples
# When estimating machine learning or statistical models on your corpus, you may need to bootstrap documents (randomly sample with replacement). The `.bootstrap()` method of `DocTable` will act like a select statement but return a bootstrap object instead of a direct query result. Here I show how to do some basic bootstrapping using an example doctable.
import random
import pandas as pd
import numpy as np
import sys
sys.path.append('..')
import doctable as dt
# ### Create Example DocTable
# First we define a DocTable that will be used for examples.
# Table schema: (column type, column name, constraint kwargs) tuples in the
# format DocTable expects.
schema = (
    ('integer','id',dict(primary_key=True, autoincrement=True)),
    ('string','name', dict(nullable=False, unique=True)),
    ('integer','age'),
    ('boolean', 'is_old'),
)
# Use an in-memory SQLite target so the example leaves no files behind.
db = dt.DocTable(target=':memory:', schema=schema)
print(db)
# Then we add several example rows to the doctable.
# +
# Insert ten example users with random ages.
for i in range(10):
    age = random.random() # number in [0,1]
    is_old = age > 0.5
    row = {'name':'user_'+str(i), 'age':age, 'is_old':is_old}
    # 'name' is declared unique, so replace colliding rows on re-runs.
    db.insert(row, ifnotunique='replace')
# Show the first few rows to confirm the inserts.
for doc in db.select(limit=3):
    print(doc)
# -
# ### Create a Bootstrap
# We can use the doctable method `.bootstrap()` to return a bootstrap object using the keyword argument `n` to set the sample size (will use number of docs by default). This method acts like a select statement, so we can specify columns and use the where argument to choose the columns and rows to be bootstrapped. The bootstrap object contains the rows in the `.docs` property.
#
# Notice that while our select statement drew three documents, the sample size specified with `n` is 4. The bootstrap object will always return 4 objects, even though the number of underlying docs stays the same.
# Bootstrap-sample 4 rows (with replacement) from the name/age columns of
# every third row (id divisible by 3).
bs = db.bootstrap(['name','age'], where=db['id'] % 3 == 0, n=4)
print(type(bs))
print(len(bs.docs))  # number of distinct docs the select returned
bs.n                 # requested bootstrap sample size
# Use the bootstrap object as an iterator to access the bootstrapped docs. The bootstrap object draws a sample upon instantiation, so the same sample is maintained until reset.
# The sample is drawn at construction time, so both runs print identical docs.
print('first run:')
for doc in bs:
    print(doc)
print('second run:')
for doc in bs:
    print(doc)
# ### Draw New Sample
# You can reset the internal sample of the bootstrap object using the `.set_new_sample()` method. See that we now sample 2 docs and the output is different from previous runs. The sample will still remain the same each time we iterate until we reset the sample.
bs.set_new_sample(2)
print('first run:')
for doc in bs:
    print(doc)
print('second run:')
for doc in bs:
    print(doc)
# And we can iterate through a new sample using `.new_sample()`. Equivalent to calling `.set_new_sample()` and then iterating through elements.
print('drawing new sample:')
for doc in bs.new_sample(3):
    print(doc)
print('repeating sample:')
for doc in bs:
    print(doc)
# I may add additional functionality in the future if I use this in any projects, but that's it for now.
| examples/doctable_bootstrap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nhs-pycom/python-time/blob/master/Using_the_print_command.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="9J7p406abzgl"
# <img height="60px" src="https://colab.research.google.com/img/colab_favicon.ico" align="left" hspace="20px" vspace="5px">
#
# <h1>Welcome to Colaboratory!</h1>
# Colaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud. See our [FAQ](https://research.google.com/colaboratory/faq.html) for more info.
# + [markdown] colab_type="text" id="-Rh3-Vt9Nev9"
# ## Getting Started
# - [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)
#
# + [markdown] id="tR7_0pJyYSrR"
# # python-time
# # Using the 'print' command
#
# The 'print' statement is one of the simplest commands in Python.
# ```python
# print("your text goes here")
# ```
# It does exactly what you might expect, prints the words "*your text goes here*" verbatim onto the screen. We are going to be using print often as it is really useful to be able to spell out the results of our programme.
#
# Working in the NHS, the print function is also important for adding context to your analysis. For example, imagine we have developed some formula and the result python gives us is `0.26`. But 26 what? 26 minutes, 26 people? Who knows?
#
# Raw data from python is not always very informative on its own, so we need to add context and formatting to these results, to give readers a better understanding of the data.
#
# #### Some things to watch out for:
#
# - The `print` command has no capital letter.
# - The text in parentheses () is called a [string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str) (i.e., a sequence of any characters such as letters, numbers and symbols).
# - Strings need to be enclosed in "quotation marks".
#
# These simple rules for code are called the syntax. This is important as incorrect syntax will result in broken code that won’t work (i.e., a syntax error).
#
# # Today's Lesson:
#
# Try running the python code in this repository. You'll get some nasty looking red text with the word `SyntaxError`. Woops. Don't worry though, running into errors in your code happens all the time. Understanding errors and fixing them is integral to learning programming.
#
# This is typically how python programming works. You make a change to the code in the python file and then run the file to see what the results are. Make some changes, then run it again. Writing code is very iterative in this way, rather than more linear writing you might do for a report. You might end up running the python code hundreds of times to get it working just right. However, knowing how to deal with python code that is wrong is almost as important as getting the code right.
#
# `Task 1 - Fix the code below. Looks like someone forgot to add a closing parenthesis ')' to their print command, doh!`
# + id="-1agoA4EYNOy"
# print 'hello world' using python
# NOTE: the closing parenthesis is missing on purpose -- adding it is Task 1.
print("Hello World"
# + [markdown] id="-OiQ2MrRY4Mg"
# Once you have fixed the code, try running this python file again using the run button above.
#
# `Task 2 - Modify the code to output your own message.`
#
# `Task 3 - Add a comment with the hash (#) symbol to let others know what it does.`
#
# ## Real-World Application:
#
# The `print` statement can be useful for adding context to your analysis. For example, imagine the results of a calculation is `0.26`, this is not very informative on its own. Instead, we can add text (in the form of a python string) and formatting to these results to give readers a better understanding of the data:
# ```
# 26% of the GP registered population
# ```
#
#
| Using_the_print_command.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### The purpose of this notebook is to answer question two of my analysis questions:
#
# #### Which types of marijuana crimes did SFPD report each year? Comparing the types of incidents over time.
#import modules
import pandas as pd
import altair as alt
# Import our cleaned dataset that contains all of our marijuana incidents. We made this .csv file in the data_cleaning notebook.
# Load the pre-cleaned marijuana incidents; read every column as string first.
mari_incidents = pd.read_csv('all_data_marijuana.csv', dtype=str)
# Convert our incident dates to a datetime data format.
mari_incidents['incident_date'] = pd.to_datetime(mari_incidents['incident_date'])
# Check our date ranges
mari_incidents['incident_date'].min()
mari_incidents['incident_date'].max()
# Looks like we've got a full year of data for 2003, our earliest year. But since 2021 ends in October, we can't do full annual analysis on that year. So let's make a dataframe with our full years of data.
# Keep only complete calendar years: 2003-01-01 up to (not including) 2021.
full_years = mari_incidents[
    (mari_incidents['incident_date'] >= '2003-01-01') &
    (mari_incidents['incident_date'] < '2021-01-01')
].reset_index(drop=True)
full_years['incident_description'].unique()
# We can see here that the 'incident_description' column contains the information about which type of marijuana crime the police department logged in its incident database. And we can see from the unique entries in that column that each row only contains a single crime listed in the incident_description column. So while it's true that there might be multiple rows in our dataset that describe a single incident, we don't need to drop the duplicate incident numbers for this analysis. Because to capture all the marijuana crimes in a specific incident, there will be multiple rows for that incident.
#
# So now we're going to use groupby to count up the number of marijuana incidents in each category during the full duration of our data.
# Count rows per incident type across all full years.
description_counts_all = full_years.groupby(['incident_description']).count()
# Now let's isolate one column that we know won't have any null values: row_id
clean_description_counts_all = description_counts_all[['row_id']].copy()
clean_description_counts_all = clean_description_counts_all.reset_index()
#rename columns
clean_description_counts_all.columns = ['crime', 'number_of_incidents']
#sort by number of incidents
clean_description_counts_all = clean_description_counts_all.sort_values(by=['number_of_incidents'], ascending=False).reset_index(drop=True)
clean_description_counts_all
# Great! We can draw some conclusions from this data. It shows us that from 2003 to 2020, the San Francisco Police Department responded to thousands of marijuana-related incidents. Possession of marijuana was the type of crime that the police department dealt with the most, followed by possession of marijuana for sales.
# Here's a visualization of our data:
# Bar chart of total incidents per crime type.
# NOTE(review): sort='-x' orders the categories by the x field itself;
# sort='-y' may be what's intended (order bars by incident count) -- confirm.
alt.Chart(clean_description_counts_all).mark_bar().encode(
    x=alt.X('crime:O', sort='-x'),
    y='number_of_incidents'
).properties(
    title='San Francisco Police: Number of Marijuana Incidents 2003-2020'
)
# Now let's take a look at how the types of marijuana incidents actually changed year to year:
# Create a dataframe that has the number of incidents for each type of crime for each year:
# Group by incident type and calendar year ('A' = annual period end).
test = full_years.groupby(['incident_description', pd.Grouper(key='incident_date', axis=0, freq='A')]).count()
test
# Clean up that dataframe:
test_2 = test['row_id'].reset_index()
test_2
# Now I want to set up my dataframe so that the year is the row label and the type of crime is the column label and the values are the number of incidents.
incidents_per_year = test_2.pivot(index='incident_date',
                                  columns='incident_description',
                                  values='row_id',
                                  )
incidents_per_year
incidents_per_year = incidents_per_year.reset_index()
# Let's clean up this table a bit:
incidents_per_year['year'] = incidents_per_year['incident_date'].dt.year
# Keep the year plus one column per crime type, dropping the raw datetime.
incidents_per_year_final = incidents_per_year[
    ['year',
     'furnishing marijuana',
     'marijuana offense',
     'planting/cultivating marijuana',
     'possession of marijuana',
     'possession of marijuana for sales',
     'sale of marijuana',
     'transportation of marijuana'
    ]].copy()
incidents_per_year_final
# #### There's our table! It shows us the number of incidents in the San Francisco Police Department database for each type of marijuana crime in each year from 2003 to 2020!
# Generally speaking, the table looks good. One hiccup is that the 'marijuana offense' incident description didn't come into use until 2018, so we don't have a full history of data for that bucket. Also, an additional reporting question would be: did SFPD begin to code different types of marijuana crimes into the more general description 'marijuana offense' after 2018? For example, we're no longer seeing possession of marijuana crimes after 2019, but could those be showing up in this more general category?
# #### Now let's visualize our data!
# First, we've got to massage the shape of our data using the melt function.
# Reshape wide -> long for Altair (one row per year/crime combination).
chart_data = incidents_per_year_final.melt('year')
chart_data
# Now that we've got our data in the correct shape, we can make a multi series line chart!
# NOTE(review): melt names the variable column after the columns-index name,
# presumably 'incident_description' (inherited from the pivot) -- confirm.
alt.Chart(chart_data).mark_line().encode(
    x='year:O',
    y='value',
    color='incident_description'
).properties(
    title='San Francisco Police: Annual Marijuana Incident Types'
)
| 03_types_of_incidents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
from openbci_stream.acquisition import Cyton, OpenBCIConsumer
from openbci_stream.acquisition import CytonConstants as cons
from gcpds.utils.filters import GenericButterBand, notch60
from gcpds.utils.processing import welch, fourier
from matplotlib import pyplot as plt
import numpy as np
import time
from gcpds.utils.visualizations import plot_eeg
import logging
# Show our own INFO messages but silence chatty third-party loggers.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('kafka').setLevel(logging.ERROR)
logging.getLogger('matplotlib').setLevel(logging.ERROR)
def show(data):
    """Plot channels in the time domain (left) and frequency domain (right).

    data: iterable of 1-D channel arrays; the sampling rate is assumed to
    be 250 Hz (hard-coded in the fourier call below) -- confirm for other
    board configurations.
    """
    plt.figure(figsize=(10, 5), dpi=90)
    plt.subplot(121)
    # Offset each channel by i*1e6 (signals are in microvolts) after
    # removing its mean, so traces stack vertically without overlap.
    [plt.plot(ch+(i*1e6)-ch.mean()) for (i, ch) in enumerate(data)]
    if len(data)>1:
        plt.yticks(np.arange(len(data))*1e6, [f'ch{ch+1}' for ch in range(len(data))])
    else:
        plt.ylabel('$\mu V$')
    # plt.xlim(0, 250)
    plt.xlabel('samples')
    plt.subplot(122)
    # w, Y = welch(data, fs=250, axis=1)
    w, Y = fourier(data, fs=250, axis=1)
    [plt.plot(w, y) for y in Y]
    plt.xlim(0, 90)
    plt.xlabel('Frequency [Hz]')
# Band-pass surrounding the 31.2 Hz impedance drive tone.
band_2737 = GenericButterBand(27, 37, fs=250)
def filter_impedance(v):
    """Remove 60 Hz mains noise, then isolate the 27-37 Hz impedance tone."""
    v = notch60(v, fs=250)
    return band_2737(v, fs=250)
def get_rms(v):
    """Return the RMS amplitude of *v*, estimated via its standard deviation.

    For a zero-mean sinusoid, std(v) equals Vpp / (2*sqrt(2)), i.e. the
    true RMS value, so this is a convenient one-liner estimate.
    """
    signal = np.asarray(v)
    return signal.std()
def get_z(v):
    """Convert a filtered impedance test signal into an impedance estimate (Ohm)."""
    rms = get_rms(v)
    # Z = Vrms / Irms, with Irms = 6 nA / sqrt(2) and Vrms given in uV;
    # subtract the 2.2 kOhm series resistor present on each Cyton channel.
    impedance = (1e-6 * rms * np.sqrt(2) / 6e-9) - 2200
    # Never report a (non-physical) negative impedance.
    return max(impedance, 0)
# -
# # Appendix 2 - Measuring Electrode Impedance
#
# Impedance measurement in the `ADS1299` is performed by injecting a `6nA` alternating current at `31.2 Hz`. In this example the impedance will be measured on the `N` inputs (as used for single-reference EEG montages), using the `leadoff_impedance` method to set these inputs into the correct mode.
#
# The first step is to connect correctly the Cyton board to replicate this experiment, a `10K` potentiometer will be connected between the `N` input (bottom) of channel 1 and the `SRB2` (bottom), the `BIAS` pin will not be used in this guide, *if you want to test with your head instead of a potentiometer then you must use this pin*.
# <div class="alert alert-warning"> Note
#
# * The impedance measurement does not work correctly on the current version of Cyton Library, but there is a [pull request](https://github.com/OpenBCI/OpenBCI_Cyton_Library/pull/95) that solves this issue.
# * It is possible to use versions between V3.0.0 and V3.1.2, but you must reset the board every time before measurement and NEVER change the sample frequency.
#
# </div>
# ## Offline measurement
# +
# openbci = Cyton('serial', '/dev/ttyUSB1', capture_stream=True, daisy=False)
# NOTE(review): '1172.16.58.3' is not a valid IPv4 address -- likely an
# anonymization artifact; confirm the real board IP before running.
openbci = Cyton('wifi', ['1172.16.58.3'], host='192.168.1.1', streaming_package_size=100, capture_stream=True, daisy=False)
# openbci = Cyton('wifi', '192.168.4.1', streaming_package_size=100, daisy=False, capture_stream=True)
openbci.command(cons.SAMPLE_RATE_250SPS)
openbci.command(cons.DEFAULT_CHANNELS_SETTINGS)
# Drive the 6 nA test current through the N inputs of channels 1-8 only.
openbci.leadoff_impedance(range(1, 9), pchan=cons.TEST_SIGNAL_NOT_APPLIED, nchan=cons.TEST_SIGNAL_APPLIED)
# Capture 5 seconds of data, then pull the buffered EEG as an array.
openbci.stream(5)
data_raw = np.array(openbci.eeg_time_series)
# -
data_raw.shape
show([data_raw[0]])
# We still do not see a sinusoid at `31.2 Hz`, but there is one; so, after applying a filter:
# +
band_2737 = GenericButterBand(27, 37, fs=250)
def filter_impedance(v):
    """Isolate the impedance tone (the 60 Hz notch is disabled in this variant)."""
    # v = notch60(v, fs=250)
    return band_2737(v, fs=250)
data = filter_impedance(data_raw)
# data = data[:, 100:-100]
show([data[0]])
# -
# Now we need the `RMS` voltage. There are several formulas to get this value, even one using the `std`, but I like to use one based on the `VPP`:
#
# $$
# V_{RMS}=\frac{V_{pp}}{2\sqrt{2}}\sim std(V)
# $$
#
# Our `Vpp` can be calculated as the *maximum* - *minimum*. In some approaches it is very common to find the `standard deviation` used instead of the RMS.
# +
def get_rms(v):
    """RMS amplitude estimated as the standard deviation of the signal."""
    return np.std(v)
    # return (v.max()-v.min())/(2*np.sqrt(2))
rms = get_rms(data[0])
rms
# -
# $$
# Z=\frac{V_{RMS}}{I_{RMS}}
# $$
#
# We know that the `ADS1299` injects a `6nA` of alternating current, so:
#
# $$
# I_{RMS}=\frac{6nA}{\sqrt{2}}
# $$
#
# Then, considering that we have `uV` instead of `V`:
#
# $$
# Z=\frac{\mu V_{RMS}\cdot10^{-6}\cdot\sqrt{2}}{6\cdot10^{-9}}
# $$
# +
def get_z(v):
    """Impedance in Ohm: Z = Vrms*sqrt(2)/6nA, with Vrms expressed in uV."""
    rms = get_rms(v)
    return 1e-6 * rms * np.sqrt(2) / 6e-9
z = get_z(data[0])
print(f'For {rms:.2f} uVrms the electrode impedance is {z/1000:.2f} KOhm')
# -
# The Cyton board has a 2.2 KOhm resistor in series with each electrode, so we must subtract this value in order to get the real one.
# +
def get_z(v):
    """Impedance in Ohm after subtracting the 2.2 kOhm on-board series resistor."""
    rms = get_rms(v)
    z = (1e-6 * rms * np.sqrt(2) / 6e-9) - 2200
    # Clamp non-physical negative estimates to zero.
    if z < 0:
        return 0
    return z
z = get_z(data[0])
print(f'For {rms:.2f} uVrms the electrode-to-head impedance is {(z)/1000:.2f} KOhm')
# -
# ## Real time measurement
#
# For this experiment we will use the Kafka consumer interface, and the same potentiometer.
# Keep in mind that this measurement uses a 1-second signal, so the variance will affect the real measure; in real life the amplitude does not change so drastically.
import time
Z = []
# Stream packets and convert each one into an impedance estimate.
# NOTE(review): the sample rate is set to 1 kSPS here, but filter_impedance
# and the fourier/welch helpers assume fs=250 -- confirm which rate is intended.
with OpenBCIConsumer('wifi', '192.168.1.113', host='192.168.1.1', auto_start=False, streaming_package_size=250, daisy=False) as (stream, openbci):
    print(openbci.command(cons.SAMPLE_RATE_1KSPS))
    print(openbci.command(cons.DEFAULT_CHANNELS_SETTINGS))
    print(openbci.leadoff_impedance(range(0, 9), pchan=cons.TEST_SIGNAL_NOT_APPLIED, nchan=cons.TEST_SIGNAL_APPLIED))
    time.sleep(1)
    openbci.start_stream()
    for i, message in enumerate(stream):
        if message.topic == 'eeg':
            eeg, aux = message.value['data']
            eeg = filter_impedance(eeg)
            # eeg = eeg[:, 100:-100]
            z = get_z(eeg[0])
            Z.append(z)
            print(f'{z/1000:.2f} kOhm')
            # Stop after ~600 packets.
            if i >= 600:
                break
# Plot the collected impedance trace in kOhm.
plt.figure(figsize=(10, 5), dpi=90)
plt.plot(np.array(Z)/1000)
plt.ylabel('Impedance [$K\Omega$]')
plt.xlabel('Time [s]')
plt.grid(True)
plt.show()
# ## Improve measurements
#
# Some tips for improving the impedance measurement:
#
# * Take short signals, but long enough -- 1 second is fine.
# * Remove the first and last segments of the filtered signal.
# * Nonstationary signals will produce wrong measurements.
# * A single measurement is not enough; it is recommended to work with trends instead.
# +
from openbci_stream.acquisition import Cyton
import time
# openbci = Cyton('wifi', '192.168.1.113', host='192.168.1.1', streaming_package_size=250, daisy=False)
openbci = Cyton('wifi', '192.168.4.1', streaming_package_size=100, capture_stream=True, daisy=False)
openbci.command(cons.DEFAULT_CHANNELS_SETTINGS)
# openbci = Cyton('serial', 'COM0')
# stream 15 seconds
openbci.start_stream()
time.sleep(15) # asynchronous call
openbci.stop_stream()
# -
# Collect the buffered EEG into an array (presumably channels x samples --
# confirm; show() below indexes the first axis as a channel).
data_raw = np.array(openbci.eeg_time_series)
data_raw.shape
import time
Z = []
with OpenBCIConsumer('wifi', '192.168.4.1', streaming_package_size=100, daisy=False) as (stream, openbci):
# with OpenBCIConsumer(host='192.168.1.1') as stream:
    # Configure the board, pausing between commands so each one is applied.
    time.sleep(1)
    print(openbci.command(cons.SAMPLE_RATE_250SPS))
    time.sleep(1)
    print(openbci.command(cons.DEFAULT_CHANNELS_SETTINGS))
    time.sleep(1)
    # NOTE(review): the offline example used range(1, 9); channel 0 may be
    # invalid here -- confirm against the Cyton channel numbering.
    print(openbci.leadoff_impedance(range(0, 9), pchan=cons.TEST_SIGNAL_NOT_APPLIED, nchan=cons.TEST_SIGNAL_APPLIED))
    time.sleep(1)
    openbci.start_stream()
    for i, message in enumerate(stream):
        if message.topic == 'eeg':
            eeg, aux = message.value['data']
            # print()
            eeg = filter_impedance(eeg)
            # eeg = eeg[:, 100:-100]
            z = get_z(eeg[0])
            Z.append(z)
            print(eeg.shape, f'{z/1000:.2f} kOhm')
            if i >= 600:
                break
from scipy.fftpack import fft, fftfreq, fftshift
# +
# fft?
# -
| docs/source/notebooks/.ipynb_checkpoints/A2-electrodes_impedance-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b2c298de-5f64-4c90-9ef5-54034a660876", "showTitle": false, "title": ""}
# d-sandbox
#
# <div style="text-align: center; line-height: 0; padding-top: 9px;">
# <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px; height: 163px">
# </div>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6df7d5a1-a2dc-4deb-b6fe-13e26529facf", "showTitle": false, "title": ""}
# # Model Management
#
# An MLflow model is a standard format for packaging models that can be used on a variety of downstream tools. This lesson provides a generalizable way of handling machine learning models created in and deployed to a variety of environments.
#
# ##  In this lesson you:<br>
# - Introduce model management best practices
# - Store and use different flavors of models for different deployment environments
# - Apply models combined with arbitrary pre and post-processing code using Python models
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "95e5ee82-6fff-4e9a-9d88-565f65aef6ea", "showTitle": false, "title": ""}
# <iframe
# src="//fast.wistia.net/embed/iframe/bbyhkgxzoz?videoFoam=true"
# style="border:1px solid #1cb1c2;"
# allowtransparency="true" scrolling="no" class="wistia_embed"
# name="wistia_embed" allowfullscreen mozallowfullscreen webkitallowfullscreen
# oallowfullscreen msallowfullscreen width="640" height="360" ></iframe>
# <div>
# <a target="_blank" href="https://fast.wistia.net/embed/iframe/bbyhkgxzoz?seo=false">
# <img alt="Opens in new tab" src="https://files.training.databricks.com/static/images/external-link-icon-16x16.png"/> Watch full-screen.</a>
# </div>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ea1fe7f1-e7e5-40fb-ab97-1140885b3a8c", "showTitle": false, "title": ""}
# -sandbox
# ### Managing Machine Learning Models
#
# Once a model has been trained and bundled with the environment it was trained in, the next step is to package the model so that it can be used by a variety of serving tools. The current deployment options include Docker-based REST servers, Spark using streaming or batch, and cloud platforms such as Azure ML and AWS SageMaker. Packaging the final model in a platform-agnostic way offers the most flexibility in deployment options and allows for model reuse across a number of platforms.
#
# **MLflow models is a tool for deploying models that's agnostic to both the framework the model was trained in and the environment it's being deployed to. It's convention for packaging machine learning models that offers self-contained code, environments, and models.** The main abstraction in this package is the concept of **flavors,** which are different ways the model can be used. For instance, a TensorFlow model can be loaded as a TensorFlow DAG or as a Python function: using the MLflow model convention allows for the model to be used regardless of the library that was used to train it originally.
#
# The primary difference between MLflow projects and models is that models are geared more towards inference and serving. The `python_function` flavor of models gives a generic way of bundling models regardless of whether it was `sklearn`, `keras`, or any other machine learning library that trained the model. We can thereby deploy a python function without worrying about the underlying format of the model. **MLflow therefore maps any training framework to any deployment environment**, massively reducing the complexity of inference.
#
# Finally, arbitrary pre and post-processing steps can be included in the pipeline such as data loading, cleansing, and featurization. This means that the full pipeline, not just the model, can be preserved.
#
# <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/mlflow-models-enviornments.png" style="height: 400px; margin: 20px"/></div>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f91865ce-b543-44d6-ab86-0dfec9a5fab9", "showTitle": false, "title": ""}
# Run the following cell to set up our environment.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "33d8c9d0-a03b-4863-90fc-129cf615288d", "showTitle": false, "title": ""}
# %run "./Includes/Classroom-Setup"
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "dde15733-e25a-4f7a-8dff-ac446943c4d5", "showTitle": false, "title": ""}
# -sandbox
# ### Model Flavors
#
# Flavors offer a way of saving models in a way that's agnostic to the training development, making it significantly easier to be used in various deployment options. Some of the most popular built-in flavors include the following:<br><br>
#
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#module-mlflow.pyfunc" target="_blank">mlflow.pyfunc</a>
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.keras.html#module-mlflow.keras" target="_blank">mlflow.keras</a>
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.pytorch.html#module-mlflow.pytorch" target="_blank">mlflow.pytorch</a>
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.sklearn.html#module-mlflow.sklearn" target="_blank">mlflow.sklearn</a>
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.spark.html#module-mlflow.spark" target="_blank">mlflow.spark</a>
# * <a href="https://mlflow.org/docs/latest/python_api/mlflow.tensorflow.html#module-mlflow.tensorflow" target="_blank">mlflow.tensorflow</a>
#
# Models also offer reproducibility since the run ID and the timestamp of the run are preserved as well.
#
# <a href="https://mlflow.org/docs/latest/python_api/index.html" target="_blank">You can see all of the flavors and modules here.</a>
#
# <div><img src="https://files.training.databricks.com/images/eLearning/ML-Part-4/mlflow-models.png" style="height: 400px; margin: 20px"/></div>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "70a04601-297d-4bab-b5e4-736f95e5c623", "showTitle": false, "title": ""}
# To demonstrate the power of model flavors, let's first create two models using different frameworks.
#
# Import the data.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "88e7d4e4-6240-4a11-a986-14c2ce5abfaf", "showTitle": false, "title": ""}
import pandas as pd
from sklearn.model_selection import train_test_split
# Load the cleaned Airbnb listings; 'price' is the regression target.
df = pd.read_csv("/dbfs/mnt/training/airbnb/sf-listings/airbnb-cleaned-mlflow.csv")
X_train, X_test, y_train, y_test = train_test_split(df.drop(["price"], axis=1), df[["price"]].values.ravel(), random_state=42)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "55659af4-d0e7-495b-9c96-687d5e892bac", "showTitle": false, "title": ""}
# Render the dataframe in the Databricks notebook UI.
display(df)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "487c385a-b7fe-4be2-b15d-cc529983bfa0", "showTitle": false, "title": ""}
# Train a random forest model.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c923a2bd-14c7-4378-9556-f3d3e91a54cd", "showTitle": false, "title": ""}
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Fit a modest random forest and score it on the held-out split.
rf = RandomForestRegressor(n_estimators=100, max_depth=5)
rf.fit(X_train, y_train)
rf_mse = mean_squared_error(y_test,rf.predict(X_test))
rf_mse
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "72ef423d-5872-4be9-9d78-90b3007b10ea", "showTitle": false, "title": ""}
# Train a neural network.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c2c89757-7456-47e4-8eb4-c15f7712f9d8", "showTitle": false, "title": ""}
import tensorflow as tf
# NOTE(review): tf.set_random_seed is the TF 1.x API (tf.random.set_seed in
# 2.x) -- this notebook presumably targets TF 1.x; confirm.
tf.set_random_seed(42) # For reproducibility
from keras.models import Sequential
from keras.layers import Dense
# Small fully-connected regression network (21 input features -> 1 output).
nn = Sequential([
    Dense(40, input_dim=21, activation='relu'),
    Dense(20, activation='relu'),
    Dense(1, activation='linear')
])
nn.compile(optimizer="adam", loss="mse")
nn.fit(X_train, y_train, validation_split=.2, epochs=40, verbose=2)
nn_mse = mean_squared_error(y_test, nn.predict(X_test))
nn_mse
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a483ba28-beb3-46e9-9e27-f7dbb3fae83a", "showTitle": false, "title": ""}
# Now log the two models.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "53438b02-ea7f-4afd-98ed-e0fd47c833a8", "showTitle": false, "title": ""}
import mlflow.sklearn
# Log the sklearn model and its MSE; keep the run/artifact IDs for later cells.
with mlflow.start_run(run_name="RF Model") as run:
    mlflow.sklearn.log_model(rf,"model")
    mlflow.log_metric("mse",rf_mse)
    sklearnRunID = run.info.run_uuid
    sklearnURI = run.info.artifact_uri
    experimentID = run.info.experiment_id
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c1ab67e1-0c0d-4ed8-9333-811d22249f37", "showTitle": false, "title": ""}
import mlflow.keras
# Log the Keras model with the keras flavor and record its MSE.
with mlflow.start_run(run_name="NN Model") as run:
    mlflow.keras.log_model(nn,"model")
    mlflow.log_metric("mse",nn_mse)
    kerasRunID = run.info.run_uuid
    kerasURI = run.info.artifact_uri
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "eef1044e-65ff-4bd0-b4a3-1b94362a5cab", "showTitle": false, "title": ""}
# Look at the model flavors. Both have their respective `keras` or `sklearn` flavors as well as a `python_function` flavor.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2f5933ff-18a8-4e0e-b636-e52ebccc979d", "showTitle": false, "title": ""}
# Inspect each MLmodel metadata file to see the flavors it exposes.
print(dbutils.fs.head(sklearnURI+"/model/MLmodel"))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "657f0ba1-098d-4c1d-8466-b01e83885eca", "showTitle": false, "title": ""}
print(dbutils.fs.head(kerasURI+"/model/MLmodel"))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "258f98c8-80c1-4e61-b162-890550e1437c", "showTitle": false, "title": ""}
# Now we can use both of these models in the same way, even though they were trained by different packages. For full documentation:
# https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a3675b7a-6e4b-4b85-9963-1d855f214f03", "showTitle": false, "title": ""}
import mlflow.pyfunc
# Load both models through the generic python_function flavor (translate the
# dbfs: URI into a local /dbfs path first).
rf_pyfunc_model = mlflow.pyfunc.load_model(model_uri=(sklearnURI+"/model").replace("dbfs:","/dbfs"))
type(rf_pyfunc_model)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "72b1e1c7-ae06-4012-97ae-99850b111b30", "showTitle": false, "title": ""}
import mlflow.pyfunc
nn_pyfunc_model = mlflow.pyfunc.load_model(model_uri=(kerasURI+"/model").replace("dbfs:","/dbfs"))
type(nn_pyfunc_model)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2e61dddb-a302-43e1-b45c-836458d256ea", "showTitle": false, "title": ""}
# Both will implement a predict method. The `sklearn` model is still of type `sklearn` because this package natively implements this method.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "49e91762-2265-4996-8a26-406d8054650c", "showTitle": false, "title": ""}
# Both pyfunc wrappers expose the same predict() interface.
rfOutput = rf_pyfunc_model.predict(X_test)
rfOutput
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fc9e4cc8-4a19-41e3-8e80-453211ee6e30", "showTitle": false, "title": ""}
nnOutput = nn_pyfunc_model.predict(X_test)
nnOutput
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "77368a46-3dd8-4d7c-8346-de93c132ec3a", "showTitle": false, "title": ""}
print('rfOutput: {}; nnOutput: {}'.format(type(rfOutput), type(nnOutput)))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "565fc84b-9637-490d-b276-d8ce26ea1afc", "showTitle": false, "title": ""}
# -sandbox
# ### Pre and Post Processing Code using `pyfunc`
#
# A `pyfunc` is a generic python model that can define any model, regardless of the libraries used to train it. As such, it's defined as a directory structure with all of the dependencies. It is then "just an object" with a predict method. Since it makes very few assumptions, it can be deployed using MLflow, SageMaker, a Spark UDF or in any other environment.
#
# <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Check out <a href="https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-create-custom" target="_blank">the `pyfunc` documentation for details</a><br>
# <img alt="Side Note" title="Side Note" style="vertical-align: text-bottom; position: relative; height:1.75em; top:0.05em; transform:rotate(15deg)" src="https://files.training.databricks.com/static/images/icon-note.webp"/> Check out <a href="https://github.com/mlflow/mlflow/blob/master/docs/source/models.rst#example-saving-an-xgboost-model-in-mlflow-format" target="_blank">this README for generic example code and integration with `XGBoost`</a>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bcb80dab-005a-448b-b668-c0b455f29043", "showTitle": false, "title": ""}
# To demonstrate how `pyfunc` works, create a basic class that adds `n` to the input values.
#
# Define a model class.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "864f39d6-d49a-4fb9-aae0-129af39cc130", "showTitle": false, "title": ""}
import mlflow.pyfunc
class AddN(mlflow.pyfunc.PythonModel):
    """Trivial pyfunc model: adds the fixed constant ``n`` to every input value."""
    def __init__(self, n):
        # Constant added to each element at prediction time.
        self.n = n
    def predict(self, context, model_input):
        # `context` is unused; pandas applies the shift column by column.
        shift = self.n
        return model_input.apply(lambda col: col + shift)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9c372fcc-c060-404d-8c60-07d9f2a243dd", "showTitle": false, "title": ""}
# Construct and save the model.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "28b81367-a4f6-4eaf-87f5-e880332ac8c8", "showTitle": false, "title": ""}
from mlflow.exceptions import MlflowException
model_path = userhome + "/add_n_model2"
add5_model = AddN(n=5)
dbutils.fs.rm(model_path, True) # Allows you to rerun the code multiple times
mlflow.pyfunc.save_model(path=model_path.replace("dbfs:", "/dbfs"), python_model=add5_model)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ea755802-5338-4535-b612-e65cef557824", "showTitle": false, "title": ""}
# Load the model in `python_function` format.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7b59938d-9eda-42f7-a86d-7222c34b0158", "showTitle": false, "title": ""}
loaded_model = mlflow.pyfunc.load_model(model_path)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2eb0f982-b3e5-4eb7-884c-2ff24ca66206", "showTitle": false, "title": ""}
# Evaluate the model.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "caf0aefa-eb1a-4294-b791-a60752b5e1a9", "showTitle": false, "title": ""}
import pandas as pd
model_input = pd.DataFrame([range(10)])
model_output = loaded_model.predict(model_input)
assert model_output.equals(pd.DataFrame([range(5, 15)]))
model_output
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a39f7188-a366-4b85-a2d1-2dc6c21ea7ac", "showTitle": false, "title": ""}
# ##  Lab
#
#
# ### [Click here to start the lab for this lesson.]($./Labs/05-Lab)
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5473f22e-1568-4bff-8b5f-7079c0b07909", "showTitle": false, "title": ""}
# ## Review
# **Question:** How do MLflow projects differ from models?
# **Answer:** The focus of MLflow projects is reproducibility of runs and packaging of code. MLflow models focuses on various deployment environments.
#
# **Question:** What is a ML model flavor?
# **Answer:** Flavors are a convention that deployment tools can use to understand the model, which makes it possible to write tools that work with models from any ML library without having to integrate each tool with each library. Instead of having to map each training environment to a deployment environment, ML model flavors manages this mapping for you.
#
# **Question:** How do I add pre and post processing logic to my models?
# **Answer:** A model class that extends `mlflow.pyfunc.PythonModel` allows you to have load, pre-processing, and post-processing logic.
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e3f7325e-50b0-4f3b-af68-1133eb89e50c", "showTitle": false, "title": ""}
# ## Additional Topics & Resources
#
# **Q:** Where can I find out more information on MLflow Models?
# **A:** Check out <a href="https://www.mlflow.org/docs/latest/models.html" target="_blank">the MLflow documentation</a>
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "caf082e3-44d4-4b19-bc50-af6e6ff01ffd", "showTitle": false, "title": ""}
# -sandbox
# © 2019 Databricks, Inc. All rights reserved.<br/>
# Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="http://www.apache.org/">Apache Software Foundation</a>.<br/>
# <br/>
# <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="http://help.databricks.com/">Support</a>
| MLflow/04-Model-Management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlp
# language: python
# name: nlp
# ---
import sys
sys.path.append('..')
# +
from sklearn.metrics import classification_report
from baseline_logisticregression import readInData
from typing import NamedTuple, List
from bert_utils import calc_entailment_prob
from sklearn.ensemble import RandomForestClassifier
from tqdm.auto import tqdm
import os
# -
class RawInput(NamedTuple):
    """A single paraphrase-candidate pair of tweet texts."""
    twit0: str  # first tweet of the pair
    twit1: str  # second tweet of the pair
def load_data(fn: str)->(List[RawInput],List[bool]):
    """Read a paraphrase data file via readInData; return (tweet pairs, gold labels).

    Prints record counts as a side effect. Labels come straight from the file
    reader, so a label may be None for unannotated records.
    """
    print(f"Start to read '{fn}'")
    records, trends = readInData(fn)
    print("Total records:", len(records))
    n_true = sum(1 for rec in records if rec[1])
    print("True samples:", n_true)
    print("False samples:", len(records) - n_true)
    pairs = [RawInput(rec[2], rec[3]) for rec in records]
    labels = [rec[1] for rec in records]
    return pairs, labels
def featurize(x_raw: List[RawInput])->List[List[float]]:
    """BERT entailment features: both class probabilities in both directions."""
    feats = []
    for pair in tqdm(x_raw):
        fwd = calc_entailment_prob(pair.twit0, pair.twit1)
        bwd = calc_entailment_prob(pair.twit1, pair.twit0)
        feats.append([fwd[0], fwd[1], bwd[0], bwd[1]])
    return feats
x_train_raw, y_train = load_data('../data/train.data')
x_dev_raw, y_dev = load_data('../data/dev.data')
x_test_raw, y_test = load_data('../data/test.data')
for r in x_train_raw[:10]:
print(r)
# ## BERT features
def load_bert_features(fn: str):
    """Load cached BERT features from a tab-separated text file.

    Each line becomes one row of floats (one float per tab-separated field).
    """
    with open(fn, 'rt', encoding='utf-8') as f:
        return [[float(v) for v in line.strip().split('\t')] for line in f]
# +
print("Start featurizing...")
if not os.path.isfile('../data/bert.train.data'):
x_train_bert_features = featurize(x_train_raw)
x_dev_bert_features = featurize(x_dev_raw)
x_test_bert_features = featurize(x_test_raw)
else:
x_train_bert_features = load_bert_features('../data/bert.train.data')
x_dev_bert_features = load_bert_features('../data/bert.dev.data')
x_test_bert_features = load_bert_features('../data/bert.test.data')
print("Done!")
# -
def save_bert_features(x, filename):
    """Persist 4-column feature rows as tab-separated lines (no trailing newline)."""
    rows = ['\t'.join([str(row[0]), str(row[1]), str(row[2]), str(row[3])]) for row in x]
    with open(filename, 'wt', encoding='utf-8') as f:
        f.write('\n'.join(rows))
if not os.path.isfile('../data/bert.train.data'):
save_bert_features(x_train_bert_features, '../data/bert.train.data')
save_bert_features(x_dev_bert_features, '../data/bert.dev.data')
save_bert_features(x_test_bert_features, '../data/bert.test.data')
# ## LEN features
def featurize_len(x_raw: List[RawInput]) -> List[List[float]]:
    """Surface features per pair: length ratio plus both lengths scaled by 1/100."""
    return [
        [len(pair.twit0) / len(pair.twit1), len(pair.twit0) / 100, len(pair.twit1) / 100]
        for pair in x_raw
    ]
print("Start featurizing...")
x_train_len_features = featurize_len(x_train_raw)
x_dev_len_features = featurize_len(x_dev_raw)
x_test_len_features = featurize_len(x_test_raw)
print("Done!")
def report(y_true, y_pred):
    """Print sklearn's classification report, ignoring items whose gold label is None."""
    paired = [(gold, pred) for gold, pred in zip(y_true, y_pred) if gold is not None]
    golds = [gold for gold, _ in paired]
    preds = [pred for _, pred in paired]
    print(classification_report(golds, preds))
# ## bpemb
# +
from bpemb import BPEmb
from scipy import spatial
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from nltk.corpus import stopwords
from nltk import word_tokenize
emb = BPEmb(lang='en', dim = 300)
print(emb)
# +
STOP_WORDS = stopwords.words('english')
def tokenize_filter(text: str, trace:bool=False)->List[str]:
    """Tokenize *text* and drop English stopwords; optionally print each stage."""
    def _trace(stage):
        # Debug aid: echoes the input, raw tokens and filtered tokens in order.
        if trace:
            print(stage)
    _trace(text)
    raw_tokens = word_tokenize(text)
    _trace(raw_tokens)
    kept = [tok for tok in raw_tokens if tok.lower() not in STOP_WORDS]
    # A stricter variant would additionally require tok.isalpha().
    _trace(kept)
    return kept
print(tokenize_filter('I like to move it move it'))
# +
def calc_emb(text, delete_stopwords: bool=False):
    """Mean BPEmb subword embedding of *text* (a zero vector if it yields no subwords)."""
    if delete_stopwords:
        text = ' '.join(tokenize_filter(text))
    subword_vecs = emb.embed(text.casefold())
    mean_vec = np.zeros(emb.vectors.shape[1], dtype=np.float32)
    for vec in subword_vecs:
        mean_vec += vec
    count = len(subword_vecs)
    if count:
        mean_vec /= count
    return mean_vec
def featurize_emb(x_raw: List[RawInput]) -> List[List[float]]:
    """One feature per pair: cosine similarity of stopword-filtered mean embeddings."""
    feats = []
    for pair in x_raw:
        vec0 = calc_emb(pair.twit0, True)
        vec1 = calc_emb(pair.twit1, True)
        cos_similarity = 1 - spatial.distance.cosine(vec0, vec1)
        feats.append([cos_similarity])
    return feats
print(featurize_emb([RawInput('Twit1 experiment', 'Some text')]))
print(featurize_emb([RawInput('I like to move it move it', 'I like to move it')]))
# -
print("Start featurizing...")
x_train_emb_features = featurize_emb(x_train_raw)
x_dev_emb_features = featurize_emb(x_dev_raw)
x_test_emb_features = featurize_emb(x_test_raw)
print("Done!")
# ## Numberbatch
from gensim.models import KeyedVectors
print('loading word vectors')
word_vectors = KeyedVectors.load_word2vec_format("d:/nlp/vectors/numberbatch-en-17.06.txt.gz", binary=False)
print('loading word vectors finished')
# +
import math
def featurize_nb(x_raw: List[RawInput]) -> List[List[float]]:
    """One feature per pair: 1 - Word Mover's Distance under Numberbatch vectors."""
    feats = []
    for pair in x_raw:
        sim = 1 - word_vectors.wmdistance(pair.twit0.lower(), pair.twit1.lower())
        if sim == -math.inf:
            # wmdistance came back infinite (e.g. no in-vocabulary words);
            # fall back to maximal similarity, matching the original behavior.
            print("-inf for ", pair)
            sim = 1
        feats.append([sim])
    return feats
print(featurize_nb([RawInput('Twit1 experiment', 'Some text')]))
print(featurize_nb([RawInput('I like to move it move it', 'I like to move it')]))
print(x_train_raw[0])
print(featurize_nb([x_train_raw[2]]))
# -
print("Start featurizing...")
x_train_nb_features = featurize_nb(x_train_raw)
x_dev_nb_features = featurize_nb(x_dev_raw)
x_test_nb_features = featurize_nb(x_test_raw)
print("Done!")
print(x_train_nb_features[:10])
# ## Glue features
# +
#import copy
def glue_features(*f_lists):
    """Row-wise concatenation of several parallel per-sample feature lists.

    glue_features([[1],[2]], [[3],[4]]) -> [[1, 3], [2, 4]]
    """
    return [
        [value for columns in rows for value in columns]
        for rows in zip(*f_lists)
    ]
tst1, tst2 = [[1],[2],[3],[4],[5]], [[6],[7],[8],[9],[10]]
print(glue_features(tst1, tst2))
tst3 = [[11],[12],[13],[14],[15]]
print(glue_features(tst1, tst2, tst3))
# -
"""
x_train_features = glue_features(x_train_bert_features, x_train_len_features, x_train_emb_features, x_train_nb_features)
x_dev_features = glue_features(x_dev_bert_features, x_dev_len_features, x_dev_emb_features, x_dev_nb_features)
x_test_features = glue_features(x_test_bert_features, x_test_len_features, x_test_emb_features, x_test_nb_features)
"""
x_train_features = glue_features(x_train_bert_features, x_train_len_features, x_train_emb_features, x_train_nb_features)
x_dev_features = glue_features(x_dev_bert_features, x_dev_len_features, x_dev_emb_features, x_dev_nb_features)
x_test_features = glue_features(x_test_bert_features, x_test_len_features, x_test_emb_features, x_test_nb_features)
# ## Classifier
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
class_weight = {True: 1.9, False:1}
#clf = RandomForestClassifier(n_estimators=2, random_state=1974, verbose=True, class_weight='balanced')
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
#, class_weight=class_weight
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
print(list(zip(y_pred[:10], y_pred_prob[:10])))
# Store results
def store_pred(fn: str, pred, pred_prob):
    """Write predictions to *fn*: one tab-separated line of 'true'/'false' and P(True)."""
    with open(fn, 'wt', encoding='utf-8') as f:
        for label, probs in zip(pred, pred_prob):
            tag = 'true' if label else 'false'
            f.write(f"{tag}\t{probs[1]:.4f}\n")
store_pred('../systemoutputs/PIT2015_BASELINE_SS_ALL.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_ALL.output
y_pred_dev = clf.predict(x_dev_features)
report(y_dev, y_pred_dev)
y_pred_tr = clf.predict(x_train_features)
report(y_train, y_pred_tr)
# ## Classifier lite
x_train_features = glue_features(x_train_len_features, x_train_emb_features)
x_dev_features = glue_features(x_dev_len_features, x_dev_emb_features)
x_test_features = glue_features(x_test_len_features, x_test_emb_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
class_weight = {True: 1.9, False:1}
#clf = RandomForestClassifier(n_estimators=2, random_state=1974, verbose=True, class_weight='balanced')
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
#, class_weight=class_weight
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_LITE.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_LITE.output
# ## Classifier very lite
x_train_features = glue_features(x_train_emb_features)
x_dev_features = glue_features(x_dev_emb_features)
x_test_features = glue_features(x_test_emb_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
class_weight = {True: 1.9, False:1}
#clf = RandomForestClassifier(n_estimators=2, random_state=1974, verbose=True, class_weight='balanced')
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
#, class_weight=class_weight
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_VL.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_VL.output
# ## Classifier Numberbatch
x_train_features = glue_features(x_train_nb_features)
x_dev_features = glue_features(x_dev_nb_features)
x_test_features = glue_features(x_test_nb_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
class_weight = {True: 1.9, False:1}
#clf = RandomForestClassifier(n_estimators=2, random_state=1974, verbose=True, class_weight='balanced')
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
#, class_weight=class_weight
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_NB.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_NB.output
# ## Classifier BERT
x_train_features = glue_features(x_train_bert_features)
x_dev_features = glue_features(x_dev_bert_features)
x_test_features = glue_features(x_test_bert_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
class_weight = {True: 1.9, False:1}
#clf = RandomForestClassifier(n_estimators=2, random_state=1974, verbose=True, class_weight='balanced')
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
#, class_weight=class_weight
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_BERT.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_BERT.output
# ## Classifier Numberbatch+BERT
x_train_features = glue_features(x_train_bert_features, x_train_nb_features)
x_dev_features = glue_features(x_dev_bert_features, x_dev_nb_features)
x_test_features = glue_features(x_test_bert_features, x_test_nb_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_NBBERT.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_NBBERT.output
# ## Classifier by length
x_train_features = glue_features(x_train_len_features)
x_dev_features = glue_features(x_dev_len_features)
x_test_features = glue_features(x_test_len_features)
from sklearn.linear_model import LogisticRegression
print("Start learning classifier...")
clf = LogisticRegression(random_state=1974, verbose=True, solver='saga'
, class_weight='balanced'
)
print("Done!")
clf.fit(x_train_features, y_train)
y_pred = clf.predict(x_test_features)
y_pred_prob = clf.predict_proba(x_test_features)
report(y_test, y_pred)
store_pred('../systemoutputs/PIT2015_BASELINE_SS_LEN.output', y_pred, y_pred_prob)
# !python pit2015_eval_single.py ../data/test.label ../systemoutputs/PIT2015_BASELINE_SS_LEN.output
| Practice-2019-05-11/scripts/ss_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GEE getThumbUrl example
# +
# Import the Earth Engine Python Package
import ee # !pip install earthengine-api
# Import Dependencies
import pandas as pd
import matplotlib.dates as mdates
from IPython.display import Image
from datetime import datetime
from matplotlib import dates
from pylab import *
ee.Initialize()
# -
# # Initialization using polygon
# set variables
SCALE = 10;
polarization = 'VV'; # vertical/vertical or vertical/horizontal
# build geometry to measure
rect = ee.Geometry.Polygon(
[[[106.81591544702997, -6.33681462466857],
[106.81591544702997, -6.350036929099756],
[106.82780299738397, -6.350036929099756],
[106.82780299738397, -6.33681462466857]]])
# convert geometry to JSON format
rect_JSON = rect.getInfo()['coordinates']
# ## Run GEE analysis through the API
# initiate Google Earth Engine to acquire the image
S1 = ee.ImageCollection('COPERNICUS/S1_GRD')\
.filterDate('2016-01-01', '2019-12-12')\
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', polarization))\
.filter(ee.Filter.eq('instrumentMode', 'IW'))\
.filter(ee.Filter.eq('orbitProperties_pass', 'DESCENDING'))\
.select(polarization)\
.filterBounds(rect);
# ## plotting the result
# +
# visualize resulting image
img_url = S1.first().getThumbUrl({
'region':rect_JSON,
'min':-25,
'max':0,
'palette':['0784b5', '39ace7', '9bd4e4', 'cadeef', 'ffffff']
})
Image(url = img_url)
# +
import imageio
# Download the thumbnail as a numpy array.
# Bug fix: the thumbnail URL computed above is bound to `img_url`;
# `url` was never defined and raised a NameError here.
im = imageio.imread(img_url)
# -
# apply value mask
array_mask = np.array(im < 170).astype(int)
# plot results
plt.subplot(121)
plt.imshow(im, cmap = 'Blues')
plt.subplot(122)
plt.imshow(array_mask[:,:,0], cmap = 'Blues')
# +
from gbdxtools import Interface
gbdx = Interface()
is_del = gbdx.catalog.get_data_location("104001001BA7C400")
print(is_del)
# +
#Download a single file from a GBDX S3 location
from gbdxtools import Interface
gbdx = Interface()
#Path to file on GBDX S3 location
#location is relative to user's GBDX S3 location, ie s3://gbd-customer-data/<prefix>/
location = "011227076010_01_003"
#Path to local directory for download
local_dir = "local_dir_name"
gbdx.s3.download(location=location, local_dir=local_dir)
# -
| 00_get_highest_water_date/GEE_getThumbUrl_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# # 一阶逻辑
#
# 本节我们将通过翻译自然语言表达式为一阶逻辑来表示它们的意思。并不是所有的自然语言语义都可以用一阶逻辑表示,但它是计算语义的一个不错的选择,因为它具有足够的表现力来表达语义的很多方面,而且另一方面,有出色的现成系统可用于开展一阶逻辑的自动推理。
#
# ## 句法
#
# 一阶逻辑保留所有命题逻辑的**布尔运算符**,但它增加了一些重要的新机制。一阶逻辑的标准构造规则包括以下术语:独立变量、独立常量、带不同数量的参数的**谓词**。例如:Angus walks 可以被形式化为 walk(angus),Angus sees Bertie 可以被形式化为 see(angus, bertie)。我们称 walk 为**一元谓词**,see 为**二元谓词**。
#
# 一阶逻辑本身没有什么实质性的关于词汇语义的表示,原子谓词如 see(angus, bertie) 在某种情况下是真还是假并不是一个逻辑问题,而依赖于特定的估值,即我们为常量 see、angus 和 bertie 选择的值,因此这些表达式被称为**非逻辑常量**。与之相反,**逻辑常量**(如布尔运算符)在一阶逻辑的每个模型中的解释总是相同的。
#
# 我们应该在这里提到:有一个二元谓词具有特殊的地位,它就是等号,如在 angus = aj 这样的公式中的等号。等号被视为一个逻辑常量,因为对于任何 t1 和 t2,公式 t1 = t2 为真当且仅当 t1 和 t2 指向同一个实体。
#
# 按照 Montague 文法的约定,我们将使用两种基本类型来表示一阶逻辑:**e**为实体类型,而**t**为公式类型,即具有真值的表达式。给定这两种基本类型,我们可以构造函数表达式的**复杂类型**,也就是说给定任何类型 σ 和 τ,<σ,τ> 是一个对应与从 σ 类型到 τ 类型的函数的复杂类型。例如 <e,t> 是从实体到真值的表达式类型,也就是一元谓词,可以通过下面的方式来进行类型检查:
import nltk
read_expr = nltk.sem.Expression.fromstring
expr = read_expr('walk(angus)', type_check=True)
print(expr.argument)
print(expr.argument.type)
print(expr.function)
print(expr.function.type)
# 为什么我们在最后看到了 <e,?> 呢?虽然类型检查器会尝试推断出尽可能多的类型,但在这种情况下,它并没有能够推断出 walk 的类型,所以其结果类型是未知的。为了帮助类型检查器,我们可以指定一个**信号**,即实现一个字典,明确指出非逻辑常量的类型:
sig = {'walk': '<e, t>'}
expr = read_expr('walk(angus)', signature=sig)
print(expr.function.type)
# 在一阶逻辑中,谓词的参数也可以是独立变量,如 x、y 和 z。独立变量类似人称代词,如 he、she 和 it,我们需要联系上下文来弄清楚它们的含义。
#
# 为了解释 He disappeared. 这个句子中 he 指代的是什么,我们有两种做法:一种方法是直接将其指向一个相关的本地对象;另一种方法是为代词提供文本中的先行词,例如将句子 Cyril is Angus's dog. 放在 He disappeared. 前面,那么 He disappeared. 在语义上就和 Cyril disappeared. 等价。
#
# 再思考下面例子中的 he,在这种情况下,它受不确定的 NP(a dog)的约束,这是一个和共指关系不同的关系。如果我们替换代词 he 为 a dog,结果 b 就在语义上与 a 不等效。
#
# a. Angus had a dog but he disappeared.
# b. Angus had a dog but a dog disappeared.
#
# 对应下面的句子 a,我们可以构建一个**开放公式** b,其中变量 x 出现了两次:
#
# a. He is a dog and he disappeared.
# b. dog(x) & disappear(x)
#
# 通过在上面的开放公式前面指定一个**存在量词 ∃x**(存在某些 x),我们可以**绑定**这些变量,下面的例子中 a 的意思是 b,更习惯的写法是 c:
#
# a. ∃x.(dog(x) & disappear(x))
# b. At least one entity is a dog and disappeared.
# c. A dog disappeared.
#
# NLTK 中的写法是 exists x.(dog(x) & disappear(x))。
#
# 除了存在量词,一阶逻辑还为我们提供了**全称量词 ∀x**(对所有 x):
#
# a. ∀x.(dog(x) -> disappaer(x))
# b. Everything has the property that if it is a dog, it disappears.
# c. Every dog disappeared.
#
# NLTK 中的写法是 all x.(dog(x) -> disappear(x))。
#
# 考虑下面两组公式:
#
# a. ((exists x. dog(x)) -> bark(x))
# b. all x. ((exists x. dog(x)) -> bark(x))
#
# 在公式 a 中,存在量词 x 的范围是 dog(x),所以 bark(x) 中的 x 的出现是不受限制的。因此,它可以被其他一些量词约束,如公式 b 所示。
#
# 在一般情况下,变量 x 在公式 φ 中是**自由**的,如果它没有出现在 all x 或 exists x 的范围内。相反,如果 x 出现在了 all x 或 exists x 范围内,它就是**受限**的。如果公式中所有的变量都是受限的,那么我们说这个公式是**封闭**的。我们可以通过 Expression 对象的 free() 方法返回公式中自由变量的集合:
print(read_expr('dog(cyril)').free())
print(read_expr('dog(x)').free())
print(read_expr('own(angus, cyril)').free())
print(read_expr('exists x. dog(x)').free())
print(read_expr('((some x. walk(x)) -> sing(x))').free())
print(read_expr('exists x. own(y, x)').free())
# ## 一阶定理证明
#
# 回顾前面我们在 to the north of 上遇到的限制:
#
# if x is to the north of y then y is not to the north of x.
#
# 命题逻辑不足以表示与二元谓词相关的概括,因此,我们不能正确地捕获以下论证:
#
# Sylvania is to the north of Freedonia. Therefore, Freedonia is not to the north of Sylvania.
#
# 毫无疑问,用一阶逻辑形式化这些规则是很理想的:
#
# all x. all y. (north_of(x, y) -> -north_of(y, x))
#
# 更妙的是,我们可以进行自动推理来证明论证的有效性。定理证明在一般情况下是为了确定我们要证明的公式(**证明目标**)是否可以从一系列假设的公式经过有限的推理步骤派生出来,写作 S ⊢ g,其中 S 是假设列表(可以为空),g 是证明目标。下面我们用 NLTK 中的定理证明接口 Prover9 来演示:
NotFnS = read_expr('-north_of(f, s)')
SnF = read_expr('north_of(s, f)')
R = read_expr('all x. all y. (north_of(x, y) -> -north_of(y, x))')
prover = nltk.Prover9()
prover.prove(NotFnS, [SnF, R])
# 定理证明器证明我们的论证是有效的,反过来,也可以用来验证不能从我们的假设推导出 north_of(f, s):
FnS = read_expr('north_of(f, s)')
prover.prove(FnS, [SnF, R])
# ## 一阶逻辑语言总结
#
# 下面我们来总结一阶逻辑的句法。我们采取约定:<e^n, t> 是一种由 n 个类型为 e 的参数组成的产生一个类型为 t 的表达式的谓词类型,n 是谓词的**元数**:
#
# 1. 如果 P 是类型 <e^n, t> 的谓词,a1,...,an 是 e 类型的术语,那么 P(a1,...,an) 的类型是 t。
# 2. 如果 α 和 β 都是 e 类型的,那么 (α = β) 和 (α != β) 都是 t 类型的。
# 3. 如果 φ 是 t 类型的,那么 -φ 也是 t 类型的。
# 4. 如果 φ 和 ψ 是 t 类型的,那么 (φ & ψ),(φ | ψ),(φ -> ψ) 和 (φ <-> ψ) 也是 t 类型的。
# 5. 如果 φ 是 t 类型的,x 是类型为 e 的变量,那么 exists x. φ 和 all x. φ 也是 t 类型的。
#
# 下表总结了一阶逻辑的几个新逻辑关系和 Expression 类的两个实用方法:
#
# | 示例 | 描述 |
# |--------------|--------------|
# | = | 等于 |
# | != | 不等于 |
# | exists | 存在量词 |
# | all | 全称量词 |
# | e.free() | e 的自由变量 |
# | e.simplify() | e 的简化形式 |
# ## 真值模型
#
# 现在,我们来给出一阶逻辑的真值条件的语义:给定一阶逻辑语言 L,L 的模型 M 是一个 <D, Val> 对,其中 D 是一个非空集合,称为模型的**域**,Val 是一个函数,称为**估值函数**,它按如下方式从 D 中分配值给 L 的表达式:
#
# 1. 对于 L 中的每一个独立常量 c,Val(c) 是 D 中的元素。
# 2. 对于每一个元数 n >= 0 的谓词符号 P,Val(P) 是从 D^n 到 {True, False} 的函数(如果 P 的元数为 0,则 Val(P) 是一个简单的真值,P 被认为是一个命题符号)。
#
# 在 NLTK 中我们采用更简单的定义,定义 Val(P) 为集合 S:
#
# S = {s | f(s) = True}
#
# 这样的 f 被称为 S 的**特征函数**。现在假设我们有一个域 dom 包括 Bertie、Olive 和 Cyril,其中 Bertie 是男孩,Olive 是女孩,而 Cyril 是小狗。为了方便记录,我们用 b、o 和 c 作为模型中对应的标签,并定义如下估值:
dom = {'b', 'o', 'c'}
v = """
bertie => b
olive => o
cyril => c
boy => {b}
girl => {o}
dog => {c}
walk => {o, c}
see => {(b, o), (c, b), (o, c)}
"""
val = nltk.Valuation.fromstring(v)
print(val)
# 根据这一估值,see 的值是一个元组集合,包含:Bertie 看到 Olive、Cyril 看到 Bertie、Olive 看到 Cyril。同时,一元谓词(如 boy、girl、dog)也是以单个元组的集合而不是个体的集合出现的,这使我们能够方便地统一处理任何元数的关系。
#
# 一个形式为 P(T1,...Tn) 的谓词,其中 P 是 n 元的,其为真的条件是对应于 (T1,...Tn) 的值的元组属于 P 的元组集合:
print(('o', 'c') in val['see'])
print(('b',) in val['boy'])
# ## 独立变量和赋值
#
# 在我们的模型中,上下文的使用对应的是为变量**赋值**,这是一个从独立变量到域中实体的映射。[nltk.Assignment](https://www.nltk.org/_modules/nltk/sem/evaluate.html#Assignment) 提供了赋值的功能,它以模型的域为参数,并按照(变量,值)的形式进行绑定:
g = nltk.Assignment(dom, [('x', 'o'), ('y', 'c')])
g
# 接下来我们创建一个模型,然后调用 evaluate() 方法计算真值:
m = nltk.Model(dom, val)
m.evaluate('see(olive, y)', g)
# 由于我们已经知道 o 和 c 在 see 关系中表示的含义,所以 True 值是我们所期望的,在这种情况下,我们可以说赋值 g 满足公式 see(olive, y)。相比之下,下面的公式相对于 g 的评估结果为 False:
m.evaluate('see(y, x)', g)
# 我们可以使用方法 purge() 清除一个赋值中所有的绑定:
g.purge()
g
# 如果现在我们再尝试为公式 see(olive, y) 相对于 g 估值计算真值,就会得到 Undefined:
m.evaluate('see(olive, y)', g)
# 由于我们的模型已经包含了解释布尔运算的规则,因此任何复杂的公式都可以进行组合和评估:
m.evaluate('see(bertie, olive) & boy(bertie) & -walk(bertie)', g)
# ## 量化
#
# 现代逻辑的关键特征之一就是变量满足的概念可以用来解释量化的公式,例如:exists x. (girl(x) & walk(x)) 什么时候为真?我们要检查域中的个体中是否有属性是女孩并且走路的,也就是说 dom 中是否存在某个 u 使 g[u/x] 满足开放公式 girl(x) & walk(x):
m.evaluate('exists x. (girl(x) & walk(x))', g)
# 这里 evaluate() 返回了 True, 因为 dom 中的 o 满足开放公式。NLTK 中提供了一个有用的工具:satisfiers() 方法,它返回满足开放公式的所有个体集合,其参数是一个已分析的公式、一个变量和一个赋值。
fmla1 = read_expr('girl(x) | boy(x)')
print(m.satisfiers(fmla1, 'x', g))
fmla2 = read_expr('girl(x) -> walk(x)')
print(m.satisfiers(fmla2, 'x', g))
fmla3 = read_expr('walk(x) -> girl(x)')
print(m.satisfiers(fmla3, 'x', g))
# 以 fmla2 为例,girl(x) -> walk(x) 等价于 -girl(x) | walk(x),即要么不是女孩,要么在步行的个体都满足条件,因此 Bertie、Cyril 和 Olive 都满足条件。同时,由于域 dom 中的每一个成员都满足 fmla2,那么相应的全称量化公式也为真:
m.evaluate('all x. (girl(x) -> walk(x))', g)
# ## 量词范围歧义
#
# 对于句子 Everybody admires someone. 我们可以有两种不同的一阶逻辑表达式:
#
# a. all x. (person(x) -> exists y. (person(y) & admire(x, y)))
# b. exists y. (person(y) & all x. (person(x) -> admire(x, y)))
#
# 这两个的含义不同,公式 b 声称只有一个人被所有人钦佩,而公式 a 只要其对于每一个 x 我们可以找到 x 钦佩的一些人 y 即可,每次找到的人 y 可以不同。我们使用术语**量化范围**来区分它们,在公式 a 中 ∀ 比 ∃ 具有更广的量化范围,而公式 b 则相反。这两种一阶逻辑表达式都是合理的,因此我们称原句子关于量化范围有歧义。
#
# 为了更仔细地检查歧义,我们定义如下估值:
v2 = """
bruce => b
elspeth => e
julia => j
matthew => m
person => {b, e, j, m}
admire => {(j, b), (b, b), (m, e), (e, m)}
"""
val2 = nltk.Valuation.fromstring(v2)
# 其中的 admire 关系可以用下面的映射图进行可视化,其中 b 和 j 都钦佩 b,而 e 和 m 互相钦佩。
#
# 
#
# 在这样的估值下,前面提到的公式 b 为真而公式 a 为假。我们使用 Model 对象的 satisfiers() 方法进行进一步的探索:
dom2 = val2.domain
m2 = nltk.Model(dom2, val2)
g2 = nltk.Assignment(dom2)
fmla4 = read_expr('(person(x) -> exists y. (person(y) & admire(x, y)))')
m2.satisfiers(fmla4, 'x', g2)
# 这表明 fmla4 包含域中的每一个个体,相反,下面的公式 fmla5 则没有满足的 y 值。
fmla5 = read_expr('(person(y) & all x. (person(x) -> admire(x, y)))')
m2.satisfiers(fmla5, 'y', g2)
# 也就是说,没有大家都钦佩的人。看看另一个开放公式 fmla6,可以验证有一个人,即 Bruce,他被 Julia 和 Bruce 都钦佩。
fmla6 = read_expr('(person(y) & all x. ((x = bruce | x = julia) -> admire(x, y)))')
m2.satisfiers(fmla6, 'y', g2)
# ## 模型的建立
#
# 前面一致假设我们已经有了一个模型,并要检查模型中的一个句子的真值。相比之下,模型的建立是给定一些句子的集合,尝试创造一种新的模型。如果成功,那么我们知道集合是一致的,因为我们有模型的存在作为证据。
#
# 我们通过创建 [nltk.Mace](http://www.nltk.org/_modules/nltk/inference/mace.html#Mace) 的一个实例并调用它的 build_model() 方法来调用 Mace4 产生器。下面的例子显示了公式 a3 和 c1 一致,a3 和 c2 也一致,但是 c1 和 c2 不一致:
a3 = read_expr('exists x. (man(x) & walks(x))')
c1 = read_expr('mortal(socrates)')
c2 = read_expr('-mortal(socrates)')
mb = nltk.Mace(5)
print(mb.build_model(None, [a3, c1]))
print(mb.build_model(None, [a3, c2]))
print(mb.build_model(None, [c1, c2]))
# 我们也可以使用模型建立器作为定理证明器的辅助。假设我们试图证明 S ⊢ g,即 g 是假设 S = [s1, s2, ..., sn] 的逻辑派生。我们把同样的输入提供给 Mace4,Mace4 将尝试为假设 A 连同 g 的否定找到一个模型,即 S' =
# [s1, s2, ..., sn, -g]。如果 g 不能从 S 证明出来,那么 Mace4 就会返回一个反例,可以比 Prover9 更快地得出结论;相反,如果 g 可以从 S 证明出来,Mace4 可能要花很长时间不能成功地找到一个反例模型,最终放弃。
#
# 让我们思考一个具体的方案,我们的假设是 There is a woman that every man loves,Adam is a man 和 Eve is a woman,结论是 Adam loves Eve。在下面的代码中,我们使用 [nltk.MaceCommand](http://www.nltk.org/_modules/nltk/inference/mace.html#MaceCommand) 检查已建立的模型:
a4 = read_expr('exists y. (woman(y) & all x. (man(x) -> love(x, y)))')
a5 = read_expr('man(adam)')
a6 = read_expr('woman(eve)')
g = read_expr('love(adam, eve)')
mc = nltk.MaceCommand(g, assumptions=[a4, a5, a6])
mc.build_model()
# 让我们细看 Mace4 的模型,转换成估值的形式:
print(mc.valuation)
# 这个估值形式包含了一些单独的常量和谓词,每一个都有适当类型的值。其中 C1 是模型生成器作为存在量词的表示引入的,当模型生成器遇到 a4 里面的 exists y,它知道域中有某个个体 b 满足 a4 中的开放公式,然而它不知道 b 是否也是它的输入中某个地方的一个独立常量的标志,所以它为 b 凭空创造了一个新名字 C1。现在,由于我们的假设中没有关于独立常量 adam 和 eve 的信息,模型生成器认为没有任何理由将它们当作表示不同的实体,于是它们都被映射到 a。此外,我们并没有指定 man 和 woman 表示不相交的集合,因此模型生成器让它们相互重叠。现在我们添加一个新的假设,使 man 和 woman 不相交:
a7 = read_expr('all x. (man(x) -> -woman(x))')
g = read_expr('love(adam, eve)')
mc = nltk.MaceCommand(g, assumptions=[a4, a5, a6, a7])
mc.build_model()
# 此时,man 和 woman 就被映射到了不同的变量。
print(mc.valuation)
| 10.3-first-order-logic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Loops
# Say that you want to print the numbers one through four. Easy!
#
# ```
# Print 1
# Print 2
# Print 3
# Print 4
# ```
# Ok, but now you want to print one through four hundred... It might be easier if we could just tell the computer to reuse the print command
# ```
# Print x, but do it 400 times
# ```
# But what is x? And is it going to print the same thing 400 times? This needs more definition, and should be broken into smaller steps
# ```
# Let's set x to equal the number 1
#
# Let's print x
# Let's add one to the value of x
# Is x larger than 400?
# If so we should stop.
# If not, we should print x...
# ```
# But instead of printing here, let's go back to that previous print command!
# And that right there would be a "while" loop. Not the best solution, but you can better understand the process. We would need to move the steps around a little bit too.
# ```
# Let's set x to 1
# If x is less or equal to 400, carry on.
# Otherwise, stop.
# Print x
# Add one to x
# Go back and check the value of x to see if we can stop.
# ```
# It might look like this (in Python):
# + tags=[]
x = 1
while(x <= 400):
print(x)
x = x + 1
# -
# You may notice the pseudo code has an instruction to go back. This part happens "under the hood" when the end of the "block" is encountered. The end of the block is when the code is no longer indented.
#
# But with a loop that counts sequentially, we typically would want to use a "for" loop for this process.
# ```
# For a variable x, let's start at one
# Let's limit x to increment up to 400
# Let's only increment x by one.
# And after each increment, let's print the value of x!
# ```
# This solution might look like this (in Python):
# + tags=[]
for x in range(1,400,1):
print(x)
# -
# This needs some explanation though. The range function starts at the first digit (1), and continues until it reaches the second digit (400). The third digit is how much to increment (by 1).
#
# But since incrementing by one is the default, we can simplify by writing (in Python):
for x in range(1,400):
print(x)
# But then you might run this code and realize only 1 through 399 printed.
#
# *That is because I made a mistake.*
#
# With range, the first number is inclusive and the second is not. We need to type 400+1 or 401, so range stops at the right spot.
#
# __Coding can involve a lot of trial and error, especially at first :)__
#
# This solution will look like this (in Python):
for x in range(1,401):
print(x)
# Programming is often arduous and is always a neverending journey. The best we can do is to try helping one another on our way.
| 04_loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Floating point vs Finite Differences
#
# Copyright (C) 2020 <NAME>
#
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>
# + jupyter={"outputs_hidden": false}
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pt
# -
# Define a function and its derivative:
# + jupyter={"outputs_hidden": false}
# Angular frequency: 20 full oscillations over the unit interval [0, 1].
c = 20 * 2 * np.pi


def f(x):
    """Test function sin(c*x)."""
    return np.sin(c * x)


def df(x):
    """Exact analytic derivative of f, i.e. c*cos(c*x)."""
    return np.cos(c * x) * c
# Sample f on a uniform single-precision grid of n points over [0, 1).
n = 2000
x = np.linspace(0, 1, n, endpoint=False).astype(np.float32)
pt.plot(x, f(x))
# -
# Now compute the relative $l^\infty$ norm of the error in the finite differences, for a bunch of mesh sizes:
# + jupyter={"outputs_hidden": false}
h_values = []
err_values = []
for n_exp in range(5, 24):
n = 2**n_exp
h = (1/n)
x = np.linspace(0, 1, n, endpoint=False).astype(np.float32)
fx = f(x)
dfx = df(x)
dfx_num = (np.roll(fx, -1) - np.roll(fx, 1)) / (2*h)
err = np.max(np.abs((dfx - dfx_num))) / np.max(np.abs(fx))
print(h, err)
h_values.append(h)
err_values.append(err)
pt.rc("font", size=16)
pt.title(r"Single precision FD error on $\sin(20\cdot 2\pi)$")
pt.xlabel(r"$h$")
pt.ylabel(r"Rel. Error")
pt.loglog(h_values, err_values)
# + jupyter={"outputs_hidden": false}
| demos/intro/Floating point vs Finite Differences.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Classes
#
# Classes are the key features of object-oriented programming. A class is a structure for representing an object and the operations that can be performed on the object.
#
# In Python a class can contain `attributes` (variables) and `methods` (functions).
#
# A class is defined almost like a function, but using the `class` keyword, and the class definition usually contains a number of class method definitions (a function in a class).
#
# * Each class method should have an argument `self` as its first argument. This object is a self-reference.
#
# * Some class method names have special meaning, for example:
#
# * `__init__`: The name of the method that is invoked when the object is first created.
# * `__str__` : A method that is invoked when a simple string representation of the class is needed, as for example when printed.
# * There are many more, see https://docs.python.org/3/reference/datamodel.html#special-method-names
class Point:
    """
    Simple class for representing a point in a Cartesian coordinate system.
    """

    def __init__(self, x, y):
        """Create a new Point at (x, y)."""
        self.x = x
        self.y = y

    def translate(self, dx, dy):
        """Shift the point in place by dx along x and dy along y."""
        self.x = self.x + dx
        self.y = self.y + dy

    def __str__(self):
        """Human-readable form, used e.g. when the point is printed."""
        return f"Point at [{self.x:0.2f}, {self.y:0.2f}]"
# To create a new instance of a class. This will invoke the `__init__` method in the Point class.
p1 = Point(0, 0)
# If we print the object this will invoke the `__str__` method.
print(p1)
# To invoke a class method on the class instance `p1`:
# Let's call the translate method on the `p1` object.
p1.translate(0.25, 1.5)
print(p1)
# Create a new instance of the `Point` class.
p2 = Point(1, 1)
print(p2)
# Notice how one instance of the class does not affect another instance. This is one of the nice features of object-oriented design. Code such as functions and related variables are grouped in separate and independent entities that do not share memory.
| 15 - Classes.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: python_scripts//py:percent,notebooks//ipynb
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Introduction to scikit-learn
#
# ## Basic preprocessing and model fitting
#
# In this notebook, we present how to build predictive models on tabular
# datasets.
#
# In particular we will highlight:
# * the difference between numerical and categorical variables;
# * the importance of scaling numerical variables;
# * typical ways to deal with categorical variables;
# * how to train predictive models on different types of data;
# * evaluate the performance of a model via cross-validation.
#
# ## Introducing the dataset
#
# To this aim, we will use data from the 1994 Census bureau database. The goal
# with this data is to predict income level from heterogeneous data such as
# age, employment, education, family information, etc.
#
# Let's first load the data located in the `datasets` folder.
# %%
import pandas as pd
df = pd.read_csv(
"https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# Or use the local copy:
# df = pd.read_csv('../datasets/adult-census.csv')
# %% [markdown]
# Let's have a look at the first records of this data frame:
# %%
df.head()
# %% [markdown]
# The target variable in our study will be the "class" column while we will use
# the other columns as input variables for our model. This target column divides
# the samples (also known as records) into two groups: high income (>50K) vs low
# income (<=50K). The resulting prediction problem is therefore a binary
# classification problem.
#
# For simplicity, we will ignore the "fnlwgt" (final weight) column that was
# crafted by the creators of the dataset when sampling the dataset to be
# representative of the full census database.
# %%
target_name = "class"
target = df[target_name].to_numpy()
target
# %%
data = df.drop(columns=[target_name, "fnlwgt"])
data.head()
# %% [markdown]
# We can check the number of samples and the number of features available in
# the dataset:
# %%
print(
f"The dataset contains {data.shape[0]} samples and {data.shape[1]} "
"features")
# %% [markdown]
# ## Working with numerical data
#
# Numerical data is the most natural type of data used in machine learning
# and can (almost) directly be fed to predictive models. We can quickly have a
# look at such data by selecting the subset of numerical columns from the
# original data.
#
# We will use this subset of data to fit a linear classification model to
# predict the income class.
# %%
data.columns
# %%
data.dtypes
# %%
# "i" denotes integer type, "f" denotes float type
numerical_columns = [
c for c in data.columns if data[c].dtype.kind in ["i", "f"]]
numerical_columns
# %%
data_numeric = data[numerical_columns]
data_numeric.head()
# %% [markdown]
# When building a machine learning model, it is important to leave out a
# subset of the data which we can use later to evaluate the trained model.
# The data used to fit a model are called training data while the data used to
# assess a model are called testing data.
#
# Scikit-learn provides a helper function `train_test_split` which will
# split the dataset into a training and a testing set. It will ensure that
# the data are shuffled randomly before splitting the data.
# %%
from sklearn.model_selection import train_test_split
data_train, data_test, target_train, target_test = train_test_split(
data_numeric, target, random_state=42)
print(
f"The training dataset contains {data_train.shape[0]} samples and "
f"{data_train.shape[1]} features")
print(
f"The testing dataset contains {data_test.shape[0]} samples and "
f"{data_test.shape[1]} features")
# %% [markdown]
# We will build a linear classification model called "Logistic Regression". The
# `fit` method is called to train the model from the input (features) and
# target data. Only the training data should be given for this purpose.
#
# In addition, check the time required to train the model and the number of
# iterations done by the solver to find a solution.
# %%
from sklearn.linear_model import LogisticRegression
import time
model = LogisticRegression(solver='lbfgs')
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
print(f"The model {model.__class__.__name__} was trained in "
f"{elapsed_time:.3f} seconds for {model.n_iter_} iterations")
# %% [markdown]
# Let's ignore the convergence warning for now and instead let's try
# to use our model to make some predictions on the first three records
# of the held out test set:
# %%
target_predicted = model.predict(data_test)
target_predicted[:5]
# %%
target_test[:5]
# %%
predictions = data_test.copy()
predictions['predicted-class'] = target_predicted
predictions['expected-class'] = target_test
predictions['correct'] = target_predicted == target_test
predictions.head()
# %% [markdown]
# To quantitatively evaluate our model, we can use the method `score`. It will
# compute the classification accuracy when dealing with a classification
# problem.
# %%
print(f"The test accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f}")
# %% [markdown]
# This is mathematically equivalent to computing the fraction of times
# the model makes a correct prediction on the test set:
# %%
(target_test == target_predicted).mean()
# %% [markdown]
# ## Exercise 1
#
# - What would be the score of a model that always predicts `' >50K'`?
# - What would be the score of a model that always predicts `' <= 50K'`?
# - Is 81% or 82% accuracy a good score for this problem?
#
# Hint: You can use a [DummyClassifier](https://scikit-learn.org/stable/modules/model_evaluation.html#dummy-estimators) to compute the cross-validated performance of such baselines.
#
# Use the dedicated notebook to do this exercise.
# %% [markdown]
# Let's now consider the `ConvergenceWarning` message that was raised previously
# when calling the `fit` method to train our model. This warning informs us that
# our model stopped learning because it reached the maximum number of
# iterations allowed by the user. This could potentially be detrimental for the
# model accuracy. We can follow the (bad) advice given in the warning message
# and increase the maximum number of iterations allowed.
# %%
model = LogisticRegression(solver='lbfgs', max_iter=50000)
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
# %%
print(
f"The accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f} with a fitting time of "
f"{elapsed_time:.3f} seconds in {model.n_iter_} iterations")
# %% [markdown]
# We now observe a longer training time but no significant improvement in
# the predictive performance. Instead of increasing the number of iterations, we
# can try to help fit the model faster by scaling the data first. A range of
# preprocessing algorithms in scikit-learn allows us to transform the input data
# before training a model. We can easily combine these sequential operations
# with a scikit-learn `Pipeline`, which chain together operations and can be
# used like any other classifier or regressor. The helper function
# `make_pipeline` will create a `Pipeline` by giving as arguments the successive
# transformations to perform followed by the classifier or regressor model.
#
# In our case, we will standardize the data and then train a new logistic
# regression model on that new version of the dataset set.
# %%
data_train.describe()
# %%
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_train_scaled = scaler.fit_transform(data_train)
data_train_scaled
# %%
data_train_scaled = pd.DataFrame(data_train_scaled,
columns=data_train.columns)
data_train_scaled.describe()
# %%
from sklearn.pipeline import make_pipeline
model = make_pipeline(StandardScaler(),
LogisticRegression(solver='lbfgs'))
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
# %%
print(
f"The accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f} with a fitting time of "
f"{elapsed_time:.3f} seconds in {model[-1].n_iter_} iterations")
# %% [markdown]
# We can see that the training time and the number of iterations is much shorter
# while the predictive performance (accuracy) stays the same.
#
# In the previous example, we split the original data into a training set and a
# testing set. This strategy has several issues: in the setting where the amount
# of data is limited, the subset of data used to train or test will be small;
# and the splitting was done in a random manner and we have no information
# regarding the confidence of the results obtained.
#
# Instead, we can use cross-validation. Cross-validation consists of
# repeating this random splitting into training and testing sets and aggregating
# the model performance. By repeating the experiment, one can get an estimate of
# the variability of the model performance.
#
# The function `cross_val_score` allows for such experimental protocol by giving
# the model, the data and the target. Since there exists several
# cross-validation strategies, `cross_val_score` takes a parameter `cv` which
# defines the splitting strategy.
#
#
#
#
#
#
#
# %%
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, data_numeric, target, cv=5)
print(f"The different scores obtained are: \n{scores}")
# %%
print(f"The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# Note that by computing the standard-deviation of the cross-validation scores
# we can get an idea of the uncertainty of our estimation of the predictive
# performance of the model: in the above results, only the first 2 decimals seem
# to be trustworthy. Using a single train / test split would not allow us to
# know anything about the level of uncertainty of the accuracy of the model.
#
# Setting `cv=5` created 5 distinct splits to get 5 variations for the training
# and testing sets. Each training set is used to fit one model which is then
# scored on the matching test set. This strategy is called K-fold
# cross-validation where `K` corresponds to the number of splits.
#
# The following matplotlib code helps visualize how the dataset is partitioned
# into train and test samples at each iteration of the cross-validation
# procedure:
# %%
# %matplotlib inline
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
cmap_cv = plt.cm.coolwarm
def plot_cv_indices(cv, X, y, ax, lw=20):
    """Create a sample plot for indices of a cross-validation object.

    Draws one horizontal stripe per CV split on `ax`; within a stripe each
    sample is colored by whether it is in the training set (1) or the test
    set (0) for that split, using the module-level `cmap_cv` colormap.
    Returns `ax` for chaining.
    """
    splits = list(cv.split(X=X, y=y))
    n_splits = len(splits)
    # Generate the training/testing visualizations for each CV split
    for ii, (train, test) in enumerate(splits):
        # Fill in indices with the training/test groups (1 = train, 0 = test)
        indices = np.zeros(shape=X.shape[0], dtype=np.int32)
        indices[train] = 1
        # Visualize the results: one '_' marker per sample, colored by group
        ax.scatter(range(len(indices)), [ii + .5] * len(indices),
                   c=indices, marker='_', lw=lw, cmap=cmap_cv,
                   vmin=-.2, vmax=1.2)
    # Formatting: label each stripe with its split index; ylim is inverted so
    # split 0 appears at the top
    yticklabels = list(range(n_splits))
    ax.set(yticks=np.arange(n_splits + 2) + .5,
           yticklabels=yticklabels, xlabel='Sample index',
           ylabel="CV iteration", ylim=[n_splits + .2, -.2], xlim=[0, 100])
    ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
    return ax
# %%
# Some random data points
n_points = 100
X = np.random.randn(n_points, 10)
y = np.random.randn(n_points)
fig, ax = plt.subplots(figsize=(10, 6))
cv = KFold(5)
_ = plot_cv_indices(cv, X, y, ax)
# TODO: add summary here
| notebooks/02_basic_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import math
import time
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
# Our libraries
from train import train_model
from model_utils import *
from predict_utils import *
from vis_utils import *
from apmeter import *
from train_valid_split import *
# some initial setup
np.set_printoptions(precision=2)
use_gpu = torch.cuda.is_available()
np.random.seed(1234)
# -
use_gpu = True
cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if (cuda_available and use_gpu) else "cpu")
def plot_loss(trn_hist, val_hist, loss_acc):
    """Plot training and validation curves (loss or accuracy) against epoch."""
    for history, split in ((trn_hist, 'Training'), (val_hist, 'Validation')):
        plt.plot(history, label=split + ' ' + loss_acc)
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel(loss_acc)
    plt.show()
def freeze_bn(m):
    """Put a BatchNorm2d module into eval mode (freezes its running stats).

    Intended for use with `model.apply(freeze_bn)`; non-BN modules are
    left untouched.
    """
    if not isinstance(m, nn.BatchNorm2d):
        return
    m.eval()
# Root directory of the optical-flow image dataset.
DATA_DIR = "F:/MyArticel/DTASET/NEW DATASETTT/willow_orgin_20_from_test/im2flow"
sz = 224  # input image side length fed to the network
batch_size = 16
os.listdir(DATA_DIR)
# NOTE(review): DATA_DIR has no trailing separator, so these resolve to
# '...im2flowtrain' / '...im2flowvalid', not subdirectories of im2flow.
# Verify this is intended — the test path later uses f'{DATA_DIR}\\test'
# with an explicit separator, which is inconsistent.
trn_dir = f'{DATA_DIR}train'
val_dir = f'{DATA_DIR}valid'
os.listdir(trn_dir)
trn_fnames = glob.glob(f'{trn_dir}/*/*.png')
trn_fnames[:5]
# LOAD DATA
train_ds = datasets.ImageFolder(trn_dir)
train_ds.classes
train_ds.class_to_idx
train_ds.root
# +
# Data augmentation and normalization for training
train_transforms = transforms.Compose([
transforms.Resize((sz, sz)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.1, 0.1, 0.1, 0.01),
transforms.RandomRotation(20),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Just normalization for validation
valid_transforms = transforms.Compose([
transforms.Resize((sz, sz)),
# transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
train_ds = datasets.ImageFolder(f'{DATA_DIR}train', train_transforms)
#valid_ds = datasets.ImageFolder(f'{DATA_DIR}valid', valid_transforms)
train_ds, valid_ds = train_valid_split(train_ds, 10)
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True)
valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=batch_size, shuffle=True)
train_ds_sz = len(train_ds)
valid_ds_sz = len(valid_ds)
print('Train size: {}\nValid size: {} ({:.2f})'.format(train_ds_sz, valid_ds_sz, valid_ds_sz/(train_ds_sz + valid_ds_sz)))
class_names = train_ds.mother.classes
# -
# dataloader
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size,
shuffle=True, num_workers=4)
valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=batch_size,
shuffle=True, num_workers=4)
inputs,targets = next(iter(train_dl))
out = torchvision.utils.make_grid(inputs, padding=3)
plt.figure(figsize=(16, 12))
imshow(out, title='random image from training data')
# +
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# load pre-trained ResNet50
model = load_pretrained_resnet50(model_path=None, num_classes=7)
model.apply(freeze_bn)
# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.9)
# optimizer = optim.Adam(model.parameters(), lr=0.00002)
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
model = model.to(device)
criterion = criterion.to(device)
model
# -
# training
model, trn_loss_hist, val_loss_hist, trn_acc_hist, val_acc_hist = train_model(model, train_dl, valid_dl, criterion, optimizer, scheduler, num_epochs=20)
loss_acc = 'Loss'
plot_loss(trn_loss_hist, val_loss_hist, loss_acc)
loss_acc = 'Accuracy'
plot_loss(trn_acc_hist, val_acc_hist, loss_acc)
# +
# Unfreeze every layer for full fine-tuning at a lower learning rate.
# BUG FIX: the attribute is `requires_grad`; the original `require_grad`
# (missing the 's') silently created an unused attribute on each parameter
# and left the network frozen.
for param in model.parameters():
    param.requires_grad = True

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.9)

model = model.to(device)
criterion = criterion.to(device)
model, trn_loss_hist1, val_loss_hist1, trn_acc_hist1, val_acc_hist1 = train_model(model, train_dl, valid_dl, criterion, optimizer, scheduler, num_epochs=15)
trn_loss_hist.extend(trn_loss_hist1)
val_loss_hist.extend(val_loss_hist1)
trn_acc_hist.extend(trn_acc_hist1)
val_acc_hist.extend(val_acc_hist1)
loss_acc = 'Loss'
plot_loss(trn_loss_hist, val_loss_hist, loss_acc)
loss_acc = 'Accuracy'
plot_loss(trn_acc_hist, val_acc_hist, loss_acc)
# +
# acuracy on validation data
def evaluate_model(model, dataloader):
    """Print the top-1 classification accuracy of `model` over `dataloader`.

    NOTE(review): depends on the project helper `to_var` — presumably it
    wraps tensors as (CUDA) Variables; confirm against model_utils.
    """
    model.eval()  # for batch normalization layers
    corrects = 0
    for inputs, targets in dataloader:
        inputs, targets = to_var(inputs, True), to_var(targets, True)
        outputs = model(inputs)
        # Predicted class = argmax over the class dimension.
        _, preds = torch.max(outputs.data, 1)
        corrects += (preds == targets.data).sum()
    print('accuracy: {:.2f}'.format(100. * corrects / len(dataloader.dataset)))
# -
# mAP
m = torch.nn.Softmax(dim=1)
def calculate_model_mAP(model, dataloader):
    """Print the per-class average precision and overall mAP of `model`.

    Uses the module-level softmax `m` to turn logits into probabilities
    and the project's `APMeter` to accumulate average precision per class.
    NOTE(review): relies on the globals `m`, `train_ds`, `to_var`,
    `APMeter`, `Variable`, and requires CUDA (`torch.cuda.FloatTensor`).
    Removed an unused `corrects = 0` local left over from evaluate_model.
    """
    mAP = APMeter()
    model.eval()  # for batch normalization layers
    for inputs, targets in dataloader:
        inputs, targets = to_var(inputs, True), to_var(targets, True)
        outputs = model(inputs)
        outputs = m(outputs)  # probabilities over classes
        # Reshape targets into a column vector so scatter_ can one-hot it.
        targets.resize_(targets.size(0), 1)
        targets = Variable(targets)
        one_hot = torch.cuda.FloatTensor(targets.size(0), outputs.size(1)).zero_()
        one_hot.scatter_(1, targets.data, 1)
        one_hot = Variable(one_hot)
        mAP.add(outputs, one_hot)
    for i, value in enumerate(mAP.value()):
        print(train_ds.classes[i], ' AP: {:.2f}'.format(100. * value))
    print('mAP: {:.2f}'.format(100. * mAP.value().sum() / mAP.value().size(0)))
evaluate_model(model, valid_dl)
evaluate_model(model, train_dl)
visualize_model(model, train_dl)
visualize_model(model, valid_dl)
plot_errors(model, train_dl)
# confusion matrix
y_pred, y_true = predict_class(model, valid_dl)
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, train_ds.classes, normalize=True, figsize=(12,12 ))
# +
# Look at the sizes of the images
fnames = glob.glob(f'{trn_dir}/*/*.png')
sizes = [Image.open(f).size for f in fnames]
hs, ws = list(zip(*sizes))
plt.figure(figsize=(12., 4.))
plt.hist(hs)
plt.hist(ws);
# -
visualize_model(model, valid_dl, num_images=6)
plot_errors(model, valid_dl)
# +
### testing
class_names = train_ds.classes
test_dir = f'{DATA_DIR}\\test'
test_ds = datasets.ImageFolder(test_dir,valid_transforms)
len(test_ds)
# +
### dataloder
test_dl = torch.utils.data.DataLoader(test_ds,batch_size= batch_size, num_workers=4)
# +
pred_class_names, y = predict_class_names(model, test_dl, class_names)
test_fnames= glob.glob(f'{test_dir}/*/*.png')
len(test_fnames), test_fnames [:5]
# -
test_fnames_len= len(test_fnames)
for i in range(test_fnames_len):
test_fnames[i] = os.path.basename(test_fnames[i])
len(test_fnames), test_fnames[:5]
pred_result = np.stack([test_fnames, pred_class_names], axis=1)
len(pred_result), pred_result
evaluate_model(model, test_dl)
calculate_model_mAP(model, test_dl)
plot_errors(model, test_dl)
# confusion matrix
y_pred, y_true = predict_class(model, test_dl)
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, train_ds.classes, normalize=True, figsize=(12,12 ))
| PyTorch/willow_temporal_stream.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from notebookHelper import NotebookLoader
nl = NotebookLoader()
L2 = nl.load_module("L2")
#
# # The Vocabulary Tree
#
# Implementing k-medoids recursively allows us to arrange our vocabulary features into a tree with the leaves defining clusters of local space, aka a "word" in visual space.
#
# <div style="overflow:hidden; margin: 0; height:270px; width:100%">
# <img src="https://i.stack.imgur.com/XJAYK.png" />
# </div>
#
# The tree structure represents how we partitioned the space. Take any (32,1) BRIEF feature descriptor and traverse the tree from root to leaf. At each step we compare the hamming distance to the children and pick the "nearest" one. At the end of the tree traversal we have found our visual word (label) which best categorises our feature.
#
# 
import numpy as np #maths
visualFeatureVocabulary = None
visualFeatureVocabularyList = None
with open("data/ORBvoc.txt", "r") as fin:
extractedFeatures = list(map(lambda x: x.split(" ")[2:-2], fin.readlines()[1:]))
#dedup
dedupedFeatureStrings = set()
for extractedFeature in extractedFeatures:
strRep = ".".join(extractedFeature)
dedupedFeatureStrings.add(strRep)
finalFeatures = []
for dedupedFeatureStr in list(dedupedFeatureStrings):
finalFeatures.append([int(i) for i in dedupedFeatureStr.split(".")])
visualFeatureVocabulary = np.asarray(finalFeatures, dtype=np.uint8)
visualFeatureVocabularyList = list(finalFeatures)
print(visualFeatureVocabulary.shape)
# %%time
bestCentroids, bestCluster = L2.clusterOpt(visualFeatureVocabulary, 10, L2.hammingPoint, L2.hammingVector)
# +
print(len(bestCentroids))
import pickle
#data = (bestCentroids, bestCluster)
#with open('firstlevel1.pickle', 'wb') as f:
# pickle.dump(data, f)
with open('firstlevel0.pickle', 'rb') as f:
data = pickle.load(f)
print(data)
total = 0
totalSave = 1062676
for cluster in data[1]:
print(cluster.shape[0])
total = total + cluster.shape[0]
print(total)
#convert each cluster into data
#for clusters in data[1]:
bestCluster2_10_ids = data[1][9]
bestCluster2_10_data = np.take(visualFeatureVocabulary, bestCluster2_10_ids, 0)
print(bestCluster2_10_data)
bestCentroids2_10, bestCluster2_10 = L2.clusterOpt(bestCluster2_10_data, 10, L2.hammingPoint, L2.hammingVector)
# -
print(bestCentroids2_10)
# # Bag Of Words (BOW) [Yet another space!]
# If we take the collection of features from an input image, we can via the tree traversal compute "votes" for each word in our visual vocabulary. Then, summing the features via some metric, we derive our final BOW vector for this image. Think of this as a sort of barcode for the image, where we have a histogram of the existence of particular visual cues of the scene.
#
# <div style="overflow:hidden; margin: 0; height:200px; width:100%">
# <img style="margin-top:-270px" src="https://i.stack.imgur.com/XJAYK.png" />
# </div>
#
# This nice representation allows for easy semantic comparison to other images, with some concept of the score or quality of a match.
#imports
import numpy as np #maths
import cv2 #open computer vision
from matplotlib import pyplot as plt #plotting library
#Example get a video source
cap = cv2.VideoCapture(0)
#Example show an image
ret, input_image = cap.read()
plt.imshow(input_image[:,:,::-1]) #[:,:,::-1] converts BGR to RGB
cap.release()
# +
def getOrb():
    """Build an ORB detector configured for dense, low-threshold detection."""
    return cv2.ORB_create(
        edgeThreshold=15,
        patchSize=31,
        nlevels=6,
        fastThreshold=2,
        nfeatures=1000000,
        scoreType=cv2.ORB_FAST_SCORE,
        firstLevel=0,
    )
class CV:
    """Thin wrapper around an OpenCV feature detector plus plotting helpers."""

    def __init__(self, detector):
        self.detector = detector

    def plotImage(self, img):
        # [:, :, ::-1] converts OpenCV's BGR channel order to matplotlib's RGB.
        plt.imshow(img[:,:,::-1])

    def imgExtractKPDes(self, img):
        """Detect keypoints and compute descriptors for `img`.

        BUG FIX: the original body called the global `orb` on the global
        `input_image`, ignoring both the injected detector and the `img`
        argument, so every call returned features of the same image.
        """
        (kp, des) = self.detector.detectAndCompute(img, None)
        return (kp, des)

    def annotateImgWithKPDes(self, img, kpdes):
        # Draw the detected keypoints in green on a copy of the image.
        return cv2.drawKeypoints(img, kpdes[0], color=(0,255,0), outImage=None, flags=0)
# -
orb = getOrb()
cv = CV(orb)
cv.plotImage(input_image)
input_image_kpdes = cv.imgExtractKPDes(input_image)
print(input_image_kpdes)
cv.plotImage(cv.annotateImgWithKPDes(input_image, input_image_kpdes))
kp, des = input_image_kpdes
des
des.shape
kp_des_mean = np.mean(np.sqrt(des), axis=0).reshape(1, -1)
kp_des_mean.shape
#lets get vocab
keypointDescriptors = None
with open("data/ORBvoc.txt", "r") as fin:
kpListWithoutScoreOrIndex = list(map(lambda x: x.split(" ")[2:-2], fin.readlines()[1:]))
keypointDescriptors = np.asarray(kpListWithoutScoreOrIndex, dtype=np.uint8)
keypointDescriptors.shape
keypointDescriptors = np.sqrt(keypointDescriptors)
kp_des_mean=np.mean(keypointDescriptors, axis=0).reshape(1,-1)
kp_des_mean.shape
# this sort of clustering only works well for non binary string type data. e.g. sift/surf features as it uses eucledean distance when with orb we should use hamming
retval, bestLabels, centers = cv2.kmeans(data=np.asarray(keypointDescriptors, dtype=np.float32), bestLabels=None, K=1024,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
import sys
(cv2.__version__, sys.version_info)
centers[0].shape
centers[1].shape
len(centers)
bestLabels
## kmedoids can take any distance metric
# https://github.com/terkkila/scikit-learn/blob/kmedoids/sklearn/cluster/k_medoids_.py
import sklearn
from pyclustering.cluster.kmedoids import kmedoids
# +
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.utils import read_sample
sample = read_sample(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
# -
#
sample
| L3 - Vocabulary tree and BOW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import snap
dir()
# create a graph PNGraph
G1 = snap.TNGraph.New()
G1.AddNode(1)
G1.AddNode(5)
G1.AddNode(32)
G1.AddEdge(1, 5)
G1.AddEdge(5, 1)
G1.AddEdge(5, 32)
G1
# +
# create a directed random graph on 100 nodes and 1k edges
G2 = snap.GenRndGnm(snap.PNGraph, 100, 1000)
# traverse the nodes
for NI in G2.Nodes():
print("node id %d with out-degree %d and in-degree %d" % (
NI.GetId(), NI.GetOutDeg(), NI.GetInDeg()))
# traverse the edges
for EI in G2.Edges():
print("edge (%d, %d)" % (EI.GetSrcNId(), EI.GetDstNId()))
# traverse the edges by nodes
for NI in G2.Nodes():
for Id in NI.GetOutEdges():
print("edge (%d %d)" % (NI.GetId(), Id))
# -
# ```python
# GetId(): return node id
# GetOutDeg(): return out-degree of a node
# GetInDeg(): return in-degree of a node
# GetOutNId(e): return node id of the endpoint of e-th out-edge
# GetInNId(e): return node id of the endpoint of e-th in-edge
# IsOutNId(int NId): do we point to node id n
# IsInNId(n): does node id n point to us
# IsNbrNId(n): is node n our neighbor
# ```
# +
#save and load
# generate a network using Forest Fire model
G3 = snap.GenForestFire(1000, 0.35, 0.35)
# save and load binary
FOut = snap.TFOut("output/test.graph")
G3.Save(FOut)
FOut.Flush()
FIn = snap.TFIn("output/test.graph")
G4 = snap.TNGraph.Load(FIn)
# save and load from a text file
snap.SaveEdgeList(G4, "output/test.txt", "Save as tab-separated list of edges")
G5 = snap.LoadEdgeList(snap.PNGraph, "output/test.txt", 0, 1)
# +
#graph manipulation
# generate a network using Forest Fire model
G6 = snap.GenForestFire(1000, 0.35, 0.35)
# convert to undirected graph
G7 = snap.ConvertGraph(snap.PUNGraph, G6)
WccG = snap.GetMxWcc(G6)
# get a subgraph induced on nodes {0,1,2,3,4,5}
SubG = snap.GetSubGraph(G6, snap.TIntV.GetV(0, 1, 2, 3, 4))
# get 3-core of G
Core3 = snap.GetKCore(G6, 3)
# delete nodes of out degree 10 and in degree 5
snap.DelDegKNodes(G6, 10, 5)
# +
# Computing Structural Properties of Networks
# generate a Preferential Attachment graph on 1000 nodes and node out degree of 3
G8 = snap.GenPrefAttach(1000, 3)
# vector of pairs of integers (size, count)
CntV = snap.TIntPrV()
# get distribution of connected components (component size, count)
snap.GetWccSzCnt(G8, CntV)
# get degree distribution pairs (degree, count)
snap.GetOutDegCnt(G8, CntV)
# vector of floats
EigV = snap.TFltV()
# get first eigenvector of graph adjacency matrix
snap.GetEigVec(G8, EigV)
# get diameter of G8
snap.GetBfsFullDiam(G8, 100)
# count the number of triads in G8, get the clustering coefficient of G8
snap.GetTriads(G8)
snap.GetClustCf(G8)
# -
# # все типы в snap
#
# - PNGraph, a directed graph;
# - PUNGraph, an undirected graph;
# - PNEANet, a directed network;
# - PGraph, one of PNGraph, PUNGraph, or PNEANet;
# - TCnComV, a vector of connected components;
# - TFltPrV, a vector of float pairs;
# - TFltV, a vector of floats;
# - TGVizLayout, one of gvlDot, gvlNeato, gvlTwopi, gvlCirco, gvlSfdp;
# - TIntFltH, a hash table with integer keys and float values;
# - TIntFltKdV, a vector of (integer, float) values;
# - TIntH, a hash table with integer keys and values;
# - TIntPrFltH, a hash table with (integer, integer) pair keys and float values;
# - TIntPrV, a vector of (integer, integer) pairs;
# - TIntSet, a hash table with integer keys and no values;
# - TIntStrH, a hash table with integer keys and string values;
# - TIntTrV, a vector of (integer, integer, integer) triplets;
# - TIntV, a vector of integers;
# - TRnd, a random generator;
# - TStrHash, a hash table woth string keys and integer values;
# - TVec, a vector of vectors of floats.
# # Node iterators provide several useful methods
#
# - GetId(): returns node id
# - GetOutDeg(): returns out-degree of a node
# - GetInDeg(): returns in-degree of a node
# - GetOutNId(e): returns node id of the endpoint of e-th out-edge
# - GetInNId(e): returns node id of the endpoint of e-th in-edge
# - IsOutNId(n): tests if there is an out-edge to node n
# - IsInNId(n): tests if there is an in-edge from node n
# - IsNbrNId(n): tests if node n is a neighbor
# Exploratory cell: pick a random node iterator from G1 and inspect it.
# NOTE(review): G1 must already be defined earlier in the notebook.
import random

# snap's Nodes() returns a generator; random.choice needs a sequence,
# so materialize it with list() first.
print(random.choice(list(G1.Nodes())))
rand = random.choice(list(G1.Nodes()))
dir(rand)
rand.GetId()  # node id of the chosen node (original had a typo: `and.GetId()`)
rand.GetNI()
rand
dir(rand.GetNI())
list(G1.Nodes())[0]
# test random external method
# Build a 20-node empty undirected graph and draw random node ids two ways:
# via snap.TInt.GetRnd and via the graph's own GetRndNId().
Nr = 20
Graph_t = snap.PUNGraph.New()
for item in range(Nr):
    Graph_t.AddNode(item)
# random integer in [0, Nr) used as a node id
NId = snap.TInt.GetRnd(Nr)
randomNode = Graph_t.GetNI(NId)
randomNode
Graph_t.GetRndNId()
| snap-learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Using Libraries
# ## Import
# To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
# + tags=["raises-exception"]
math.sin(1.6)
# -
import math
math.sin(1.6)
# We call these libraries **modules**:
type(math)
# The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
dir(math)
# They include properties as well as functions:
math.pi
# You can always find out where on your storage medium a library has been imported from:
print(math.__file__[0:50])
print(math.__file__[50:])
# Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.
# So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*.
# **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/library/
# If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.
#
# **Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default on different architectures: https://docs.anaconda.com/anaconda/packages/pkg-docs/ (modules installed by default are shown with ticks)
#
# We'll see later how to add more libraries to our setup.
# ### Why bother?
# Why bother with modules? Why not just have everything available all the time?
#
# The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this.
# ## Importing from modules
# Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.
# Things can be imported *from* modules to become part of the current module:
# +
import math
math.sin(math.pi)
# +
from math import sin
sin(math.pi)
# -
# Importing one-by-one like this is a nice compromise between typing and risk of name clashes.
# It *is* possible to import **everything** from a module, but you risk name clashes.
# +
from math import *
sin(pi)
# -
# ### Import and rename
# You can rename things as you import them to avoid clashes or for typing convenience
# +
import math as m
m.cos(0)
# +
pi = 3
from math import pi as realpi
print(sin(pi), sin(realpi))
| module02_intermediate_python/02_02_using_libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import random
import time
from sklearn.model_selection import RepeatedKFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from scipy import linalg
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the Cora citation graph: an edge list and per-node class labels.
# NOTE(review): absolute local path — will only run on the author's machine.
Edges = pd.read_csv("/Users/qizhe/Documents/GitHub/GraphNN/Data/Cora/cora Edges.csv",header=None)
Edges = np.array(Edges)
Labels = pd.read_csv("/Users/qizhe/Documents/GitHub/GraphNN/Data/Cora/cora node_labels.csv",header=None)
y = np.array(Labels)
Edges.max(),np.shape(Edges),np.shape(y)
# Build a dense 0/1 adjacency matrix. Node ids are 1-based in the CSV,
# hence the -1. Only the (src, dst) entry is set per edge; the matrix is
# not symmetrized here — presumably handled downstream, TODO confirm.
# NOTE(review): 2708 nodes / 5429 edges are hard-coded; prefer deriving
# them from the loaded arrays.
A = np.zeros((2708,2708))
for i in range (0,5429):
    A[Edges[i,0]-1,Edges[i,1]-1]=1
n=2708
K=int(y.max())  # number of classes (labels assumed 1..K — verify)
# %run Encoder.ipynb
EncoderLDA(A)  # defined by the %run of Encoder.ipynb above
| Jupyter/Cora.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Improving Linear Regression with Neural Networks (Logistic Regression)
#
# This function shows how to use TensorFlow to solve logistic regression with a multiple layer neural network
#
# $$
# \textbf{y} = sigmoid(\textbf{A}_{3} \times sigmoid(\textbf{A}_{2} \times sigmoid(\textbf{A}_{1} \times \textbf{x} + \textbf{b}_{1}) + \textbf{b}_{2}) + \textbf{b}_{3})
# $$
#
# We will use the low birth weight data, specifically:
# ```
# y = 0 or 1 = low birth weight
# x = demographic and medical history data
# ```
# +
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
import os.path
import csv
from tensorflow.python.framework import ops
# reset computational graph
ops.reset_default_graph()
# -
# ## Obtain and prepare data for modeling
# +
# Download (once) and load the low-birth-weight dataset, then split it
# into an 80/20 train/test partition.
# name of data file
birth_weight_file = 'birth_weight.csv'
# download data and create data file if file does not exist in current directory
if not os.path.exists(birth_weight_file):
    birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
    birth_file = requests.get(birthdata_url)
    birth_data = birth_file.text.split('\r\n')
    birth_header = birth_data[0].split('\t')
    # keep non-empty rows, parse tab-separated floats
    birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1]
    with open(birth_weight_file, "w") as f:
        writer = csv.writer(f)
        writer.writerows(birth_data)
        f.close()  # redundant: the `with` block already closes f
# read birth weight data into memory
birth_data = []
with open(birth_weight_file, newline='') as csvfile:
    csv_reader = csv.reader(csvfile)
    birth_header = next(csv_reader)  # skip the header row
    for row in csv_reader:
        birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]
# Pull out target variable
y_vals = np.array([x[0] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[1:8] for x in birth_data])
# set for reproducible results
seed = 99
np.random.seed(seed)
tf.set_random_seed(seed)  # TF1-style API
# Declare batch size
batch_size = 90
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m-col_min) / (col_max - col_min)
# nan_to_num replaces the NaNs produced by constant columns (max == min).
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
# NOTE(review): train and test are scaled with their *own* column min/max,
# so the two sets are not on a common scale — confirm this is intended.
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
# -
# ## Define Tensorflow computational graph
# +
# Build the TF1-style computation graph: a 7 -> 14 -> 5 -> 1 sigmoid MLP.
# Create graph
sess = tf.Session()
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 7], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variable definition
def init_variable(shape):
    """Return a TF variable initialized with standard-normal draws."""
    return(tf.Variable(tf.random_normal(shape=shape)))
# Create a logistic layer definition
def logistic(input_layer, multiplication_weight, bias_weight, activation = True):
    """Fully connected layer: sigmoid(input @ W + b); raw logits when
    activation=False (the loss applies the final sigmoid itself)."""
    linear_layer = tf.add(tf.matmul(input_layer, multiplication_weight), bias_weight)
    # We separate the activation at the end because the loss function will
    # implement the last sigmoid necessary
    if activation:
        return(tf.nn.sigmoid(linear_layer))
    else:
        return(linear_layer)
# First logistic layer (7 inputs to 14 hidden nodes)
A1 = init_variable(shape=[7,14])
b1 = init_variable(shape=[14])
logistic_layer1 = logistic(x_data, A1, b1)
# Second logistic layer (14 hidden inputs to 5 hidden nodes)
A2 = init_variable(shape=[14,5])
b2 = init_variable(shape=[5])
logistic_layer2 = logistic(logistic_layer1, A2, b2)
# Final output layer (5 hidden nodes to 1 output)
A3 = init_variable(shape=[5,1])
b3 = init_variable(shape=[1])
final_output = logistic(logistic_layer2, A3, b3, activation=False)
# Declare loss function (Cross Entropy loss) — takes logits, not probabilities
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_output, labels=y_target))
# Declare optimizer
my_opt = tf.train.AdamOptimizer(learning_rate = 0.002)
train_step = my_opt.minimize(loss)
# -
# ## Train model
# +
# Train for 1500 mini-batch steps, tracking loss and train/test accuracy.
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Actual Prediction
prediction = tf.round(tf.nn.sigmoid(final_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Training loop
loss_vec = []
train_acc = []
test_acc = []
for i in range(1500):
    # sample a mini-batch with replacement
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])  # column vector shape (batch, 1)
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
    train_acc.append(temp_acc_train)
    temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
    test_acc.append(temp_acc_test)
    if (i+1)%150==0:
        print('Loss = ' + str(temp_loss))
# -
# ## Display model performance
# +
# %matplotlib inline
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.show()
# Plot train and test accuracy
plt.plot(train_acc, 'k-', label='Train Set Accuracy')
plt.plot(test_acc, 'r--', label='Test Set Accuracy')
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
| 06_Neural_Networks/07_Improving_Linear_Regression/07_improving_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# Django form rendering demos: auto_id, label_suffix, initial values,
# bound vs. unbound forms, and help_text.
from django import forms
class CommentForm(forms.Form):
    name = forms.CharField(label='Your name')
    url = forms.URLField(label='Your website', required=False)
    comment = forms.CharField()
# auto_id=True: widgets get an id equal to the field name
f = CommentForm(auto_id=True)
print(f)
# auto_id=False: no id attributes / <label for=...> are rendered
f = CommentForm(auto_id=False)
print(f)
class ContactForm(forms.Form):
    age = forms.IntegerField()
    nationality = forms.CharField()
    captha_answer = forms.IntegerField(label= '2 + 2', label_suffix=' =')
# the form-level suffix applies to fields without their own label_suffix
f = ContactForm(label_suffix='?')
print(f.as_p())
from django import forms
class CommentForm(forms.Form):
    name = forms.CharField(initial='Your name')
    url = forms.URLField(initial='http://')
    comment = forms.CharField()
# unbound form: `initial` values are rendered as the widget values
f = CommentForm(auto_id=False)
print(f)
class CommentForm(forms.Form):
    name = forms.CharField()
    url = forms.URLField()
    comment = forms.CharField()
default_data = {'name': 'Your name', 'url': 'http://'}
# bound form: the data dict provides the rendered values instead
f = CommentForm(default_data, auto_id=False)
print(f)
class CommentForm(forms.Form):
    name = forms.CharField(initial='Your name')
    url = forms.URLField(initial='http://')
    comment = forms.CharField()
data = {'name': '', 'url': '', 'comment': 'foo'}
f = CommentForm(data)
f.is_valid()
# +
# Forms do not fall back to `initial` values when the submitted data is
# empty (translated from the original Korean comment).
# -
f.errors
import datetime
class DateForm(forms.Form):
    # a callable initial is evaluated at render time
    day = forms.DateField(initial=datetime.date.today)
print(DateForm())
from django import forms
class HelpTextContactForm(forms.Form):
    subject = forms.CharField(max_length=100, help_text='100 characters max.')
    message = forms.CharField()
    sender = forms.EmailField(help_text='A valid email address, please')
    cc_myself = forms.BooleanField(required=False)
f = HelpTextContactForm(auto_id=False)
print(f.as_table())
print(f.as_ul())
print(f.as_p())
from django import forms
generic = forms.CharField()
# CharField is required by default, so this raises ValidationError
generic.clean('')
class Developer(forms.Form):
    # custom per-field error message for the `required` check
    name = forms.CharField(error_messages={'required': 'Please enter your name'})
# A Form is bound to a *dict* of submitted data, not keyword arguments —
# the original `Developer(name='')` raised TypeError.
d = Developer({'name': ''})
d.is_valid()
from django.forms import ComboField, CharField, EmailField
# ComboField validates a value against each field in turn; the fields must
# be *instances* (the original passed the EmailField class itself).
f = ComboField(fields=[CharField(max_length=20), EmailField()])
# Validate a value through the combined field (the original reassigned `f`
# to a string and then called the `forms` module, which is not callable).
f.clean('<EMAIL>')
# +
from django.core.validators import RegexValidator
from django.forms import CharField, MultiValueField


class PhoneField(MultiValueField):
    """Multi-part phone field: country calling code, number, and an
    optional extension (fixed the original `MultivalueField`/`Charfield`
    NameErrors and restored the missing phone-number sub-field)."""

    def __init__(self, **kwargs):
        # Define one message for all fields.
        error_messages = {
            'incomplete': 'Enter a country calling code and a phone number.',
        }
        # Or define a different message for each field.
        fields = (
            CharField(
                error_messages = {'incomplete': 'Enter a country calling code.'},
                validators = [
                    RegexValidator(r'^[0-9]+$', 'Enter a valid country calling code.'),
                ],
            ),
            CharField(
                error_messages = {'incomplete': 'Enter a phone number.'},
                validators = [RegexValidator(r'^[0-9]+$', 'Enter a valid phone number.')],
            ),
            CharField(
                validators = [RegexValidator(r'^[0-9]+$', 'Enter a valid extension.')],
                required = False,
            ),
        )
        super().__init__(
            error_messages=error_messages, fields=fields,
            require_all_fields=False, **kwargs
        )
# -
| formfields/Formfields.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numerical Integration
# _By <NAME>_
#
# ### **Objective: Implementaion of various numerical integration schemes**
# Key libraries: Numpy(for mathematical procedures) and matplotlib(to create plots)
import numpy as np
import matplotlib.pyplot as plt
import copy
from scipy.integrate import quad
# ## Midpoint Rule, Trapezoidal Rule, Simpson's Rule
# +
def mid_pt_int(func, a, b):
    """Approximate the integral of ``func`` over [a, b] by the midpoint rule.

    Args:
        func: callable, integrand.
        a: float, lower limit of integration.
        b: float, upper limit of integration.

    Returns:
        float, midpoint-rule estimate (b - a) * f((a + b) / 2).
    """
    midpoint = (b + a) / 2
    return (b - a) * func(midpoint)
def trapezoidal_int(func, a, b):
    """Approximate the integral of ``func`` over [a, b] by the trapezoidal rule.

    Args:
        func: callable, integrand.
        a: float, lower limit of integration.
        b: float, upper limit of integration.

    Returns:
        float, trapezoidal estimate (b - a) / 2 * (f(a) + f(b)).
    """
    width = b - a
    return width / 2 * (func(b) + func(a))
def simpson_int(func, a, b):
    """Approximate the integral of ``func`` over [a, b] by Simpson's rule.

    Exact for polynomials up to cubic.

    Args:
        func: callable, integrand.
        a: float, lower limit of integration.
        b: float, upper limit of integration.

    Returns:
        float, Simpson estimate (b - a)/6 * (f(a) + 4 f((a+b)/2) + f(b)).
    """
    fb = func(b)
    fm = func((b + a) / 2)
    fa = func(a)
    return (b - a) / 6 * (fb + 4 * fm + fa)
# -
# # #Example 1
# +
# Example 1: compare midpoint, trapezoidal, and Simpson estimates of
# ∫ (8 + 4 cos x) dx over [0, π/2] against scipy.integrate.quad.
def func2_2(x): # Function 2
    """Integrand f(x) = 8 + 4*cos(x)."""
    return 8+4*np.cos(x)
# Using scipy.integrate -> quad to find analytical limit integral
a_2_2 = 0 # Lower Limit of Function 1
b_2_2 = np.pi/2 # Upper Limit of Function 1
I2_a_2 = quad(func2_2, 0, np.pi/2)  # (value, abserr) tuple
print('Analytical Integral of function: %0.6f'%I2_a_2[0])
# Midpoint Rule
I2_b_2 = mid_pt_int(func2_2, a_2_2, b_2_2)
print('\nMidpoint Rule Integral of function: %0.6f'%I2_b_2)
err_b_2 = abs(I2_a_2[0]-I2_b_2)
print('Absol Err of Function: %0.6f'%err_b_2)
# Trapezoidal Rule
I2_c_2 = trapezoidal_int(func2_2, a_2_2, b_2_2)
print('\nTrapezoidal Rule Integral of function: %0.6f'%I2_c_2)
err_c_2 = abs(I2_a_2[0]-I2_c_2)
print('Absol Err of Function: %0.6f'%err_c_2)
# Simpson's Rule
I2_d_2 = simpson_int(func2_2, a_2_2, b_2_2)
print('\nSimpson\'s Rule Integral of function: %0.6f'%I2_d_2)
err_d_2 = abs(I2_a_2[0]-I2_d_2)
print('Absol Err of Function: %0.6f'%err_d_2)
# -
# ## Forward Euler Method
# Forward Euler Method (Explicit Method)
def forward_euler(t0, tf, h, y0, func):
    """Solve a scalar IVP with the explicit (forward) Euler method.

    Recurrence: y(t_{k+1}) = y(t_k) + h * f(y_k, t_k).

    Args:
        t0: float, initial time.
        tf: float, final time.
        h: float, step size.
        y0: float, initial condition y(t0).
        func: callable f(y, t) giving y'.

    Returns:
        dict with 'y' (ndarray of solution values) and 't' (ndarray of
        matching time steps).
    """
    n_steps = int((tf - t0) / h) + 1
    times = np.linspace(t0, tf, n_steps)
    ys = np.zeros(n_steps, dtype=np.float64)
    ys[0] = y0
    for k in range(1, n_steps):
        ys[k] = ys[k - 1] + h * func(ys[k - 1], times[k - 1])
    return {'y': ys, 't': times}
# ## Backward Euler Method
# +
# Backward Euler Method (Implicit Method)
def backward_euler(t0, tf, h, y0, func, dfunc_yk1):
    """Solve a scalar IVP with the implicit (backward) Euler method.

    Each step solves y_{k+1} = y_k + h * f(y_{k+1}, t_{k+1}) for y_{k+1}
    using the sibling `newton` solver defined below in this cell.

    Args:
        t0: float, initial time.
        tf: float, final time.
        h: float, step size.
        y0: float, initial condition.
        func: callable f(y, t) giving y'.
        dfunc_yk1: derivative of the step residual w.r.t. y_{k+1},
            passed through to Newton's method.

    Returns:
        dict with 'y' (solution values) and 't' (time steps), ndarrays.
    """
    def backward_euler_root_f(y,yprev,t,h):
        """Step residual g(y) = y - (y_prev + h * f(y, t)); its root is
        the next solution value y_{k+1}."""
        return y - (yprev + h*func(y,t))
    # Calculate len(y), time steps, zero array for y
    N = int((tf-t0)/h)+1
    tlen = np.linspace(t0,tf,N)
    yval = np.zeros((N),dtype = np.float64)
    # Setup count, use IC
    count = 1
    yval[0] = y0
    while N > count:
        yval[count] = yval[count-1] # Use yk as the inital guess of yk+1
        # solve the implicit equation at t_{k+1} (tolerance 1e-12, max 50 iters)
        roots,iterat = newton(yval[count], tlen[count], h, 1e-12, 50, backward_euler_root_f, dfunc_yk1) # pass yk+1 = yk (Initial Guess), tk+1
        yval[count] = roots[-1]
        count = count + 1
    soln = {}
    soln['y'] = yval
    soln['t'] = tlen
    return soln
# Newton's Method for Backward Euler's Method
def newton(po, ti, h, tol, Nmax, func, dfunc):
    """Newton's method for the backward-Euler step residual.

    Iterates p_{k+1} = p_k - func(p_k, po, ti, h) / dfunc(p_k, ti, h)
    until successive iterates differ by at most ``tol`` or ``Nmax``
    iterations have been performed.

    Args:
        po: float, initial guess (y_k).
        ti: float, time t_{k+1}.
        h: float, step size.
        tol: float, absolute tolerance on successive iterates.
        Nmax: int, maximum number of iterations; must be positive.
        func: residual g(y, y_prev, t, h) whose root is sought.
        dfunc: derivative of the residual w.r.t. y.

    Returns:
        (roots, count): ndarray of all iterates (last entry is the root)
        and the number of iterations taken, or (None, None) if Nmax < 1.
    """
    if Nmax <= 0:
        return None, None
    step = lambda v: v - func(v, po, ti, h) / dfunc(v, ti, h)
    pk = np.zeros(Nmax + 1)
    pk[0] = po
    pk[1] = step(pk[0])  # always take at least one iteration
    count = 1
    while count < Nmax and np.abs(pk[count] - pk[count - 1]) > tol:
        pk[count + 1] = step(pk[count])
        count += 1
    return pk[:count + 1], count
# -
# # #Example 2
# +
# Example 2 setup: step sizes to compare, integration window [0, 1],
# and initial condition y(0) = 0.
# Double Precision
h = np.array([0.1, 0.01, 0.001, 0.00001],dtype = np.float64)
t0 = 0
tf = 1
y0 = 0
def p1f(y, t):
    """Right-hand side of the Example 2 IVP.

    Args:
        y: float, current solution value.
        t: float, current time.

    Returns:
        float, y'(t) = (sin(y) - exp(t)) / cos(t).
    """
    return (np.sin(y) - np.exp(t)) / np.cos(t)


def d_p1f_yk1(y, t, h):
    """Derivative of the backward-Euler residual w.r.t. y_{k+1}.

    d/dy [ y - (y_prev + h*f(y, t)) ] = 1 - h*cos(y)/cos(t), used by
    Newton's method when solving the implicit step.

    Args:
        y: float, y_{k+1}.
        t: float, t_{k+1}.
        h: float, step size.

    Returns:
        float, residual derivative.
    """
    return 1 - h * np.cos(y) / np.cos(t)
# +
# Solve the Example 2 IVP with forward Euler at every step size in h,
# then plot consecutive step-size pairs for visual comparison.
# Forward Euler Method
sol1_a = []
for i in range(len(h)):
    ya = forward_euler(t0,tf,h[i],y0,p1f)
    sol1_a.append(ya)
#Plot
for i in range(len(h)-1):
    plt.figure(i)
    plt.title('IVP-Forward Euler\'s Method using different h')
    plt.plot(sol1_a[i]['t'][:],sol1_a[i]['y'][:],marker='.',label='h='+str(h[i]))
    plt.plot(sol1_a[i+1]['t'][:],sol1_a[i+1]['y'][:],marker='.',label='h='+str(h[i+1]))
    plt.ylabel('y')
    plt.xlabel('t')
    plt.grid()
    plt.legend()
    i = i+1  # no effect: `for` rebinds i on the next iteration
# final figure: coarsest step vs. finest step
plt.figure(i)
plt.title('IVP-Forward Euler\'s Method using different h')
plt.plot(sol1_a[0]['t'][:],sol1_a[0]['y'][:],marker='.',label='h='+str(h[0]))
plt.plot(sol1_a[i]['t'][:],sol1_a[i]['y'][:],marker='.',label='h='+str(h[i]))
plt.ylabel('y')
plt.xlabel('t')
plt.grid()
plt.legend()
# -
# Print the gap between each run's final value and the finest-step run's.
print('h, y(tf){hmin}-y(tf){hi}')
for i in range(len(h)):
    print(h[i], round(sol1_a[i]['y'][-1]-sol1_a[-1]['y'][-1],6))
# The plots above, together with the error in the final value estimated with each step size relative to the smallest step, show the effect of decreasing h. We cannot strictly comment on the accuracy of the solution since no exact solution exists; however, comparing the estimates obtained with the various step sizes, we can conclude that as the step size decreases the values approach the solution found using the minimum step size (h = 1e-5). The same holds for convergence and stability: the final estimates from the different step sizes converge to a value close to the one found using h = 1e-5, so we can at least say that the scheme is stable.
# +
# Same experiment with backward (implicit) Euler; scatter plots this time.
# Backward Euler
sol1_b = []
for i in range(len(h)):
    yb = backward_euler(t0,tf, h[i], y0, p1f, d_p1f_yk1)
    sol1_b.append(yb)
#Plot
for i in range(len(h)-1):
    plt.figure(i)
    plt.title('HW 5, P1 (b): IVP-Backward Euler\'s Method using different h')
    plt.scatter(sol1_b[i]['t'][:],sol1_b[i]['y'][:],marker='.',label='h='+str(h[i]))
    plt.scatter(sol1_b[i+1]['t'][:],sol1_b[i+1]['y'][:],marker='.',label='h='+str(h[i+1]))
    plt.ylabel('y')
    plt.xlabel('t')
    plt.grid()
    plt.legend()
    i = i+1  # no effect: `for` rebinds i on the next iteration
# final figure: coarsest step vs. finest step
plt.figure(i)
plt.title('HW 5, P1 (b): IVP-Backward Euler\'s Method using different h')
plt.scatter(sol1_b[0]['t'][:],sol1_b[0]['y'][:],marker='.',label='h='+str(h[0]))
plt.scatter(sol1_b[i]['t'][:],sol1_b[i]['y'][:],marker='.',label='h='+str(h[i]))
plt.ylabel('y')
plt.xlabel('t')
plt.grid()
plt.legend()
# -
# ## Runge-Kutta $4^{th}$ order Method
# Fourth-Order Runge-Kutta Method for n-order -> n equations
def rk4(h, t0, tf, y0, func):
    """Classic fourth-order Runge-Kutta integrator for a system of ODEs.

    Update: y_{k+1} = y_k + h*(k1 + 2*k2 + 2*k3 + k4)/6.

    Args:
        h: float, step size.
        t0: float, start time.
        tf: float, final time.
        y0: 1-D array-like, initial state (its length sets the system size).
        func: callable f(y, t) returning the state derivative array.

    Returns:
        dict with 'y' (n_steps x n_states ndarray) and 't' (time steps).
    """
    n_steps = int((tf - t0) / h) + 1
    times = np.linspace(t0, tf, n_steps)
    states = np.zeros((n_steps, len(y0)), dtype=np.float64)
    states[0, :] = y0
    for step in range(1, n_steps):
        y_prev = states[step - 1]
        t_prev = times[step - 1]
        # the four RK4 slope estimates
        k1 = func(y_prev, t_prev)
        k2 = func(y_prev + 0.5 * h * k1, t_prev + 0.5 * h)
        k3 = func(y_prev + 0.5 * h * k2, t_prev + 0.5 * h)
        k4 = func(y_prev + h * k3, t_prev + h)
        states[step] = y_prev + h * (k1 + 2 * k2 + 2 * k3 + k4) / 6
    return {'y': states, 't': times}
# # Example 3
# EOMs: <br>
# 1) $m\ddot{x} = -D|v|\dot{x}$<br>
# 2) $m\ddot{y} = -D|v|\dot{y} - mg + F_n$<br>
#
# To numerically integrate, we can rewrite the equation as system of first-order DE:<br>
# $u_1 = x$<br>
# $u_2 = y$ <br>
# $u_3 = \dot{x}$<br>
# $u_4 = \dot{y}$ <br>
#
# $\dot{u_1} = \dot{x} = u_3$<br>
# $\dot{u_2} = \dot{y} = u_4$<br>
# $\dot{u_3} = \frac{-D|v|u_3}{m}$ <br>
# $\dot{u_4} = \frac{-D|v|u_4 - mg + F_n}{m}$ <br>
#
# where,<br>
# $v = \sqrt{u_3^2+u_4^2}$<br>
# $F_n = k(R-u_2(t))$, u2 < R<br>
# 0, u2 $\ge$ R
# +
# Example 3: projectile with quadratic drag and a spring-like ground
# contact force, integrated with RK4 for several drag coefficients D.
# Given values
k = 1000
R = 0.2
g = 9.81
m = 0.3
Ddum = [0, 0.01, 0.025]
def func1_xy(y,t):
    """State-space RHS for the Example 3 EOMs.

    State y = [x, y, x_dot, y_dot]. Reads the module-level constants
    k, R, g, m and the drag coefficient D (reassigned in the loop below
    before each rk4 call).

    Args:
        y: ndarray, current state.
        t: float, current time (unused by these autonomous EOMs).

    Returns:
        ndarray, state derivative.
    """
    dydt = np.zeros((len(y)))
    dydt[0] = y[2]
    dydt[1] = y[3]
    v = np.sqrt(y[2]**2+y[3]**2)  # speed |v|
    # ground contact: spring force only while the height is below R
    if y[1] < R:
        Fn = k*(R-y[1])
    else:
        Fn = 0
    dydt[2] = (-D*v*y[2])/m
    dydt[3] = (-D*v*y[3]-m*g + Fn)/m
    return dydt
# +
t0 = 0
tf = 8
h = 1e-4
ic_xy = np.array([0,2,10,5], dtype=np.float64) # x0 = 0; h = y0 = 2m; vx0 = 10 m/s; vy0 = 5 m/s
sol1_xy = []
for i in range(len(Ddum)):
    D = Ddum[i]  # global read inside func1_xy
    solrk4 = rk4(h, t0, tf, ic_xy,func1_xy)
    sol1_xy.append(solrk4)
plt.figure(1)
plt.title('Plot of Trajectory for D = [0, 0.01, 0.025]')
for i in range(len(Ddum)):
    plt.plot(sol1_xy[i]['y'][:,0],sol1_xy[i]['y'][:,1],marker='.',label='D='+str(Ddum[i]))
plt.ylabel('y (m)')
plt.xlabel('x (m)')
plt.grid()
plt.legend()
# -
# ## Composite Trapezoidal Rule
# Newton Interpolating Polynomial Function:
def newton_interp(xi, fi, x_interp):
    """Newton divided-difference interpolation.

    Args:
        xi: (n,) array-like, x-coordinates of the data points.
        fi: (n,) array-like, y-coordinates of the data points.
        x_interp: (m,) array-like, x-coordinates at which to evaluate
            the interpolating polynomial.

    Returns:
        f_interp: (m,) ndarray, interpolated values at ``x_interp``.
        coeff: (n,) ndarray, Newton-form coefficients
            f[x0], f[x0,x1], ..., f[x0,...,x_{n-1}].
    """
    n = len(xi)
    coeff = np.zeros(n)
    coeff[0] = fi[0]
    # Work on a float copy so integer-valued `fi` does not silently
    # truncate the divided differences (the original shallow-copied `fi`
    # and could write floats into an int array).
    table = np.array(fi, dtype=np.float64)
    # Step 1: divided-difference coefficients; independent of x_interp.
    for i in range(1, n):
        for j in range(n - i):
            table[j] = (table[j + 1] - table[j]) / (xi[j + i] - xi[j])
        coeff[i] = table[0]

    def _eval_newton_poly(xval):
        """Evaluate the Newton-form polynomial at a single point."""
        acc = coeff[0]  # renamed from `sum`, which shadowed the builtin
        for i in range(1, n):
            term = coeff[i] * (xval - xi[i - 1])
            for j in range(1, i):
                term = term * (xval - xi[i - j - 1])
            acc = acc + term
        return acc

    # Step 2: evaluate at every requested point.
    f_interp = np.zeros(len(x_interp))
    for i in range(len(x_interp)):
        f_interp[i] = _eval_newton_poly(x_interp[i])
    return f_interp, coeff
def compo_trap_rule(xdata, fdata):
    """Composite trapezoidal rule over n given sample points.

    Handles non-uniform spacing: each sub-interval contributes
    (x_{i+1} - x_i)/2 * (f_{i+1} + f_i).

    Args:
        xdata: (n,) array-like of floats, x samples.
        fdata: (n,) array-like of floats, y samples.

    Returns:
        float, composite trapezoidal estimate of the integral.
    """
    n = len(xdata)
    return sum(
        (xdata[i + 1] - xdata[i]) / 2 * (fdata[i + 1] + fdata[i])
        for i in range(n - 1)
    )
# # #Example 4
# +
# Example 4: interpolate the sample data with a Newton polynomial, then
# integrate the interpolant with the composite trapezoidal rule.
# Sample data
xdat = np.array([0,1,2,3,4,5])
fdat = np.array([1.5,2.5,3,2.5,1.5,1])
x_cont_dat = np.linspace(min(xdat),max(xdat),1000) # To create near-continous data set
f_cont_dat, coeff = newton_interp(xdat, fdat, x_cont_dat) # Interpolated result
n_trap = 6
xdat_b = np.linspace(min(xdat),max(xdat),n_trap) # 6 data points for 5 equal-length intervals
fdat_b,_ = newton_interp(xdat, fdat, xdat_b) # 101 f(x) using the Newton interpolating polynomial
ctr_integral = compo_trap_rule(xdat_b, fdat_b)
# Plot
plt.figure(1)
plt.title('Newton Interpolation Result')
plt.scatter(xdat,fdat,color = 'black',label='Given data')
plt.plot(x_cont_dat,f_cont_dat,label='Newton Interpolation Polynomial')
plt.ylabel('F (N)')
plt.xlabel('x (m)')
plt.grid()
plt.legend()
print('Composite Trapezoidal Rule with',n_trap-1,'equal-length intervals gives an Integral value: ',ctr_integral,'Nm')
# -
# ## 2-point Gaussian Quadrature Rule
# $G$(f) = $w_1f(x_1)$ + $w_2f(x_2)$ <br>
# We know for [-1,1] -> $w_1$ = $w_2$ = 1, $x_1$ = -$\sqrt{1/3}$, $x_2$ = $\sqrt{1/3}$ <br>
# We first map [a, b] -> [-1, 1] to use the above setup<br>
#
# x = $\frac{(b-a)}{2}t$ + $\frac{b+a}{2}$
#
# Jacobian = $\frac{dx}{dt}$
def compute_2pt_gauss_quad(func, b, a):
    """Two-point Gauss-Legendre quadrature of ``func`` over [a, b].

    Exact for polynomials up to degree 3. Note the (func, b, a) argument
    order, kept from the notebook's convention.

    Args:
        func: callable, integrand.
        b: float, upper limit of integration.
        a: float, lower limit of integration.

    Returns:
        float, quadrature estimate of the integral.
    """
    def to_interval(t):
        """Affine map from the reference interval [-1, 1] to [a, b]."""
        return (b - a) * 0.5 * t + (b + a) * 0.5

    # Gauss-Legendre nodes on [-1, 1]; both weights equal 1.
    node = np.sqrt(1 / 3)
    # Jacobian dx/dt of the affine map.
    jacobian = (b - a) / 2
    return (func(to_interval(-node)) + func(to_interval(node))) * jacobian
# ## 3-point Gaussian Quadrature Rule
# For three-point Gauss Quadrature: <br>
# $G_3$(f) = $w_1f(x_1)$ + $w_2f(x_2)$ + $w_3f(x_3)$ <br>
# We know for [-1,1] -> $w_1$ = $w_3$ = 5/9, $w_2$ = 8/9, $x_1$ = -$\sqrt{3/5}$, $x_2$ = 0, $x_3$ = $\sqrt{3/5}$ <br>
# We first map [a, b] -> [-1, 1] to use the above setup<br>
#
# x = $\frac{(b-a)}{2}t$ + $\frac{b+a}{2}$
#
# Jacobian = $\frac{dx}{dt}$
def compute_3pt_gauss_quad(func, b, a):
    """Three-point Gauss-Legendre quadrature of ``func`` over [a, b].

    Exact for polynomials up to degree 5. Note the (func, b, a) argument
    order, kept from the notebook's convention.

    Args:
        func: callable, integrand.
        b: float, upper limit of integration.
        a: float, lower limit of integration.

    Returns:
        float, quadrature estimate of the integral.
    """
    def to_interval(t):
        """Affine map from the reference interval [-1, 1] to [a, b]."""
        return (b - a) * 0.5 * t + (b + a) * 0.5

    # Gauss-Legendre weights and nodes on [-1, 1].
    weights = (5 / 9, 8 / 9, 5 / 9)
    nodes = (-np.sqrt(3 / 5), 0, np.sqrt(3 / 5))
    # Jacobian dx/dt of the affine map.
    jacobian = (b - a) / 2
    total = 0.0
    for w, t in zip(weights, nodes):
        total += w * func(to_interval(t))
    return total * jacobian
# # #Example 5
def func3(x):
    """Newton-form interpolating polynomial of the Example 4 data.

    Expands the polynomial explicitly from the module-level ``coeff``
    (divided-difference coefficients) and ``xdat`` (interpolation nodes)
    computed by ``newton_interp`` in Example 4.

    Args:
        x: float, evaluation point.

    Returns:
        float, polynomial value y(x).
    """
    y = coeff[0] + coeff[1]*(x-xdat[0]) + coeff[2]*(x-xdat[0])*(x-xdat[1]) + coeff[3]*(x-xdat[0])*(x-xdat[1])*(x-xdat[2])+ coeff[4]*(x-xdat[0])*(x-xdat[1])*(x-xdat[2])*(x-xdat[3]) + coeff[5]*(x-xdat[0])*(x-xdat[1])*(x-xdat[2])*(x-xdat[3])*(x-xdat[4])
    return y
# +
# Example 5: compare 2- and 3-point Gauss quadrature of the degree-5
# interpolant func3 against scipy.integrate.quad over [0, 5].
# Setup
a = 0
b = 5
# Scipy.quad Integral of function
Integral_quad = quad(func3, a, b)
Integral_2pt_gq = compute_2pt_gauss_quad(func3,b,a)
Integral_3pt_gq = compute_3pt_gauss_quad(func3,b,a)
print('Scipy.quad integral value of the interploated function of data defined in Example 4:',Integral_quad[0])
print('2 point Gaussian Quadrature Rule of the interploated function of data defined in Example 4:',Integral_2pt_gq)
print('3 point Gaussian Quadrature Rule of the interploated function of data defined in Example 4:',Integral_3pt_gq)
# -
# There is a significant difference between the integral calculated using the 2-point Gaussian quadrature rule and the scipy.quad integral of the polynomial function. The 2-point Gaussian quadrature rule does not exactly integrate the polynomial (it is only exact up to degree 3), but it provides a good first approximation.<br>
#
# The integral calculated using the 3-point Gaussian quadrature rule and the scipy.quad integral of the polynomial function yield comparable results. Thus the 3-point Gaussian quadrature rule gives a very good approximation of the definite integral.
| Numerical Integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### The code in this notebook is borrowed from "TensorFlow Core" at: https://www.tensorflow.org/tutorials/text/nmt_with_attention
# ### There may be some local change for study purposes.
# +
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
from bs4 import BeautifulSoup
import requests
import unicodedata
import re
import numpy as np
import os
import io
import time
# -
# Download the file
# Download and extract the English-Spanish sentence-pair corpus via Keras'
# cached downloader; get_file returns the local path to the archive.
path_to_zip = tf.keras.utils.get_file(
    fname = 'spa-eng.zip',
    origin = 'http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
    extract = True)
# The extracted text file sits next to the archive in the Keras cache dir.
filepath = os.path.dirname(path_to_zip) + '/spa-eng/spa.txt'
# +
def unicode_to_ascii(s):
    """Strip combining accent marks from `s` (e.g. 'café' -> 'cafe')."""
    decomposed = unicodedata.normalize('NFKD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
def preprocess_sentence(w):
    """Normalize a raw sentence for NMT training.

    Lowercases and accent-strips the text, pads punctuation with spaces,
    inserts spaces between CJK characters, trims surrounding whitespace,
    and wraps the result in <start> ... <end> tokens.

    Args:
        w: raw input sentence (str).

    Returns:
        str: cleaned sentence, e.g. '<start> he is a boy . <end>'.
    """
    w = unicode_to_ascii(w.lower().strip())
    # creating a space between a word and the punctuation following it
    # eg: "he is a boy." => "he is a boy ."
    # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
    w = re.sub(r'([?.!,¿])', r' \1 ', w)
    w = re.sub(r'[""]', " ", w)
    # add space between chinese characters without affecting english letters
    w = re.sub(r'(?<=[^a-z\W\d_])(?=[^a-z\W\d_])', ' ', w)
    # replace everything with space except (a-z, A-Z, ".", "?", "!", ",")
    # w = re.sub(r'[^a-zA-Z?.,!¿]', ' ', w)
    # BUG FIX: the original `w.rstrip().strip()` discarded its result (strings
    # are immutable), so surrounding whitespace was never removed. Assign the
    # stripped string back before adding the start/end tokens.
    w = w.strip()
    # add a start and an end token to the sentence
    # so that the model know when to start and stop
    w = '<start> ' + w + ' <end>'
    return w
# -
# #### Use this function when we have a dataset; use the function below for now
# ```python
# # remove the accent & clean sentences & return word pairs [eng, spn]
# def create_dataset(path, num_exmaples):
# lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
# word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_exmaples]]
# return zip(*word_pairs)
# ```
def create_dataset(url):
    """Scrape lyric blocks from `url` and return them as preprocessed lines.

    Every page element with class "ltf" becomes one list of
    `preprocess_sentence`-cleaned lines. Callers unpack the result, so the
    page is expected to contain exactly as many "ltf" blocks as the caller
    has targets (two for the source/target language pair).
    """
    page_html = requests.get(url).text
    soup = BeautifulSoup(page_html)
    word_pairs = []
    for element in soup.find_all(class_="ltf"):
        cleaned_lines = [preprocess_sentence(line) for line in element.get_text().split('\n')]
        word_pairs.append(cleaned_lines)
    # return zip(*word_pairs)
    return word_pairs
url = "https://lyricstranslate.com/en/nǐ-zěnme-shuō-你怎么说-nǐ-zěnme-shuō.html"
# NOTE(review): create_dataset returns a plain list (the zip(*...) is
# commented out), so this 2-way unpack only works if the page has exactly
# two "ltf" blocks (original + translation) — confirm for other URLs.
aa, bb = create_dataset(url)
print(aa[-1])
print(bb[-1])
def max_length(tensor):
    """Return the length of the longest sequence in `tensor`."""
    lengths = [len(seq) for seq in tensor]
    return max(lengths)
def tokenize(lang):
    """Fit a Keras tokenizer on `lang` and return (padded sequences, tokenizer)."""
    tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
    # get word to index dictionary for sequences
    tokenizer.fit_on_texts(lang)
    # convert sentences to integer id sequences, then pad to equal length
    sequences = tokenizer.texts_to_sequences(lang)
    padded = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post')
    return padded, tokenizer
def load_dataset(path, num_examples=None):
    """Scrape and tokenize the parallel corpus at `path` (a URL here).

    Note: `num_examples` is accepted for API compatibility with the original
    tutorial but is not used by the scraping create_dataset.

    Returns:
        (input tensor, target tensor, input tokenizer, target tokenizer).
    """
    source_lang, target_lang = create_dataset(path)
    source_tensor, source_tokenizer = tokenize(source_lang)
    target_tensor, target_tokenizer = tokenize(target_lang)
    return source_tensor, target_tensor, source_tokenizer, target_tokenizer
# Build padded id tensors and tokenizers for both languages.
input_tensor, target_tensor, input_token, target_token = load_dataset(url)
# Longest sequence lengths (post-padding these equal the tensor widths).
max_length_input = max_length(input_tensor)
max_length_target = max_length(target_tensor)
# +
# 80/20 train/validation split, keeping input/target rows aligned.
input_tensor_train, input_tensor_valid, target_tensor_train, target_tensor_valid = train_test_split(input_tensor, target_tensor, test_size=0.2)
print(len(input_tensor_train), len(input_tensor_valid), len(target_tensor_train), len(target_tensor_valid))
# -
def convert(token, tensor):
    """Print each non-padding id in `tensor` with its word from `token`."""
    for idx in tensor:
        if idx == 0:
            continue  # 0 is the padding id
        print(f"{idx} -----> {token.index_word[idx]}")
# Show the id -> word mapping for one training example of each language.
print('Input index ------> input language')
print('==================================')
convert(input_token, input_tensor_train[0])
print()
convert(target_token, target_tensor_train[0])
# +
# Training hyperparameters.
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 4
# NOTE(review): steps_per_epoch is set to the example count, not
# BUFFER_SIZE // BATCH_SIZE as in the upstream tutorial — confirm intended.
steps_per_epoch = BUFFER_SIZE
embedding_dim = 256
units = 1024
# +1 reserves id 0 for padding in both vocabularies.
vocab_input_size = len(input_token.word_index) + 1
vocab_target_size = len(target_token.word_index) + 1
# Shuffled, batched tf.data pipeline; drop_remainder keeps batch shapes fixed.
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
# -
# Peek at one batch to confirm shapes.
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
| attention-01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
# # Pyber Challenge
# ### 4.3 Loading and Reading CSV files
# +
# Add Matplotlib inline magic command
# %matplotlib inline
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import os
# File to Load (Remember to change these)
city_data_to_load = "Resources/city_data.csv"
ride_data_to_load = "Resources/ride_data.csv"
# Read the City and Ride Data
city_data_df = pd.read_csv(city_data_to_load)
ride_data_df = pd.read_csv(ride_data_to_load)
#city_data_df.head(10)
# -
# ### Merge the DataFrames
# +
# Combine the data into a single dataset.
# Left-join keeps every ride row and attaches its city's type/driver_count.
# NOTE: on=["city", "city"] repeats the key; it behaves the same as on="city".
pyber_data_df = pd.merge(ride_data_df, city_data_df, how="left", on=["city", "city"])
# Display the data table for preview
pyber_data_df.head()
# -
# ## Deliverable 1: Get a Summary DataFrame
# 1 Get the total rides for each city type
total_rides_by_type = pyber_data_df.groupby(["type"]).count()["ride_id"]
total_rides_by_type
# 2 Get the total drivers for each city type
# Summed from city_data_df (one row per city) to avoid double counting the
# per-city driver_count that repeats on every ride row of the merged frame.
total_drivers_by_type = city_data_df.groupby(["type"]).sum()["driver_count"]
total_drivers_by_type
# 3 Get the total amount of fares for each city type
total_fare_by_type = pyber_data_df.groupby(["type"]).sum()["fare"]
total_fare_by_type
# 4 Get the average fare per ride for each city type.
avg_fare_per_ride = total_fare_by_type / total_rides_by_type
avg_fare_per_ride
# 5 Get the average fare per driver for each city type.
avg_fare_per_driver = total_fare_by_type / total_drivers_by_type
avg_fare_per_driver
# 6 Create a PyBer summary DataFrame.
# The five Series above share the city-type index, so they align by row.
summary = {
    "Total Rides": total_rides_by_type,
    "Total Drivers": total_drivers_by_type,
    "Total Fares": total_fare_by_type,
    "Average Fare per Ride": avg_fare_per_ride,
    "Average Fare per Driver": avg_fare_per_driver
}
pyber_ride_summary_df = pd.DataFrame(summary)
pyber_ride_summary_df
# ## PyBer summary DataFrame
# 7. Cleaning up the DataFrame. Delete the index name
pyber_ride_summary_df.index.name = ""
pyber_ride_summary_df
# 8. Format the columns.
# Note: mapping with format strings converts the numeric columns to strings,
# so this is display-only — do no further arithmetic on these columns.
pyber_ride_summary_df["Total Rides"] = pyber_ride_summary_df["Total Rides"].map("{:,}".format)
pyber_ride_summary_df["Total Drivers"] = pyber_ride_summary_df["Total Drivers"].map("{:,}".format)
pyber_ride_summary_df["Total Fares"] = pyber_ride_summary_df["Total Fares"].map("${:,.2f}".format)
pyber_ride_summary_df["Average Fare per Ride"] = pyber_ride_summary_df["Average Fare per Ride"].map("${:,.2f}".format)
pyber_ride_summary_df["Average Fare per Driver"] = pyber_ride_summary_df["Average Fare per Driver"].map("${:,.2f}".format)
# ## PyBer summary DataFrame Format
pyber_ride_summary_df
# ## Deliverable 2. Create a multiple line plot that shows the total weekly of the fares for each type of city.
# 1. Read the merged DataFrame and Rename columns
pyber_data_df = pyber_data_df.rename(columns = {
"city": "City",
"date": "Date",
"fare": "Fare",
"ride_id": "Ride id",
"driver_count": "No. Drivers",
"type": "City Type"
})
pyber_data_df
# 6. Set the index to the "Date" column
pyber_data_df.set_index(pyber_data_df["Date"], inplace=True)
pyber_data_df
column_names = ["Date", "City Type", "Fare"]
pyber_cities_fare = pyber_data_df[column_names].copy()
pyber_cities_fare
pyber_cities_fare.drop(["Date"], axis=1, inplace=True)
pyber_cities_fare.index = pd.to_datetime(pyber_data_df.index)
pyber_cities_fare.head()
# 2. Using groupby() to create a new DataFrame showing the sum of the fares
# for each date where the indices are the city type and date.
sum_fare_by_type = pyber_cities_fare.groupby(["City Type", "Date"]).sum()["Fare"]
sum_fare_by_type
# Convert using the pd.DataFrame method
sum_fare_by_type = pd.DataFrame(sum_fare_by_type)
sum_fare_by_type
# 3. Reset the index on the DataFrame you created in #1. This is needed to use the 'pivot()' function.
# df = df.reset_index()
# Reset index so we can pivot method in pandas
sum_fare_by_type = sum_fare_by_type.reset_index()
sum_fare_by_type
#4. Create a pivot table with the 'date' as the index, the columns ='type', and values='fare'
# to get the total fares for each type of city by the date.
# Create a pivot table table with the date as the index and columns as the type with fare in each row
sum_fare_by_type_pivot = sum_fare_by_type.pivot(index="Date", columns="City Type")["Fare"]
sum_fare_by_type_pivot.head(10)
sum_fare_by_type_pivot.info()
# ## total fare for the date and time
# 5. Create a new DataFrame from the pivot table DataFrame using loc on the given dates, '2019-01-01':'2019-04-29'.
# Create a line chart taht shows fares from Jan 1, 2019 to April 29, 2019
fare_Jan_April = sum_fare_by_type_pivot.loc['2019-01-01': '2019-04-28']
fare_Jan_April.head(10)
# 7. Check that the datatype for the index is datetime using df.info()
pyber_cities_fare.info()
# ## resample DataFrame by week
# 8. Create a new DataFrame using the "resample()" function by week 'W' and get the sum of the fares for each week.
# Create a new df and use resample by week and get sum of the fares for each week
weekly_fares_df = fare_Jan_April.resample("W").sum()
weekly_fares_df.head(10)
# ## Summary - Total Fare by City Type
# 8. Using the object-oriented interface method, plot the resample DataFrame using the df.plot() function.
# Import the style from Matplotlib.
# Use the graph style fivethirtyeight.
# Use axes plotting to add labels and titles
from matplotlib import style
import matplotlib.pyplot as plt
style.use("fivethirtyeight")
ax = weekly_fares_df.plot(figsize=(20,6))
# Add a title
ax.set_title("Total Fare by City Type")
# Add a x-axis and y-axis label
ax.set_xlabel("Month")
ax.set_ylabel("Fare ($USD)")
plt.savefig("Resources/Challenge_fare_summary.png")
plt.show()
| PyBer_Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # %%capture
# # %env DATABASE_USERNAME=thor
# # %env DATABASE_PASSWORD=
# # %env DATABASE_NAME=legal
# # %env DATABASE_HOST=docker.for.mac.localhost
# # %env DATABASE_PORT=5432
# # %env SECRET_KEY=ops
# -
# %%capture
# %env DATABASE_USERNAME=postgres
# %env DATABASE_PASSWORD=
# %env DATABASE_NAME=lear
# %env DATABASE_HOST=docker.for.mac.localhost
# %env DATABASE_PORT=54321
# %env SECRET_KEY=ops
import datetime
import os
import flask
import sqlalchemy
import psycopg2
import simplejson
import pandas as pd
import matplotlib
import legal_api
from IPython.core.display import HTML
# %load_ext sql
# %config SqlMagic.displaylimit = 5
# %config Application.log_level="ERROR"
import flask  # NOTE(review): flask was already imported above; redundant but harmless
# Build a minimal Flask app just to give legal_api's SQLAlchemy layer an
# application context for interactive use in this notebook.
APP_CONFIG = legal_api.config.get_named_config(os.getenv('DEPLOYMENT_ENV', 'production'))
FLASK_APP = flask.Flask(__name__)
FLASK_APP.config.from_object(APP_CONFIG)
legal_api.db.init_app(FLASK_APP)
# Push the context so db.session works at the notebook's top level.
FLASK_APP.app_context().push()
def stop_on_false(test: bool, test_name: str):
    """Abort the notebook run (e.g. under papermill) when `test` is falsy.

    Raises:
        AssertionError: with `test_name` as the message, if `test` is falsy.
    """
    # this will bail out of the execution if called by papermill
    # Raise explicitly instead of `assert test, test_name`: assert statements
    # are stripped when Python runs with -O, which would silently disable
    # this guard.
    if not test:
        raise AssertionError(test_name)
def stop_on_true(test: bool, test_name: str):
    """Abort the notebook run (e.g. under papermill) when `test` is truthy.

    Raises:
        AssertionError: with `test_name` as the message, if `test` is truthy.
    """
    # this will bail out of the execution if called by papermill
    # Raise explicitly instead of `assert not test, test_name`: assert
    # statements are stripped under python -O, which would disable the guard.
    if test:
        raise AssertionError(test_name)
from legal_api.models import Filing, db
from sqlalchemy import or_
def get_latest_correction_filing(business_id: str) -> Filing:
    """Return the most recent correction-related Filing for a business.

    A filing qualifies if its _filing_type is 'correction' OR its JSON
    payload contains a filing->correction section. A subquery finds the
    latest _filing_date among qualifying filings; the outer query then
    returns the newest Filing row at that date (None if there are none).
    """
    filing_type='correction'
    # JSON path expression into the stored filing document: filing.correction
    expr = Filing._filing_json[('filing', filing_type)]
    # Subquery: max filing date over this business's correction filings.
    max_filing = db.session.query(db.func.max(Filing._filing_date).label('last_filing_date')).\
        filter(Filing.business_id == business_id).\
        filter(or_(Filing._filing_type == filing_type,
                   expr.label('legal_filing_type').isnot(None))).\
        subquery()
    # Join back on that date; order by id desc to break same-date ties.
    filings = Filing.query.join(max_filing, Filing._filing_date == max_filing.c.last_filing_date). \
        filter(Filing.business_id == business_id). \
        order_by(Filing.id.desc())
    return filings.first()
| support/notebooks/business-ops/bcr-business-setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from __future__ import division, print_function, absolute_import
import json
from pprint import pprint
import pickle
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
import math
import csv
import numpy as np
# setting the paths
# JSON-lines file with one labelled example per line.
filepath = '/home/pujun/Desktop/StanfordClasses/lstm for natural language understanding/hi.json'
# Fraction of examples used for training; the rest is held out for testing.
percentage_split = .7
# Dimensionality of the GloVe vectors to load (glove.6B.50d).
GLOVE_SIZE = 50
import os
# NOTE(review): if GLV_HOME is unset this is None and the os.path.join
# below will raise — confirm the environment variable is exported.
path_to_glove = os.environ.get("GLV_HOME")
def glove2dict(src_filename):
    """GloVe Reader.

    Parameters
    ----------
    src_filename : str
        Full path to the GloVe file to be processed.

    Returns
    -------
    dict
        Mapping words to their GloVe vectors (1-d numpy float arrays).
    """
    # BUG FIX: the file handle was opened inline and never closed; a context
    # manager releases it deterministically.
    with open(src_filename) as src:
        reader = csv.reader(src, delimiter=' ', quoting=csv.QUOTE_NONE)
        return {line[0]: np.array(list(map(float, line[1:]))) for line in reader}
# Load the 50-dimensional GloVe vectors into a word -> ndarray dict.
glove = glove2dict(os.path.join(path_to_glove,
                                'glove.6B.%dd.txt' % GLOVE_SIZE))
# Filled with every token seen while embedding the dataset (side effect of
# parse_data_using_glove below).
vocab = set()
def parse_data_using_glove(json_data, num_examples_to_read=10000,
                           num_words_in_longest_sentence=82, glove_size=50):
    """Convert labelled examples into padded GloVe embedding matrices.

    Args:
        json_data: iterable of dicts with an 'example' sentence (str) and a
            'label' value.
        num_examples_to_read: maximum number of examples to embed.
        num_words_in_longest_sentence: fixed sequence length; shorter
            sentences are zero-padded (previously hard-coded to 82).
        glove_size: dimensionality of the GloVe vectors (previously
            hard-coded to 50).

    Returns:
        (X, Y): X has shape (num_examples_to_read, seq_len, glove_size);
        Y is an ndarray of the labels actually read. If json_data is shorter
        than num_examples_to_read, trailing rows of X keep their random
        initialization (matching the original behavior).

    Side effects:
        Adds every token seen to the module-level `vocab` set.
    """
    Y = []
    X = np.random.rand(num_examples_to_read, num_words_in_longest_sentence, glove_size)
    for i, d in enumerate(json_data):
        if i >= num_examples_to_read:
            break
        # Start from zeros so padding positions need no second fill loop.
        current_attribute_list = np.zeros((num_words_in_longest_sentence, glove_size))
        tokenized_and_lowercase = word_tokenize(d['example'].lower())
        # NOTE(review): a sentence longer than num_words_in_longest_sentence
        # still raises IndexError, as in the original — confirm inputs fit.
        for j, w in enumerate(tokenized_and_lowercase):
            vec = glove.get(w)
            # BUG FIX: out-of-vocabulary words used to yield np.array(None),
            # which crashes on assignment; fall back to a zero vector.
            if vec is not None:
                current_attribute_list[j, :] = np.asarray(vec, dtype=float)
            vocab.add(w)
        X[i, :, :] = current_attribute_list
        Y.append(d['label'])
    return (X, np.array(Y))
# Read the JSON-lines dataset: one {'example': ..., 'label': ...} per line.
data = []
with open(filepath) as f:
    for line in f:
        data.append(json.loads(line))
# Embed sentences into fixed-size GloVe matrices; also populates `vocab`.
X, Y = parse_data_using_glove(data)
word_count = len(vocab)
print("Vocab size:", word_count)
print(X.shape)
print (np.array(X)[1][0].shape)
# With padded matrices every len(eg) equals the fixed sequence length.
data_length_list = [len(eg) for eg in X]
num_words_in_longest_sentence = max(data_length_list)
print("Length of the biggest sentence:", num_words_in_longest_sentence)
# +
# Sequential train/test split at percentage_split (no shuffling).
num_training_examples = int(math.ceil(len(X) * percentage_split))
print("number of training examples:", num_training_examples)
npX = np.array(X)
npY = np.array(Y)
trainX = npX[:num_training_examples]
trainY = npY[:num_training_examples]
testX = X[num_training_examples:]
testY = Y[num_training_examples:]
print(trainX.shape)
print(trainY.shape)
# +
# Data preprocessing
# Sequence padding
# trainX = pad_sequences(trainX, maxlen=num_words_in_longest_sentence, value=0.)
# testX = pad_sequences(testX, maxlen=num_words_in_longest_sentence, value=0.)
# Converting labels to binary vectors (one-hot over 2 classes).
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)
word_count = len(vocab)
print("num_words_in_longest_sentence:", num_words_in_longest_sentence)
print("GLOVE_SIZE:", GLOVE_SIZE)
print("word_count", word_count)
print("dimension of X", len(X), "where each element is", X[0].size)
# -
# Network building
# Stacked 2-layer LSTM over (seq_len=82, glove_dim=50) inputs with dropout,
# ending in a 2-way softmax for binary classification.
net = tflearn.input_data(shape=[None, 82, 50],name='input')
net = tflearn.lstm(net, 82, return_seq=True)
net = tflearn.dropout(net,0.5)
net = tflearn.lstm(net, 82)
net = tflearn.dropout(net,0.5)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy')
# Training
# NOTE(review): validation_set is the TRAINING data here, so the reported
# validation metric does not measure generalization — presumably testX/testY
# was intended; confirm.
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(trainX, trainY), show_metric=True,
          batch_size=128)
| lstm/lstm_using_entire_SNLI_data-usingGLOVE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
import numpy as np
def create_blank_dict():
    """Load the completeness template spreadsheet and return it as a dict.

    Used to start each feature class's stats from a fresh template row.
    """
    template_path = r'C:\Data\FEMA_Lightbox_Parcels\Notebooks\completeness_blank.xlsx'
    return pd.read_excel(template_path).to_dict()
# Running results workbook; new per-feature-class rows are appended to it.
data_file = r'C:\Data\FEMA_Lightbox_Parcels\Notebooks\completeness.xlsx'
completeness = pd.read_excel(data_file)
#completeness
# Folder names for the ten FEMA regions to walk below.
FEMAregionList = ["FEMA_Region_I", "FEMA_Region_II", "FEMA_Region_III", "FEMA_Region_IV", "FEMA_Region_V",
                  "FEMA_Region_VI", "FEMA_Region_VII", "FEMA_Region_VIII", "FEMA_Region_IX", "FEMA_Region_X"]
from arcgis import GIS
from arcgis.features import SpatialDataFrame
# Walk every FEMA region's geodatabases and record, per point feature class,
# the percentage of null values in each template column.
# NOTE(review): `arcpy` is used below but never imported in this notebook —
# presumably it is provided by the ArcGIS Python environment; confirm.
for FEMAregion in FEMAregionList:
    print(FEMAregion)
    # Folder layout: <root>\<region>\<region>\<state>.gdb
    regionfolder = r"C:\Data\FEMA_Lightbox_Parcels\{}\{}".format(FEMAregion, FEMAregion)
    gdblist = os.listdir(regionfolder)
    for gdb in gdblist:
        print(gdb)
        workinggdb = os.path.join(regionfolder, gdb)
        arcpy.env.workspace = workinggdb
        counties = arcpy.ListDatasets()
        for county in counties:
            print(county)
            currentcounty = os.path.join(workinggdb, county)
            # BUG FIX: this assignment was commented out, leaving fc_points
            # undefined (NameError) in the loop below.
            fc_points = arcpy.ListFeatureClasses("*", "POINT", county)
            #fc_polygons = arcpy.ListFeatureClasses("*","POLYGON",county)
            completeness_dict = create_blank_dict()
            # point analysis first
            for fc in fc_points:
                print(fc)
                fc_pt_current = os.path.join(workinggdb, county, fc)
                sdf = pd.DataFrame.spatial.from_featureclass(fc_pt_current)
                # Template columns that are absent from this feature class.
                list1 = completeness.columns
                list2 = sdf.columns
                main_list = list(set(list1) - set(list2))
                # The first four template columns are identity metadata
                # (region, gdb, county, feature class); the rest get a
                # percent-null figure.
                i = 0
                for column in completeness.columns:
                    if i == 0 :
                        completeness_dict[column] = FEMAregion
                        i += 1
                        continue
                    if i == 1 :
                        completeness_dict[column] = gdb
                        i += 1
                        continue
                    if i == 2 :
                        completeness_dict[column] = county
                        i += 1
                        continue
                    if i == 3 :
                        completeness_dict[column] = fc
                        i += 1
                        continue
                    else :
                        if column in main_list:
                            completeness_dict[column] = np.nan
                            continue
                        notnulls = len(sdf[sdf[column].notnull()])
                        nulls = len(sdf[sdf[column].isnull()])
                        total = notnulls + nulls
                        if total == 0:
                            completeness_dict[column] = np.nan
                            continue
                        percentnull = (nulls / total) * 100
                        completeness_dict[column] = percentnull
                        #print(column + ": " + str(percentnull))
                        i += 1
                # FIX: DataFrame.append was deprecated and removed in pandas
                # 2.0; build a one-row frame and concat with the same
                # ignore_index semantics.
                completeness = pd.concat([completeness, pd.DataFrame([completeness_dict])],
                                         ignore_index=True)
completeness.to_excel(r'C:\Data\FEMA_Lightbox_Parcels\Notebooks\completeness.xlsx')
# Quick sanity check: number of distinct state geodatabases processed so far.
len(completeness["State_GDB"].unique())
| ParcelDataAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/HansHenseler/masdav/blob/main/Part_4_Exercise_Answers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="hvx7X9eKBoYA"
# # Elasticsearch and log2timeline
#
# Exercise 4:
#
# Master of Advanced Studies in Digital Forensics & Cyber Investigation
#
# Data Analytics and Visualization for Digital Forensics
#
# (c) <NAME>, 2021
#
# + [markdown] id="KnErnDkm3BxJ"
# ## 1 The examples are in the Part 4 notebook. This notebook only contains initializations needed to run the exercises at the end
#
# First install Plaso-tools as we did in exercise 3
# + id="QiPCqp_jBcjA"
# various install steps to install plaso tools and dependencies to get plaso working in colab
# -y option is to skip user interaction
# some packages need to be deinstalled and reinstalled to resolve dependencies
# these steps take app. 3 minutes to complete on a fresh colab instance
# !add-apt-repository -y ppa:gift/stable
# !apt update
# !apt-get update
# !apt install plaso-tools
# !pip uninstall -y pytsk3
# !pip install pytsk3
# !pip uninstall -y yara-python
# !pip install yara-python
# !pip uninstall -y lz4
# !pip install lz4
# + colab={"base_uri": "https://localhost:8080/"} id="2ak8TjmbDF7A" outputId="285b85c3-3ab1-40d3-dc3e-c7c7ec1ecbb8" language="bash"
#
# wget -q https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.9.2-linux-x86_64.tar.gz
# wget -q https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.9.2-linux-x86_64.tar.gz.sha512
# tar -xzf elasticsearch-oss-7.9.2-linux-x86_64.tar.gz
# sudo chown -R daemon:daemon elasticsearch-7.9.2/
# shasum -a 512 -c elasticsearch-oss-7.9.2-linux-x86_64.tar.gz.sha512
# + [markdown] id="sQ1gKS_FDYJG"
# Run Elasticsearch as a daemon process
# + id="u3nH9lAkFEBj"
import time
# + colab={"base_uri": "https://localhost:8080/"} id="bjIR1PURDTsE" outputId="1b3e3052-764d-4f54-9247-ad92ba4f17ba" magic_args="--bg" language="bash"
#
# sudo -H -u daemon elasticsearch-7.9.2/bin/elasticsearch
# + id="ej6ZePUNE61X"
# Sleep for few seconds to let the instance start.
# Elasticsearch is launched in the background above; give it time to boot
# before the health-check curl calls below.
time.sleep(20)
# + [markdown] id="cG4sfxfbFKSP"
# Once the instance has been started, grep for elasticsearch in the processes list to confirm the availability.
# + colab={"base_uri": "https://localhost:8080/"} id="0c_QDA8IFzPd" outputId="3b66fbc1-5329-4384-bec3-1a75d6da6266" language="bash"
#
# ps -ef | grep elasticsearch
# + [markdown] id="VnjrjknBF0RD"
# query the base endpoint to retrieve information about the cluster.
# + id="wEJVCLHBFMLa" colab={"base_uri": "https://localhost:8080/"} outputId="c209a1ed-947e-4985-c9db-94e69e9e98ab" language="bash"
#
# curl -sX GET "localhost:9200/"
# + colab={"base_uri": "https://localhost:8080/"} id="p_mnrRCNZ3iy" outputId="6241a1d1-4e77-4ffa-e0c4-d8b461568bc9"
# This command created an index in Elasticsearch
#
# this is a useful page to look up Elasticsearch REST api calls :
# https://www.elastic.co/guide/en/elasticsearch/reference/6.8/cat-indices.html
#
# !curl -X GET "localhost:9200/_cat/indices?format=json&pretty"
# + colab={"base_uri": "https://localhost:8080/"} id="z-0n_jfhGWrZ" outputId="282a5f9b-34af-401a-d795-21afab6a0104"
from google.colab import drive
drive.mount('/content/gdrive')
# + colab={"base_uri": "https://localhost:8080/"} id="MfTBh-2nHfll" outputId="a1185a57-7fd5-4a7c-f5d6-7b5453d740f6"
# In part 3 (step 3) we stored the mus2019ctf.plaso file in your drive.
#
plaso_file = 'gdrive/MyDrive/mus2019ctf.plaso'
#
# and check if it's there
#
# !ls -l $plaso_file
# + [markdown] id="ny6RqS1BM7Ht"
# Use psort to write events to Elasticsearch that we setup earlier. We can use the elastic output format
# + id="2g1h4cDrND-f" colab={"base_uri": "https://localhost:8080/"} outputId="a416443f-1129-4559-f385-2d6f0b2706b7"
# run psort.py. It takes about 10 minutes to export all rows from the 430MB plaso file
#
# !psort.py -o elastic --server localhost --port 9200 --elastic_mappings /usr/share/plaso/elasticsearch.mappings --index_name mus2019ctf $plaso_file --status_view none
# + id="dD9lqpx6QcBw"
# So far we have been accessing information directly with curl from the Elasticsearch REST API
# There is also an Elasticsearch Python API that we can use
#
from elasticsearch import Elasticsearch
# Client for the local single-node instance started earlier in this notebook.
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
# + id="GsDMZgm0inQi"
# We define a Python function to list results
#
def print_results(response):
    """Print each document's _source from an Elasticsearch hits payload."""
    hits = response['hits']['hits']
    for position, hit in enumerate(hits):
        print(position, '-->', hit['_source'], "\n")
# + id="z4WMaiojSeS8"
# First we define some helper functions:
def print_facets(agg_dict):
    """Print each aggregation field's facet buckets as key=count pairs.

    Args:
        agg_dict: iterable of (field_name, aggregation) pairs, e.g.
            response['aggregations'].items(); each aggregation carries a
            'buckets' list of {'key': ..., 'doc_count': ...} dicts.
    """
    for field, val in agg_dict:
        # BUG FIX: the running total used to be initialised once outside the
        # field loop (and shadowed the builtin `sum`), so every field after
        # the first reported an inflated, cumulative count.
        total = 0
        print("facets of field ", field, ':')
        for bucket in val['buckets']:
            for key in bucket:
                if key == 'key':
                    print('\t', bucket[key], end='=')
                else:
                    print(bucket[key], end='')
                    total = total + bucket[key]
            print()
        print("total number of hits for ", field, " is ", total)
def print_hit_stats(response):
    """Print every key/value pair in the response's 'hits' section."""
    print('hit stats:')
    hits = response['hits']
    for stat_name in hits:
        print(stat_name, hits[stat_name])
    print('\n')
# + [markdown] id="YbiK2I2JRJ9I"
# # Exercises
# + [markdown] id="Q5UrnrtHNwik"
# ## 1 Use elasticsearch to filter events in between 2019-03-12 and 2019-03-22
# + colab={"base_uri": "https://localhost:8080/"} id="DbVOPwcJOQlG" outputId="d772b926-4b15-4b5e-8cfb-24249f4c8a9b"
# Exercise 1: date-range filter via a query_string query.
query = '{"query": { "query_string": {"query": "datetime:[2019-03-12 TO 2019-03-22]" }}}'
response = es.search(index="mus2019ctf", body=query, size=5)
print_hit_stats(response)
print_results(response)
# + [markdown] id="bfONyKEXN-rV"
# ## 2 Write a query that performs an aggregation on source_long and source_short (can you find the right field names?)
# + colab={"base_uri": "https://localhost:8080/"} id="ZODQrUy0ORHV" outputId="da7e0a80-ffa7-4471-8519-8471a7187d41"
# Exercise 2: terms aggregations over the .keyword sub-fields (the text
# fields themselves are analyzed and cannot be aggregated directly).
query = '{"query": {"match_all": {}}, "aggs": { "source_short": { "terms": { "field": "source_short.keyword"}}, "source_long": { "terms": { "field": "source_long.keyword"}}}}'
response = es.search(index="mus2019ctf", body=query, size=0)
print_hit_stats(response)
print_facets(response['aggregations'].items())
# + [markdown] id="O3GkaEj-qYFt"
# ## 3 Combine your date range filter from exercise 1 with facet aggregation in exercise 2
# + colab={"base_uri": "https://localhost:8080/"} id="t1-llbwAORqS" outputId="268ff2ab-4a51-4c73-c23f-b2d0a9302fe8"
# Exercise 3: combine the date-range filter with the facet aggregations.
querystring = '{ "query_string": {"query": "datetime:[2019-03-18 TO 2019-03-19]" }}'
facets = '"aggs": { "source_short": { "terms": { "field": "source_short.keyword"}}, "source_long": { "terms": { "field": "source_long.keyword"}}}'
query = '{"query": %s,%s}' % (querystring,facets)
# BUG FIX: the search must run before its stats are printed; the original
# printed hit stats for the stale `response` left over from the previous cell.
response = es.search(index="mus2019ctf", body=query, size=0)
print_hit_stats(response)
print_facets(response['aggregations'].items())
# + [markdown] id="PiIcl3VyVUjD"
# ## 4 ***Advanced*** Create an aggregation accross 3 fields and visualise them in a treemap or sunburst plot
# + colab={"base_uri": "https://localhost:8080/"} id="StUwuHNLsHN5" outputId="8652659f-132d-43d7-ac19-49f43c22bb55"
# The source_short looks interesting
# let's focus on REG, LOG and FILE
#
# Shared pieces: a source_long terms aggregation and the common date window.
facets = '"aggs": { "source_long": { "terms": { "field": "source_long.keyword"}}}'
daterange = 'datetime:[2019-03-12 TO 2019-03-22]'
# One query per source_short category, each faceted by source_long.
# FILE:
querystring_file = '{ "query_string": {"query": "%s AND source_short:FILE" }}' % daterange
query_file = '{"query": %s,%s}' % (querystring_file,facets)
# LOG:
querystring_log = '{ "query_string": {"query": "%s AND source_short:LOG" }}' % daterange
query_log = '{"query": %s,%s}' % (querystring_log,facets)
# REG
querystring_reg = '{ "query_string": {"query": "%s AND source_short:REG" }}' % daterange
query_reg = '{"query": %s,%s}' % (querystring_reg,facets)
# size=0: we only want the aggregation buckets, not individual hits.
response_reg = es.search(index="mus2019ctf", body=query_reg, size=0)
response_file = es.search(index="mus2019ctf", body=query_file, size=0)
response_log = es.search(index="mus2019ctf", body=query_log, size=0)
print(query_reg)
print_facets(response_reg['aggregations'].items())
print(query_file)
print_facets(response_file['aggregations'].items())
print(query_log)
print_facets(response_log['aggregations'].items())
# + colab={"base_uri": "https://localhost:8080/"} id="4oa6MYapuiRP" outputId="270b4cd4-a76a-47c3-a0bb-8b683d665141"
# create a variation of the print_facets function that outputs python code for filling a dataframe
#
# the data frame has 3 columns: source_long, source_short, count
#
def print_list_of_lists(agg_dict, name):
    """Emit facet buckets as Python list-literal source text tagged with `name`.

    The printed text is meant to be copy-pasted into the next cell to build
    a DataFrame with columns (source_short, source_long, count).
    """
    for field, val in agg_dict:
        print("data = [", end='')
        for bucket in val['buckets']:
            for key, value in bucket.items():
                if key == 'key':
                    print("['", name, "','", value, end="',")
                else:
                    print(value, end='],')
        print(']\n')
# Dump each category's buckets as copy-pasteable Python list literals.
print_list_of_lists(response_file['aggregations'].items(),'FILE')
print_list_of_lists(response_reg['aggregations'].items(),'REG')
print_list_of_lists(response_log['aggregations'].items(),'LOG')
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="kgWsjKNbyxkT" outputId="4a3d3805-c2d3-424f-90a6-37f332eea44a"
# # copy the output of the previous cell in this cell and manually clean it up
# the idea is we can manually create a dataframe
#
# data = [['a1', 'b1', 'c1'],
#         ['a2', 'b2', 'c2'],
#         ['a3', 'b3', 'c3']]
#
# df = pd.DataFrame(data)
import pandas as pd
# Rows of (source_short, source_long, count) hand-cleaned from the output above.
data = [[' FILE ',' NTFS USN change',292405],[' FILE ',' File stat',1880],[' FILE ',' File entry shell item',25],
        [' REG ',' Registry Key',18055],[' REG ',' Task Cache',158],[' REG ',' Registry Key - Service',65],[' REG ',' Background Activity Moderator Registry Entry',36],[' REG ',' AppCompatCache Registry Entry',13],[' REG ',' Registry Key - Winlogon',3],[' REG ',' Registry Key - USB Entries',2],[' REG ',' Registry Key - Run Key',1],[' REG ',' Registry Key - User Account Information',1],[' REG ',' Registry Key Shutdown Entry',1],
        [' LOG ',' WinPrefetch',597],[' LOG ',' Windows Setupapi Log',18],[' LOG ',' System',17],[' LOG ',' System - Network Connection',1]]
df = pd.DataFrame(data)
df
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="Zq1lXnNa0dlP" outputId="01f70f93-7205-4d4b-b022-a742ee83d159"
# set column names
df.columns = ['source_short','source_long','count']
df
# + colab={"base_uri": "https://localhost:8080/"} id="LevuJH3y4vOi" outputId="b13d2dfc-1b29-4525-b761-decb9916cda2"
# !pip install --upgrade plotly
import plotly.express as px
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="f2PbtGH341ZA" outputId="1bb64763-5c7b-4035-a7f9-46fbec1201ed"
# Two-level treemap: outer ring = source_short, inner = source_long,
# tile area proportional to count.
fig = px.treemap(df, path=['source_short', 'source_long'],values='count')
fig.show()
# + id="aN_qcB7q6EH2"
# note: it's a pity about the large values but fortunately you can zoom in.
| Part_4_Exercise_Answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/susyimes/12306/blob/master/Copy_of_esrgan_tf2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9YvVZuXTbx9j"
# # Demo esrgan-tf2 on single image input
# This notebooks shows the testing results of the ESRGAN model trained on Tensorflow2.0+.
# + [markdown] id="emiMjP6rcO6R"
# ## Setup Environment
#
#
# + id="pwCjbXk6bwz5" colab={"base_uri": "https://localhost:8080/"} outputId="e9427f35-e3a0-4492-af95-71366d3b8393"
# !git clone https://github.com/peteryuX/esrgan-tf2.git
# %cd esrgan-tf2/
# !pip install -r requirements.txt
# + [markdown] id="Wh0RUrq2hPsp"
# ## Dowload Pretrained Model
# + id="eXcLo3eiftON" colab={"base_uri": "https://localhost:8080/"} outputId="e02ca2d8-3cd3-4f25-ad39-5d0bb990b03f"
from google_drive_downloader import GoogleDriveDownloader as gdd
# NOTE(review): the Drive file id contains a '<KEY>' redaction placeholder —
# it must be replaced with the real id from the esrgan-tf2 README before this
# download can work.
gdd.download_file_from_google_drive(file_id='1ckihm-YJ<KEY>',
                                    dest_path='./esrgan_inference.zip',
                                    unzip=True)
# !mkdir -p checkpoints/
# !mv esrgan_inference checkpoints/esrgan
# !rm esrgan_inference.zip
# + [markdown] id="MV64le7MjVPt"
# ## The Source Image
# + id="UIFIa3qlugsa" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="7b8d13d5-397d-40f2-a137-e6bb5a7ec2ca"
from IPython.display import Image
# Display the low-resolution input image.
Image(filename='./data/lr-1.jpg')
# + [markdown] id="VzLqJ0Jtu1yD"
# ## Prediction Result from Testing Script
# + id="d2_s_KAjt82c" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="1e73df52-e4fe-43bc-e473-b79edcce35f7"
# Run the super-resolution test script, then show its side-by-side output
# (bicubic vs. SR vs. HR).
# !python test.py --cfg_path="./configs/esrgan.yaml" --img_path="./data/lr-1.jpg"
Image(filename='./Bic_SR_HR_lr-1.jpg')
| Copy_of_esrgan_tf2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tracking Callbacks
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
# -
# This module regroups the callbacks that track one of the metrics computed at the end of each epoch to make decisions about training. To show examples of use, we'll use our sample of MNIST and a simple cnn model.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
# + hide_input=true
show_doc(callbacks.TerminateOnNaNCallback)
# -
# Sometimes, training diverges and the loss goes to nan. In that case, there's no point continuing, so this callback stops the training.
# An absurdly large learning rate (1e4) makes the loss diverge to nan,
# demonstrating the failure mode TerminateOnNaNCallback guards against.
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy])
learn.fit_one_cycle(2,1e4)
# Using it prevents that situation to happen.
# Same divergent learning rate, but the callback stops training as soon as
# the loss becomes nan instead of wasting the remaining iterations.
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy], callbacks=[TerminateOnNaNCallback()])
learn.fit(2,1e4)
# + hide_input=true
show_doc(EarlyStoppingCallback)
# -
# This callback tracks the quantity in `monitor` during the training of `learn`. `mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will stop training after `patience` epochs if the quantity hasn't improved by `min_delta`.
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy],
callback_fns=[partial(EarlyStoppingCallback, monitor='accuracy', min_delta=0.01, patience=3)])
learn.fit(50,1e-42)
# + hide_input=true
show_doc(SaveModelCallback)
# -
# This callback tracks the quantity in `monitor` during the training of `learn`. `mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will save the model in `name` whenever determined by `every` ('improvement' or 'epoch'). Loads the best model at the end of training if `every='improvement'`.
# + hide_input=true
show_doc(ReduceLROnPlateauCallback)
# -
# This callback tracks the quantity in `monitor` during the training of `learn`. `mode` can be forced to 'min' or 'max' but will automatically try to determine if the quantity should be the lowest possible (validation loss) or the highest possible (accuracy). Will reduce the learning rate by `factor` after `patience` epochs if the quantity hasn't improved by `min_delta`.
# + hide_input=true
show_doc(TrackerCallback)
# -
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
# ## New Methods - Please document or move to the undocumented section
# + hide_input=true
show_doc(SaveModelCallback.on_epoch_end)
# -
#
# + hide_input=true
show_doc(TerminateOnNaNCallback.on_batch_end)
# -
#
# + hide_input=true
show_doc(EarlyStoppingCallback.on_train_begin)
# -
#
# + hide_input=true
show_doc(SaveModelCallback.on_train_end)
# -
#
# + hide_input=true
show_doc(ReduceLROnPlateauCallback.on_epoch_end)
# -
#
# + hide_input=true
show_doc(EarlyStoppingCallback.on_epoch_end)
# -
#
# + hide_input=true
show_doc(TerminateOnNaNCallback.on_epoch_end)
# -
#
# + hide_input=true
show_doc(TrackerCallback.on_train_begin)
# -
#
# + hide_input=true
show_doc(ReduceLROnPlateauCallback.on_train_begin)
# -
#
# + hide_input=true
show_doc(TrackerCallback.get_monitor_value)
# -
#
# + hide_input=true
show_doc(TerminateOnNaNCallback)
# -
#
| docs_src/callbacks.tracker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-metabolic]
# language: python
# name: conda-env-miniconda3-metabolic-py
# ---
# # Mapping aerobic habitat in trait space
# %load_ext autoreload
# %autoreload 2
import cartopy
import cartopy.crs as ccrs
import cmocean
import data_collections as dc
import funnel
import intake
import matplotlib.pyplot as plt
import numpy as np
import util
import xarray as xr
from matplotlib import colors
sub_spec = dict(
name='drift-corrected',
experiment=['20C', 'RCP85'],
member_id=dc.ocean_bgc_member_ids,
)
catalog = funnel.to_intake_esm(agg_member_id=True).search(**sub_spec)
catalog
cat = catalog.search(variable='vol_habitat_trait_spc')
cat.df.head()
try:
cluster
client
except:
cluster, client = util.get_ClusterClient(memory='8GB')
cluster.scale(6)
client
# + tags=[]
cat = catalog.search(variable='vol_habitat_trait_spc')
dsets = cat.to_dataset_dict(zarr_kwargs={'use_cftime': True})
keys = list(dsets.keys())
exp_keys = ['20C.ocn.pop.h.drift-corrected', 'RCP85.ocn.pop.h.drift-corrected']
ds = xr.concat([dsets[k] for k in exp_keys], dim='time', coords='minimal').compute()
# Convert habitat volume from cm^3 to units of 10^6 m^3:
# 1 cm^3 = 1e-6 m^3, then a further 1e-6 to express the result in 10^6 m^3
# (matches the '10$^6$ m$^3$' units attribute set below).
ds.vol_habitat_trait_spc.data *= 1e-6 * 1e-6  # convert from cm^3 to 10^6 m^3
ds.vol_habitat_trait_spc.attrs['units'] = '10$^6$ m$^3$'
ds.vol_habitat_trait_spc.attrs['long_name'] = 'Aerobic habitat volume'
ds
# -
yrfrac = util.year_frac(ds.time)
tndx_ref = np.where(yrfrac < 1966)[0]
tndx_2100 = np.where(yrfrac > 2080)[0]
# +
with xr.set_options(keep_attrs=True):
vol_hab_ref = ds.vol_habitat_trait_spc.isel(time=tndx_ref).mean(['member_id', 'time'])
vol_hab_ref_std = ds.vol_habitat_trait_spc.isel(time=tndx_ref).std(['member_id', 'time'])
vol_hab_2100 = ds.vol_habitat_trait_spc.isel(time=tndx_2100).mean(['member_id', 'time'])
vol_hab_2100_std = ds.vol_habitat_trait_spc.isel(time=tndx_2100).std(['member_id', 'time'])
# vol_hab_ref_percent = 100.0 * vol_hab_ref / total_volume
# vol_hab_ref_percent.attrs['long_name'] = 'Ocean volume'
# vol_hab_ref_percent.attrs['units'] = '%'
vol_hab_2100
# -
vol_hab_ref.plot.contourf(levels=30)
vol_hab_2100.plot()
(vol_hab_2100 - vol_hab_ref).plot.contourf(levels=30)
# +
change_percent = 100.0 * (vol_hab_2100 - vol_hab_ref) / vol_hab_ref.where(vol_hab_ref > 1e5)
mx = np.floor(change_percent.max())
mn = -24.0
divnorm = colors.TwoSlopeNorm(vmin=mn, vcenter=0, vmax=mx)
dx = 1.0
change_percent.plot.contourf(norm=divnorm, levels=np.arange(mn, mx + dx, dx), extend='both');
# +
change_percent = 100.0 * (vol_hab_2100 - vol_hab_ref) / vol_hab_ref.where(vol_hab_ref > 1e5)
mx = np.floor(change_percent.max())
mn = -24.0
dx = 1.0
cf = plt.contourf(
change_percent.Eo,
1.0 / change_percent.Ac,
change_percent,
levels=np.arange(mn, mx + dx, dx),
norm=colors.TwoSlopeNorm(vmin=mn, vcenter=0, vmax=mx),
extend='both',
cmap=cmocean.cm.curl_r,
)
plt.ylim(0, 25)
plt.colorbar(cf)
# -
| notebooks/aerobic-habitat-trait-space.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target.
#
# The same repeated number may be chosen from candidates unlimited number of times.
#
# **Note:**
#
# All numbers (including target) will be positive integers.
# The solution set must not contain duplicate combinations.
#
# **Example 1:**<br>
# Input: candidates = [2,3,6,7], target = 7,
#
# A solution set is:
# [
# [7],
# [2,2,3]
# ]
#
# **Example 2:**<br>
# Input: candidates = [2,3,5], target = 8,
#
# A solution set is:
# [
# [2,2,2,2],
# [2,3,3],
# [3,5]
# ]
def combinationSum(self, candidates, target):
    """
    :type candidates: List[int]
    :type target: int
    :rtype: List[List[int]]

    Depth-first backtracking: each candidate may be reused any number of
    times, and combinations stay unique because the search never revisits
    candidates before the current start index. Results are collected on
    ``self.L`` as the problem's class-based template expects.
    """
    self.L = []

    def backtrack(start, remaining, path):
        # Overshot the target along this path -> abandon it.
        if remaining < 0:
            return
        # Exact hit -> record a snapshot of the current path.
        if remaining == 0:
            self.L.append(list(path))
            return
        for idx in range(start, len(candidates)):
            path.append(candidates[idx])
            # Pass idx (not idx + 1) so the same element can be reused.
            backtrack(idx, remaining - candidates[idx], path)
            path.pop()

    backtrack(0, target, [])
    return self.L
# optimization: sort once, then prune — beat 98%
def combinationSum(self, candidates, target):
    """
    :type candidates: List[int]
    :type target: int
    :rtype: List[List[int]]

    Same backtracking search as above, but ``candidates`` is sorted first
    so the search can stop early: once one candidate exceeds the remaining
    target, every later candidate does too. Results are collected on
    ``self.res``. Note: sorts ``candidates`` in place, as the original did.
    """
    self.res = []
    candidates.sort()

    def dfs(remain, index, nums):
        # Exact hit -> record a snapshot of the current combination.
        if remain == 0:
            self.res.append(nums[:])
            return
        for i in range(index, len(candidates)):
            # candidates is sorted, so once one value overshoots the
            # remaining target, all subsequent values overshoot as well.
            # (The original recursed into these dead branches before
            # rejecting them one level down.)
            if candidates[i] > remain:
                break
            nums.append(candidates[i])
            # Pass i (not i + 1) so the same element can be reused.
            dfs(remain - candidates[i], i, nums)
            nums.pop()

    dfs(target, 0, [])
    return self.res
| 39. Combination Sum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Seaborn demo per <NAME> below
# +
from __future__ import print_function, division
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# -
plt.style.use('ggplot')
x = np.linspace(0, 10, 1000)
plt.plot(x, np.sin(x), x, np.cos(x));
import seaborn as sns
sns.set()
plt.plot(x, np.sin(x), x, np.cos(x));
# +
data = np.random.multivariate_normal([0, 0], [[5, 2], [2, 2]], size=2000)
data = pd.DataFrame(data, columns=['x', 'y'])
for col in 'xy':
plt.hist(data[col], density=True, alpha=0.5)
# old Matplotlib would be plt.hist(data[col], normed=True, alpha=0.5)
# -
for col in 'xy':
sns.kdeplot(data[col], shade=True)
sns.distplot(data['x']);
sns.kdeplot(data.x, data.y); # formerly sns.kdeplot(data)
with sns.axes_style('white'):
sns.jointplot("x", "y", data, kind='kde');
with sns.axes_style('white'):
sns.jointplot("x", "y", data, kind='hex')
iris = sns.load_dataset("iris")
iris.head()
tips = sns.load_dataset('tips')
tips.head()
# +
tips['tip_pct'] = 100 * tips['tip'] / tips['total_bill']
grid = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
grid.map(plt.hist, "tip_pct", bins=np.linspace(0, 40, 15));
# -
with sns.axes_style(style='ticks'):
g = sns.factorplot("day", "total_bill", "sex", data=tips, kind="box")
g.set_axis_labels("Day", "Total Bill");
with sns.axes_style('white'):
sns.jointplot("total_bill", "tip", data=tips, kind='hex')
sns.jointplot("total_bill", "tip", data=tips, kind='reg');
planets = sns.load_dataset('planets')
planets.head()
with sns.axes_style('white'):
g = sns.factorplot("year", data=planets, aspect=1.5)
g.set_xticklabels(step=5)
with sns.axes_style('white'):
g = sns.factorplot("year", data=planets, aspect=4.0,
hue='method', order=range(2001, 2015), kind="count")
g.set_ylabels('Number of Planets Discovered')
# ## Scikit-learn tutorial from pycon 2015 <NAME> [here](http://nbviewer.ipython.org/github/jakevdp/sklearn_pycon2015/blob/master/notebooks/Index.ipynb)
| docs/seaborn_demo_from_jakevdp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os
import warnings
import sys
import ipywidgets as widgets
import plotly.graph_objects as go
import numpy as np
import pandas as pd
from IPython.display import display
warnings.filterwarnings("ignore")
module_path = os.path.abspath(os.path.join("../../.."))
if module_path not in sys.path:
sys.path.append(module_path)
from openbb_terminal import api as openbb
# +
# Human-readable axis/colourbar label for each z-axis data code.
z_dict = {
    "IV": "Implied Volatility",
    "OI": "Open Interest",
    "LP": "Last Price",
}
# Plotly hovertemplate per data code: shows days-to-expiration, strike,
# and the selected metric; <extra></extra> suppresses the trace-name box.
strings = {
    "IV": "DTE: %{x} <br>Strike: %{y} <br>IV: %{z}<extra></extra>",
    "OI": "DTE: %{x} <br>Strike: %{y} <br>OI: %{z}<extra></extra>",
    "LP": "DTE: %{x} <br>Strike: %{y} <br>LP: %{z}<extra></extra>",
}
def create_layout(fig, width, height, z, ticker):
    """Apply a fixed-size layout and labelled 3D axes to a surface figure.

    fig    : plotly figure to mutate in place
    width  : figure width in pixels
    height : figure height in pixels
    z      : data code ("IV", "OI", "LP"); z-axis label comes from z_dict
    ticker : symbol used in the plot title
    """
    scene_titles = dict(
        xaxis_title="Days to Expiration",
        yaxis_title="Strike",
        zaxis_title=z_dict[z],
    )
    fig.update_layout(
        title=f"{z.upper()} Surface for {ticker.upper()}",
        autosize=False,
        width=width,
        height=height,
        scene=scene_titles,
    )
# +
class Chart:
    """Renders the option-surface plot, caching data for the last ticker.

    Keeping the fetched DataFrame between widget interactions avoids
    re-downloading the surface when only the plot size or the z-axis
    selection changes.
    """

    def __init__(self):
        # Ticker whose surface data is currently held in ``self.df``.
        self.last_ticker = ""
        self.df = pd.DataFrame()

    def create(self, z, ticker, height, width):
        """Draw a 3D mesh of the selected surface metric for ``ticker``.

        z      : data code "IV", "OI" or "LP" (see module-level z_dict)
        ticker : stock symbol to fetch the option surface for
        height : figure height in pixels
        width  : figure width in pixels
        """
        if not ticker:
            return
        # Only hit the data source when the ticker actually changed.
        if ticker != self.last_ticker:
            self.df = openbb.stocks.options.models.yfinance.get_iv_surface(ticker)
            self.last_ticker = ticker
        if self.df.empty:
            return
        # Map the z-axis selection to the matching data column. Named
        # z_series (the original shadowed the module-level z_dict label
        # mapping that create_layout relies on).
        z_series = {
            "IV": self.df.impliedVolatility,
            "OI": self.df.openInterest,
            "LP": self.df.lastPrice,
        }
        fig = go.Figure(
            data=[
                go.Mesh3d(
                    z=z_series[z],
                    x=self.df.dte,
                    y=self.df.strike,
                    intensity=np.sqrt(z_series[z]),
                    colorscale="Jet",
                    hovertemplate=strings[z],
                    showscale=False,
                )
            ]
        )
        # BUG FIX: the original passed (fig, height, width, ...) although the
        # signature is create_layout(fig, width, height, ...), silently
        # transposing the figure dimensions (invisible at the 1000x1000
        # widget defaults).
        create_layout(fig, width, height, z, ticker)
        if os.environ.get("SERVER_SOFTWARE", "jupyter").startswith("voila"):
            fig.show(config={"showTips": False}, renderer="notebook")
        else:
            fig.show(config={"showTips": False})
# Widget controls: data-series selector, ticker input, and figure size.
opts = ["IV", "OI", "LP"]
z_widget = widgets.Dropdown(options=opts, value="IV", description="Data")
tickers_widget = widgets.Text(description="Ticker", value="TSLA")
h_widget = widgets.IntText(value=1000, description="Height")
w_widget = widgets.IntText(value=1000, description="Width")
controls = widgets.VBox([tickers_widget, z_widget])
size = widgets.VBox([h_widget, w_widget])
controls = widgets.HBox([controls, size])
chart = Chart()
# Re-render the chart whenever any of the four widgets changes.
stocks_view = widgets.interactive_output(
    chart.create,
    {"z": z_widget, "ticker": tickers_widget, "height": h_widget, "width": w_widget},
)
title_html = "<h1>Option Surface Dashboard</h1>"
# Assemble the dashboard: title, controls row, then the plot output.
app_contents = [
    widgets.HTML(title_html),
    controls,
    stocks_view,
]
app = widgets.VBox(app_contents)
display(app)
# -
| openbb_terminal/dashboards/vsurf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finding Scale Card Colours
#
# Use this notebook to find the HSV thresholds for a scale card
# ## Load an image with the scale colours
#
# +
import redpatch as rp
f = rp.FileBrowser()
f.widget()
# -
new_image = rp.load_as_hsv( f.path )
# ## Move the sliders to select the proper region
rp.run_threshold_preview(new_image, width = 5)
# ## Make a note of the HSV values
| redpatch_notebooks/Find Scale Card Filter Settings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Set some Pandas options
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 25)
# -
# # Plotting and Visualization
#
# There are a handful of third-party Python packages that are suitable for creating scientific plots and visualizations. These include packages like:
#
# * matplotlib
# * Chaco
# * PyX
# * Bokeh
#
# Here, we will focus exclusively on matplotlib and the high-level plotting available within pandas. It is currently the most robust and feature-rich package available.
#
# ### Visual representation of data
#
# We require plots, charts and other statistical graphics for the written communication of quantitative ideas.
#
# They allow us to more easily convey relationships and reveal deviations from patterns.
#
# Gelman and Unwin 2011:
#
# > A well-designed graph can display more information than a table of the same size, and more information than numbers embedded in text. Graphical displays allow and encourage direct visual comparisons.
# ## Matplotlib
#
# The easiest way to interact with matplotlib is via `pylab` in iPython. By starting iPython (or iPython notebook) in "pylab mode", both matplotlib and numpy are pre-loaded into the iPython session:
#
# ipython notebook --pylab
#
# You can specify a custom graphical backend (*e.g.* qt, gtk, osx), but iPython generally does a good job of auto-selecting. Now matplotlib is ready to go, and you can access the matplotlib API via `plt`. If you do not start iPython in pylab mode, you can do this manually with the following convention:
#
# import matplotlib.pyplot as plt
plt.plot(np.random.normal(size=100), np.random.normal(size=100), 'ro')
# The above plot simply shows two sets of random numbers taken from a normal distribution plotted against one another. The `'ro'` argument is a shorthand argument telling matplotlib that I wanted the points represented as red circles.
#
# This plot was expedient. We can exercise a little more control by breaking the plotting into a workflow:
with mpl.rc_context(rc={'font.family': 'serif', 'font.weight': 'bold', 'font.size': 8}):
fig = plt.figure(figsize=(6,3))
ax1 = fig.add_subplot(121)
ax1.set_xlabel('some random numbers')
ax1.set_ylabel('more random numbers')
ax1.set_title("Random scatterplot")
plt.plot(np.random.normal(size=100), np.random.normal(size=100), 'r.')
ax2 = fig.add_subplot(122)
plt.hist(np.random.normal(size=100), bins=15)
ax2.set_xlabel('sample')
ax2.set_ylabel('cumulative sum')
ax2.set_title("Normal distrubution")
plt.tight_layout()
plt.savefig("normalvars.png", dpi=150)
# matplotlib is a relatively low-level plotting package compared to others. It makes very few assumptions about what constitutes good layout (by design), but has a lot of flexibility to allow the user to completely customize the look of the output.
#
# If you want to make your plots look pretty like mine, steal the *matplotlibrc* file from [<NAME>](http://www.huyng.com/posts/sane-color-scheme-for-matplotlib/).
#
# ## Plotting in Pandas
#
# On the other hand, Pandas includes methods for DataFrame and Series objects that are relatively high-level, and that make reasonable assumptions about how the plot should look.
normals = pd.Series(np.random.normal(size=10))
normals.plot()
# Notice that by default a line plot is drawn, and a light grid is included. All of this can be changed, however:
normals.cumsum().plot(grid=False)
# Similarly, for a DataFrame:
variables = pd.DataFrame({'normal': np.random.normal(size=100),
'gamma': np.random.gamma(1, size=100),
'poisson': np.random.poisson(size=100)})
variables.cumsum(0).plot()
# As an illustration of the high-level nature of Pandas plots, we can split multiple series into subplots with a single argument for `plot`:
variables.cumsum(0).plot(subplots=True)
# Or, we may want to have some series displayed on the secondary y-axis, which can allow for greater detail and less empty space:
variables.cumsum(0).plot(secondary_y='normal')
# If we would like a little more control, we can use matplotlib's `subplots` function directly, and manually assign plots to its axes:
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(12, 4))
for i,var in enumerate(['normal','gamma','poisson']):
variables[var].cumsum(0).plot(ax=axes[i], title=var)
axes[0].set_ylabel('cumulative sum')
# ## Bar plots
#
# Bar plots are useful for displaying and comparing measurable quantities, such as counts or volumes. In Pandas, we just use the `plot` method with a `kind='bar'` argument.
#
# For this series of examples, let's load up the Titanic dataset:
titanic = pd.read_excel("data/titanic.xls", "titanic")
titanic.head()
titanic.groupby('pclass').survived.sum().plot(kind='bar')
titanic.groupby(['sex','pclass']).survived.sum().plot(kind='barh')
death_counts = pd.crosstab([titanic.pclass, titanic.sex], titanic.survived.astype(bool))
death_counts.plot(kind='bar', stacked=True, color=['black','gold'], grid=False)
# Another way of comparing the groups is to look at the survival *rate*, by adjusting for the number of people in each group.
death_counts.div(death_counts.sum(1).astype(float), axis=0).plot(kind='barh', stacked=True, color=['black','gold'])
# ## Histograms
#
# Frequently it is useful to look at the *distribution* of data before you analyze it. Histograms are a sort of bar graph that displays relative frequencies of data values; hence, the y-axis is always some measure of frequency. This can either be raw counts of values or scaled proportions.
#
# For example, we might want to see how the fares were distributed aboard the titanic:
titanic.fare.hist(grid=False)
# The `hist` method puts the continuous fare values into **bins**, trying to make a sensible decision about how many bins to use (or equivalently, how wide the bins are). We can override the default value (10):
titanic.fare.hist(bins=30)
# There are algorithms for determining an "optimal" number of bins, each of which varies somehow with the number of observations in the data series.
# +
from scipy.stats import kurtosis


def sturges(n):
    """Sturges' rule: bin count grows with log2 of the sample size n."""
    return int(np.log2(n) + 1)


def square_root(n):
    """Square-root rule: a simple sqrt(n) bin-count heuristic."""
    return int(np.sqrt(n))


def doanes(data):
    """Doane's formula: adjusts the bin count for non-normal data.

    NOTE(review): this variant plugs the sample kurtosis into the
    log term — confirm against the intended reference, since Doane's
    original formula uses skewness.
    """
    return int(1 + np.log(len(data)) + np.log(1 + kurtosis(data) * (len(data) / 6.) ** 0.5))
n = len(titanic)
sturges(n), square_root(n), doanes(titanic.fare.dropna())
# -
titanic.fare.hist(bins=doanes(titanic.fare.dropna()))
# A **density plot** is similar to a histogram in that it describes the distribution of the underlying data, but rather than being a pure empirical representation, it is an *estimate* of the underlying "true" distribution. As a result, it is smoothed into a continuous line plot. We create them in Pandas using the `plot` method with `kind='kde'`, where `kde` stands for **kernel density estimate**.
titanic.fare.dropna().plot(kind='kde', xlim=(0,600))
# Often, histograms and density plots are shown together:
titanic.fare.hist(bins=doanes(titanic.fare.dropna()), normed=True, color='lightseagreen')
titanic.fare.dropna().plot(kind='kde', xlim=(0,600), style='r--')
# Here, we had to normalize the histogram (`normed=True`), since the kernel density is normalized by definition (it is a probability distribution).
# We will explore kernel density estimates more in the next section.
# ## Boxplots
#
# A different way of visualizing the distribution of data is the boxplot, which is a display of common quantiles; these are typically the quartiles and the lower and upper 5 percent values.
titanic.boxplot(column='fare', by='pclass', grid=False)
# You can think of the box plot as viewing the distribution from above. The blue crosses are "outlier" points that occur outside the extreme quantiles.
# One way to add additional information to a boxplot is to overlay the actual data; this is generally most suitable with small- or moderate-sized data series.
bp = titanic.boxplot(column='age', by='pclass', grid=False)
for i in [1,2,3]:
y = titanic.age[titanic.pclass==i].dropna()
# Add some random "jitter" to the x-axis
x = np.random.normal(i, 0.04, size=len(y))
plt.plot(x, y, 'r.', alpha=0.2)
# When data are dense, a couple of tricks used above help the visualization:
#
# 1. reducing the alpha level to make the points partially transparent
# 2. adding random "jitter" along the x-axis to avoid overstriking
# A related but inferior cousin of the box plot is the so-called dynamite plot, which is just a bar chart with half of an error bar.
titanic.groupby('pclass')['fare'].mean().plot(kind='bar', yerr=titanic.groupby('pclass')['fare'].std())
# Why is this plot a poor choice?
#
# - bar charts should be used for measurable quantities (*e.g.* raw data), not estimates. The area of the bar does not represent anything, since these are estimates derived from the data.
# - the "data-ink ratio" (*sensu* Edward Tufte) is very high. There are only 6 values represented here (3 means and 3 standard deviations).
# - the plot hides the underlying data.
#
# A boxplot is **always** a better choice than a dynamite plot.
# +
data1 = [150, 155, 175, 200, 245, 255, 395, 300, 305, 320, 375, 400, 420, 430, 440]
data2 = [225, 380]
fake_data = pd.DataFrame([data1, data2]).transpose()
p = fake_data.mean().plot(kind='bar', yerr=fake_data.std(), grid=False)
# -
fake_data = pd.DataFrame([data1, data2]).transpose()
p = fake_data.mean().plot(kind='bar', yerr=fake_data.std(), grid=False)
x1, x2 = p.xaxis.get_majorticklocs()
plt.plot(np.random.normal(x1, 0.01, size=len(data1)), data1, 'ro')
plt.plot([x2]*len(data2), data2, 'ro')
# ### Exercise
#
# Using the Titanic data, create kernel density estimate plots of the age distributions of survivors and victims.
# ## Scatterplots
#
# To look at how Pandas does scatterplots, let's reload the baseball sample dataset.
baseball = pd.read_csv("data/baseball.csv")
baseball.head()
# Scatterplots are useful for data exploration, where we seek to uncover relationships among variables. There are no scatterplot methods for Series or DataFrame objects; we must instead use the matplotlib function `scatter`.
plt.scatter(baseball.ab, baseball.h)
plt.xlim(0, 700); plt.ylim(0, 200)
# We can add additional information to scatterplots by assigning variables to either the size of the symbols or their colors.
plt.scatter(baseball.ab, baseball.h, s=baseball.hr*10, alpha=0.5)
plt.xlim(0, 700); plt.ylim(0, 200)
plt.scatter(baseball.ab, baseball.h, c=baseball.hr, s=40, cmap='hot')
plt.xlim(0, 700); plt.ylim(0, 200);
# To view scatterplots of a large number of variables simultaneously, we can use the `scatter_matrix` function that was recently added to Pandas. It generates a matrix of pair-wise scatterplots, optionally with histograms or kernel density estimates on the diagonal.
_ = pd.scatter_matrix(baseball.loc[:,'r':'sb'], figsize=(12,8), diagonal='kde')
# ## Trellis Plots
#
# One of the enduring strengths of carrying out statistical analyses in the R language is the quality of its graphics. In particular, the addition of [<NAME>'s ggplot2 package](http://ggplot2.org) allows for flexible yet user-friendly generation of publication-quality plots. Its strength is based on its implementation of a powerful model of graphics, called the [Grammar of Graphics](http://vita.had.co.nz/papers/layered-grammar.pdf) (GofG). The GofG is essentially a theory of scientific graphics that allows the components of a graphic to be completely described. ggplot2 uses this description to build the graphic component-wise, by adding various layers.
#
# Pandas recently added functions for generating graphics using a GofG approach. Chiefly, this allows for the easy creation of **trellis plots**, which are a faceted graphic that shows relationships between two variables, conditioned on particular values of other variables. This allows for the representation of more than two dimensions of information without having to resort to 3-D graphics, etc.
#
# Let's use the `titanic` dataset to create a trellis plot that represents 4 variables at a time. This consists of 4 steps:
#
# 1. Create a `RPlot` object that merely relates two variables in the dataset
# 2. Add a grid that will be used to condition the variables by both passenger class and sex
# 3. Add the actual plot that will be used to visualize each comparison
# 4. Draw the visualization
# +
from pandas.tools.rplot import *
titanic = titanic[titanic.age.notnull() & titanic.fare.notnull()]
tp = RPlot(titanic, x='age')
tp.add(TrellisGrid(['pclass', 'sex']))
tp.add(GeomDensity())
_ = tp.render(plt.gcf())
# -
# Using the cervical dystonia dataset, we can simultaneously examine the relationship between age and the primary outcome variable as a function of both the treatment received and the week of the treatment by creating a scatterplot of the data, and fitting a polynomial relationship between `age` and `twstrs`:
cdystonia = pd.read_csv("data/cdystonia.csv", index_col=None)
cdystonia.head()
plt.figure(figsize=(12,12))
bbp = RPlot(cdystonia, x='age', y='twstrs')
bbp.add(TrellisGrid(['week', 'treat']))
bbp.add(GeomScatter())
bbp.add(GeomPolyFit(degree=2))
_ = bbp.render(plt.gcf())
# We can use the `RPlot` class to represent more than just trellis graphics. It is also useful for displaying multiple variables on the same panel, using combinations of color, size and shapes to do so.
cdystonia['site'] = cdystonia.site.astype(float)
plt.figure(figsize=(6,6))
cp = RPlot(cdystonia, x='age', y='twstrs')
cp.add(GeomPoint(colour=ScaleGradient('site', colour1=(1.0, 1.0, 0.5), colour2=(1.0, 0.0, 0.0)),
size=ScaleSize('week', min_size=10.0, max_size=200.0),
shape=ScaleShape('treat')))
_ = cp.render(plt.gcf())
| 3. Plotting and Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# File input / output
# open() opens a file.
# 'w' is write mode: the file is created if it does not already exist.
# Careful: if it does exist, its contents are erased.
# Calling open() returns a file object,
# which is bound here to the variable test_file.
test_file = open('test.txt','w')
# -
# The file object's write() method writes a string to the file.
test_file.write('Hello')
# We operate on test.txt through the test_file variable; picture a hose
# between them through which strings travel into the file.
# flush() empties that hose so no buffered data is left behind.
test_file.flush()
# When the work is done, close() detaches the program from the file.
test_file.close()
# Read mode 'r' lets us read the file's contents.
test_file = open('test.txt','r')
# readline() reads and returns a single line from the start of the file.
read_str = test_file.readline()
# Unlike writing, the data is handed to the program immediately,
# so no flush is needed.
test_file.close()
# Display the string that was read.
print(read_str)
| chapter6/program6-2.ipynb |