code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0
# language: julia
# name: julia-1.7
# ---
# # Module 1 - Global Overview
# Data: daily return series for Dow Jones 30 constituents (DJ30r) and a
# set of dividend stocks (DIVr), read from local CSV files.
using CSV # Reading/writing files in CSV format
using DataFrames # Tables
using StatsPlots # Plot recipes for tabular data
DJ30r = CSV.read("/Users/antonio/Documents/Universidade/GRF-2022/Dados/DJ30r.csv", DataFrame);
DIVr = CSV.read("/Users/antonio/Documents/Universidade/GRF-2022/Dados/DIVr.csv", DataFrame);
Datas = DJ30r[!,:Data]; # shared date column
JPMorgan = DJ30r[!, :JPM]; # JP Morgan
PG = DJ30r[!, :PG]; # Procter & Gamble
Tesla = DIVr[!, :TSLA]; # Tesla
# Fixed typo in the y-axis label of the first plot ("diáriosn" -> "diários")
# so it matches the label used in the second plot.
plot(Datas, JPMorgan, legend = false, ylabel = "Retornos diários", framestyle = :box)
plot(Datas, Tesla, legend = false, ylabel = "Retornos diários", framestyle = :box)
| notebooks/Modulo01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/PUC-RecSys-Class/RecSysPUC-2021/blob/master/practicos/HT4Rec.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# + [markdown] id="ZZujf16p_c2y"
# # H-Transformer for Item Recommendation in MOBA Games
#
# <NAME>
# + [markdown] id="ybR1dWGRAIda"
# ## Dependencies
# + id="BKIBwjERa-dT"
# !pip install python-box
# !wget https://gist.githubusercontent.com/vgaraujov/47ef44430fdbcc95dcb6c87233c3ef92/raw/97c4608ee2b62c77929784e0d07e05ff27b56ee4/drive_download.py
# + id="3FBcRa_eWsTO"
# Google Drive access from Colab: authenticate the user and build a PyDrive
# client used to fetch the dataset archive below.
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
# This only needs to be done once per notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + id="PzgP0fnAa-3f"
# Download the shared Drive folder (data + helper scripts) by its file id.
import drive_download
idx = '19oln5xzNGI50KwO7kIP3HADQOeIXMW-R'
drive_download.drive_download(drive, idx)
# + id="eboJpTf7fSN3"
# Move the downloaded files into the working directory.
# !mv drive_download/* .
# + id="gTPvgHwFQJs_"
import time
import os
import logging
import yaml
from timeit import default_timer as timer
## Libraries
import numpy as np
from box import box_from_file
from pathlib import Path
## Torch
import torch
import torch.nn as nn
from torch.utils import data
import torch.optim as optim
## Custom Imports
from logger import setup_logs
from seed import set_seed
from train import train, snapshot
from validation import validation
from dataset import DotaDataset, DataCollatorForDota
from model_aux import HTransformer
import losses
# + [markdown] id="jGJmV8y0BwdZ"
# ## Training Model
# + id="_1OG_Wy_d7Fr"
############ Control Center and Hyperparameter ###############
# Load the experiment configuration and point it at the Colab-local files.
config = box_from_file(Path('config.yaml'), file_type='yaml')
config.training.logging_dir = '.'
config.dataset.train_data_path = '/content/training_all.pkl'
config.dataset.test_data_path = '/content/testing_all.pkl'
config.dataset.item_path = '/content/item_ids.csv'
config.dataset.champ_path = '/content/hero_names.csv'
# + id="5Ze3ArW1k5xW"
# Unique run name (model type + timestamp) used for log and snapshot files.
run_name = config.model.model_type + time.strftime("-%Y-%m-%d_%H_%M_%S")
# setup logger
global_timer = timer() # global timer
logger = setup_logs(config.training.logging_dir, run_name) # setup logs
logger.info('### Experiment {} ###'.format(run_name))
logger.info('### Hyperparameter summary below ###\n {}'.format(config))
# define if gpu or cpu
use_cuda = not config.training.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
logger.info('===> use_cuda is {}'.format(use_cuda))
# set seed for reproducibility
set_seed(config.training.seed, use_cuda)
# + id="b5_MPUo3k6m3"
## Loading the dataset
logger.info('===> loading train and validation dataset')
train_dataset = DotaDataset(config, 'train')
validation_dataset = DotaDataset(config, 'test')
# Collator pads/truncates each batch to the configured max sequence length.
data_collator = DataCollatorForDota(max_length = config.dataset.max_seq_length)
# Scale the batch size by the number of visible GPUs when using CUDA.
multiplier = torch.cuda.device_count() if not config.training.no_cuda else 1
batch_size = int(config.training.batch_size*multiplier)
train_loader = data.DataLoader(train_dataset,
batch_size=batch_size,
collate_fn=data_collator,
drop_last=True
)
validation_loader = data.DataLoader(validation_dataset,
batch_size=batch_size, # batch 1 for evaluate variable length
collate_fn=data_collator,
drop_last=True
)
# Vocabulary sizes derived from the training data (size the embeddings).
config.dataset.n_items = len(train_dataset.id2item)
config.dataset.n_champs = len(train_dataset.id2champ)
# + id="K06uuWT-mbAK"
# Build the H-Transformer recommendation model from the configuration.
model = HTransformer(config=config)
# move to device
model.to(device)
# Adam optimizer
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=2e-4, betas=(0.9, 0.98), eps=1e-09, weight_decay=1e-4, amsgrad=True)
# NOTE(review): `checkpoint` is not defined at this point in the notebook
# (it is only created by torch.load in the visualization section), so
# resuming here would raise NameError — load the checkpoint before this cell.
if config.training.resume_name:
optimizer.load_state_dict(checkpoint['optimizer'])
# create loss function
loss_fn = losses.LossFunction(loss_type=config.model.loss_fn)
# Count trainable parameters for the log summary below.
model_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info('### Model summary below ###\n {}'.format(str(model)))
logger.info('===> Model total parameter: {}\n'.format(model_params))
# + id="w-RLv8sdmmbo"
# Main training loop: train every epoch; validate and (on improvement)
# snapshot the best model.
# NOTE(review): the notebook indentation was lost in this export, so the
# exact nesting of the validation/save sections cannot be confirmed here —
# verify against the source notebook.
best_acc = 0
best_loss = np.inf
best_epoch = -1
step = 0
initial_epoch = 1
logger.info('### Training begins at epoch {} and step {} ###'.format(initial_epoch,step))
for epoch in range(initial_epoch, config.training.epochs + 1):
epoch_timer = timer()
# Train and validate
tr_acc, tr_loss, step = train(
step,
model,
train_loader,
loss_fn,
device,
optimizer,
epoch,
config.training.log_interval)
# Validation runs only when epoch % 10 == 0.
if not epoch % 10:
val_acc, val_loss = validation(
step,
model,
validation_loader,
loss_fn,
device)
# Save
if val_loss < best_loss:
best_loss = min(val_loss, best_loss)
# Under DataParallel the underlying weights live in model.module.
if torch.cuda.device_count() > 1 and not config.training.no_cuda:
dict_to_save = model.module.state_dict()
else:
dict_to_save = model.state_dict()
snapshot(config.training.logging_dir, run_name, {
'epoch': epoch,
'step_train': step,
'validation_acc': val_acc,
'validation_loss': val_loss,
'state_dict': dict_to_save,
'optimizer': optimizer.state_dict(),
})
best_epoch = epoch
end_epoch_timer = timer()
logger.info("#### End epoch {}/{}, elapsed time: {}".format(epoch, config.training.epochs, end_epoch_timer - epoch_timer))
## end
end_global_timer = timer()
logger.info("################## Success #########################")
logger.info("Total elapsed time: %s" % (end_global_timer - global_timer))
# + [markdown] id="g6vF5SRAEXGN"
# ## Visualization
# + id="7njMbj5v_8Nd"
import numpy as np; np.random.seed(0)
import seaborn as sns
import matplotlib.pyplot as plt
# + id="kUlTVc5SB2EW"
import pandas as pd

def open_champs_info(champs_path):
    """Load the hero-name CSV and build name<->id lookup tables.

    Parameters
    ----------
    champs_path : str
        Path to a CSV containing 'name', 'localized_name' and 'hero_id'
        columns.

    Returns
    -------
    tuple[dict, dict]
        First dict maps localized hero name -> hero_id (plus a padding
        entry 0 -> 0); second dict is the inverse (hero_id -> name).
    """
    frame = pd.read_csv(champs_path)
    # Rows 106 and 111 are known bad entries in this dataset; remove them,
    # drop the internal 'name' column and renumber the index.
    frame.drop([106, 111], inplace=True)
    frame.drop(['name'], axis=1, inplace=True)
    frame.reset_index(drop=True, inplace=True)
    hero_names = frame['localized_name'].tolist()
    hero_ids = frame['hero_id'].tolist()
    name_to_id = dict(zip(hero_names, hero_ids))
    name_to_id[0] = 0  # padding token maps to itself
    id_to_name = {hero_id: hero_name for hero_name, hero_id in name_to_id.items()}
    return name_to_id, id_to_name
# Build the hero_id -> readable-name mapping used to label the plots below.
_, mapping = open_champs_info(config.dataset.champ_path)
# + id="S9-h5i5eO4op"
# Restore the weights of a previously trained run from its snapshot file.
run_name = 'HTransformerV3-2021-10-30_04_30_23'
logger.info('===> loading a checkpoint')
checkpoint = torch.load('{}/{}-{}'.format(config.training.logging_dir, run_name, 'model_best.pth'))
model.load_state_dict(checkpoint['state_dict'])
# + id="S4sW9jNz_qVy"
# Rebuild the validation loader with batch_size=1 and shuffling so a single
# random variable-length example can be visualized.
validation_loader = data.DataLoader(validation_dataset,
batch_size=1, # batch 1 for evaluate variable length
collate_fn=data_collator,
drop_last=True,
shuffle=True
)
# + id="k4nRAiVn_6q5"
champs, items, target, attn_mask = next(iter(validation_loader))
# + id="zKaI0GLdAKR8"
# Forward pass; the model also returns two attention maps for inspection.
output, attn_1, attn_2 = model(champs.cuda(), items.cuda())
# + id="yNesJdO-AYVT"
data_1=attn_1.detach().cpu().numpy()
data_2=attn_2.squeeze(0).detach().cpu().numpy()
# + id="Meu8ytkJB48Q"
# Translate hero ids back to human-readable names for the axis labels.
heros = champs.detach().cpu().squeeze(0).tolist()
name_heros = []
for i in heros:
name_heros.append(mapping[int(i)])
# + id="9Oq4kOllC1U1"
# One heatmap per step (starting at step `aux`), keeping only the first
# hero's row of attention weights over all heroes.
fig,axn = plt.subplots(5, 1, sharex=True, sharey=True, figsize=(10,6))
aux = 2
for i, ax in enumerate(axn.flat):
df = pd.DataFrame(data_1[i+aux], index=name_heros, columns=name_heros)
df.drop(labels=name_heros[1:], axis=0, inplace=True)
# ax.set_title("Step "+str(i+aux))
sns.heatmap(df, ax=ax, cmap="Blues", cbar=True)
plt.xlabel("Heros")
# + id="Y-9UU6SzAnbV"
# Full step-by-step map of the second attention tensor.
plt.figure(figsize=(8, 6))
ax = sns.heatmap(data_2, cmap="Blues")
plt.xlabel("Sequence Step")
plt.ylabel("Sequence Step")
| practicos/HT4Rec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#
# #### Version Check
# Run `pip install plotly --upgrade` to update your Plotly version
# Print the installed plotly version (these examples target plotly v3).
import plotly
plotly.__version__
# ### Basic Histogram ###
# +
# NOTE(review): `plotly.plotly` is the legacy v3 cloud module; in plotly v4+
# this functionality lives in the separate `chart_studio` package.
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x = np.random.randn(500)
data = [go.Histogram(x=x)]
py.iplot(data, filename='basic histogram')
# -
# ### Normalized Histogram
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x = np.random.randn(500)
# histnorm='probability' rescales bar heights so they sum to 1.
data = [go.Histogram(x=x,
histnorm='probability')]
py.iplot(data, filename='normalized histogram')
# -
# ### Horizontal Histogram ###
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
# Passing the sample as `y` instead of `x` makes the bars horizontal.
y = np.random.randn(500)
data = [go.Histogram(y=y)]
py.iplot(data, filename='horizontal histogram')
# -
# ### Overlaid Histogram ###
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x0 = np.random.randn(500)
x1 = np.random.randn(500)+1
# Semi-transparent traces plus barmode='overlay' draw both samples on top
# of each other.
trace1 = go.Histogram(
x=x0,
opacity=0.75
)
trace2 = go.Histogram(
x=x1,
opacity=0.75
)
data = [trace1, trace2]
layout = go.Layout(barmode='overlay')
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='overlaid histogram')
# -
# ### Stacked Histograms
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x0 = np.random.randn(500)
x1 = np.random.randn(500)
trace0 = go.Histogram(
x=x0
)
trace1 = go.Histogram(
x=x1
)
data = [trace0, trace1]
# barmode='stack' stacks the two samples' counts within each bin.
layout = go.Layout(barmode='stack')
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='stacked histogram')
# -
# ### Styled Histogram
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np

# Two overlapping samples: a control group and a shifted experimental group.
x0 = np.random.randn(500)
x1 = np.random.randn(500)+1

# Percent-normalised control histogram with fixed half-unit bins.
control = go.Histogram(
    x=x0,
    histnorm='percent',
    name='control',
    xbins={'start': -4.0, 'end': 3.0, 'size': 0.5},
    marker={'color': '#FFD7E9'},
    opacity=0.75,
)
# Raw-count experimental histogram with its own bin range.
experimental = go.Histogram(
    x=x1,
    name='experimental',
    xbins={'start': -3.0, 'end': 4, 'size': 0.5},
    marker={'color': '#EB89B5'},
    opacity=0.75,
)
data = [control, experimental]
# Titled layout with a small gap between bars and bar groups.
layout = go.Layout(
    title='Sampled Results',
    xaxis={'title': 'Value'},
    yaxis={'title': 'Count'},
    bargap=0.2,
    bargroupgap=0.1,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='styled histogram')
# -
# -
# ### Cumulative Histogram
# +
import plotly.plotly as py
import plotly.graph_objs as go
import numpy as np
x = np.random.randn(500)
# cumulative.enabled makes each bar include all preceding bins.
data = [go.Histogram(x=x,
cumulative=dict(enabled=True))]
py.iplot(data, filename='cumulative histogram')
# -
# ### Specify Binning Function
# +
import plotly.plotly as py
import plotly.graph_objs as go
# histfunc aggregates the `y` values per `x` category: 'count' ignores y,
# 'sum' totals the y values within each category.
x = ["Apples","Apples","Apples","Oranges", "Bananas"]
y = ["5","10","3","10","5"]
data = [
go.Histogram(
histfunc = "count",
y = y,
x = x,
name = "count"
),
go.Histogram(
histfunc = "sum",
y = y,
x = x,
name = "sum"
)
]
py.iplot(data, filename='binning function')
# -
# ### Custom Binning
# For custom binning along x-axis, use the attribute [`nbinsx`](https://plot.ly/python/reference/#histogram-nbinsx). Please note that the autobin algorithm will choose a 'nice' round bin size that may result in somewhat fewer than `nbinsx` total bins. Alternatively, you can set the exact values for [`xbins`](https://plot.ly/python/reference/#histogram-xbins) along with `autobinx = False`.
# +
# NOTE(review): `plotly.tools.make_subplots` is the legacy v3 location; in
# newer plotly it is `plotly.subplots.make_subplots`.
from plotly import tools
import plotly.plotly as py
import plotly.graph_objs as go
x = ['1970-01-01', '1970-01-01', '1970-02-01', '1970-04-01', '1970-01-02', '1972-01-31', '1970-02-13', '1971-04-19']
# Three traces let autobin pick bin sizes for increasing nbinsx hints...
trace0 = go.Histogram(
x=x,
nbinsx = 4,
)
trace1 = go.Histogram(
x=x,
nbinsx = 8,
)
trace2 = go.Histogram(
x=x,
nbinsx = 10,
)
# ...and three traces set exact date bins (18-, 4- and 2-month sizes).
trace3 = go.Histogram(
x=x,
xbins=dict(
start='1969-11-15',
end='1972-03-31',
size= 'M18'),
autobinx = False
)
trace4 = go.Histogram(
x=x,
xbins=dict(
start='1969-11-15',
end='1972-03-31',
size= 'M4'),
autobinx = False
)
trace5 = go.Histogram(
x=x,
xbins=dict(
start='1969-11-15',
end='1972-03-31',
size= 'M2'),
autobinx = False
)
fig = tools.make_subplots(rows=3, cols=2)
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig.append_trace(trace2, 2, 1)
fig.append_trace(trace3, 2, 2)
fig.append_trace(trace4, 3, 1)
fig.append_trace(trace5, 3, 2)
py.iplot(fig, filename='custom binning')
# -
# ### Dash Example
# [Dash](https://plot.ly/products/dash/) is an Open Source Python library which can help you convert plotly figures into a reactive, web-based application. Below is a simple example of a dashboard created using Dash. Its [source code](https://github.com/plotly/simple-example-chart-apps/tree/master/dash-histogramplot) can easily be deployed to a PaaS.
# Embed the hosted Dash demo app and its source code in the notebook.
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-histogramplot/", width="100%", height="650px", frameBorder="0")
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-histogramplot/code", width="100%", height=500, frameBorder="0")
# #### Reference
# See https://plot.ly/python/reference/#histogram for more information and chart attribute options!
# +
# Inject custom fonts and the documentation stylesheet, then publish this
# notebook to the plotly documentation site.
from IPython.display import display, HTML
# Fixed malformed HTML: both <link> tags were missing the closing quote and
# space before an attribute (`...400,700rel=` and `text/csshref=`), which
# made the rel/href attributes part of the preceding attribute value.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# !pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'histograms.ipynb', 'python/histograms/', 'Python Histograms | plotly',
    'How to make Histograms in Python with Plotly.',
    title = 'Python Histograms | plotly',
    name = 'Histograms',
    has_thumbnail='true', thumbnail='thumbnail/histogram.jpg',
    language='python', page_type='example_index',
    display_as='statistical', order=4, redirect_from='/python/histogram-tutorial/',
    ipynb= '~notebook_demo/22')
# -
| _posts/python-v3/statistical/histogram/histograms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is a jupyter notebook to get all the data from my shapefiles, group together the objects with the same label, and then save the data as png images of size 10000 x 10000 pixels (which corresponds to the size of my bounding boxes).
# +
# Vector (shapefile) and raster tooling used throughout this notebook.
import geopandas as gpd
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from shapely.geometry import Point, Polygon
from matplotlib.collections import PatchCollection
from descartes.patch import PolygonPatch
import os
import numpy as np
# Load the box module from shapely to create box objects
from shapely.geometry import box
## Raster data library
import rasterio
import rasterio.features
import rasterio.warp
from rasterio import plot as rioplot
# to display images inline
get_ipython().magic(u'matplotlib inline')
# NOTE(review): 'Agg' is a non-interactive backend and conflicts with the
# inline magic above — confirm which backend is actually intended.
matplotlib.use('Agg')# not sure what I used it for
# some custom files
from img_helpers import get_all_images_in_folder
# -
# define the region folder (BD TOPO vector data, departement 57 - Moselle)
global_path = "D:/allegoria/datasets_alegoria/BD/BD_topo/moselle/BDTOPO_3-0_TOUSTHEMES_SHP_LAMB93_D057_2019-03-19/BDTOPO/1_DONNEES_LIVRAISON_2019-03-00260"
# ## ROADS
# load all the shapely files related to ROADS
fp_road = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/TRANSPORT/ROUTE_NUMEROTEE_OU_NOMMEE.shp"
fp_road_troncon = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/TRANSPORT/TRONCON_DE_ROUTE.shp"
# Read file using gpd.read_file()
data_road = gpd.read_file(fp_road)
data_road_troncon = gpd.read_file(fp_road_troncon)
frames = [data_road, data_road_troncon]
# make a single table with all the roads, not just a single type
all_roads= pd.concat(frames,ignore_index=True,sort=False)
all_roads.head() # small demo of the roads
# plot the full road network (data is departement 57, Moselle)
all_roads.plot()
# Data type - geographical projection of the data used.
# check the projection of the data - I need EPSG:2154, otherwise re-project using geopandas
all_roads.crs
len(all_roads)
# ## Houses
# Load all the data from the BUILDINGS category
fp_bati = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/BATI/BATIMENT.shp"
# Read file using gpd.read_file()
buildings1= gpd.read_file(fp_bati)
all_buildings = buildings1 #pd.concat([buildings1, buildings2, buildings3, buildings4],ignore_index=True )
all_buildings.crs
all_buildings.head()
# List the distinct NATURE labels used to split buildings below.
all_buildings.NATURE.unique()
# Separate all the buildings into per-label subsets using the NATURE
# attribute. (A dead first assignment to `churches`, immediately
# overwritten, was removed.)
churches = all_buildings.loc[all_buildings['NATURE'].isin(["Eglise", "Chapelle"])]
towers = all_buildings.loc[all_buildings['NATURE'] == "Tour, donjon"]
monuments = all_buildings.loc[all_buildings['NATURE'] == "Monument"]
forts = all_buildings.loc[all_buildings['NATURE'] == 'Fort, blockhaus, casemate']
castels = all_buildings.loc[all_buildings['NATURE'] == 'Château']
arcs = all_buildings.loc[all_buildings['NATURE'] == 'Arc de triomphe']
ordinary_buildings = all_buildings.loc[all_buildings['NATURE'].isin(['Indifférenciée', "Industriel, agricole ou commercial"])]
# Fixed the format string: the last specifier was "% o..." (an octal
# conversion with a space flag, swallowing the word "ordinary") instead of
# the intended "%d"; also fixed typos in the message.
print("statistics over all POI objects. There are %d churches, %d towers, %d monuments, %d forts, %d castles, %d arcs and %d ordinary buildings"
      % (len(churches), len(towers), len(monuments), len(forts), len(castels), len(arcs), len(ordinary_buildings)))
churches.plot()
# ## WATER
# Watercourses, water bodies and hydrographic surfaces.
fp_water = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/HYDROGRAPHIE/COURS_D_EAU.shp"
fp_plan = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/HYDROGRAPHIE/PLAN_D_EAU.shp"
fp_surface = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/HYDROGRAPHIE/SURFACE_HYDROGRAPHIQUE.shp"
data_water = gpd.read_file(fp_water)
data_plan = gpd.read_file(fp_plan)
data_surface = gpd.read_file(fp_surface)
# make a single table with all the water features, not just a single type
all_water = pd.concat([data_water, data_plan, data_surface],ignore_index=True,sort=False)
all_water.plot()
len(all_water)
data_water.crs
# ## SPORT TERRITORIES
fp_sport = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/BATI/TERRAIN_DE_SPORT.shp"
data_sport = gpd.read_file(fp_sport)
data_sport.plot()
len(data_sport)
# ## CEMETRIES
fp_cemetries = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/BATI/CIMETIERE.shp"
data_cemetries = gpd.read_file(fp_cemetries)
data_cemetries.plot()
# ## GREENERY
# Vegetation zones, parks/reserves and public forests merged into one layer.
fp_greenery = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/OCCUPATION_DU_SOL/ZONE_DE_VEGETATION.shp"
fp_parks = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/ZONES_REGLEMENTEES/PARC_OU_RESERVE.shp"
fp_public_forest = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/ZONES_REGLEMENTEES/FORET_PUBLIQUE.shp"
data_greenery = gpd.read_file(fp_greenery)
data_parks = gpd.read_file(fp_parks)
data_pubforest = gpd.read_file(fp_public_forest)
all_greenery = pd.concat([data_greenery, data_parks, data_pubforest],ignore_index=True,sort=False)
all_greenery.plot()
len(all_greenery )
# ## AERODROMES
#
# +
# Aerodrome areas and runways merged into one layer.
fp_aero = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/TRANSPORT/AERODROME.shp"
fp_pistes = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/TRANSPORT/PISTE_D_AERODROME.shp"
data_aero = gpd.read_file(fp_aero)
data_pistes = gpd.read_file(fp_pistes)
data_aero = pd.concat([data_aero, data_pistes],ignore_index=True,sort=False)
data_aero.plot()
len(data_aero)
# -
# ## RAILROADS
fp_rail = global_path + "/BDT_3-0_SHP_LAMB93_D057-ED2019-03-19/TRANSPORT/VOIE_FERREE_NOMMEE.shp"
data_rail = gpd.read_file(fp_rail)
data_rail.plot()
len(data_rail)
# ## BOUNDING BOXES FROM THE IMAGES
# Finally, load the bounding boxes from all the jp2 images I have for a department. I actually don't use them later, taking the bounding box from the jp2 meta data, but alternatively they can be used as bounding boxes for vector data.
bb_boxes_path ="D:/allegoria/datasets_alegoria/BD/BD_ortho/mozelle/BDORTHO_2-0_RVB-0M50_JP2-E080_LAMB93_D057_2015-01-01/BDORTHO/5_SUPPLEMENTS_LIVRAISON_2016-02-00008/BDO_RVB_0M50_JP2-E080_LAMB93_D57-2015/dalles.shp"
bb_boxes= gpd.read_file(bb_boxes_path)
bb_boxes.crs
bb_boxes.plot()
# ## IMAGES as JP2
# Load all the jp2 images from the folder and store the absolute path in a list.
folder = 'D:/allegoria/datasets_alegoria/BD/BD_ortho/mozelle/BDORTHO_2-0_RVB-0M50_JP2-E080_LAMB93_D057_2015-01-01/BDORTHO/1_DONNEES_LIVRAISON_2016-02-00008/BDO_RVB_0M50_JP2-E080_LAMB93_D57-2015'
img_type = '.jp2'
image_files = get_all_images_in_folder(folder, img_type)
# Tile names are the last 36 characters of the path.
name = image_files[0][-36:]
print(name)
# ## Save the shape files as .png in correspondence with given images
# The function below is very slow to execute, and the output files are really huge - 10000x10000 pixels.
# I store each shapefile object on a separate canvas, which is then saved as a .png image.
# Note: this version saves the images with a bounding box around them; see the next script for how the bounding box can be disabled.
# ## SAVE all LABELS on ONE image
# This script saves the jp2 image as a tif one, and also all the shapefiles in different colors on a separate canvas. The frame around the image is deleted. The final files are of the same size as the original jp2 file.
# +
# NOTE(review): the notebook indentation was lost in this export; the loop
# nesting below is as recovered and should be checked against the source.
plt.ioff() # don't plot anything here
import matplotlib as mpl
mpl.rcParams['savefig.pad_inches'] = 0
save_path = 'D:/allegoria/topo_ortho/mozelle/lbl_png/'
my_dpi=300
for i in range(1): # range - number of images len(image_files)
name = image_files[i][-36:]
print(name)
with rasterio.open(image_files[i]) as dataset:
# Read the dataset's valid data mask as a ndarray.
mask = dataset.dataset_mask()
# Extract feature shapes and values from the array.
for geom, val in rasterio.features.shapes(
mask, transform=dataset.transform):
# Transform shapes from the dataset's own coordinate reference
# system. NOTE(review): the code targets 'epsg:2154' (Lambert-93),
# not CRS84/EPSG:4326 as the original comment claimed.
geom = rasterio.warp.transform_geom(
dataset.crs, 'epsg:2154', geom, precision=6)
# Print GeoJSON shapes to stdout.
print(geom)
raster = dataset.read()
# some setup
bb_box = geom['coordinates']
polygon_bbox = Polygon(bb_box[0])
# bounding boxes coordinates
# Pixel extent = geo extent * 2 (presumably 0.5 m/pixel — TODO confirm).
coordinates = bb_box[0]
x_width, y_width = (coordinates[2][0]-coordinates[0][0])*2, (coordinates[0][1]-coordinates[1][1])*2
print(x_width, y_width)
# save the image
fig, ax = plt.subplots(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)))
rasterio.plot.show(raster, ax=ax)
plt.axis('off')
fig.tight_layout()
plt.autoscale(False)
# NOTE(review): matplotlib's savefig has no `type` keyword (the parameter
# is `format=`); an unknown kwarg may raise on newer matplotlib versions.
plt.savefig('D:/allegoria/topo_ortho/mozelle/img_tif/'+name[0:-4]+'.tif', type="tif", dpi= my_dpi*10,
bbox_inches="tight", pad_inches=0)
plt.clf()
plt.close('all')
# from now on all the shape data
# shapefiles
sg_roads = all_roads[all_roads.geometry.within(polygon_bbox)] #extract segments of roads
sg_houses = ordinary_buildings[ordinary_buildings.geometry.within(polygon_bbox)] #extract segments of ordinary buildings
sg_churches = churches[churches.geometry.within(polygon_bbox)] #churches
sg_towers = towers[towers.geometry.within(polygon_bbox)] #towers
sg_monuments = monuments[monuments.geometry.within(polygon_bbox)] #monuments
# NOTE(review): sg_forts filters `monuments`, not `forts` — likely a
# copy-paste bug; confirm which layer is intended.
sg_forts = monuments[monuments.geometry.within(polygon_bbox)] #forts
sg_castels = castels[castels.geometry.within(polygon_bbox)] # chateux
sg_arcs =arcs[arcs.geometry.within(polygon_bbox)] # arcs
sg_water = all_water[all_water.geometry.within(polygon_bbox)] #extract segments of water
sg_sport = data_sport[data_sport.geometry.within(polygon_bbox)] #extract segments of sport things
sg_cemetries = data_cemetries[data_cemetries.geometry.within(polygon_bbox)] # cemetries
sg_aero = data_aero[data_aero.geometry.within(polygon_bbox)] # aeroports
sg_railroads = data_rail[data_rail.geometry.within(polygon_bbox)] # railroads
sg_greenery = all_greenery[all_greenery.geometry.within(polygon_bbox)] # forests
#plotting & saving
name_wpath = save_path + name[0:-4] + '.png'
fig = plt.figure()
plt.margins(0)
# Draw each layer in a fixed color on one canvas (greenery as the base).
fig = sg_greenery.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color='#00FF00')
sg_water.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#0000FF', ax=fig)
sg_roads.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)),linewidth=0.21, edgecolor='#FFA500', ax = fig)
sg_sport.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#8A2BE2', ax = fig)
sg_houses.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#FF0000', ax = fig)
sg_churches.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#FFFF00', ax = fig)
sg_towers.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ="#A52A2A", ax = fig)
sg_monuments.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#F5F5DC', ax = fig)
sg_forts.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color='#808080', ax = fig)
sg_castels.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color='#000000', ax = fig)
sg_arcs.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color='#C2B280', ax = fig)
sg_cemetries.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#4B0082', ax = fig)
sg_aero.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color ='#5F021F', ax = fig)
sg_railroads.plot(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)), color = '#FF00FF', ax = fig)
plt.autoscale(False)
plt.axis('off')
# Clip the axes to the tile's geographic bounding box.
ax.set_xlim([coordinates[0][0],coordinates[2][0]])
ax.set_ylim([coordinates[1][1],coordinates[0][1]])
plt.autoscale(False)
plt.savefig(name_wpath, type="png", dpi= float(my_dpi) * 10, bbox_inches="tight", pad_inches=0)
dataset.close()
plt.clf()
plt.close('all')
# -
# ## TEST VISUALIZATION
# Just an example of how to plot vector data on a raster image using rasterio and matplotlib.
# +
from matplotlib import pyplot
get_ipython().magic(u'matplotlib inline')
matplotlib.rc('xtick', labelsize=5)
matplotlib.rc('ytick', labelsize=5)
my_dpi=300
# NOTE(review): `i = 1` is used for `name` below, but the raster itself is
# always image_files[0] — confirm the index mismatch is intentional.
i = 1
with rasterio.open(image_files[0]) as dataset: #an image name as an input
# Read the dataset's valid data mask as a ndarray.
mask = dataset.dataset_mask()
# Extract feature shapes and values from the array.
for geom, val in rasterio.features.shapes(
mask, transform=dataset.transform):
# Transform shapes from the dataset's own coordinate reference
# system to 'epsg:2154' (Lambert-93), not EPSG:4326 as the original
# comment claimed.
geom = rasterio.warp.transform_geom(
dataset.crs, 'epsg:2154', geom, precision=6)
# Print GeoJSON shapes to stdout.
print(geom)
raster = rasterio.open(image_files[0])
name = image_files[i][-36:]
bb_box = geom['coordinates']
polygon_bbox = Polygon(bb_box[0])
coordinates = bb_box[0]
x_width, y_width = (coordinates[2][0]-coordinates[0][0])*2, (coordinates[0][1]-coordinates[1][1])*2
# shapefiles
sg_roads = all_roads[all_roads.geometry.within(polygon_bbox)] #extract segments of roads
sg_houses = ordinary_buildings[ordinary_buildings.geometry.within(polygon_bbox)] #extract segments of ordinary buildings
sg_churches = churches[churches.geometry.within(polygon_bbox)] #churches
sg_towers = towers[towers.geometry.within(polygon_bbox)] #towers
sg_monuments = monuments[monuments.geometry.within(polygon_bbox)] #monuments
# NOTE(review): sg_forts filters `monuments`, not `forts` — same apparent
# copy-paste bug as in the save loop above; confirm.
sg_forts = monuments[monuments.geometry.within(polygon_bbox)] #forts
sg_castels = castels[castels.geometry.within(polygon_bbox)] # chateux
sg_arcs =arcs[arcs.geometry.within(polygon_bbox)] # arcs
sg_water = all_water[all_water.geometry.within(polygon_bbox)] #extract segments of water
sg_sport = data_sport[data_sport.geometry.within(polygon_bbox)] #extract segments of sport things
sg_cemetries = data_cemetries[data_cemetries.geometry.within(polygon_bbox)] # cemetries
sg_aero = data_aero[data_aero.geometry.within(polygon_bbox)] # aeroports
sg_railroads = data_rail[data_rail.geometry.within(polygon_bbox)] # railroads
sg_greenery = all_greenery[all_greenery.geometry.within(polygon_bbox)] # forests
# Overlay all vector layers, each in a fixed color, on the raster tile.
fig, ax = plt.subplots(figsize=(x_width/float(10*my_dpi), y_width/float(10*my_dpi)))
rasterio.plot.show(raster, ax=ax)
sg_greenery.plot(color='#00FF00', ax = ax)
sg_water.plot(color ='#0000FF', ax=ax)
sg_roads.plot(linewidth=0.21, edgecolor='#FFA500', ax = ax)
sg_sport.plot (color ='#8A2BE2', ax = ax)
sg_houses.plot(color ='#FF0000', ax = ax)
sg_churches.plot(color ='#FFFF00', ax = ax)
sg_towers.plot(color ="#A52A2A", ax = ax)
sg_monuments.plot(color ='#F5F5DC', ax = ax)
sg_forts.plot(color='#808080', ax = ax)
sg_castels.plot(color='#000000', ax = ax)
sg_arcs.plot(color='#C2B280', ax = ax)
sg_cemetries.plot(color ='#4B0082', ax = ax)
sg_aero.plot(color ='#5F021F', ax = ax)
sg_railroads.plot(color = '#FF00FF', ax = ax)
plt.margins(2.0)
plt.autoscale(False)
# NOTE(review): savefig has no `type` keyword (use `format=`); may raise on
# newer matplotlib.
plt.savefig("D:/allegoria/topo_ortho/mozelle/example.png", type="png", dpi= float(my_dpi) * 10) # save the resulting figure
plt.show()
# -
# ## image Patching and saving
# +
## An attempt to cut all the images right away given the coordinates
# Extra dependencies for writing per-patch PNGs and CSVs of patch polygons.
import csv
import os
from PIL import Image
from matplotlib import transforms
resolution = 1000 # the resolution in geo coordinates, the real pixel size will be Resolution X 2
# +
my_dpi=300
for i in range(1): # range - number of images len(image_files)
name = image_files[i][-36:]
print(name)
with rasterio.open(image_files[i]) as dataset:
# Read the dataset's valid data mask as a ndarray.
mask = dataset.dataset_mask()
# Extract feature shapes and values from the array.
for geom, val in rasterio.features.shapes(
mask, transform=dataset.transform):
# Transform shapes from the dataset's own coordinate
# reference system to CRS84 (EPSG:4326).
geom = rasterio.warp.transform_geom(
dataset.crs, 'epsg:2154', geom, precision=0)
bb_box = geom['coordinates'] # GeoJSON shapes to stdout.
polygon_bbox = Polygon(bb_box[0])
polygon_bbox.bounds # image geo bounds
polygons = return_polygons(image_bounds=polygon_bbox.bounds, resolution = (1000,1000)) # cut image into patches, the geo res is used
# create a directory where the patches will be stored
try:
os.mkdir("D:/allegoria/topo_ortho/ING_processed_margo/moselle/" + name[:-4])
except:
print("already exists!")
pd_poly = pd.DataFrame(polygons)
pd_poly.to_csv("D:/allegoria/topo_ortho/ING_processed_margo/moselle/" + name[:-4] +"/geo_polygons.csv")
for count, polygon_patch in enumerate(polygons):
# get rastr patch and save
out_image, _ = rasterio.mask.mask(raster, [polygon_patch], crop=True)
new_im = Image.fromarray(np.swapaxes(out_image,0,2))
new_im.save("D:/allegoria/topo_ortho/ING_processed_margo/moselle/"
+ name[:-4] +"/"+str(count).zfill(4)+"_img.png")
# get vector data pixelized and save
# shapefiles
sg_roads = all_roads[all_roads.geometry.within(polygon_patch)] #extract segments of roads
sg_houses = ordinary_buildings[ordinary_buildings.geometry.within(polygon_patch)] #extract segments of ordinary buildings
sg_churches = churches[churches.geometry.within(polygon_patch)] #churches
sg_towers = towers[towers.geometry.within(polygon_patch)] #towers
sg_monuments = monuments[monuments.geometry.within(polygon_patch)] #monuments
sg_forts = monuments[monuments.geometry.within(polygon_patch)] #forts
sg_castels = castels[castels.geometry.within(polygon_patch)] # chateux
sg_arcs =arcs[arcs.geometry.within(polygon_patch)] # arcs
sg_water = all_water[all_water.geometry.within(polygon_patch)] #extract segments of water
sg_sport = data_sport[data_sport.geometry.within(polygon_patch)] #extract segments of sport things
sg_cemetries = data_cemetries[data_cemetries.geometry.within(polygon_patch)] # cemetries
sg_aero = data_aero[data_aero.geometry.within(polygon_patch)] # aeroports
sg_railroads = data_rail[data_rail.geometry.within(polygon_patch)] # railroads
sg_greenery = all_greenery[all_greenery.geometry.within(polygon_patch)] # forests
# now get them as image
fig, ax = plt.subplots(figsize=(20.0, 20.0), dpi=100) # resolution is fixed for 2000
sg_greenery.plot(color='#00FF00', ax = ax)
sg_water.plot(color ='#0000FF', ax=ax)
sg_roads.plot(linewidth=4.0, edgecolor='#FFA500', color ='#FFA500' , ax = ax)
sg_sport.plot (color ='#8A2BE2', ax = ax)
sg_houses.plot(color ='#FF0000', ax = ax)
sg_churches.plot(color ='#FFFF00', ax = ax)
sg_towers.plot(color ="#A52A2A", ax = ax)
sg_monuments.plot(color ='#F5F5DC', ax = ax)
sg_forts.plot(color='#808080', ax = ax)
sg_castels.plot(color='#000000', ax = ax)
sg_arcs.plot(color='#C2B280', ax = ax)
sg_cemetries.plot(color ='#4B0082', ax = ax)
sg_aero.plot(color ='#5F021F', ax = ax)
sg_railroads.plot(color = '#FF00FF', ax = ax)
ax.set_xlim([polygon_patch.bounds[0],polygon_patch.bounds[2]])
ax.set_ylim([polygon_patch.bounds[3],polygon_patch.bounds[1]])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axis('off')
plt.subplots_adjust(left=0., right=1., top=1., bottom=0.)
plt.savefig("D:/allegoria/topo_ortho/ING_processed_margo/moselle/" + name[:-4] +"/"+ str(count).zfill(4) +"_lbl.png", dpi= 100, bbox_inches='tight', pad_inches=0) # save the resulting figure
# +
# get vector data pixelized and save
out_image, _ = rasterio.mask.mask(raster, [polygon_patch], crop=True)
new_im = Image.fromarray(np.swapaxes(out_image,0,2))
new_im.save("D:/allegoria/topo_ortho/ING_processed_margo/moselle/"
+ name[:-4] +"/"+str(count).zfill(4)+"_img.png")
# shapefiles
sg_roads = all_roads[all_roads.geometry.within(polygon_patch)] #extract segments of roads
sg_houses = ordinary_buildings[ordinary_buildings.geometry.within(polygon_patch)] #extract segments of ordinary buildings
sg_churches = churches[churches.geometry.within(polygon_patch)] #churches
sg_towers = towers[towers.geometry.within(polygon_patch)] #towers
sg_monuments = monuments[monuments.geometry.within(polygon_patch)] #monuments
sg_forts = monuments[monuments.geometry.within(polygon_patch)] #forts
sg_castels = castels[castels.geometry.within(polygon_patch)] # chateux
sg_arcs =arcs[arcs.geometry.within(polygon_patch)] # arcs
sg_water = all_water[all_water.geometry.within(polygon_patch)] #extract segments of water
sg_sport = data_sport[data_sport.geometry.within(polygon_patch)] #extract segments of sport things
sg_cemetries = data_cemetries[data_cemetries.geometry.within(polygon_patch)] # cemetries
sg_aero = data_aero[data_aero.geometry.within(polygon_patch)] # aeroports
sg_railroads = data_rail[data_rail.geometry.within(polygon_patch)] # railroads
sg_greenery = all_greenery[all_greenery.geometry.within(polygon_patch)] # forests
# now get them as image
fig, ax = plt.subplots(figsize=(20.0, 20.0), dpi=100)
#ax.imshow(new_im)
sg_greenery.plot(color='#00FF00', ax = ax)
sg_water.plot(color ='#0000FF', ax=ax)
sg_roads.plot(linewidth=0.51, edgecolor='#FFA500', ax = ax)
sg_sport.plot (color ='#8A2BE2', ax = ax)
sg_houses.plot(color ='#FF0000', ax = ax)
sg_churches.plot(color ='#FFFF00', ax = ax)
sg_towers.plot(color ="#A52A2A", ax = ax)
sg_monuments.plot(color ='#F5F5DC', ax = ax)
sg_forts.plot(color='#808080', ax = ax)
sg_castels.plot(color='#000000', ax = ax)
sg_arcs.plot(color='#C2B280', ax = ax)
sg_cemetries.plot(color ='#4B0082', ax = ax)
sg_aero.plot(color ='#5F021F', ax = ax)
sg_railroads.plot(color = '#FF00FF', ax = ax)
ax.set_xlim([polygon_patch.bounds[0],polygon_patch.bounds[2]])
ax.set_ylim([polygon_patch.bounds[3],polygon_patch.bounds[1]])
ax.set_xticklabels([])
ax.set_yticklabels([])
#ax.axis('off')
plt.subplots_adjust(left=0., right=1., top=1., bottom=0.)
plt.savefig("D:/allegoria/topo_ortho/ING_processed_margo/moselle/" + name[:-4] +"/"+ str(count).zfill(4) +"_lbl.png", dpi= 100, bbox_inches='tight', pad_inches=0) # save the resulting figure
# -
print(polygon_patch.bounds)
print(out_transform)
print(len(polygons))
import rasterio
import rasterio.mask
out_image, out_transform = rasterio.mask.mask(raster, [polygons[0]],
crop=True)
print(out_image.shape)
plt.imshow(np.swapaxes(out_image,0,2))
print(polygons[0])
def return_polygons(image_bounds, resolution = (1000,1000)):
    """Tile a bounding box into a list of square patch polygons.

    Parameters
    ----------
    image_bounds : tuple
        (minx, miny, maxx, maxy) in geo coordinates — the layout produced by
        shapely's ``Polygon.bounds``.
    resolution : tuple, optional
        (x_step, y_step): patch size in geo units along each axis.

    Returns
    -------
    list
        shapely ``Polygon`` patches covering the bounds, scanning the x axis
        in the outer loop and the y axis in the inner loop.
    """
    list_polygons = []
    # The original loop variables were named height/width, but they actually
    # step along the x (bounds[0]..bounds[2]) and y (bounds[1]..bounds[3])
    # axes; renamed for clarity. (An unused `index = 0` was also removed.)
    for x in range(int(image_bounds[0]), int(image_bounds[2]), resolution[0]):
        for y in range(int(image_bounds[1]), int(image_bounds[3]), resolution[1]):
            poly = Polygon([[x, y],
                            [x, y + resolution[1]],
                            [x + resolution[0], y + resolution[1]],
                            [x + resolution[0], y]])
            list_polygons.append(poly)
    return list_polygons
for i in range(905000, 910000, 1000):
print(i)
print(len(polygons))
| data_parser_test_alignment-2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
idx = pd.IndexSlice
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.markers as markers
import matplotlib.axes as axes
# #%matplotlib notebook
# %matplotlib inline
plt.rcParams['xtick.labelsize'] = 15
plt.rcParams['ytick.labelsize'] = 15
#plt.rcParams['axes.labelsize'] = 15
plt.rcParams["font.family"] = 'serif'
plt.rcParams["figure.figsize"] = [15,5]
#plt.rcParams('font',**{'family':'serif','serif':['Times']})
df = pd.read_pickle('fata.pkl')
df.plot()
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Titanic Kaggle Competition Solution
# ### Importing the Training and Test data from CSV files
# +
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
# %matplotlib inline
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/Project Midas/Competition'):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv('../Competition/train.csv')
test = pd.read_csv('../Competition/test.csv')
IDtest = test["PassengerId"]
# -
# ### Detecting Outliers in the Age, SibSp, Parch, Fare Features
# +
def detect_outliers(df,n,features):
    """
    Takes a dataframe df of features and returns a list of the indices
    corresponding to the observations containing more than n outliers according
    to the Tukey method.
    """
    outlier_counts = Counter()
    for feature in features:
        # Tukey fences: anything beyond the quartiles +/- 1.5 * IQR
        first_quartile = np.percentile(df[feature], 25)
        third_quartile = np.percentile(df[feature], 75)
        step = 1.5 * (third_quartile - first_quartile)
        # boolean mask of rows falling outside the fences for this feature
        is_outlier = (df[feature] < first_quartile - step) | (df[feature] > third_quartile + step)
        outlier_counts.update(df[is_outlier].index)
    # keep only rows flagged as an outlier in more than n features
    return [idx for idx, hits in outlier_counts.items() if hits > n]
# detect outliers from Age, SibSp , Parch and Fare
Outliers_to_drop = detect_outliers(train,2,["Age","SibSp","Parch","Fare"])
train.loc[Outliers_to_drop] # Show the outliers rows
# -
# Drop outliers
train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
# ### Combine Dataset to apply feature engineering techniques evenly on the datasets
## Join train and test datasets in order to obtain the same number of features during categorical conversion
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
# ### Create New feature 'Title' from 'Name' and group them under 4 major titles - Mr, Mrs, Miss, Master
# +
#Create Title from Name feature
def create_title(data):
    """Add a 'Title' column extracted from 'Name' and return the same frame.

    The title is the text between the first ',' and the following '.'
    (e.g. "Braund, Mr. Owen Harris" -> "Mr"). Mutates `data` in place.
    """
    def extract_title(full_name):
        after_comma = full_name.split(',')[1]
        return after_comma.split('.')[0].strip()
    data["Title"] = data["Name"].map(extract_title)
    return data
dataset = create_title(dataset)
#replacing all titles with mr, mrs, miss, master
def replace_titles(x):
    """Collapse a row's rare 'Title' value into one of Mr/Mrs/Miss/Master.

    'Dr' is resolved via the row's 'Sex' column; any title not listed below
    (Mr, Mrs, Miss, Master) is returned unchanged.
    """
    mr_aliases = ('Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir')
    mrs_aliases = ('the Countess', 'Mme', 'Lady')
    miss_aliases = ('Mlle', 'Ms')
    title = x['Title']
    if title in mr_aliases:
        return 'Mr'
    if title in mrs_aliases:
        return 'Mrs'
    if title in miss_aliases:
        return 'Miss'
    if title == 'Dr':
        # 'Dr' carries no gender information, so fall back on the Sex column
        return 'Mr' if x['Sex'] == 'male' else 'Mrs'
    return title
dataset['Title']=dataset.apply(replace_titles, axis=1)
# -
# ### Transform Feature "Sex" as Male: '1' & Female: '0'
# +
SX = LabelEncoder()
dataset['Sex'] = SX.fit_transform(dataset.Sex)
# -
# ### Dropping Name feature from the Feature set
#Dropping Name column
dataset = dataset.drop(['Name'], axis =1)
# ### Filling the missing Embarked feature
#Countplot of Passenger by Port of Embarkation by class
g = sns.countplot(x="Embarked", hue = "Pclass", data=dataset)
#Fill the missing Port of Embarkation with Mode Function
dataset["Embarked"].fillna(dataset["Embarked"].mode()[0],inplace=True)
# ### Filling the Missing Age values
# +
# Filling missing value of Age
# Fill Age with the median age of similar rows according to Title,
# falling back to the overall median when the Title group is all-NaN.
index_NaN_age = list(dataset["Age"][dataset["Age"].isnull()].index)
age_med = dataset["Age"].median()  # loop-invariant fallback, hoisted out of the loop
for i in index_NaN_age :
    # median age among passengers sharing this row's Title
    age_pred = dataset["Age"][(dataset['Title'] == dataset.loc[i, "Title"])].median()
    # FIX: dataset['Age'].iloc[i] = ... is chained assignment and may write to
    # a temporary copy instead of the DataFrame; .loc assigns reliably.
    # Index labels equal positions here (reset_index after concat above), so
    # switching iloc -> loc is safe — confirm if the pipeline changes.
    dataset.loc[i, 'Age'] = age_pred if not np.isnan(age_pred) else age_med
# -
#Fill the missing Fare with Median value
dataset["Fare"].fillna(dataset.groupby("Pclass")["Fare"].transform("median"),inplace=True)
# ### Feature Engineering - Creating New Features
# +
# Create Deck Feature from Cabin (first letter of the cabin code)
dataset["Deck"] = dataset["Cabin"].str[0]
# Filling missing value of Deck
# Fill Deck with the most frequent deck of similar rows according to Pclass
# Index of NaN deck rows
index_NaN_deck = list(dataset["Deck"][dataset["Deck"].isnull()].index)
deck_med = dataset["Deck"].mode()[0]  # overall most frequent deck (fallback)
for i in index_NaN_deck :
    # most frequent deck within the same passenger class; Series.mode()
    # returns an EMPTY Series when the whole class has no known deck, so
    # indexing [0] unconditionally (as before) could raise IndexError
    deck_pred = dataset["Deck"][(dataset['Pclass'] == dataset.loc[i, "Pclass"])].mode()
    # BUG FIX: the original tested np.isnan(age_pred) — a stale variable left
    # over from the Age-imputation cell — instead of checking the deck value.
    # Also uses .loc instead of chained assignment (dataset['Deck'].iloc[i]),
    # which may silently write to a copy.
    dataset.loc[i, 'Deck'] = deck_pred[0] if len(deck_pred) > 0 else deck_med
# +
# Creating new features from Deck Column
# One-hot encode the deck letter into binary indicator columns.
dataset['Deck_A'] = dataset['Deck'].map(lambda s: 1 if s == 'A' else 0)
dataset['Deck_B'] = dataset['Deck'].map(lambda s: 1 if s == 'B' else 0)
dataset['Deck_C'] = dataset['Deck'].map(lambda s: 1 if s == 'C' else 0)
dataset['Deck_D'] = dataset['Deck'].map(lambda s: 1 if s == 'D' else 0)
dataset['Deck_E'] = dataset['Deck'].map(lambda s: 1 if s == 'E' else 0)
dataset['Deck_F'] = dataset['Deck'].map(lambda s: 1 if s == 'F' else 0)
dataset['Deck_G'] = dataset['Deck'].map(lambda s: 1 if s == 'G' else 0)
# NOTE(review): Deck values come from cabin first letters; 'X' does not
# appear in the visible pipeline, so Deck_X may always be 0 — confirm intent.
dataset['Deck_X'] = dataset['Deck'].map(lambda s: 1 if s == 'X' else 0)
#Dropping Deck feature (comment previously said "Fare", but only Deck is dropped here)
dataset = dataset.drop(['Deck'], axis =1)
# +
# Create family size feature from SibSp and Parch
dataset["Fsize"] = dataset["SibSp"] + dataset["Parch"]
#Dropping Fare feature
#dataset = dataset.drop(['SibSp'], axis =1)
#dataset = dataset.drop(['Parch'], axis =1)
# -
# Create new feature of family size
dataset['Single'] = dataset['Fsize'].map(lambda s: 1 if s == 1 else 0)
dataset['SmallF'] = dataset['Fsize'].map(lambda s: 1 if s == 2 else 0)
dataset['MedF'] = dataset['Fsize'].map(lambda s: 1 if 3 <= s <= 4 else 0)
dataset['LargeF'] = dataset['Fsize'].map(lambda s: 1 if s >= 5 else 0)
# +
# Create New Feature - Gender & Class
dataset['GClass'] = dataset['Sex'].map(lambda s: 1 if s == 0 else 0) * (1/dataset['Pclass'])
# +
# Create New Feature - Age & Gender
dataset['GenderAge'] = dataset['Sex'].map(lambda s: 1 if s == 0 else 0) * (1/dataset['Age'])
# +
# Create new features - First, Second & Third Class off of PClass
dataset['First'] = dataset['Pclass'].map(lambda s: 1 if s == 1 else 0)
dataset['Second'] = dataset['Pclass'].map(lambda s: 1 if s == 2 else 0)
dataset['Third'] = dataset['Pclass'].map(lambda s: 1 if s >= 3 else 0)
#Dropping Pclass column
dataset = dataset.drop(['Pclass'], axis =1)
# +
# Create new features - Fare Ranges off of Fare Feature
dataset['FreeTicket'] = dataset['Fare'].map(lambda s: 1 if s == 0 else 0)
dataset['Lowest_Fare'] = dataset['Fare'].map(lambda s: 1 if (s >= -2 and s < 10) else 0)
dataset['Low_Fare'] = dataset['Fare'].map(lambda s: 1 if (s >= 10 and s < 25) else 0)
dataset['Medium_Fare'] = dataset['Fare'].map(lambda s: 1 if (s >= 25 and s < 35) else 0)
dataset['MHigh_Fare'] = dataset['Fare'].map(lambda s: 1 if (s >= 35 and s < 100) else 0)
dataset['High_Fare'] = dataset['Fare'].map(lambda s: 1 if (s >= 100 and s < 300) else 0)
dataset['Highest_Fare'] = dataset['Fare'].map(lambda s: 1 if s >= 300 else 0)
#Dropping Fare feature
dataset = dataset.drop(['Fare'], axis =1)
# +
# Create new features - Age bands off of Age Feature
dataset['Infant'] = dataset['Age'].map(lambda s: 1 if (s >= 0 and s < 4) else 0)
dataset['Toddler'] = dataset['Age'].map(lambda s: 1 if (s >= 4 and s < 12) else 0)
dataset['Teens'] = dataset['Age'].map(lambda s: 1 if (s >= 12 and s < 18) else 0)
dataset['Young Adult'] = dataset['Age'].map(lambda s: 1 if (s >= 18 and s < 25) else 0)
dataset['Adult'] = dataset['Age'].map(lambda s: 1 if (s >= 25 and s < 35) else 0)
dataset['Adult+'] = dataset['Age'].map(lambda s: 1 if (s >= 35 and s < 45) else 0)
dataset['Middle_Aged'] = dataset['Age'].map(lambda s: 1 if (s >= 45 and s < 60) else 0)
dataset['Seniors'] = dataset['Age'].map(lambda s: 1 if (s >= 60 and s < 70) else 0)
dataset['Seniors+'] = dataset['Age'].map(lambda s: 1 if (s >= 70) else 0)
#Dropping Age Feature
dataset = dataset.drop(['Age'], axis =1)
# +
# Create new features based on port of Embarkation
dataset['Em_C'] = dataset['Embarked'].map(lambda s: 1 if s == 'C' else 0)
dataset['Em_Q'] = dataset['Embarked'].map(lambda s: 1 if s == 'Q' else 0)
dataset['Em_S'] = dataset['Embarked'].map(lambda s: 1 if s == 'S' else 0)
#Dropping Embarked Column
dataset = dataset.drop(['Embarked'], axis =1)
# +
# Create new features based on Title
dataset['Mr'] = dataset['Title'].map(lambda s: 1 if s == 'Mr' else 0)
dataset['Mrs'] = dataset['Title'].map(lambda s: 1 if s == 'Mrs' else 0)
dataset['Miss'] = dataset['Title'].map(lambda s: 1 if s == 'Miss' else 0)
dataset['Master'] = dataset['Title'].map(lambda s: 1 if s == 'Master' else 0)
#Dropping Embarked Column
dataset = dataset.drop(['Title'], axis =1)
# -
#Dropping Ticket column
dataset = dataset.drop(['Ticket'], axis =1)
#Dropping Cabin column
dataset = dataset.drop(['Cabin'], axis =1)
# Dropping Passenger Id
dataset = dataset.drop(['PassengerId'], axis =1)
## Separate out train and test data from dataset
train = dataset[:train_len]
test = dataset[train_len:]
test.drop(labels=["Survived"],axis = 1,inplace=True)
# +
#Separate X_train & y_train from train dataframe
y_train = train["Survived"].astype(int)
X_train = train.drop(labels = ["Survived"],axis = 1)
# -
# Cross validate model with Kfold stratified cross val
kfold = StratifiedKFold(n_splits=30)
# +
# Modeling step Test differents algorithms
random_state = 2
classifiers = []
classifiers.append(SVC(random_state=random_state))
classifiers.append(DecisionTreeClassifier(random_state=random_state))
classifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),random_state=random_state,learning_rate=0.1))
classifiers.append(RandomForestClassifier(random_state=random_state))
classifiers.append(ExtraTreesClassifier(random_state=random_state))
classifiers.append(GradientBoostingClassifier(random_state=random_state))
classifiers.append(MLPClassifier(random_state=random_state))
classifiers.append(KNeighborsClassifier())
classifiers.append(LogisticRegression(random_state = random_state))
classifiers.append(LinearDiscriminantAnalysis())
cv_results = []
for classifier in classifiers :
cv_results.append(cross_val_score(classifier, X_train, y = y_train, scoring = "accuracy", cv = kfold, n_jobs=6))
cv_means = []
cv_std = []
for cv_result in cv_results:
cv_means.append(cv_result.mean())
cv_std.append(cv_result.std())
cv_res = pd.DataFrame({"CrossValMeans":cv_means,"CrossValerrors": cv_std,"Algorithm":["SVC","DecisionTree","AdaBoost",
"RandomForest","ExtraTrees","GradientBoosting","MultipleLayerPerceptron","KNeighboors","LogisticRegression","LinearDiscriminantAnalysis"]})
g = sns.barplot("CrossValMeans","Algorithm",data = cv_res, palette="Set3",orient = "h",**{'xerr':cv_std})
g.set_xlabel("Mean Accuracy")
g = g.set_title("Cross validation scores")
# +
### META MODELING WITH ADABOOST, RF, SVC, EXTRATREES and GRADIENTBOOSTING
# Adaboost
DTC = DecisionTreeClassifier()
adaDTC = AdaBoostClassifier(DTC, random_state=7)
ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"],
"base_estimator__splitter" : ["best", "random"],
"algorithm" : ["SAMME","SAMME.R"],
"n_estimators" :[1,2],
"learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3,1.5]}
gsadaDTC = GridSearchCV(adaDTC,param_grid = ada_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = 1)
gsadaDTC.fit(X_train,y_train)
ada_best = gsadaDTC.best_estimator_
# Best score
gsadaDTC.best_score_
# +
#ExtraTrees
ExtC = ExtraTreesClassifier()
## Search grid for optimal parameters
ex_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsExtC = GridSearchCV(ExtC,param_grid = ex_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = 1)
gsExtC.fit(X_train,y_train)
ExtC_best = gsExtC.best_estimator_
# Best score
gsExtC.best_score_
# +
# RFC Parameters tunning
RFC = RandomForestClassifier()
## Search grid for optimal parameters
rf_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = 1)
gsRFC.fit(X_train,y_train)
RFC_best = gsRFC.best_estimator_
# Best score
gsRFC.best_score_
# +
# Gradient boosting tunning
GBC = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
'n_estimators' : [100,200,300],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [4, 8],
'min_samples_leaf': [100,150],
'max_features': [0.3, 0.1]
}
gsGBC = GridSearchCV(GBC,param_grid = gb_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = 1)
gsGBC.fit(X_train,y_train)
GBC_best = gsGBC.best_estimator_
# Best score
gsGBC.best_score_
# +
### SVC classifier
SVMC = SVC(probability=True)
svc_param_grid = {'kernel': ['rbf'],
'gamma': [ 0.001, 0.01, 0.1, 1],
'C': [1, 10, 50, 100,200,300, 1000]}
gsSVMC = GridSearchCV(SVMC,param_grid = svc_param_grid, cv=kfold, scoring="accuracy", n_jobs= -1, verbose = 1)
gsSVMC.fit(X_train,y_train)
SVMC_best = gsSVMC.best_estimator_
# Best score
gsSVMC.best_score_
# +
votingC = VotingClassifier(estimators=[('rfc', RFC_best), ('extc', ExtC_best),
('svc', SVMC_best), ('adac',ada_best),('gbc',GBC_best)], voting='soft', n_jobs=-1)
votingC = votingC.fit(X_train, y_train)
# +
test_Survived = pd.Series(votingC.predict(test), name="Survived")
results = pd.concat([IDtest,test_Survived],axis=1)
results.to_csv("ensemble_python_voting.csv",index=False)
# -
# ##### References
#
# https://www.kaggle.com/yassineghouzam/titanic-top-4-with-ensemble-modeling
# https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/
# https://github.com/ishanbhandari-19/Titanic-Challenge/blob/master/PreProcessing_and_Feature_Engineering.ipynb
# https://www.kaggle.com/soham1024/titanic-data-science-eda-solutions
dataset.info()
| Titanic/Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
inFile = '../input/mAP_5_95.txt'
import matplotlib.pyplot as plt
import json
# Load Data
data = []
with open(inFile) as f:
for line in f:
j_content = json.loads(line)
data.append(j_content)
mAP = [x['mAP'] for x in data]
models = set([x['model'] for x in data])
iou = [x['iouThresh'] for x in data]
# +
fig, (ax1) = plt.subplots(ncols=1, figsize=(10,10))
for model in models:
mAP = [x['mAP'] for x in data if x['model'] == model ]
iou = [x['iouThresh'] for x in data if x['model']== model]
ax1.plot(iou, mAP)
ax1.legend(models)
ax1.set_ylabel("mAP")
ax1.set_xlabel("IOU Threshold")
ax1.set_title("Mean Average Precision values at increasing IOU Thresholds for 4 different models")
# ax1.set_ylim(0, 0.01)
plt.show()
# -
| InteractiveTools/mAP5-95.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learning How to Learn
# > A course summary
#
# - toc: true
# - branch: master
# - badges: true
# - comments: true
# - author: <NAME>
# - image: images/
# - categories: []
# - hide: true
# # Overview
#
# Imagine having the most powerful computer on this planet and not one instruction manual in sight. The human brain is the most advanced natural technology planet Earth has produced and each computer is custom made from scratch. Not even the most advanced supercomputers can keep up with our brain.
#
#
# Taught by:
# <NAME>, Professor of Engineering and Dr. <NAME>,
#
# This course gives you easy access to the invaluable learning techniques used by experts in art, music, literature, math, science, sports, and many other disciplines. We’ll learn about how the brain uses two very different learning modes and how it encapsulates (“chunks”) information. We’ll also cover illusions of learning, memory techniques, dealing with procrastination, and best practices shown by research to be most effective in helping you master tough subjects.
#
# Using these approaches, no matter what your skill levels in topics you would like to master, you can change your thinking and change your life. If you’re already an expert, this peep under the mental hood will give you ideas for turbocharging successful learning, including counter-intuitive test-taking tips and insights that will help you make the best use of your time on homework and problem sets. If you’re struggling, you’ll see a structured treasure trove of practical techniques that walk you through what you need to do to get on track. If you’ve ever wanted to become better at anything, this course will help serve as your guide.
#
# This course can be taken independent of, concurrent with, or prior to, its companion course, Mindshift. (Learning How to Learn is more learning-focused, and Mindshift is more career-focused.) A related course by the same instructors is Uncommon Sense Teaching.
# Learning How to Learn is meant to help you reframe how you think about learning, to help reduce your frustration and increase the depth of your learning.
#
# There have been enormous strides from research in discovering how we learn most effectively. Finding a way to simply and effectively share these ideas with you has been a big undertaking, but we feel it's well worth doing—you'll see that many of these ideas, although simple, are incredibly powerful.
# ## Introduction to the Focused and Diffuse Modes
| _notebooks/2021-06-30-how-to-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is Pandas? Getting started with pandas
#
# To analyze data, we like to use two-dimensional tables – like in SQL and in Excel. Originally, Python didn’t have this feature. But that’s why Pandas is so important! I like to say, Pandas is the “SQL of Python.” Pandas is the library that will help us to handle two-dimensional data tables in Python. In many senses it’s really similar to SQL, though.
# ## Why should I use Pandas to work with data?
#
# Because:
# - handle headings better than with Numpy
# - Pandas Series and DataFrame are also Numpy arrays
# - provide some clarity in your data
# - especially important if you work with time series as you can downscale and upscale your data based on the time series
# # Essential functions
#
# Top 20 most popular pandas function on GitHub (in 2016)
# https://galeascience.wordpress.com/2016/08/10/top-10-pandas-numpy-and-scipy-functions-on-github/
import pandas as pd
import numpy as np
# # How can I import data with Pandas?
#
# - Array can be created as in Numpy (*Series*)
# - Data can be imported from text files (.csv, .txt and other type of text formats) or directly from excel files (*DataFrames*)
# ## Series
# One-dimensional array-like object (very similar to Numpy 1-D arrays). ```pd.Series``` is characterized by an index and a single column.
# +
temperature_Oslo = pd.Series([4, 6, 6, 9, 8, 7, 8]) # temperature in the middle of the day at Blindern for the next 7 days
temperature_Oslo
# -
# By default, the index starts at 0 and stop at the last value in your ```pd.Series``` dataset
temperature_Oslo.index # same as range(7) or np.arange(7)
# The index can be modified by accessing the index key ```pd.Series.index```
# indexes can be later modified by doing
temperature_Oslo.index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
temperature_Oslo.index
# The easiest way to create a ```pd.Series``` is to define a dictionnary and pass it to the ```pd.Series```
# +
ndata = {'Monday' : 4 , 'Tuesday' : 6 , 'Wednesday' : 6 , 'Thursday' : 9 , 'Friday' : 8 , 'Saturday' : 7, 'Sunday' : 8}
# feed pd.Series the previously created dictionary
temperature_Oslo = pd.Series(ndata)
# print the data on the screen
temperature_Oslo
# -
# Values within the ```pd.Series``` can be accessed either by ```pd.Series[variable]``` or ```pd.Series.variable```
# +
# change in temperature between Monday and Tuesday
print (temperature_Oslo['Monday'] - temperature_Oslo['Tuesday'])
# similar to
print (temperature_Oslo.Monday - temperature_Oslo.Tuesday)
# -
# ## DataFrame
# DataFrame is the most popular function used in Pandas. Help to deal with large rectangular datasets. There are many ways to construct a DataFrame, and can mostly be separated in two groups:
# - Manually, by specifying a dictionary containing a string (the header) and a list of numbers or strings
# - Importing a text or excel file, which contains a x number of data
# ## Creating a DataFrame manually
#
# We can again make us of dictionnary variables but this time we will have a dataset with a multiple number of columns
# +
data = {'Day of the week' : ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
'Temperature in the middle of the day' : [4, 6, 6, 9, 8, 7, 8],
'Wind (m/s)' : [1, 6, 2, 2, 4, 3, 3],
'Weather conditions' : ['Cloud & Sun', 'Cloud', 'Cloud', 'Cloud & Sun', 'Cloud & Sun', 'Sun', 'Cloud']}
frame = pd.DataFrame(data)
frame
# -
# ## Interesting functionalities and how can I access specific data within my data set?
#
frame.head() # print the first five rows
print (frame.columns) # print name of the columns
# ### Apply numpy functions to pandas arrays
np.mean(frame['Temperature in the middle of the day'])
# ### Switch columns in the DataFrame
pd.DataFrame(data, columns=['Day of the week', 'Weather conditions', 'Temperature in the middle of the day', 'Wind (m/s)'])
# ### Locate row based on your index
# +
print (frame.loc[0])
print (frame.loc[6])
# -
# ### Assign values in the DataFrame
# +
# we create a new DataFrame with a new column
nframe = pd.DataFrame(data, columns=['Day of the week', 'Weather conditions', 'Temperature in the middle of the day',
'Wind (m/s)', 'Precipitation (mm)'])
nframe
# -
# Modify a single value
# +
nframe['Precipitation (mm)'].values[0] = 2.3
nframe
# -
# Modify a slice of values
# +
nframe['Precipitation (mm)'].values[1:4] = 1.0
nframe
# -
# Change all values
# +
nframe['Precipitation (mm)'] = 0.0 #equivalent to nframe['Precipitation (mm)'].values[:] = 0.0
nframe
# -
# ### Indexing, reindexing, selection with loc and iloc
# +
nframe = nframe.set_index('Day of the week')
nframe
# -
nframe.loc['Monday']
nframe.iloc[0]
# +
# re-indexing
nframe2 = nframe.reindex(['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday','Monday', 'Tuesday'])
nframe2
# -
# ### Sorting of columns
#
# Sort by a column
nframe2.sort_values(by='Temperature in the middle of the day') #default ascending
nframe2.sort_values(by='Temperature in the middle of the day', ascending=False) #descending
# Sorting by multiple columns
nframe2.sort_values(by=['Temperature in the middle of the day', 'Wind (m/s)'], ascending=False) #descending
# ## Importing a DataFrame
# Data is provided by Manon
# +
path = 'C:/Users/yellow_chocobo/Desktop/python_class/data/Manon/' #directory to the file we would like to import
filename = 'data_manon_python.xlsx' # filename
frame = pd.read_excel(path + filename)
frame.head()
# +
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# %matplotlib inline
fig = plt.figure(figsize=(16,8))
gs=GridSpec(2,4) # 2 rows, 4 columns
ax1=fig.add_subplot(gs[:,:2]) # Span all rows, firs two columns
ax2=fig.add_subplot(gs[:,2]) # Span all rows, third column
ax3=fig.add_subplot(gs[:,3]) # Span all rows, fourth column
ax1.set_title('Calibrated Age')
ax2.set_title('Aluminium content')
ax3.set_title('Silicate content')
ax1.plot(frame['Age. Cal. CE'],frame['depth (m)'],"ko")
ax1.set_ylim(ax1.get_ylim()[::-1])
ax2.plot(frame['Al'],frame['depth (m)'],"bo")
ax2.set_ylim(ax2.get_ylim()[::-1])
ax3.plot(frame['Si'],frame['depth (m)'],"ro")
ax3.set_ylim(ax3.get_ylim()[::-1])
fig.tight_layout()
# -
# # Time series
# For some of you, most of your data are time series. Time series can be differentiated in two main groups:
# - Fixed periods, such as a dataset where you will have data once a day (daily)
# - Intervals of time, i.e, discontinuous periods, indicated by a start and end time
#
# **we will here see how you can work with timeseries in Pandas**
#
#
# ## Most important is to have a date format that will be recognized by Python!
# ## Date format
# +
from datetime import datetime
now = datetime.now()
now
# -
now.year, now.month, now.day
# ## Conversion between String and Datetime
#
# More information about the different datetime format can be found here: http://strftime.org/
#
# Most commonly used functions are within the ```datetime``` and ```dateutil``` modules
#
# ### Date to string format
# +
timestamp = datetime(2019, 1, 1) # date format
str(timestamp) # string format
# -
timestamp.strftime('%Y-%m-%d') # string format
# ### String to date format
# +
timestamp_str = '2019/04/01'
datetime.strptime(timestamp_str, '%Y/%m/%d')
# -
# ```parse``` is also a handy function to convert string to date format
#
# Here are few examples:
# +
from dateutil.parser import parse
parse('2019/04/01')
# -
parse('01/04/2019', dayfirst=True) # day in front
# In most cases we will work with intervals of date (either continuous or discontinuous).
#
# The easiest case is for **continuous measurements** as the integrated function ```pd.date_range``` can be used
# +
# pd.date_range?
# +
dates_2019 = pd.date_range(start='2019/01/01', end='2019/04/01') # default is daily data
dates_2019
# -
# For **discontinuous measurements**, you have to build your own list of dates either:
# - manually
# - or by importing your datasets with specific timestamps
# +
list_of_dates = ['01/01/2019', '19/01/2019', '25/02/2019', '07/03/2019', '01/04/2019'] # day-first european style
converted_list_of_dates = [parse(x, dayfirst=True) for x in list_of_dates]
converted_list_of_dates
# -
# Let's work with the data Manon provided where a calibrated age ```frame['Age. Cal. CE']``` is available
frame['Age. Cal. CE'].head() #print the first 5 values
# We need to transform this column into a date format pandas will be familiar with
#
# There are several ways of doing that!
# **Extracting the year**
# +
years = np.floor(frame['Age. Cal. CE'].values)
years
# +
years = years.astype('int')
years_str = years.astype('str')
years_str
# -
# **Extracting the month and the day**
# +
months_and_days = frame['Age. Cal. CE'].values - years
months_and_days
# -
months_and_days2 = np.round(months_and_days * 365.25)
months_and_days3 = months_and_days2.astype('int') # integer
months_and_days3
# +
# set 0 equal to 1 (avoid problem with day of the year = 0)
months_and_days3[months_and_days3 < 1] = 1
months_and_days3
# -
months_and_days_str = months_and_days3.astype('str')
months_and_days_str
# +
tmp_date = years_str[0].zfill(4) + "/" + months_and_days_str[0].zfill(3) #zfill write 331 as 0331
tmp_date
# +
datetime_viking = [] # create an empty list
# need to loop through the numpy array
for ix, dates_i in np.ndenumerate(months_and_days_str): #loop through strings
# we save only the dates before Jesus
if years[ix] > 0: #datetime does not support negative years or BC dates, need to work with Julian date format?
tmp_date = years_str[ix].zfill(4) + "/" + months_and_days_str[ix].zfill(3)
# save to originally empty list
datetime_viking.append(datetime.strptime(tmp_date, '%Y/%j'))
datetime_viking[:5] # print first five dates
# +
# need to convert to pandas datetime (datetime_viking is a list while we need to work with pandas or numpy array)
# Build every pd.Period first, then create the Series once.
# (Series.append was deprecated and then removed in pandas 2.0, and appending one
# element at a time is O(n^2); pd.Series([]) with no dtype is also deprecated.
# As a bonus, the result gets a proper 0..n-1 index instead of all-zero labels.)
# pd.Period is required because pd.Timestamp cannot represent years before 1677.
periods = [pd.Period(year=d.year, month=d.month, day=d.day, freq='D')
           for d in datetime_viking]
date_pandas = pd.Series(periods)
date_pandas.head()
# +
# we need to create a new data frame containing only data before JC
frame_shortened = frame.iloc[0:len(datetime_viking)]
# and we set the dates as index
frame_shortened.index = date_pandas
# let's have a look at the dataframe
frame_shortened.head()
# -
# ## Resampling and Frequency Conversion
#
# Let's say we would like to downsample for every year.
# +
nframe_resampled = frame_shortened.resample('25Y').mean() # seems that it's indexing from year 0000 and to our time now
nframe_resampled.head()
#every 10 years
#frame_shortened.resample('10Y').mean().head()
# you can also sum the values
#frame_shortened.resample('Y').sum().head()
#frame_shortened.resample('Y', closed='right').sum().head()
# -
frame_shortened.tail()
# ## Does this help with the interpretation of the data?
# +
fig = plt.figure(figsize=(16,8))
gs=GridSpec(2,4) # 2 rows, 4 columns
ax1=fig.add_subplot(gs[:,:2]) # Span all rows, firs two columns
ax2=fig.add_subplot(gs[:,2]) # Span all rows, third column
ax3=fig.add_subplot(gs[:,3]) # Span all rows, fourth column
ax1.set_title('Calibrated Age')
ax2.set_title('Aluminium content')
ax3.set_title('Silicate content')
ax1.plot(nframe_resampled['Age. Cal. CE'],nframe_resampled['depth (m)'],"k-o")
ax1.set_xlim(ax1.get_xlim()[::-1])
ax1.set_ylim(ax1.get_ylim()[::-1])
ax2.plot(nframe_resampled['Al'],nframe_resampled['depth (m)'],"b-o")
ax2.set_xlim(ax2.get_xlim()[::-1])
ax2.set_ylim(ax1.get_ylim()[::-1])
ax3.plot(nframe_resampled['Si'],nframe_resampled['depth (m)'],"r-o")
ax3.set_xlim(ax3.get_xlim()[::-1])
ax3.set_ylim(ax3.get_ylim()[::-1])
fig.tight_layout()
# -
| code/06-pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
DICT_FILEPATH = './data/dict.opcorpora.xml'
import xml.etree.ElementTree as ET
root = ET.parse(DICT_FILEPATH).getroot()
# -
CORPUS_FILEPATH = './data/annot.opcorpora.no_ambig.xml'
corpus_root = ET.parse(CORPUS_FILEPATH).getroot()
# +
from collections import defaultdict
def canonicalize_tag_name(tag_name):
    """Collapse a fine-grained OpenCorpora POS tag into a coarse target tag.

    Adjective-like tags map to 'A', nouns to 'S', verb forms to 'V',
    adverb-like tags to 'ADV', prepositions to 'PR' and conjunctions to
    'CONJ'.  Any tag with no known mapping is returned unchanged.
    """
    coarse_by_fine = {
        'ADJF': 'A', 'ADJS': 'A', 'COMP': 'A',
        'NOUN': 'S',
        'VERB': 'V', 'INFN': 'V', 'PRTF': 'V', 'PRTS': 'V', 'GRND': 'V',
        'ADVB': 'ADV', 'PRCL': 'ADV', 'INTJ': 'ADV', 'PRED': 'ADV',
        'PREP': 'PR',
        'CONJ': 'CONJ',
    }
    return coarse_by_fine.get(tag_name, tag_name)
lemmas = root.findall('lemmata/lemma')
links = root.findall('links/link')
base_lemmas_id = defaultdict()
transformed_to_main = defaultdict(set)
lemmas_by_id = {}
print(len(lemmas))
for lemma in lemmas:
base_lemmas_id[lemma.get('id')] = lemma.get('id')
lemmas_by_id[lemma.get('id')] = lemma
for link in links:
if link.get('type') == '26':
continue
base_lemmas_id[link.get('to')] = base_lemmas_id[link.get('from')]
for lemma in lemmas:
lemma_id = base_lemmas_id[lemma.get('id')]
#print(lemma_id)
main_lemma = lemmas_by_id[lemma_id][0]
main_form = main_lemma.get('t')
tag = canonicalize_tag_name(main_lemma[0].get('v'))
for f in lemma[1:]:
transformed = f.get('t')
transformed_to_main[transformed].add((main_form, tag))
print(len(transformed_to_main))
# NOTE(review): the original literal here ("ст<PASSWORD>") was mangled by an
# automated secret-scrubber; 'стекло' is the word used for the analogous
# frequency lookup further down, so it is restored here -- confirm.
test_word = 'стекло'
print(transformed_to_main[test_word])
# -
tokens = corpus_root.findall('./text/paragraphs/paragraph/sentence/tokens/token')
print(len(tokens))
frequencies = defaultdict(lambda: defaultdict(lambda: 1))
for token in tokens:
transformed = token[0].get('t').lower()
main = token[0][0][0].get('t')
tag = canonicalize_tag_name(token[0][0][0][0].get('v'))
frequencies[transformed][(main, tag)] += 1
print(len(frequencies))
print(frequencies['стекло'])
# +
import numpy as np
def get_best_explanation(transformed):
    """Return a (lemma, coarse_tag) guess for one lower-cased surface form.

    Resolution order:
      1. hard-coded closed-class word lists (conjunctions, prepositions,
         particles/adverbs);
      2. out-of-vocabulary heuristics: words ending in "-ть" or "-ся" are
         treated as verbs, anything else as ADV;
      3. otherwise sample one of the dictionary's candidate analyses,
         weighted by corpus frequency (non-deterministic when a form is
         ambiguous).

    Relies on the module-level `transformed_to_main` and `frequencies`
    mappings built in the cells above.
    """
    if transformed in ["и", "а", "но", "или", "чтобы", "что"]:
        return transformed, "CONJ"
    if transformed in ["по", "на", "у", "за", "для", "при", "через", "до", "среди", "между", "возле"]:
        return transformed, "PR"
    if transformed in ["не", "ни", "затем", "тогда", "итак", "наверно", "бы", "ли", "же", "вот", "только", "уже", "видимо", "потом"]:
        return transformed, "ADV"
    if transformed not in transformed_to_main:
        # Unknown word: guess the part of speech from the ending.
        if len(transformed) >= 2 and transformed[-2:] == "ть":
            return transformed, "V"
        if len(transformed) >= 2 and transformed[-2:] == "ся":
            return transformed, "V"
        return transformed, "ADV"
    # Known word: draw one analysis with probability proportional to its
    # (add-one smoothed, via the defaultdict) corpus frequency.
    list_of_choices = list(transformed_to_main[transformed])
    freqs = frequencies[transformed]
    probs = np.array([freqs[choice] for choice in list_of_choices])
    probs = probs / sum(probs)
    return list_of_choices[np.random.choice(len(probs), 1, p=probs)[0]]
def proccess_test(test_filepath, output_filepath):
    """Lemmatize the sentences in `test_filepath` and write one annotated
    line per sentence to `output_filepath`.

    NOTE(review): "proccess" is a typo, but the name is kept since callers
    elsewhere use it.
    """
    with open(test_filepath) as test_file, open(output_filepath, 'w') as output_file:
        lines_to_write = proccess_testfile(test_file)
        print(len(lines_to_write))  # progress output: number of sentences written
        output_file.write('\n'.join(lines_to_write))
def proccess_testfile(test_file):
    """Lemmatize every sentence in an open file-like object.

    Returns a list with one line per sentence, each token rendered as
    ``word{lemma=TAG}``.
    """
    output_lines = []
    for sentence in test_file:
        # Strip punctuation and the trailing newline, then tokenize on spaces.
        cleaned = sentence.replace(',', '').replace('.', '').replace('?', '').replace('!', '').replace('\n', '')
        tokens = cleaned.split(' ')
        annotated = []
        for token in tokens:
            lemma, tag = get_best_explanation(token.lower())
            annotated.append('{}{{{}={}}}'.format(token, lemma, tag))
        output_lines.append(' '.join(annotated))
    return output_lines
# -
golden_test = ['Стала стабильнее экономическая и политическая обстановка, предприятия вывели из тени зарплаты сотрудников.\n',
'Все Гришины одноклассники уже побывали за границей, он был чуть ли не единственным, кого не вывозили никуда дальше Красной Пахры.']
correct_result = """Стала{стать=V} стабильнее{стабильный=A} экономическая{экономический=A} и{и=CONJ} политическая{политический=A} обстановка{обстановка=S} предприятия{предприятие=S} вывели{вывести=V} из{из=PR} тени{тень=S} зарплаты{зарплата=S} сотрудников{сотрудник=S}
Все{весь=NI} Гришины{гришин=A} одноклассники{одноклассник=S} уже{уже=ADV} побывали{побывать=V} за{за=PR} границей{граница=S} он{он=NI} был{быть=V} чуть{чуть=ADV} ли{ли=ADV} не{не=ADV} единственным{единственный=A} кого{кто=NI} не{не=ADV} вывозили{вывозить=V} никуда{никуда=NI} дальше{далеко=ADV} Красной{красный=A} Пахры{Пахра=S}"""
result = '\n'.join(proccess_testfile(golden_test))
print(correct_result == result)
print()
print("RESULT")
print(result)
print()
print("EXPECTED")
print(correct_result)
print(transformed_to_main['стала'])
print(transformed_to_main['стабильнее'])
# +
TEST_FILEPATH = './data/test.txt'
OUTPUT_FILEPATH = './data/output.txt'
proccess_test(TEST_FILEPATH, OUTPUT_FILEPATH)
# -
| hw01/lemmatizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import sys
sys.path.append('../')
sys.path.append('../support/')
sys.path.append('../lung_segmentation/')
import os
import SimpleITK
from lung_separation import *
from ct_reader import *
from os.path import join, basename, isfile
from glob import glob
from scipy.ndimage import morphology
from skimage import filters
from skimage import exposure
from tqdm import tqdm
from skimage import morphology as skm
from skimage.morphology import watershed
from scipy.ndimage import label
from scipy.ndimage import generate_binary_structure
from skimage import measure
from multiprocessing import Pool
import pickle
from pure_ws_segmentation import *
import lung_separation_frontal
from numpy import *
# import warnings
# warnings.filterwarnings('ignore')
import SimpleITK as sitk
from paths import *
from skimage.transform import resize
# -
erroneus = sorted(list(set(pickle.load(open(join(PATH['STAGE_MASKS'], 'still_erroneus_ncrash'), 'rb')))))
erroneus = [join(PATH['STAGE_DATA'],err) for err in erroneus]
def operate(path, out_dir='STAGE_MASKS'):
    """Segment the lungs of one CT scan and save a combined left/right mask.

    The saved array encodes the left lung as 1 and the right lung as 2
    (``left + 2 * right``).

    NOTE(review): the `out_dir` parameter is never used -- the output
    directory is hard-coded in the save() call below; confirm and either
    use the parameter or drop it.
    """
    ct_scan = read_ct_scan(path)
    ct_scan_px = get_pixels_hu(ct_scan)  # raw scan -> pixel array (Hounsfield units, per helper name -- confirm)
    ct_excluded = []
    # Per-slice lung extraction; the commented-out Pool variant parallelized this.
    for sl in (ct_scan_px):
        ct_excluded.append(exclude_lungs(sl))
#     with Pool(34) as pool:
#         ct_excluded = pool.map(exclude_lungs, ct_scan_px)
#     end = time.time()
#     print(end - start)
    lung_filter = asarray(ct_excluded)
    # Remap the stack's extreme values (background / lung markers produced by
    # exclude_lungs) to a binary 0/1 mask before separation.
    a128 = lung_filter.min()
    a255 = lung_filter.max()
    lung_filter[lung_filter==a128] = 0
    lung_filter[lung_filter==a255] = 1
    left, right = lung_separation_frontal.separate_lungs3d(lung_filter)
    save(join(join('/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/NEW_STAGE/MASKS','FIXED'),basename(path)),left+2*right)
for err in tqdm(erroneus):
operate(err)
| lung_segmentation/ws_fix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# http://docs.cython.org/en/latest/src/quickstart/build.html
# %load_ext Cython
# + pycharm={"name": "#%%cython\n"}
# Sum 0..9 into a C-typed accumulator; `cdef int` keeps the addition in C
# instead of creating Python int objects.
cdef int a = 0
for i in range(10):
    a += i
print(a)
# + pycharm={"name": "#%%cython --annotate\n"}
# Same cell run with --annotate, which renders a report highlighting which
# lines still interact with the Python runtime.
cdef int a = 0
for i in range(10):
    a += i
print(a)
| pandas/cpython_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/5_pytorch_retinanet/VOC%20Type%20to%20Coco%20-%20Via%20Monk%20Type%20Annotation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Installation
#
# - Run these commands
#
# - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
#
# - cd Monk_Object_Detection/5_pytorch_retinanet/installation
#
# - Select the right requirements file and run
#
# - cat requirements_cuda.txt | xargs -n 1 -L 1 pip install
# ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
# +
# For colab use the command below
# ! cd Monk_Object_Detection/5_pytorch_retinanet/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install
# For Local systems and cloud select the right CUDA version
# #! cd Monk_Object_Detection/5_pytorch_retinanet/installation && cat requirements_cuda.txt | xargs -n 1 -L 1 pip install
# -
# # VOC Format
#
# ## Dataset Directory Structure
#
# ../sample_dataset/sample_pascal_voc/ (root_dir)
# |
# |-----------Images (img_dir)
# | |
# | |------------------img1.jpg
# | |------------------img2.jpg
# | |------------------.........(and so on)
# |
# |
# |-----------Annotations (anno_dir)
# | |
# | |------------------img1.xml
# | |------------------img2.xml
# | |------------------.........(and so on)
#
#
# # Monk Format
#
# ## Dataset Directory Structure
#
# ../sample_dataset/sample_pascal_voc (root)
# |
# |-----------Images (img_dir)
# | |
# | |------------------img1.jpg
# | |------------------img2.jpg
# | |------------------.........(and so on)
# |
# |
# |-----------train_labels.csv (anno_file)
#
#
# ## Annotation file format
#
# | Id | Labels |
# | img1.jpg | x1 y1 x2 y2 label1 x1 y1 x2 y2 label2 |
#
# - Labels: xmin ymin xmax ymax label
# - xmin, ymin - top left corner of bounding box
# - xmax, ymax - bottom right corner of bounding box
# # COCO Format
#
# ## Dataset Directory Structure
#
# ../sample_dataset (root_dir)
# |
# |------sample_pascal_voc (coco_dir)
# | |
# | |---Images (img_dir)
# | |----|
# | |-------------------img1.jpg
# | |-------------------img2.jpg
# | |-------------------.........(and so on)
# |
# |
# | |---annotations (anno_dir)
# | |----|
# | |--------------------instances_Images.json
# | |--------------------classes.txt
#
#
# - instances_Images.json -> In proper COCO format
# - classes.txt -> A list of classes in alphabetical order
#
# # Sample Dataset Credits
#
# - credits: https://github.com/wujixiu/helmet-detection
# # Step - 1: VOC to Monk type
# +
import os
import sys
import numpy as np
import pandas as pd
import xmltodict
import json
from tqdm.notebook import tqdm
from pycocotools.coco import COCO
# -
root_dir = "Monk_Object_Detection/example_notebooks/sample_dataset/sample_pascal_voc/";
img_dir = "Images/";
anno_dir = "Annotations/";
files = os.listdir(root_dir + anno_dir);
# Parse every Pascal-VOC annotation XML into one "x1 y1 x2 y2 label ..."
# string per image, collected as [filename, label_str] rows.
combined = [];
for i in tqdm(range(len(files))):
    annoFile = root_dir + "/" + anno_dir + "/" + files[i];
    # Context manager closes the file (the original leaked one open handle
    # per annotation file).
    with open(annoFile, 'r') as f:
        my_xml = f.read();
    anno = dict(dict(xmltodict.parse(my_xml))["annotation"])

    fname = anno["filename"];
    # xmltodict yields a dict for a single <object> and a list for several;
    # normalize to a list so one code path handles both (the original
    # duplicated the extraction logic in two branches).
    objs = anno["object"] if isinstance(anno["object"], list) else [anno["object"]]
    parts = []
    for obj in objs:
        bbox = dict(obj["bndbox"])
        # Fields stay strings here; they are converted to int later, in the
        # Monk -> COCO step.
        parts.append(" ".join([bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"], obj["name"]]))
    label_str = " ".join(parts)

    combined.append([fname, label_str])
df = pd.DataFrame(combined, columns = ['ID', 'Label']);
df.to_csv(root_dir + "/train_labels.csv", index=False);
# # Step - 2: Monk Type to COCO
import os
import numpy as np
import cv2
import dicttoxml
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from tqdm import tqdm
import shutil
import json
import pandas as pd
# +
# Provide details on directory in Monk Format
# -
root = "Monk_Object_Detection/example_notebooks/sample_dataset/sample_pascal_voc/";
img_dir = "Images/";
anno_file = "train_labels.csv";
# +
# Need not change anything below
# -
dataset_path = root;
images_folder = root + "/" + img_dir;
annotations_path = root + "/annotations/";
# +
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
input_images_folder = images_folder;
input_annotations_path = root + "/" + anno_file;
# +
output_dataset_path = root;
output_image_folder = input_images_folder;
output_annotation_folder = annotations_path;
tmp = img_dir.replace("/", "");
output_annotation_file = output_annotation_folder + "/instances_" + tmp + ".json";
output_classes_file = output_annotation_folder + "/classes.txt";
# -
if not os.path.isdir(output_annotation_folder):
os.mkdir(output_annotation_folder);
df = pd.read_csv(input_annotations_path);
columns = df.columns
delimiter = " ";
# +
list_dict = [];
anno = [];
for i in range(len(df)):
img_name = df[columns[0]][i];
labels = df[columns[1]][i];
tmp = labels.split(delimiter);
for j in range(len(tmp)//5):
label = tmp[j*5+4];
if(label not in anno):
anno.append(label);
anno = sorted(anno)
for i in tqdm(range(len(anno))):
tmp = {};
tmp["supercategory"] = "master";
tmp["id"] = i;
tmp["name"] = anno[i];
list_dict.append(tmp);
anno_f = open(output_classes_file, 'w');
for i in range(len(anno)):
anno_f.write(anno[i] + "\n");
anno_f.close();
# +
# Build the COCO-format dictionary (images + annotations + categories) from
# the Monk-format dataframe, then write it to the output json.
coco_data = {};
coco_data["type"] = "instances";
coco_data["images"] = [];
coco_data["annotations"] = [];
coco_data["categories"] = list_dict;

image_id = 0;
annotation_id = 0;
for i in tqdm(range(len(df))):
    img_name = df[columns[0]][i];
    labels = df[columns[1]][i];
    tmp = labels.split(delimiter);

    image_in_path = input_images_folder + "/" + img_name;
    img = cv2.imread(image_in_path, 1);
    if img is None:
        # cv2.imread returns None for a missing/unreadable file; skip the
        # image instead of crashing on img.shape.  (A leftover debug print
        # of every path was also removed here.)
        print("WARNING: could not read " + image_in_path + ", skipping");
        continue
    h, w, c = img.shape;

    images_tmp = {};
    images_tmp["file_name"] = img_name;
    images_tmp["height"] = h;
    images_tmp["width"] = w;
    images_tmp["id"] = image_id;
    coco_data["images"].append(images_tmp);

    # The label string is groups of 5 space-separated fields: x1 y1 x2 y2 label
    for j in range(len(tmp)//5):
        x1 = int(tmp[j*5+0]);
        y1 = int(tmp[j*5+1]);
        x2 = int(tmp[j*5+2]);
        y2 = int(tmp[j*5+3]);
        label = tmp[j*5+4];

        annotations_tmp = {};
        annotations_tmp["id"] = annotation_id;
        annotation_id += 1;
        annotations_tmp["image_id"] = image_id;
        annotations_tmp["segmentation"] = [];
        annotations_tmp["ignore"] = 0;
        annotations_tmp["area"] = (x2-x1)*(y2-y1);
        annotations_tmp["iscrowd"] = 0;
        # COCO bbox convention is [x, y, width, height]
        annotations_tmp["bbox"] = [x1, y1, x2-x1, y2-y1];
        annotations_tmp["category_id"] = anno.index(label);

        coco_data["annotations"].append(annotations_tmp)

    image_id += 1;

# Context manager guarantees the json file is flushed and closed.
with open(output_annotation_file, 'w') as outfile:
    outfile.write(json.dumps(coco_data, indent=4));
# -
| example_notebooks/5_pytorch_retinanet/VOC Type to Coco - Via Monk Type Annotation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Transfer Learning
#
# A Convolutional Neural Network (CNN) for image classification is made up of multiple layers that extract features, such as edges, corners, etc; and then use a final fully-connected layer to classify objects based on these features. You can visualize this like this:
#
# <table>
# <tr><td rowspan=2 style='border: 1px solid black;'>⇒</td><td style='border: 1px solid black;'>Convolutional Layer</td><td style='border: 1px solid black;'>Pooling Layer</td><td style='border: 1px solid black;'>Convolutional Layer</td><td style='border: 1px solid black;'>Pooling Layer</td><td style='border: 1px solid black;'>Fully Connected Layer</td><td rowspan=2 style='border: 1px solid black;'>⇒</td></tr>
# <tr><td colspan=4 style='border: 1px solid black; text-align:center;'>Feature Extraction</td><td style='border: 1px solid black; text-align:center;'>Classification</td></tr>
# </table>
#
# *Transfer Learning* is a technique where you can take an existing trained model and re-use its feature extraction layers, replacing its final classification layer with a fully-connected layer trained on your own custom images. With this technique, your model benefits from the feature extraction training that was performed on the base model (which may have been based on a larger training dataset than you have access to) to build a classification model for your own specific set of object classes.
#
# How does this help? Well, think of it this way. Suppose you take a professional tennis player and a complete beginner, and try to teach them both how to play racquetball. It's reasonable to assume that the professional tennis player will be easier to train, because many of the underlying skills involved in racquetball are already learned. Similarly, a pre-trained CNN model may be easier to train to classify a specific set of objects because it's already learned how to identify the features of common objects, such as edges and corners. Fundamentally, a pre-trained model can be a great way to produce an effective classifier even when you have limited data with which to train it.
#
# In this notebook, we'll see how to implement transfer learning for a classification model using PyTorch.
#
# ## Install and import libraries
#
# First, let's install and import the PyTorch libraries we're going to use.
# !pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
# + tags=[]
# Import PyTorch libraries
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
# Other libraries we'll use
import numpy as np
import os
import matplotlib.pyplot as plt
# %matplotlib inline
print("Libraries imported - ready to use PyTorch", torch.__version__)
# -
# ## Prepare the base model
#
# To use transfer learning, we need a base model from which we can use the trained feature extraction layers. The ***resnet*** model is an CNN-based image classifier that has been pre-trained using a huge dataset containing a large number of images of 1000 classes of object, so let's download it and take a look at its layers.
# + tags=[]
# Load the model (download if not already present)
model = torchvision.models.resnet34(pretrained=True)
print(model)
# -
# ## Prepare the image data
#
# The pretrained model has many layers, starting with a convolutional layer that starts the feature extraction process from image data, and ending with a fully-connected linear layer that maps the extracted features to 1000 class labels.
#
# For feature extraction to work with our own images, we need to ensure that the image data we use to train our prediction layer has the same number of features (pixel values) as the images originally used to train the feature extraction layers. The model does not explicitly give this size, but the first convolutional layer applies a 7x7 kernel with a stride of 2x2 and results in 64 feature values, so the original size must be 64 x (7 ÷ 2), which is 224.
#
# PyTorch includes functions for loading and transforming data. We'll use these to create an iterative loader for training data, and a second iterative loader for test data (which we'll use to validate the trained model). The loaders will transform the image data to match the format used to train the original resnet CNN model, convert the image data into *tensors* (which are the core data structure used in PyTorch), and normalize them.
#
# Run the following cell to define the data loaders and list the classes for our images.
# + tags=[]
# Function to ingest data using training and test loaders
def load_dataset(data_path):
# Resize to 256 x 256, then center-crop to 224x224 (to match the resnet image size)
transformation = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# Load all of the images, transforming them
full_dataset = torchvision.datasets.ImageFolder(
root=data_path,
transform=transformation
)
# Split into training (70%) and testing (30%) datasets)
train_size = int(0.7 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
# define a loader for the training data we can iterate through in 30-image batches
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=30,
num_workers=0,
shuffle=False
)
# define a loader for the testing data we can iterate through in 30-image batches
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=30,
num_workers=0,
shuffle=False
)
return train_loader, test_loader
# Now load the images from the shapes folder
import os
data_path = 'data/shapes/'
# Get the iterative dataloaders for test and training data
train_loader, test_loader = load_dataset(data_path)
# Get the class names
classes = os.listdir(data_path)
classes.sort()
print('class names:', classes)
# -
# ## Create a prediction layer
#
# We downloaded the complete *resnet* model including its final **fc** linear layer. This fully-connected linear layer takes 512 inputs (the extracted features) and produces 1000 outputs (class predictions based on the original training image classes). We need to replace this layer with one that takes the same number of inputs (so we can use the same number of extracted features), but produces a prediction for each of our image classes.
#
# We also need to freeze the feature extraction layers to retain the trained weights. Then when we train the model using our images, only the final prediction layer will learn new weight and bias values - the pre-trained weights already learned for feature extraction will remain the same.
# + tags=[]
# Set the existing feature extraction layers to read-only
for param in model.parameters():
param.requires_grad = False
# Replace the prediction layer
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(classes))
# Now print the full model, which will include the feature extraction layers of the base model and our prediction layer
print(model)
# -
# ## Train the model
#
# With the layers of the CNN defined, we're ready to train it using our image data. The weights used in the feature extraction layers from the base resnet model will not be changed by training, only the final linear layer that maps the features to our shape classes will be trained.
# + tags=[]
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and return the average per-batch loss.

    Relies on the module-level `loss_criteria` defined in a later cell.
    NOTE(review): assumes train_loader yields at least one batch, otherwise
    `batch_idx` is undefined at the average-loss line.
    """
    # Set the model to training mode
    model.train()
    train_loss = 0
    print("Epoch:", epoch)
    # Process the images in batches
    for batch_idx, (data, target) in enumerate(train_loader):
        # Use the CPU or GPU as appropriate
        data, target = data.to(device), target.to(device)

        # Reset the optimizer
        optimizer.zero_grad()

        # Push the data forward through the model layers
        output = model(data)

        # Get the loss
        loss = loss_criteria(output, target)

        # Keep a running total
        train_loss += loss.item()

        # Backpropagate
        loss.backward()
        optimizer.step()

        # Print metrics for every 10 batches so we see some progress
        if batch_idx % 10 == 0:
            print('Training set [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    # return average loss for the epoch
    avg_loss = train_loss / (batch_idx+1)
    print('Training set: Average loss: {:.6f}'.format(avg_loss))
    return avg_loss
def test(model, device, test_loader):
    """Evaluate the model on the test set; print accuracy, return average loss.

    Relies on the module-level `loss_criteria` defined in a later cell.
    NOTE(review): assumes test_loader yields at least one batch, otherwise
    the average-loss division is by zero.
    """
    # Switch the model to evaluation mode (so we don't backpropagate or drop)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        batch_count = 0
        for data, target in test_loader:
            batch_count += 1
            data, target = data.to(device), target.to(device)

            # Get the predicted classes for this batch
            output = model(data)

            # Calculate the loss for this batch
            test_loss += loss_criteria(output, target).item()

            # Calculate the accuracy for this batch
            _, predicted = torch.max(output.data, 1)
            correct += torch.sum(target==predicted).item()

    # Calculate the average loss and total accuracy for this epoch
    avg_loss = test_loss/batch_count
    print('Validation set: Average loss: {:.6f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        avg_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

    # return average loss for the epoch
    return avg_loss
# Now use the train and test functions to train and test the model
device = "cpu"
if (torch.cuda.is_available()):
# if GPU available, use cuda (on a cpu, training will take a considerable length of time!)
device = "cuda"
print('Training on', device)
# Create an instance of the model class and allocate it to the device
model = model.to(device)
# Use an "Adam" optimizer to adjust weights
# (see https://pytorch.org/docs/stable/optim.html#algorithms for details of supported algorithms)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Specify the loss criteria
loss_criteria = nn.CrossEntropyLoss()
# Track metrics in these arrays
epoch_nums = []
training_loss = []
validation_loss = []
# Train over 3 epochs (in a real scenario, you'd likely use many more)
epochs = 3
for epoch in range(1, epochs + 1):
train_loss = train(model, device, train_loader, optimizer, epoch)
test_loss = test(model, device, test_loader)
epoch_nums.append(epoch)
training_loss.append(train_loss)
validation_loss.append(test_loss)
# -
# ## View the loss history
#
# We tracked average training and validation loss for each epoch. We can plot these to verify that the loss reduced over the training process and to detect *over-fitting* (which is indicated by a continued drop in training loss after validation loss has levelled out or started to increase).
# +
# %matplotlib inline
from matplotlib import pyplot as plt
plt.plot(epoch_nums, training_loss)
plt.plot(epoch_nums, validation_loss)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['training', 'validation'], loc='upper right')
plt.show()
# -
# ## Evaluate model performance
#
# We can see the final accuracy based on the test data, but typically we'll want to explore performance metrics in a little more depth. Let's plot a confusion matrix to see how well the model is predicting each class.
# + tags=[]
#Pytorch doesn't have a built-in confusion matrix metric, so we'll use SciKit-Learn
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# %matplotlib inline

# Move the model to the CPU once up front (previously model.cpu() was
# called inside the loop for every batch) and set evaluation mode
model = model.cpu()
model.eval()

# Get predictions for the test data and convert to numpy arrays for use with SciKit-Learn
print("Getting predictions from test set...")
truelabels = []
predictions = []
for data, target in test_loader:
    # collect the true labels and the argmax class predictions per batch
    truelabels.extend(target.cpu().data.numpy())
    predictions.extend(model(data).data.numpy().argmax(1))

# Plot the confusion matrix
cm = confusion_matrix(truelabels, predictions)
plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.xlabel("Predicted Shape")
plt.ylabel("Actual Shape")
plt.show()
# -
# ## Use the trained model
#
# Now that we've trained the model, we can use it to predict the class of an image.
# + tags=[]
# Function to create a random image (of a square, circle, or triangle)
def create_image (size, shape):
    """Draw one randomly-sized, randomly-coloured shape on a white canvas.

    size  -- (width, height) tuple for the output image
    shape -- 'circle', 'triangle', or anything else for a square
    Returns a PIL.Image in RGB mode.
    """
    from random import randint
    from PIL import Image, ImageDraw

    # Random bounding box corners and fill colour for the shape
    xy1 = randint(10,40)
    xy2 = randint(60,100)
    col = (randint(0,200), randint(0,200), randint(0,200))

    # White background canvas
    img = Image.new("RGB", size, (255, 255, 255))
    draw = ImageDraw.Draw(img)
    if shape == 'circle':
        draw.ellipse([(xy1,xy1), (xy2,xy2)], fill=col)
    elif shape == 'triangle':
        draw.polygon([(xy1,xy1), (xy2,xy2), (xy2,xy1)], fill=col)
    else: # square
        draw.rectangle([(xy1,xy1), (xy2,xy2)], fill=col)
    return img
# Function to predict the class of an image
def predict_image(classifier, image):
    """Predict the class index of a single PIL image with the trained classifier.

    classifier -- trained torch.nn.Module
    image      -- PIL image to classify
    Returns the integer index of the highest-scoring class.
    """
    # Set the classifier model to evaluation mode
    classifier.eval()

    # Apply the same transformations as we did for the training images
    transformation = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    # Preprocess the image and add an extra batch dimension,
    # since pytorch treats all inputs as batches
    image_tensor = transformation(image).float().unsqueeze(0)

    # Predict the class of the image. torch.no_grad() skips building an
    # autograd graph for pure inference; the deprecated Variable wrapper
    # previously used here has been a no-op since PyTorch 0.4.
    with torch.no_grad():
        output = classifier(image_tensor)
    index = output.data.numpy().argmax()
    return index
# Now let's try it with a new image
from random import randint
from PIL import Image
import os, shutil
# Create a random test image
shape = classes[randint(0, len(classes)-1)]
img = create_image ((128,128), shape)
# Display the image
plt.imshow(img)
index = predict_image(model, img)
print(classes[index])
# -
# ## Learn more
#
# * [PyTorch Documentation](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html)
| 05c - Transfer Learning (PyTorch).ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.4
# language: julia
# name: julia-1.5
# ---
# # Introduction to Turing
#
# ## Introduction
#
# This is the first of a series of tutorials on the universal probabilistic programming language **Turing**.
#
# Turing is a probabilistic programming system written entirely in Julia. It has an intuitive modelling syntax and supports a wide range of sampling-based inference algorithms. Most importantly, Turing inference is composable: it combines Markov chain sampling operations on subsets of model variables, e.g. using a combination of a Hamiltonian Monte Carlo (HMC) engine and a particle Gibbs (PG) engine. This composable inference engine allows the user to easily switch between black-box style inference methods such as HMC and customized inference methods.
#
# Familiarity with Julia is assumed throughout this tutorial. If you are new to Julia, [Learning Julia](https://julialang.org/learning/) is a good starting point.
#
# For users new to Bayesian machine learning, please consider more thorough introductions to the field, such as [Pattern Recognition and Machine Learning](https://www.springer.com/us/book/9780387310732). This tutorial tries to provide an intuition for Bayesian inference and gives a simple example on how to use Turing. Note that this is not a comprehensive introduction to Bayesian machine learning.
# ### Coin Flipping Without Turing
# The following example illustrates the effect of updating our beliefs with every piece of new evidence we observe. In particular, assume that we are unsure about the probability of heads in a coin flip. To get an intuitive understanding of what "updating our beliefs" is, we will visualize the probability of heads in a coin flip after each observed evidence.
#
# First, let's load some of the packages we need to flip a coin (`Random`, `Distributions`) and show our results (`Plots`). You will note that Turing is not an import here — we do not need it for this example. If you are already familiar with posterior updates, you can proceed to the next step.
# +
# Using Base modules.
using Random
# Load a plotting library.
using Plots
# Load the distributions library.
using Distributions
# -
# Next, we configure our posterior update model. First, let's set the true probability that any coin flip will turn up heads and set the number of coin flips we will show our model:
# +
# Set the true probability of heads in a coin.
p_true = 0.5
# Iterate from having seen 0 observations to 100 observations.
Ns = 0:100;
# -
# We will now use the Bernoulli distribution to flip 100 coins, and collect the results in a variable called `data`:
# +
# Draw data from a Bernoulli distribution, i.e. draw heads or tails.
Random.seed!(12)
data = rand(Bernoulli(p_true), last(Ns))
# Here's what the first five coin flips look like:
data[1:5]
# -
# After flipping all our coins, we want to set a prior belief about what we think the distribution of coin flips look like. In this case, we are going to choose a common prior distribution called the [Beta](https://en.wikipedia.org/wiki/Beta_distribution) distribution.
# Our prior belief about the probability of heads in a coin toss.
prior_belief = Beta(1, 1);
# With our priors set and our data at hand, we can perform Bayesian inference.
#
# This is a fairly simple process. We expose one additional coin flip to our model every iteration, such that the first run only sees the first coin flip, while the last iteration sees all the coin flips. Then, we set the `updated_belief` variable to an updated version of the original Beta distribution that accounts for the new proportion of heads and tails.
# For the mathematically inclined, the `Beta` distribution is updated by adding each coin flip to the distribution's $\alpha$ and $\beta$ parameters, which are initially defined as $\alpha = 1, \beta = 1$. Over time, with more and more coin flips, $\alpha$ and $\beta$ will be approximately equal to each other as we are equally likely to flip a heads or a tails, and the plot of the beta distribution will become more tightly centered around 0.5.
#
# This works because mean of the `Beta` distribution is defined as the following:
#
# $$ \text{E}[\text{Beta}] = \dfrac{\alpha}{\alpha+\beta} $$
#
# Which is 0.5 when $\alpha = \beta$, as we expect for a large enough number of coin flips. As we increase the number of samples, our variance will also decrease, such that the distribution will reflect less uncertainty about the probability of receiving a heads. The definition of the variance for the `Beta` distribution is the following:
#
# $$ \text{var}[\text{Beta}] = \dfrac{\alpha\beta}{(\alpha + \beta)^2 (\alpha + \beta + 1)} $$
#
# The intuition about this definition is that the variance of the distribution will approach 0 with more and more samples, as the denominator will grow faster than will the numerator. More samples means less variance.
# +
# Import StatsPlots for animating purposes.
using StatsPlots
# Make an animation.
animation = @gif for (i, N) in enumerate(Ns)
# Count the number of heads and tails.
heads = sum(data[1:i-1])
tails = N - heads
# Update our prior belief in closed form (this is possible because we use a conjugate prior).
updated_belief = Beta(prior_belief.α + heads, prior_belief.β + tails)
# Plotting
plot(updated_belief,
size = (500, 250),
title = "Updated belief after $N observations",
xlabel = "probability of heads",
ylabel = "",
legend = nothing,
xlim = (0,1),
fill=0, α=0.3, w=3)
vline!([p_true])
end;
animation
# -
# The animation above shows that with increasing evidence our belief about the probability of heads in a coin flip slowly adjusts towards the true value. The orange line in the animation represents the true probability of seeing heads on a single coin flip, while the mode of the distribution shows what the model believes the probability of a heads is given the evidence it has seen.
# ### Coin Flipping With Turing
#
# In the previous example, we used the fact that our prior distribution is a [conjugate prior](https://en.wikipedia.org/wiki/Conjugate_prior). Note that a closed-form expression (the `updated_belief` expression) for the posterior is not accessible in general and usually does not exist for more interesting models.
#
# We are now going to move away from the closed-form expression above and specify the same model using **Turing**. To do so, we will first need to import `Turing`, `MCMCChains`, `Distributions`, and `StatsPlots`. `MCMCChains` is a library built by the Turing team to help summarize Markov Chain Monte Carlo (MCMC) simulations, as well as a variety of utility functions for diagnostics and visualizations.
# +
# Load Turing and MCMCChains.
using Turing, MCMCChains
# Load the distributions library.
using Distributions
# Load StatsPlots for density plots.
using StatsPlots
# -
# First, we define the coin-flip model using Turing.
# Turing model: each observation y[n] is a Bernoulli draw whose success
# probability p carries a Beta(1, 1) (i.e. uniform) prior.
@model coinflip(y) = begin
    # Our prior belief about the probability of heads in a coin.
    p ~ Beta(1, 1)
    # The number of observations.
    N = length(y)
    for n in 1:N
        # Heads or tails of a coin are drawn from a Bernoulli distribution.
        y[n] ~ Bernoulli(p)
    end
end;
# After defining the model, we can approximate the posterior distribution by drawing samples from the distribution. In this example, we use a [Hamiltonian Monte Carlo](https://en.wikipedia.org/wiki/Hamiltonian_Monte_Carlo) sampler to draw these samples. Later tutorials will give more information on the samplers available in Turing and discuss their use for different models.
# +
# Settings of the Hamiltonian Monte Carlo (HMC) sampler.
iterations = 1000
ϵ = 0.05
τ = 10
# Start sampling.
chain = sample(coinflip(data), HMC(ϵ, τ), iterations, progress=false);
# -
# After finishing the sampling process, we can visualize the posterior distribution approximated using Turing against the posterior distribution in closed-form. We can extract the chain data from the sampler using the `Chains(chain[:p])` function, exported from the `MCMCChain` module. `Chains(chain[:p])` creates an instance of the `Chain` type which summarizes the MCMC simulation — the `MCMCChain` module supports numerous tools for plotting, summarizing, and describing variables of type `Chain`.
# Construct summary of the sampling process for the parameter p, i.e. the probability of heads in a coin.
p_summary = chain[:p]
plot(p_summary, seriestype = :histogram)
# Now we can build our plot:
# +
# Compute the posterior distribution in closed-form.
N = length(data)
heads = sum(data)
updated_belief = Beta(prior_belief.α + heads, prior_belief.β + N - heads)
# Visualize a blue density plot of the approximate posterior distribution using HMC (see Chain 1 in the legend).
p = plot(p_summary, seriestype = :density, xlim = (0,1), legend = :best, w = 2, c = :blue)
# Visualize a green density plot of posterior distribution in closed-form.
plot!(p, range(0, stop = 1, length = 100), pdf.(Ref(updated_belief), range(0, stop = 1, length = 100)),
xlabel = "probability of heads", ylabel = "", title = "", xlim = (0,1), label = "Closed-form",
fill=0, α=0.3, w=3, c = :lightgreen)
# Visualize the true probability of heads in red.
vline!(p, [p_true], label = "True probability", c = :red)
# -
# As we can see, the Turing model closely approximates the true probability. Hopefully this tutorial has provided an easy-to-follow, yet informative introduction to Turing's simpler applications. More advanced usage will be demonstrated in later tutorials.
| 0_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="7kfJNJEricBi"
# <center>
# <h1>JaxTon</h1>
# <i>💯 JAX exercises</i>
# <br>
# <br>
# <a href='https://github.com/vopani/jaxton/blob/master/LICENSE'>
# <img src='https://img.shields.io/badge/license-Apache%202.0-blue.svg?logo=apache'>
# </a>
# <a href='https://github.com/vopani/jaxton'>
# <img src='https://img.shields.io/github/stars/vopani/jaxton?color=yellowgreen&logo=github'>
# </a>
# <a href='https://twitter.com/vopani'>
# <img src='https://img.shields.io/twitter/follow/vopani'>
# </a>
# </center>
# + [markdown] id="rE5JA08gicBv"
# <center>
# This is Set 3: Pseudorandom Numbers (Exercises 21-30) of <b>JaxTon</b>: <i>💯 JAX exercises</i>
# <br>
# You can find all the exercises and solutions on <a href="https://github.com/vopani/jaxton#exercises-">GitHub</a>
# </center>
# + [markdown] id="oNlhFCaticBx"
# **Prerequisites**
#
# * The configuration of jax should be set as shown in the code snippet below in order to use TPUs.
# * A sample array `sample_data` will be used for the exercises.
# + _kg_hide-input=true _kg_hide-output=true id="XCcSL2Z_icBz" outputId="84777e70-71a4-4ad9-fea4-0cbe1f5f6a23" colab={"base_uri": "https://localhost:8080/"}
# !python3 -m pip install jax
# + _kg_hide-input=false id="jsitm334icB2" outputId="5681eb51-7f41-4786-e1ff-3ee701478e50" colab={"base_uri": "https://localhost:8080/"}
## import packages
import jax
import jax.numpy as jnp
import os
import requests
## setup JAX to use TPUs if available
try:
url = 'http:' + os.environ['TPU_NAME'].split(':')[1] + ':8475/requestversion/tpu_driver_nightly'
resp = requests.post(url)
jax.config.FLAGS.jax_xla_backend = 'tpu_driver'
jax.config.FLAGS.jax_backend_target = os.environ['TPU_NAME']
except:
pass
jax.devices()
# + id="YcZcxqUbicB5" outputId="4924f04a-3d1f-42ec-ea1b-daf523f43cbd" colab={"base_uri": "https://localhost:8080/"}
## sample data
sample_data = jnp.array([10, 1, 24, 20, 15, 14])
sample_data
# + [markdown] id="gUat92LEicB8"
# **Exercise 21: Create a pseudorandom number generator key with seed=100 and assign it to `key`**
# + id="EdZfNuI0icB-" outputId="46bf5e10-25cc-408a-c4e4-67ac0d0e6194" colab={"base_uri": "https://localhost:8080/"}
key = jax.random.PRNGKey(100)
key
# + [markdown] id="JgGAad3MicCA"
# **Exercise 22: Create a subkey from `key` and assign it to `subkey`**
# + id="wmLVKWG2icCB" outputId="45d36b86-238a-4dd4-8b6d-57b2e22f4979" colab={"base_uri": "https://localhost:8080/"}
key, subkey = jax.random.split(key)
key, subkey
# + [markdown] id="izDjQKSricCC"
# **Exercise 23: Split `key` into seven subkeys `key_1`, `key_2`, `key_3`, `key_4`, `key_5`, `key_6` and `key_7`**
# + id="7X-Wsg8oicCD" outputId="1aeb5ea5-4fbf-4139-b02a-4a2fe3c6459e" colab={"base_uri": "https://localhost:8080/"}
key_1, key_2, key_3, key_4, key_5, key_6, key_7 = jax.random.split(key, num=7)
key_1
# + [markdown] id="Ud7BK4tlicCD"
# **Exercise 24: Create a random permutation of `sample_data` using `key_1` and assign it to `data_permutation`**
# + id="_lrpKVomicCE" outputId="d2f100fd-2c2f-40c1-d0bf-6c088f1dc844" colab={"base_uri": "https://localhost:8080/"}
data_permutation = jax.random.permutation(key_1, sample_data)
data_permutation
# + [markdown] id="tA8zm6s4icCF"
# **Exercise 25: Choose a random element from `sample_data` using `key_2` and assign it to `random_selection`**
# + id="Z1g3aGaeicCF" outputId="50b4c5e3-d01a-4689-d236-fa5c53083ad8" colab={"base_uri": "https://localhost:8080/"}
random_selection = jax.random.choice(key_2, sample_data)
random_selection
# + [markdown] id="WuLsD9bUicCF"
# **Exercise 26: Sample an integer between 10 and 24 using `key_3` and assign it to `sample_int`**
# + id="FL4HWXEYicCG" outputId="92fc44f7-bd43-49ac-983f-7f32bc5a48e6" colab={"base_uri": "https://localhost:8080/"}
sample_int = jax.random.randint(key_3, shape=(1,), minval=10, maxval=24)
sample_int
# + [markdown] id="lD-C8EJ6icCG"
# **Exercise 27: Sample two values from uniform distribution between 1 and 2 using `key_4` and assign it to `sample_uniform`**
# + id="gft1z7xxicCH" outputId="af62bc5e-0020-4c0c-c478-e308d0ee92d2" colab={"base_uri": "https://localhost:8080/"}
sample_uniform = jax.random.uniform(key_4, shape=(2,), minval=1, maxval=2)
sample_uniform
# + [markdown] id="UVQEpnoYicCI"
# **Exercise 28: Sample three values from bernoulli distribution using `key_5` and assign it to `sample_bernoulli`**
# + id="vHuc2R5BicCI" outputId="f998ada5-b5ba-491f-f228-b53a12a65437" colab={"base_uri": "https://localhost:8080/"}
sample_bernoulli = jax.random.bernoulli(key_5, shape=(3,))
sample_bernoulli
# + [markdown] id="aBBSXdYaicCI"
# **Exercise 29: Sample a 2x3 matrix from poisson distribution with λ=100 using `key_6` and assign it to `sample_poisson`**
# + id="7n3M8R1QicCK" outputId="f751c153-ee11-4612-8fe8-024ec5b8f9ae" colab={"base_uri": "https://localhost:8080/"}
sample_poisson = jax.random.poisson(key_6, lam=100, shape=(2, 3))
sample_poisson
# + [markdown] id="TDm1ORe-icCK"
# **Exercise 30: Sample a 2x3x4 array from normal distribution using `key_7` and assign it to `sample_normal`**
# + id="pv-Qho3KicCL" outputId="42b02be7-59ed-4caa-efbe-be1508a1439e" colab={"base_uri": "https://localhost:8080/"}
sample_normal = jax.random.normal(key_7, shape=(2, 3, 4))
sample_normal
# + [markdown] id="deFVF3HiicCL"
# <center>
# This completes Set 3: Pseudorandom Numbers (Exercises 21-30) of <b>JaxTon</b>: <i>💯 JAX exercises</i>
# <br>
# You can find all the exercises and solutions on <a href="https://github.com/vopani/jaxton#exercises-">GitHub</a>
# </center>
| notebooks/03_pseudorandom_numbers_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # %matplotlib widget
# # %matplotlib ipympl
# # %matplotlib qt
# +
from mpl_toolkits.mplot3d import Axes3D
import h5py
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# import ipympl
# -
filename = './data/ModelNet40_cloud.h5'
# +
exclude_classes = (0,2) # (6,9) # [x for x in range( 40 )]
filename0 = './data/ModelNet38_cloud.h5'
filename1 = './data/ModelNet02_cloud.h5'
# -
h5py_data = h5py.File( filename, 'r' )
# +
train_data = np.array( h5py_data[ 'tr_cloud' ] )
train_lbls = np.array( h5py_data[ 'tr_labels' ] )
test_data = np.array( h5py_data[ 'test_cloud' ] )
test_lbls = np.array( h5py_data[ 'test_labels' ] )
# +
print( "Train data shape is {}.".format( np.shape( train_data ) ) )
print( "Train labels shape is {}.".format( np.shape( train_lbls ) ) )
print( "Test data shape is {}.".format( np.shape( test_data ) ) )
print( "Test labels shape is {}.".format( np.shape( test_lbls ) ) )
# -
test_flag = False
train_flag = False
for cc in exclude_classes:
test_flag = np.logical_or( test_flag, cc==test_lbls )
train_flag = np.logical_or( train_flag, cc==train_lbls )
# +
flag = np.logical_not( train_flag )
train_data0 = train_data[flag,:,:]
train_lbls0 = train_lbls[flag]
flag = np.logical_not( test_flag )
test_data0 = test_data[flag,:,:]
test_lbls0 = test_lbls[flag]
with h5py.File( filename0, 'w' ) as hf:
hf.create_dataset( 'tr_cloud', data=train_data0 )
hf.create_dataset( 'tr_labels', data=train_lbls0 )
hf.create_dataset( 'test_cloud', data=test_data0 )
hf.create_dataset( 'test_labels', data=test_lbls0 )
# -
def update_class( lbls, exclude_classes ):
    """Relabel, in place, each class id in `exclude_classes` to its position index.

    lbls            -- numpy integer array of class labels (modified in place)
    exclude_classes -- iterable of original class ids; the class at position
                       ii is renumbered to ii

    All match masks are computed against the original labels before any
    assignment, so a newly assigned index cannot be re-matched by a later
    class id (e.g. with exclude_classes (2, 0) the old code remapped the
    freshly assigned 0's a second time when processing class 0).
    """
    # Snapshot the masks first, against the unmodified labels
    flags = [ lbls == cc for cc in exclude_classes ]
    for ii, (cc, flag) in enumerate( zip( exclude_classes, flags ) ):
        # Scalar assignment broadcasts and preserves the array's integer dtype
        # (np.ones(...) * ii created an unnecessary float intermediate)
        lbls[flag] = ii
        print( "Replaced {} {}'s with {}.".format( np.sum( flag ), cc, ii ) )
# +
flag = train_flag
train_data1 = train_data[flag,:,:]
train_lbls1 = train_lbls[flag]
update_class( train_lbls1, exclude_classes )
flag = test_flag
test_data1 = test_data[flag,:,:]
test_lbls1 = test_lbls[flag]
update_class( test_lbls1, exclude_classes )
with h5py.File( filename1, 'w' ) as hf:
hf.create_dataset( 'tr_cloud', data=train_data1 )
hf.create_dataset( 'tr_labels', data=train_lbls1 )
hf.create_dataset( 'test_cloud', data=test_data1 )
hf.create_dataset( 'test_labels', data=test_lbls1 )
# +
tmp = train_data1[-1]
fig = plt.figure()
ax = fig.add_subplot( 111, projection='3d' )
ax.scatter( tmp[:,0], tmp[:,1], tmp[:,2] )
ax.tick_params( axis='both', which='major', labelsize=12 )
ax.set_xlabel( 'x', fontsize="large" )
ax.set_ylabel( 'y', fontsize="large" )
ax.set_zlabel( 'z', fontsize="large" )
ax.set_title( "Example Model\nBoundary Points (10,000)", fontsize="large" )
# fig.savefig( '../images/bed_10000.png', format='png', dpi=216 )
# +
fig = plt.figure()
ax = fig.add_subplot( 111, projection='3d' )
ax.scatter( tmp[::100,0], tmp[::100,1], tmp[::100,2] )
ax.tick_params( axis='both', which='major', labelsize=12 )
ax.set_xlabel( 'x', fontsize="large" )
ax.set_ylabel( 'y', fontsize="large" )
ax.set_zlabel( 'z', fontsize="large" )
ax.set_title( "Example Model\nBoundary Points (100)", fontsize="large" )
# fig.savefig( '../images/bed_100.png', format='png', dpi=216 )
# -
class_names = [
"airplane", "bathtub", "bed", "bench", "bookshelf", "bottle", "bowl", "car", "chair", "cone",
"cup", "curtain", "desk", "door", "dresser", "flower_pot", "glass_box", "guitar", "keyboard", "lamp",
"laptop", "mantel", "monitor", "night_stand", "person", "piano", "plant", "radio", "range_hood", "sink",
"sofa", "stairs", "stool", "table", "tent", "toilet", "tv_stand", "vase", "wardrobe", "xbox"
]
def hide_axes( ax ):
    """Remove all tick marks from the three axes of a 3D Matplotlib Axes."""
    for axis in (ax.xaxis, ax.yaxis, ax.zaxis):
        axis.set_ticks([])
# +
_, ind = np.unique( train_lbls, return_index=True )
fig = plt.figure( figsize=(20,16) )
C = 8
R = 5
for aa in range( R * C ):
ax = fig.add_subplot( R, C, aa+1, projection='3d' )
data = train_data[ind[aa]]
ax.scatter( data[:,0], data[:,1], data[:,2] )
hide_axes( ax )
ax.set_title( class_names[aa], fontsize='large' )
fig.savefig( '../images/modelnet40.png', format='png', dpi=216 )
# -
| extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <p><font size="6"><b>Rasterio</b></font></p>
#
#
# > *DS Python for GIS and Geoscience*
# > *October, 2021*
# >
# > *© 2021, <NAME> and <NAME>. Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
#
# ---
# > __In this notebook, the typical workflow for handling spatial data with Rasterio is explained.__
#
# Both when working with NumPy and with xarray, [Rasterio](https://rasterio.readthedocs.io/en/latest/) is the interface used to read the data from disk.
#
# Whereas NumPy is a powerful package for calculation, it does not provide any spatial information and therefore lacks support for reprojection, warping,... Apart from reading in a broad set of GIS raster formats, Rasterio also provides some of these GIS raster operations.
# +
import numpy as np
import matplotlib.pyplot as plt
import geopandas
import rasterio
from rasterio.plot import plotting_extent, show, show_hist, reshape_as_image
# -
# Let's recap with what we did so far with Rasterio:
data_file = "./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff"
with rasterio.open(data_file) as src:
herstappe_data = src.read([1, 2, 3], out_dtype=float, masked=False) # Note the float as output
herstappe_profile = src.profile
herstappe_ext = plotting_extent(src)
# + jupyter={"outputs_hidden": false}
herstappe_data.shape, herstappe_data.dtype
# -
# ## Convenient plot functions for data exploration
# Rasterio provides dedicated plot functions `show` and `show_hist` for quick data exploration:
# + jupyter={"outputs_hidden": false}
with rasterio.open(data_file) as src:
# convenience plot functions from rasterio
fig, (axr, axg, axb) = plt.subplots(1,3, figsize=(20, 10))
show((src, 1), ax=axr, cmap='Reds', title='Red channel')
show((src, 2), ax=axg, cmap='Greens', title='Green channel')
show((src, 3), ax=axb, cmap='Blues', title='Blue channel')
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
show_hist(src, bins=50, lw=0.0, stacked=False,
alpha=0.3, histtype='stepfilled', title="Histogram",
label=[1, 2, 3])
# -
# __Note:__ Rasterio directly interacts with Matplotlib objects, so adjustments starting from the figure can be done using the Matplotlib syntax. For example, change the title and the legend labels:
# + jupyter={"outputs_hidden": false}
_, current_labels = ax.get_legend_handles_labels()
current_labels
# + jupyter={"outputs_hidden": false}
ax.set_title("Histogram of RGB bands of Herstappe")
# The histogram's first series is band 1, the red channel — the first
# label previously read "Blue channel" by mistake.
ax.legend(["Red channel", "Green channel", "Blue channel"])
fig
# -
# __Reminder on opening and Closing File Connections__
#
# The Rasterio library is efficient as it establishes a connection with the raster file rather than directly reading it into memory. Because it creates a connection, it is important that you close the connection after it is opened AND after you’ve finished working with the data! That is why we use the context manager to work with raster data in rasterio. This will handle opening and closing the raster file for you.
# __Utility functions for reshaping__
# +
#plt.imshow(herstappe_data)
# -
# Python image processing software packages and Matplotlib organize arrays differently than Rasterio. The interpretation of a 3-dimension array read from rasterio is:
#
# `(bands, rows, columns)`
#
# while Matplotlib and image processing software like scikit-image, pillow are generally ordered:
#
# `(rows, columns, bands)`
#
# We can rely on Numpy to transpose the data :
# + jupyter={"outputs_hidden": false}
plt.imshow(herstappe_data.transpose(1, 2, 0))
# -
# But Rasterio also provides the utility functions `reshape_as_image` and `reshape_as_raster` to support the conversion:
from rasterio.plot import reshape_as_image, reshape_as_raster
# + jupyter={"outputs_hidden": false}
plt.imshow(reshape_as_image(herstappe_data))
# -
# ## Extract the data you need
# In many applications, a specific research area is used. Extracting the data you need from a given raster data set by a vector (polygon) file is a common operation in GIS analysis. We use the clipping example to explain the typical workflow in Rasterio.
#
# For our Herstappe example, the study area is available as vector data `./data/herstappe/vector/herstappe.geojson`:
# + jupyter={"outputs_hidden": false}
herstappe_vect = geopandas.read_file("./data/herstappe/vector/herstappe.geojson")
herstappe_vect
# + jupyter={"outputs_hidden": false}
herstappe_vect.plot()
# +
# herstappe_vect.crs # vs herstappe_profile["crs"]
# -
# Make sure both data sets are defined in the same CRS and extracting the geometry can be used as input for the masking:
herstappe_vect = herstappe_vect.to_crs(epsg=3857)
# [Masking a raster](https://rasterio.readthedocs.io/en/latest/quickstart.html#dataset-georeferencing) with a geometry in Rasterio is supported by the `rasterio.mask` module:
import rasterio.mask
# +
# #?rasterio.mask.mask
# -
# The `mask` function by default just masks the image, but it can also be used to `crop` the data by setting the `crop` argument to True:
# As the required input for the `shape` to mask with, the documentation defines:
#
# The values must be a GeoJSON-like dict or an object that implements
# the Python geo interface protocol (such as a Shapely Polygon).
#
# We learned earlier that GeoPandas relies on Shapely as spatial entities:
# + jupyter={"outputs_hidden": false}
type(herstappe_vect.geometry), type(herstappe_vect.geometry[0])
# -
# We can do the data extraction (masking) within the context manager:
data_file = "./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff"
with rasterio.open(data_file) as src:
out_image, out_transform = rasterio.mask.mask(src, herstappe_vect.geometry, crop=True)
herstappe_profile = src.profile
herstappe_ext = plotting_extent(src)
# + jupyter={"outputs_hidden": false}
type(out_image)
# -
# We can work with `out_image` as a numpy array, but we are lacking the spatial information by doing so, which makes it hard to combine it with other spatial data sets for further analysis:
# + jupyter={"outputs_hidden": false}
plt.imshow(reshape_as_image(out_image)) # extent ?!? extent=herstappe_ext would not be correct.
# -
# Remember the `meta` and the `profile` attributes of the Rasterio object? The latter contains the spatial information and the settings for the GeoTiff file:
# + jupyter={"outputs_hidden": false}
herstappe_profile
# + jupyter={"outputs_hidden": false}
out_image.shape
# -
# To contain the spatial information or save the file for later usage keeping the spatial information, make sure to __update the profile metadata__ according to the transformations you did. In this case:
#
# - width/height of the cropped image
# - affine transform function
herstappe_profile.update({"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform})
# + jupyter={"outputs_hidden": false}
herstappe_profile
# -
# Just as before, we can use the `plotting_extent` function to extract the extent of our data, using the outputs of the masking:
new_extent = plotting_extent(out_image[0], out_transform) # see docstring of plotting_extent on [0] requirement
# + jupyter={"outputs_hidden": false}
plt.imshow(reshape_as_image(out_image), extent=new_extent)
# -
# The updated profile information also allows us to store the output as a Geotiff file:
with rasterio.open("./test.tiff", "w", **herstappe_profile) as dest:
dest.write(out_image)
# Let's read the data we just saved to a file:
# + jupyter={"outputs_hidden": false}
with rasterio.open("./test.tiff") as clipped:
fig, ax = plt.subplots(figsize=(12, 5))
show(clipped, (1, 2, 3), cmap='Greys')
clipped_ext = plotting_extent(clipped)
clipped_array = clipped.read([1, 2, 3],
out_dtype=float, masked=False)
# -
# Quick check if the extent in both situations are the same:
# + jupyter={"outputs_hidden": false}
new_extent == clipped_ext
# -
# ## Rasterio workflow
# The workflow applied to clip the data set is typical when working with Rasterio. It consists of the following main steps:
#
# ```
# # 1
# with rasterio.open(data_file) as src:
#
# # 2
# out_image, out_transform = rasterio.mask.mask(src,
# herstappe_vect.geometry, crop=True)
#
# # 3
# herstappe_profile = src.profile
# herstappe_profile.update({"height": out_image.shape[1],
# "width": out_image.shape[2],
# "transform": out_transform})
# # 4
# with rasterio.open("./test.tiff", "w", **herstappe_profile) as dest:
# dest.write(out_image)
# ```
#
# - 1. Read in a data set using the context manager
# - 2. Read and transform the data set by clipping, resampling,...
# - 3. Update the spatial metadata/profile of the data set
# - 4. Save the new data set with the updated metadata/profile
# Similar operations are:
#
# - Reprojecting a data set, see https://rasterio.readthedocs.io/en/latest/topics/reproject.html
# - Resampling a data set, see https://rasterio.readthedocs.io/en/latest/topics/resampling.html
# - Create a raster mosaic, see https://rasterio.readthedocs.io/en/latest/api/rasterio.merge.html and https://automating-gis-processes.github.io/CSC/notebooks/L5/raster-mosaic.html
# Let's apply this workflow to resample our data set with a factor 2, i.e. doubling the pixel size:
data_file = "./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff"
# +
import rasterio
from rasterio.enums import Resampling
scaling_factor = 2
# Resample the raster by `scaling_factor` using the standard Rasterio workflow:
# read -> transform -> update profile -> write.
with rasterio.open(data_file) as src: # 1
    # resample data to target shape # 2
    # (integer division halves width/height for scaling_factor = 2)
    new_width = int(src.width / scaling_factor)
    new_height = int(src.height / scaling_factor)
    # bilinear interpolation computes the new, coarser pixel values
    out_image = src.read(
        out_shape=(src.count, new_height, new_width),
        resampling=Resampling.bilinear
    )
    # scale image transform and update metadata # 3
    data_profile = src.profile
    # Affine.scale(scaling_factor) enlarges the pixel size so the resampled
    # raster still covers the same spatial extent as the original.
    out_transform = src.transform * src.transform.scale(scaling_factor)
    data_profile.update({"height": new_height,
                         "width": new_width,
                         "transform": out_transform})
    # save the output to disk # 4
    with rasterio.open("./test_resample.tiff", "w", **data_profile) as dst:
        dst.write(out_image)
# -
# __Note:__ Other resampling algorithms are available in the `Resampling` module of rasterio.
# +
# #!gdalinfo ./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff
# #!gdalinfo ./test_resample.tiff
# -
# <div class="alert alert-info" style="font-size:120%">
#
# **REMEMBER**: <br>
#
# Many of the typical GIS operations provided by Rasterio have a common workflow. In order to preserve the spatial information, make sure to update the attributes such as transform, width and height accordingly before saving the file.
#
# </div>
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# You received remote sensing data from Ghent. You're wondering if the remote sensing image covers the entire administrative area of Ghent and decide to make a plot to make the comparison. The vector file of Ghent is available as well, see `./data/herstappe/vector/herstappe.geojson`.
#
# Make a plot of `gent_data` (band 4 `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff`) together with `gent_vect` (read from `./data/herstappe/vector/herstappe.geojson`) using Matplotlib.
#
#
# <details><summary>Hints</summary>
#
# * Make sure to align the CRS of both the vector as the raster data set.
# * Remember the `extent` trick to use the coordinates as Matplotlib axis?
# * The Matplotlib `plot` function provides `color`, `edgecolor`,... to adjust the style of the plot.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio1.py
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio2.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio3.py
# -
# <div class="alert alert-success">
#
# **EXERCISE**:
#
# The files `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff` and `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B08.tiff` are respectively band 4 and 8. Combine them in a single Geotiff file:
#
# - Read in each of the data sets with rasterio
# - Stack the two data sets with Numpy
# - Write the resulting `(2, 317, 625)` array to a new geotiff file `./B0408.tiff` with data type `uint32`
#
#
# <details><summary>Hints</summary>
#
# * The workflow is similar to before: you want to read, update the metadata/profile and write the output
# * Stacking arrays in Numpy is done by `np.vstack`, converting the dtype by `astype(...)`
# * The metadata/profile to update is neither the transform nor the width/height. Check the `count` and the `dtype` keys of the profile
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio4.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio5.py
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio6.py
# -
# <div class="alert alert-success">
#
# **ADVANCED EXERCISE**:
#
# You received multiple remote sensing layers of Ghent that need to be resampled. Instead of copy pasting the same code over and over again, you decide to write your own function to resample a file with Rasterio. The inputs of your function are:
#
# - input_file : reference to a file on disk
# - output_file : where to write the output
# - resampling_method : method from rasterio.Resampling to use when resampling
# - scaling_factor : factor to which the resampling should be applied, default is 2
#
# Apply the function on each of the Geotiff files in the folder `./data/gent/raster` (make sure to not overwrite the original files!)
#
# <details><summary>Hints</summary>
#
# * Start from the code from the example
# * Try to make the function as reusable as possible
# * Make sure to document your own function using [numpy docstring](https://numpydoc.readthedocs.io/en/latest/format.html)
# * The [pathlib](https://docs.python.org/3/library/pathlib.html) module is very powerful for working with file systems. Use the `glob` method to find all files with a given extension.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio7.py
# + tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio8.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/91_package_rasterio9.py
# -
# <div class="alert alert-info" style="font-size:120%">
#
# **REMEMBER**: <br>
#
# When you need to do a particular operation a lot on different files, convert the functionality to a function that you can reuse!
#
# If you are familiar to the command line, using the power of [GDAL](https://gdal.org/index.html) is a valid alternative to programming these conversions with Python/Rasterio. Have a look at [gdalwarp](https://gdal.org/programs/gdalwarp.html) in particular. If interested, the [Using GDAL for preprocessing](https://ocw.un-ihe.org/course/view.php?id=11§ion=3) course provides a good introduction to GDAL.
#
# </div>
# ## BONUS: only download what you need
#
# Rasterio only reads the data from disk that is requested, avoiding loading entire data sets into memory. The same applies to downloading data, avoiding entire downloads when only a fraction is required (when the online resource supports this). An example is https://zenodo.org/record/2654620, which is available as [Cloud Optimized Geotiff (COG)](https://www.cogeo.org/). Cloud providers (AWS, Google, ...) also support COG files, e.g. [Landsat images](https://docs.opendata.aws/landsat-pds/readme.html).
#
# These files are typically very large to download, whereas we might only need a small subset of the data. COG files do support to downloading a subset of the data you need using the masking approach.
#
# Let's use the Averbode nature reserve data as an example, available at the URL: http://s3-eu-west-1.amazonaws.com/lw-remote-sensing/cogeo/20160401_ABH_1_Ortho.tif
averbode_cog_rgb = 'http://s3-eu-west-1.amazonaws.com/lw-remote-sensing/cogeo/20160401_ABH_1_Ortho.tif'
# Check the metadata, without downloading the data itself:
# + jupyter={"outputs_hidden": false}
with rasterio.open(averbode_cog_rgb) as averbode:
print(averbode.count, averbode.width, averbode.height)
print(averbode.meta)
averbode_extent = plotting_extent(averbode)
# -
# Downloading the entire data set would be 37645*35405 pixels of 8 bit, so more or less 1.3 GByte
# + jupyter={"outputs_hidden": false}
37645*35405 / 1e9 # Gb
# -
# Assume that we have a study area which is much smaller than the total extent of the available image:
left, right, bottom, top = averbode_extent
# + jupyter={"outputs_hidden": false}
averbode_study_area = geopandas.read_file("./data/averbode/study_area.geojson")
ax = averbode_study_area.plot();
ax.set_xlim(left, right);
ax.set_ylim(bottom, top);
# -
# In the case of COG data, the data can sometimes be requested on different resolution levels when stored as such. So, to get a very broad overview of the data, we can request the coarsest resolution by resampling and download the data at the resampled resolution:
# + jupyter={"outputs_hidden": false}
with rasterio.open(averbode_cog_rgb) as src:
print(f"Available resolutions are {src.overviews(1)}")
oview = src.overviews(1)[-1] # list of overviews, selecting the latest (most coarse)
print(f"Resampling factor= {oview}")
# Download a resampled version of the data (http://rasterio.readthedocs.io/en/latest/topics/resampling.html), aka thumbnail
thumbnail = src.read(1, out_shape=(1, int(src.height // oview), int(src.width // oview)))
# -
# Compare the thumbnail version of the data with our study area:
# + jupyter={"outputs_hidden": false}
fig, ax = plt.subplots()
ax.imshow(thumbnail, extent=averbode_extent);
averbode_study_area.plot(ax=ax, color='None', edgecolor='red', linewidth=2);
# -
# Downloading the entire data file would be overkill. Instead, we only want to download the data of the study area:
output_file = "./averbode_orthophoto.tiff"
# The resulting data set will still be around 120MB and will take a bit of time, but this is only a fraction of the original data file (GBs):
# Only run this cell when sufficient band width ;-)
with rasterio.open(averbode_cog_rgb) as averbode_rgb:
averbode_rgb_image, averbode_rgb_transform = rasterio.mask.mask(averbode_rgb, averbode_study_area.geometry, crop=True)
averbode_rgb_profile = averbode_rgb.profile
averbode_rgb_profile.update({"driver": "GTiff",
"height": averbode_rgb_image.shape[1],
"width": averbode_rgb_image.shape[2],
"transform": averbode_rgb_transform
})
with rasterio.open(output_file, "w", **averbode_rgb_profile) as dest:
dest.write(averbode_rgb_image)
# Thanks to https://geohackweek.github.io/raster/04-workingwithrasters/ for the inspiration
| notebooks/91_package_rasterio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Machine Learning</font>
# ## Regressão Linear com Scikit-Learn
# ## Definindo o Problema de Negócio
#
# Nosso objetivo é construir um modelo de Machine Learning que seja capaz de fazer previsões sobre a taxa média de ocupação de casas na região de Boston, EUA, por proprietários. A variável a ser prevista é um valor numérico que representa a mediana da taxa de ocupação das casas em Boston. Para cada casa temos diversas variáveis explanatórias. Sendo assim, podemos resolver este problema empregando Regressão Linear Simples ou Múltipla.
# ## Definindo o Dataset
#
# Usaremos o Boston Housing Dataset, que é um conjunto de dados que tem a taxa média de ocupação das casas, juntamente com outras 13 variáveis que podem estar relacionadas aos preços das casas. Esses são os fatores como condições socioeconômicas, condições ambientais, instalações educacionais e alguns outros fatores semelhantes. Existem 506 observações nos dados para 14 variáveis. Existem 12 variáveis numéricas em nosso conjunto de dados e 1 variável categórica. O objetivo deste projeto é construir um modelo de regressão linear para estimar a taxa média de ocupação das casas pelos proprietários em Boston.
# Dataset: https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
# 1. CRIM: per capita crime rate by town
# 2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
# 3. INDUS: proportion of non-retail business acres per town
# 4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
# 5. NOX: nitric oxides concentration (parts per 10 million)
# 6. RM: average number of rooms per dwelling
# 7. AGE: proportion of owner-occupied units built prior to 1940
# 8. DIS: weighted distances to five Boston employment centres
# 9. RAD: index of accessibility to radial highways
# 10. TAX: full-value property-tax rate per 10,000
# 11. PTRATIO: pupil-teacher ratio by town
# 12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# 13. LSTAT: % lower status of the population
# 14. TARGET: Median value of owner-occupied homes in $1000's
# +
# Load the Boston Housing dataset.
# NOTE: `load_boston` was deprecated in scikit-learn 1.0 and removed in 1.2;
# on recent scikit-learn this import fails — pin scikit-learn < 1.2 or obtain
# the data from the original source (see the dataset URL above).
from sklearn.datasets import load_boston
boston = load_boston()
# Load the Python libraries used throughout the notebook.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
# -
# ## Análise Exploratória
# Convertendo o dataset em um dataframe com Pandas
dataset = pd.DataFrame(boston.data, columns = boston.feature_names)
dataset['target'] = boston.target
dataset.head()
# Calculando a média da variável de resposta
valor_medio_esperado_na_previsao = dataset['target'].mean()
valor_medio_esperado_na_previsao
# Imprimindo o desvio padrão via NumPy da variável RM
np.std(dataset['RM'])
# +
#Correlação entre as variáveis com pearsonr do SciPy
from scipy.stats.stats import pearsonr
pearsonr(dataset['RM'], dataset['target'])[0]
# +
# Definindo o range dos valores de x e y
x_range = [dataset['RM'].min(),dataset['RM'].max()]
y_range = [dataset['target'].min(),dataset['target'].max()]
# Plot dos valores de x e y com a média
scatter_plot = dataset.plot(kind = 'scatter', x = 'RM', y = 'target', xlim = x_range, ylim = y_range)
# Cálculo da média
meanY = scatter_plot.plot(x_range, [dataset['target'].mean(),dataset['target'].mean()], '--', color = 'red', linewidth = 1)
meanX = scatter_plot.plot([dataset['RM'].mean(), dataset['RM'].mean()], y_range, '--', color = 'red', linewidth = 1)
# -
# ## Machine Learning
# Importando as funções
from sklearn import linear_model
# Create the linear-regression model object.
# NOTE: the `normalize` parameter was deprecated in scikit-learn 1.0 and
# removed in 1.2; it defaulted to False, so omitting it keeps behavior
# identical while staying compatible with current scikit-learn versions.
modelo_v2 = linear_model.LinearRegression(fit_intercept = True)
# Define os valores de x e y
num_observ = len(dataset)
X = dataset['RM'].values.reshape((num_observ, 1)) # X deve sempre ser uma matriz e nunca um vetor
y = dataset['target'].values # y pode ser um vetor
# Número de dimensões de X (matriz)
np.ndim(X)
# Número de dimensões de y (vetor)
np.ndim(y)
# Treinamento do modelo - fit()
modelo_v2.fit(X,y)
# Imprime os coeficientes
print (modelo_v2.coef_)
print (modelo_v2.intercept_)
# Imprime as previsões
predicts = modelo_v2.predict(X)
print (predicts[0:10])
# +
# Range de valores para x e y
x_range = [dataset['RM'].min(), dataset['RM'].max()]
y_range = [dataset['target'].min(), dataset['target'].max()]
# Primeira camada do Scatter Plot
scatter_plot = dataset.plot(kind = 'scatter', x = 'RM', y = 'target', xlim = x_range, ylim = y_range)
# Segunda camada do Scatter Plot (médias)
meanY = scatter_plot.plot(x_range, [dataset['target'].mean(),dataset['target'].mean()], '--', color = 'red', linewidth = 1)
meanX = scatter_plot.plot([dataset['RM'].mean(),dataset['RM'].mean()], y_range, '--', color = 'red', linewidth = 1)
# Terceira camada do Scatter Plot (linha de regressão)
regression_line = scatter_plot.plot(dataset['RM'], predicts, '-', color = 'orange', linewidth = 2)
# -
# Make a prediction with the trained model for a given number of rooms (RM).
RM = 5
# predict() expects a 2-D array of shape (n_samples, n_features).
Xp = np.array(RM).reshape(-1, 1)
# predict() returns a 1-element ndarray; index [0] to get the scalar —
# formatting a whole ndarray with %f is an error in recent NumPy versions.
print ("Se RM = %01.f nosso modelo prevê que a mediana da taxa de ocupação é %0.1f" % (RM, modelo_v2.predict(Xp)[0]))
| Regressao_Linear_Simples_Scikit-Learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Deep Learning Tutorial for NLP with Tensorflow
# This tutorial shows how to work with NLP tasks using Tensorflow. Material borrowed from [here](https://github.com/rguthrie3/DeepLearningForNLPInPytorch/blob/master/Deep%20Learning%20for%20Natural%20Language%20Processing%20with%20Pytorch.ipynb).
# ---
# +
import tensorflow as tf
import os
import numpy as np
tf.set_random_seed(1) # seed to obtain similar outputs
os.environ['CUDA_VISIBLE_DEVICES'] = '' # avoids using GPU for this session
# -
# ### 1. Introduction of Tensors in Tensorflow
# How to create and handle tensors, which are the building blocks for any deep learning architecture you wish to implement. You can find more information about tensors and why they are used for deep learning modeling here [LINK].
# #### Creating Tensors
# +
# start session
sess = tf.Session()
#initialize_op = tf.global_variables_initializer()
#sess.run(initialize_op)
# 1D tensor (also known as a 1D vector)
V_data = [1., 2., 3.]
V = tf.constant(V_data, tf.float32)
print(V, "\n",sess.run(V))
# 2D tensor (also known as a 2D matrix)
M_data = [[1.,2.,3.],[4.,5.,6.]]
M = tf.constant(M_data, tf.float32)
print(M,"\n", sess.run(M))
# 3D tensor of size 2x2x2
T_data = [[[1.,2., 3.], [4.,5., 6.]],
[[7.,8., 9.], [10.,11., 12]]]
T = tf.constant(T_data, tf.float32)
print(T, "\n", sess.run(T))
# -
# We can also operate on tensors the same way we would on standard numpy matrices. The following show some examples.
# +
# Index into V and produce a scalar
print(V, "\n",sess.run(V[0]))
# Index into M and produce a scalar
print(M, "\n",sess.run(M[0]))
# Index into T and produce a matrix
print(T, "\n",sess.run(T[0]))
# -
# We can also create a tensor with random data with a specified dimension
x = tf.random_normal([3,4,5])
print(x, "\n", sess.run(x))
# #### Operations with Tensors
x = tf.constant([1.,2.,3.])
y = tf.constant([4.,5.,6.])
z = x+y
print(sess.run(z))
# #### Concatenation
# +
# concatenate by rows (axis=0)
x_1 = tf.random_normal([2,5])
y_1 = tf.random_normal([3,5])
z_1 = tf.concat([x_1, y_1], 0)
print(sess.run(z_1))
print("\n")
# concatenate by columns (axis=1)
x_2 = tf.random_normal([2,3])
y_2 = tf.random_normal([2,5])
z_2 = tf.concat([x_2, y_2], 1)
print(sess.run(z_2))
# -
# More on concatenation [here](https://www.tensorflow.org/api_docs/python/tf/concat).
# #### Reshaping tensors
x = tf.random_normal([2,3,4])
print(sess.run(x))
print("\n")
print(sess.run(tf.reshape(x,[2,12])))
print("\n")
print(sess.run(tf.reshape(x,[2,-1]))) # -1 enable automatic infer of dimension
# Notice that every time we do sess.run(x) the values of the tensor keep changing. We can fix this as follows:
x = tf.random_normal([2,3,4])
x_result = sess.run(x) # this makes the results stable
print(x_result)
print("\n")
print(sess.run(tf.reshape(x_result,[2,-1])))
# ### 2. Computation Graphs and Automatic Differentiation
# slightly different from pytorch but still the same effect applies
x = tf.random_normal([2,2])
y = tf.random_normal([2,2])
z = x+y
print(sess.run(z))
# we specify a session to run the evaluation
with sess.as_default():
var_x = tf.Variable(x, name="X")
var_y = tf.Variable(y, name="Y")
var_z = var_x + var_y
var_x.initializer.run()
var_y.initializer.run()
var_z.eval()
print()
tf.gradients(var_z, var_x) # compute gradients of var_z with regards to var_x
# ### 3. Deep Learning Building Blocks: Affine maps, non-linearities and objectives
# The affine map is a function $f(x)$ where
# $$ f(x) = Wx + b $$
#
# where $W$ refers to a weight matrix and vectors $x,b$ are input and bias, respectively.
# +
# We do this slightly different in tensorflow using matmul operation
X = tf.Variable(tf.random_normal([128,20]), name="X")
W = tf.Variable(tf.random_normal([20,30]), name="W")
b = tf.Variable(tf.random_normal([30]), name="b")
tf.matmul(X, W) + b
# -
# #### Non-linearities
# Examples: $tanh(x)$, $\sigma(x)$, and $ReLU(x)$
data = tf.Variable(tf.random_normal([2,2]), name="data")
data.initializer.run(session=sess)
print(sess.run(data))
print(sess.run(tf.nn.relu(data)))
# #### Softmax and Probabilities
# We are computing for $softmax(x)$ where the $i^{th}$ component of $softmax(x)$ is:
# $$ \frac{\exp(x_i)}{\sum_j \exp(x_j)} $$
data = tf.Variable(tf.random_normal([2]), name="data")
data.initializer.run(session=sess)
print(sess.run(data))
print(sess.run(tf.nn.softmax(data)))
print(sess.run(tf.reduce_sum(tf.nn.softmax(data))))
print(sess.run(tf.log(tf.nn.softmax(data))))
# ### 4. Optimization and Training
# We compute for gradients that are used to update our weights so as to minimize the loss function as the objective:
#
# $$ \theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta) $$
# ### 5. Creating Network Components in Tensorflow
# #### Logistic regression on BOW classifier
# Given a BOW vector representation $x$ of a sentence $s$. The output of our network is computed as follows:
# $$log(softmax(Wx+b))$$
# where
# $$\forall i\in \{|s|\}: x_i = count(s_i)$$ and $|s|$ is the number of words in a given sentence
# +
data = [ ("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH") ]
test_data = [ ("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# defining the training set labels or targets
label_to_ix = { "SPANISH": [1,0], "ENGLISH": [0,1] }
# word_to_ix maps each word in the vocab to a unique integer, which will be its
# index into the Bag of words vector
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
print (word_to_ix)
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
# -
class BOWClassifier(object):
    """Logistic-regression bag-of-words classifier (TensorFlow 1.x graph mode).

    Builds placeholders for a BOW count vector and a one-hot label, a single
    affine layer (Wx + b), log-probabilities of the classes, and a softmax
    cross-entropy loss over the raw logits.
    """

    def __init__(self, num_labels, vocab_size):
        # input: BOW count vector(s) and one-hot target label(s)
        self.X = tf.placeholder(tf.float32, [None, vocab_size])
        self.Y = tf.placeholder(tf.float32, [None, num_labels])
        # trainable weights and bias of the affine map
        self.W = tf.Variable(tf.random_uniform([vocab_size, num_labels]))
        self.b = tf.Variable(tf.random_uniform([num_labels]))
        # log probabilities of output
        self.logits = tf.matmul(self.X, self.W) + self.b
        # log_softmax is mathematically identical to log(softmax(x)) but
        # numerically stable (avoids log(0) underflow for extreme logits)
        self.log_probs = tf.nn.log_softmax(self.logits)
        # loss computed from raw logits (the op applies softmax internally)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
# +
def make_bow_vector(sentence, word_to_ix):
    """Build a (1, |V|) bag-of-words count vector for a tokenized sentence."""
    counts = np.zeros(len(word_to_ix))
    token_ids = [word_to_ix[token] for token in sentence]
    # ufunc.at accumulates repeated indices, so duplicate words are counted
    np.add.at(counts, token_ids, 1)
    return counts.reshape(1, -1)
def make_target(label, label_to_ix):
    """Look up the one-hot encoding for `label` and wrap it as a (1, n) array."""
    one_hot = label_to_ix[label]
    return np.array([one_hot])
# -
# My observation is that with Pytorch it is easy to create variables and assign values to them. With tensorflow this is not so easy, therefore I preferred to use numpy to assist with the make_bow_vector function.
model = BOWClassifier(NUM_LABELS, VOCAB_SIZE)
# parameters can be obtained directly
model.W
# Let's just obtain the log probabilities of the output
with tf.Session() as sess:
# initialize and run all variables so that we can use their values directly
init = tf.global_variables_initializer()
sess.run(init)
# prepare bow vector
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = sess.run(model.log_probs, feed_dict = {model.X: bow_vector})
print(log_probs)
# Now let's print the matrix parameters corresponding to as specific word such as "creo"
with tf.Session() as sess:
# initialize and run all variables so that we can use their values directly
init = tf.global_variables_initializer()
sess.run(init)
for instance, label in test_data:
bow_vector = make_bow_vector(instance, word_to_ix)
params, log_probs = sess.run([model.W, model.log_probs], feed_dict = {model.X: bow_vector})
print(log_probs)
print(params.T[:,word_to_ix["creo"]])
# Here we train the model and use optimization to minimize lost.
with tf.Session() as sess:
# training procedure
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(model.loss)
# initialize and run all variables so that we can use their values directly
init = tf.global_variables_initializer()
sess.run(init)
# train cycle with training data
for epoch in range(15):
for instance, label in data:
bow_vector = make_bow_vector(instance, word_to_ix)
target = make_target(label, label_to_ix)
_, loss, params, log_probs = sess.run([train_op, model.loss, model.W, model.log_probs],
feed_dict = {model.X: bow_vector, model.Y:target})
print(loss)
print("Optimization finished!!!")
# test with testing data
for instance, label in test_data:
bow_vec = make_bow_vector(instance, word_to_ix)
params, log_probs = sess.run([model.W, model.log_probs],
feed_dict={model.X: bow_vec})
print(log_probs)
print(params.T[:,word_to_ix["creo"]])
# We can observe that the log probability for Spanish is much higher in the first test sample, while the log probability for English is much higher in the second example, which matches the testing-data labels.
print(params.T[:,word_to_ix["gusta"]])
# ### 6. Word Embeddings: Enconding Lexical Semantics
# We aim to build dense representations of a vocabulary to obtain semantic similarity between words, which helps to support the distributional hypothesis (words appearing in similar contexts are related to each other semantically).
#
# Each vector representation of a word contains some semantics attributes that determine similar words through some similarity measure like cosine similarity.
#
# These latent semantic attributes (features) are determined or learned automatically by a neural network. In other words, the parameters of the model are the word embeddings, which are learned during training. The attributes learned cannot be interpreted since they are learned by the neural network during the training.
#
# "In summary, word embeddings are a representation of the semantics of a word, efficiently encoding semantic information that might be relevant to the task at hand. You can embed other things too: part of speech tags, parse trees, anything! The idea of feature embeddings is central to the field."
# ### Word Embeddings in Tensorflow
# First, we build a lookup table which keeps indexes. The results is a $|V|\times D$ matrix, where $|V|$ is the size of the vocabulary and $D$ is the dimensionality of the embeddings. This means that a word with index $i$ has its embedding stored in the $i$th row of the matrix.
# +
sess = tf.Session()
word_to_ix = { "hello": 0, "world": 1 }
word_ids = [0,1]
embeds = tf.Variable(tf.random_uniform([2,5]), name="word_embeddings")
embedded_word_ids = tf.nn.embedding_lookup(embeds, word_ids)
#print(sess.run(embedded_word_ids))
# -
# ---
# ### Example: N-gram Language Modeling
# In this model we aim to predict target word from word sequence; i.e., we aim to compute:
# $$ P(w_i | w_{i-1}, w_{i-2}, \dots, w_{i-n+1} ) $$
# , where $w_i$ is the ith word of the sequence.
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [ ([test_sentence[i], test_sentence[i+1]], test_sentence[i+2]) for i in range(len(test_sentence) - 2) ]
print (trigrams[:3]) # print the first 3, just so you can see what they look like
vocab = set(test_sentence)
word_to_ix = { word: i for i, word in enumerate(vocab) }
class NGramLanguageModeler(object):
    """Trigram language model: predicts a target word from its preceding
    context words, trained with NCE (noise-contrastive estimation) loss
    (TensorFlow 1.x graph mode).
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        # inputs and outputs
        self.X = tf.placeholder(tf.int32, [ context_size ]) # context (word sequence)
        self.Y = tf.placeholder(tf.int32, [1,1]) # target word id, shape (1, 1)
        # Embeddings: one row of size embedding_dim per vocabulary word,
        # initialised uniformly in [-1, 1]; these are the learned parameters.
        self.embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_dim], -1.0, 1.0))
        self.embed = tf.nn.embedding_lookup(self.embeddings, self.X)
        # Output-layer parameters consumed by the NCE loss.
        self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_dim]))
        self.b = tf.Variable(tf.random_uniform([vocab_size]))
        # loss
        # NOTE(review): tf.nn.nce_loss expects `inputs` of shape
        # (batch_size, embedding_dim) with batch_size matching `labels`;
        # here `self.embed` is (context_size, embedding_dim) while labels
        # are (1, 1) — the context embeddings are presumably meant to be
        # combined (e.g. summed) first; verify against the TF docs.
        self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=self.W,
                                                  biases=self.b,
                                                  labels=self.Y,
                                                  inputs=self.embed,
                                                  num_sampled=64,
                                                  num_classes=vocab_size))
# +
# create model with class
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
losses = []
with tf.Session() as sess:
# training procedures
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(model.loss)
# initialize and run all variables so that we can use their values directly
init = tf.global_variables_initializer()
sess.run(init)
# train cycle with training data
for epoch in range(10):
total_loss = 0
for context, target in trigrams: # collecting batches
context_idxs = list(map(lambda w: word_to_ix[w], context))
_, embed,loss, params = sess.run([train_op, model.embed, model.loss, model.W],
feed_dict = {model.X:context_idxs,
model.Y:np.array(word_to_ix[target]).reshape(1,1)})
total_loss+=loss
print(total_loss)
# -
# #### TODO: To improve the graph structure by adding namescopes. We can also visualize embeddings using Tensorboard by loggin summaries.
# ### Computing Word Embeddings: Continuous Bag-of-Words
# The Continuous Bag-of-Words model (CBOW) is frequently used in NLP deep learning. It is a model that tries to predict words given the context of a few words before and a few words after the target word. This is distinct from language modeling, since CBOW is not sequential and does not have to be probabilistic. Typcially, CBOW is used to quickly train word embeddings, and these embeddings are used to initialize the embeddings of some more complicated model. Usually, this is referred to as pretraining embeddings. It almost always helps performance a couple of percent.
#
# The CBOW model is as follows. Given a target word $w_i$ and an $N$ context window on each side, $w_{i-1}, \dots, w_{i-N}$ and $w_{i+1}, \dots, w_{i+N}$, referring to all context words collectively as $C$, CBOW tries to minimize,
# $$ -\log p(w_i | C) = \log \text{Softmax}(A(\sum_{w \in C} q_w) + b) $$,
# where $q_w$ is the embedding for word $w$.
CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right
raw_text = """We are about to study the idea of a computational process. Computational processes are abstract
beings that inhabit computers. As they evolve, processes manipulate other abstract
things called data. The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()
word_to_ix = { word: i for i, word in enumerate(set(raw_text)) }
data = []
for i in range(2, len(raw_text) - 2):
context = [ raw_text[i-2], raw_text[i-1], raw_text[i+1], raw_text[i+2] ]
target = raw_text[i]
data.append( (context, target) )
print (data[:5])
# #### Building Model with Tensorflow
class CBOW(object):
    """Continuous bag-of-words model: predicts a target word from the
    context_size words on each side of it, trained with NCE loss
    (TensorFlow 1.x graph mode).

    NOTE(review): structurally this duplicates NGramLanguageModeler above,
    differing only in the context placeholder size (context_size * 2);
    the two could share one implementation.
    """

    def __init__(self, vocab_size, embedding_dim, context_size):
        # inputs and outputs
        self.X = tf.placeholder(tf.int32, [ context_size * 2 ]) # context (word sequence)
        self.Y = tf.placeholder(tf.int32, [1,1]) # target word id, shape (1, 1)
        # Embeddings: one row of size embedding_dim per vocabulary word,
        # initialised uniformly in [-1, 1]; these are the learned parameters.
        self.embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_dim], -1.0, 1.0))
        self.embed = tf.nn.embedding_lookup(self.embeddings, self.X)
        # Output-layer parameters consumed by the NCE loss.
        self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_dim]))
        self.b = tf.Variable(tf.random_uniform([vocab_size]))
        # loss
        # NOTE(review): as with NGramLanguageModeler, `inputs` here is
        # (2 * context_size, embedding_dim) while labels are (1, 1); the CBOW
        # formulation sums the context embeddings before the affine map —
        # verify the shapes against the tf.nn.nce_loss docs.
        self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=self.W,
                                                  biases=self.b,
                                                  labels=self.Y,
                                                  inputs=self.embed,
                                                  num_sampled=64,
                                                  num_classes=vocab_size))
# +
# Create the model.
# NOTE: the original passed `len(vocab)`, but `vocab` is the vocabulary of
# the *previous* (Shakespeare trigram) section; the word indices fed below
# come from the CBOW `word_to_ix` built from `raw_text`, so the embedding
# table must be sized by len(word_to_ix) to avoid out-of-range lookups.
model = CBOW(len(word_to_ix), EMBEDDING_DIM, CONTEXT_SIZE)

losses = []
with tf.Session() as sess:
    # training procedure: plain SGD on the NCE loss
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(model.loss)
    # initialize and run all variables so that we can use their values directly
    init = tf.global_variables_initializer()
    sess.run(init)
    # train cycle with training data, one (context, target) pair at a time
    for epoch in range(10):
        total_loss = 0
        for context, target in data:
            context_idxs = [word_to_ix[w] for w in context]
            _, embed, loss, params = sess.run([train_op, model.embed, model.loss, model.W],
                                              feed_dict = {model.X: context_idxs,
                                                           model.Y: np.array(word_to_ix[target]).reshape(1, 1)})
            total_loss += loss
        # report the accumulated loss once per epoch
        print(total_loss)
# -
# ### References
# - [How to structure your model in Tensorflow](http://web.stanford.edu/class/cs20si/lectures/notes_04.pdf)
# - [Understanding RNNs and LSTMs](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)
# - [Introduction to RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/)
# - [RNN Text Classification](https://github.com/LunaBlack/RNN-Classification)
| 2_Intro_NLP_Tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# In this example we implement simple STA imaging for ultrasound environment. As an example we use raw echo data from sl1543 probe applied to a string phantom (https://brain.fuw.edu.pl/edu/images/e/e3/Rys_fantom_nitkowy.png).
#
#
# In this example we use:
# 1. beamforming op: we use STA (delay-and sum + 1-D interpolation op), which converts input echo response data to a b-mode frame.
# 2. signal envelope detection op: we convert b-mode frame to analytical signal (https://en.wikipedia.org/wiki/Analytic_signal).
# +
import tensorflow as tf
import waveflow as wf
import numpy as np
import pylab
# Raw echo (RF) data recorded with the sl1543 probe on a string phantom.
rf = np.load('sta_wire_phantom.npy')
# Probe description; the recorded numpy array serves as the input source.
sl1543 = wf.ultrasound.Probe(
    name = "sl1543",
    num_elements = 192,
    subaperture_size= 64,
    pitch = .21e-3,
    sampling_frequency=50e6,
    input_source=wf.io.NumpyArraySource(rf)
)
# Physical environment: speed of sound in the phantom medium [m/s].
string_phantom = wf.physics.PhysicalEnv(
    speed_of_sound=1490.
)
with wf.ultrasound.UltrasoundEnv(probe=sl1543, physical_env=string_phantom) as us_env:
    input = us_env.step()
    # Event, Sample, Channel -> Event, Channel, Sample
    transposed = tf.transpose(input, perm=[0, 2, 1])
    # STA (delay-and-sum) beamforming into a 256x128 frame, starting 5 mm deep.
    reduced = wf.signal.sta(transposed, output_shape=(256, 128), start_depth=.005, us_env=us_env)
    # Envelope detection: magnitude of the analytic signal along axis 0.
    transformed = wf.signal.analytic_signal(tf.cast(reduced, dtype=tf.complex64), axis=0)
    frame = tf.abs(transformed)
    with tf.Session() as sess:
        img = frame.eval(session = sess)
pylab.imshow(img, cmap='gray')
| examples/sta_minimal_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello Friend
import numpy as np
import scipy as sp
from scipy import stats
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.interpolate import *
import matplotlib.pyplot as plt
import pandas as pd
import datetime, math
import matplotlib.dates as dates
import holoviews as hv
from holoviews import opts
from holoviews import dim
hv.extension('bokeh')
# Load precomputed CamHD floc-detection results for deployment 5.
results = pd.read_pickle('/home/jovyan/camhd_floc_model/data_camhd/results_for_dep_5_03.pickle')
results.tail()
# + active=""
# total_floc = [sum(i) for i in results['label_stats']]
# + active=""
# results['total_floc'] = total_floc
# -
# Pull out the columns used for fitting and plotting below.
x = results['timestamp'].values
y = results['total_floc'].values
date = results['datetime']
type(x[1])
data = pd.DataFrame({'x': x, 'y': y, 'date':date})
data.head()
import hvplot.pandas
data.hvplot.scatter('x', 'y', datashade = True)
# Outlier filter: keep only rows with total floc volume below 7000.
ss = data.loc[data['y']<7000]
ss.describe()
xx = ss['x'].values
yy = ss['y'].values
datedate = ss['date']
len(ss)
np.shape(ss)
def func(xx, a1, a2):
    """Logarithmic fit model: a1 + a2 * log10(xx)."""
    log_term = np.log10(xx)
    return a1 + a2 * log_term
# Fit the logarithmic model to the outlier-filtered data.
popt, pcov = curve_fit(func, xx, yy)
popt
y2 = func(xx, popt[0], popt[1])
data2 = pd.DataFrame({'xx': xx, 'Floc Volume': yy, 'y2': y2, 'Date': datedate})
data2.head()
import hvplot.pandas
import hvplot.pandas
# Scatter of the observations with the fitted curve overlaid.
plot1= data2.hvplot.scatter('Date', 'Floc Volume', datashade = True,)
plot2= data2.hvplot.line('Date', 'y2', color = 'red', weight = 2)
plot1*plot2
# +
line = func(xx, popt[0], popt[1])
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
import matplotlib.patches as patches
plt.rc('font', size=11)
# Hexbin density plot of floc volume vs. time, saved to disk.
fig, ax = plt.subplots()
fig.set_size_inches(18, 6)
fig.frameon = False
hb1 = ax.hexbin(xx, yy, vmin=0, vmax=1.2, bins='log', linewidths=1.35,
                gridsize=(90, 30), mincnt=1, cmap=plt.cm.PuBuGn)
fig.colorbar(hb1)
ax.set_ylim([0, 6500])
ax.set_xlim([datetime.date(2018, 7, 1), datetime.date(2019, 2, 10)])
ax.yaxis.grid(True)
ax.xaxis.grid(True)
# Monthly tick marks along the time axis.
months = dates.MonthLocator() #Months
monthsFmt = dates.DateFormatter('%m %Y')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
# slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# line = slope*x+intercept
#plt.plot(xx, y2, 'r', linewidth = 3, label='fitted line')
plt.ylabel('Volume of Floc Particles');
plt.savefig('floc_dep05_02.png')
# -
# Holoviews hex-tile version of the same figure.
xs = data2['xx']
ys = data2['Floc Volume']
hex_with_values = hv.HexTiles((xs, ys, 0.01))
hex_line = data2.hvplot.line('xx', 'y2')
overlay = (hex_with_values*hex_line)
# +
# hex_with_values.opts(opts.HexTiles(width=500, height=400, tools=['hover'], colorbar=True))
#overlay = hex_with_values*hex_line
# -
overlay
# +
# Demo: HexTiles with value dimensions on random data (holoviews example).
xs, ys = np.random.randn(2, 1000)
hex_with_values = hv.HexTiles((xs, ys, xs*(ys/2.), (xs**2)*ys), vdims=['Values', 'Hover Values'])
overlay = hex_with_values * hv.Points(hex_with_values)
overlay.opts(
    opts.HexTiles(width=400, height=400, aggregator=np.sum, tools=['hover']),
    opts.Points(size=1, color='black'))
# -
# + active=""
# We do not have a model yet...
# Exploring this process
# Straight line fit.....
# Obviously these are not the winners .... looking at ways to refine our model. Open to ideas to what might work best
# What are your ideas for what might control this process.....
# Input/output model. Constant... Stokes settling output.
# Input constant for some number of days..... settling
# Alternatively the model, the input is a decay function.
# Step function for input, Stokes settling as an output.
# Box model.... inputs and outputs..... depends on particle size.
# -
def func2(xx, aa1, aa2, aa3):
    """Exponential-decay fit model: aa1 * exp(-aa2 * xx) + aa3."""
    decay = np.exp(-aa2 * xx)
    return aa1 * decay + aa3
# Fit the exponential-decay model to the same filtered data and overlay it.
popt, pcov = curve_fit(func2, xx, yy)
popt
y3 = func2(xx, popt[0], popt[1], popt[2])
data3 = pd.DataFrame({'xx': xx, 'yy': yy, 'y3': y3})
data3.head()
plot1= data3.hvplot.scatter('xx', 'yy', datashade = True)
plot2= data3.hvplot.line('xx', 'y3')
plot1*plot2
| notebooks/log_bin_ds_et.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Iterative Closest Point Algorithm ( Assignment 1)
# In this approach, we iteratively find the closest point on the edge. At each iteration, once we have found the closest edge points, we apply an affine transformation on the original landmark points, to get new landmmark points that are closer to the edge points. We claim to have converged when the psi stops changing.
# Import required libraries
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Helper function to plot and display images.
# +
def display_image(image, title="random"):
    """Show `image` in an OpenCV window and block until a key is pressed."""
    cv.imshow(title, image)
    cv.waitKey(0)
    cv.destroyAllWindows()
def plot_image(image, title="random"):
    """Render `image` inline with matplotlib using a grayscale colormap.

    NOTE(review): `title` is accepted but unused — confirm whether it should
    be forwarded to plt.title().
    """
    plt.imshow(image, cmap=plt.get_cmap('gray'))
    plt.show()
# -
# Function to find edges. We have used Canny edge detector for edge detection.
def find_edges(image, t1, t2):
    """Detect edges with the Canny detector.

    Args:
        image: grayscale input image (2-D array).
        t1: lower hysteresis threshold for cv.Canny.
        t2: upper hysteresis threshold for cv.Canny.

    Returns:
        Binary edge map (uint8 array; edge pixels are 255).
    """
    # BUG FIX: the original called cv.Canny(img, ...) on the module-level
    # global `img`, silently ignoring the `image` parameter.
    edges = cv.Canny(image, t1, t2)
    return edges
# Function to calculate distance transform. The function takes in edges, transforms them as we did in the previous
# assignment to eventually apply distance transform.
def distance_transform(edges):
    """Distance transform of an inverted edge map.

    Inverts `edges` (edge pixels 255 -> 0, background 0 -> 255) so that
    cv.distanceTransform measures each pixel's distance to the nearest edge.

    NOTE(review): this mutates the caller's `edges` array in place, and the
    cast to uint8 truncates distances above 255 — confirm both are intended.
    """
    # Three-step in-place swap using 1 as a sentinel: 255 -> 1, 0 -> 255, 1 -> 0.
    edges[np.where(edges == 255)] = 1.0
    edges[np.where(edges == 0)] = 255.0
    edges[np.where(edges == 1)] = 0.0
    # L2 distance with a 3x3 mask: distance from each pixel to the nearest zero pixel.
    dist = cv.distanceTransform(edges, cv.DIST_L2, 3).astype(np.uint8)
    return dist
# Function to calculate gradient of the Distance Transform in 2D
def gradient_D(D):
    """Return the (y, x) gradient components of the 2-D distance transform D."""
    grads = np.gradient(D)
    return grads[0], grads[1]
# Function to read the given data
def read_text_file(filepath):
    """Parse a landmark file of '(x, y)' lines into an integer numpy array."""
    parsed = []
    with open(filepath) as handle:
        for line in handle:
            # Strip the leading '(' and trailing ')\n', then split on commas.
            parsed.append(tuple(int(v) for v in line[1:-2].split(',')))
    return np.array(parsed)
# We have used the following steps to achieve ICP goal.
# step 1 : Find edges of the hand image using Canny
# step 2 : Pre-compute the distance transform of the image
# step 3 : For each point w find the closest point in the edges.(Correspondence)
#     a. w = Point on the shape model. (transformed)
# b. E = Point in the edge list.
# c. D : Distance transform at point (w)
# d. G = Find the gradient of the distance transform.
# e. x = (w - (D/Magnitude(G))*(Gx*Gy))
# step 4 : Find an affine transformation using closed form solution.
def iterartive_closest_points(img):
    '''
    Fit the hand landmark model to image edges with Iterative Closest Point.

    NOTE(review): the (misspelled) public name is kept unchanged so existing
    callers keep working.

    Notation:
    w = Point on the shape model. (transformed)
    E = Point in the edge list.
    D : Distance transform at point (w)
    G = Find the gradient of the distance transform.
    x = (w - (D/Magnitude(G))*(Gx*Gy))

    Returns the converged affine parameters [m1, m2, m3, m4, t1, t2].
    '''
    # Precompute edges, their distance transform, and its gradient field.
    E = find_edges(img, 40, 80)
    D = distance_transform(E).astype(np.float32)
    Gy, Gx = gradient_D(D)
    # Stack so G[row, col] gives the (Gy, Gx) gradient vector at that pixel.
    G = np.array([Gy.T, Gx.T]).T
    G_magnitude = np.hypot(Gx, Gy)
    '''
    Read the hand landmark points.
    '''
    # Landmarks are stored as (x, y); swap columns to (row, col) indexing.
    hand_landmarks = read_text_file('data/hand_landmarks.txt')
    hand_landmarks = np.array([hand_landmarks.T[1], hand_landmarks.T[0]]).T
    hand_landmarks_org = np.copy(hand_landmarks)
    plt.imshow(img, cmap=plt.get_cmap('gray'))
    params = np.zeros((6,))
    counter_iter = 0
    transformed_points = np.zeros_like(hand_landmarks)
    while (True):
        '''
        Find correspondence by putting value of w' in above values.
        '''
        # Sample D, G and |G| at the current landmark positions.
        D_sub = D[hand_landmarks.T[0], hand_landmarks.T[1]]
        G_sub = G[hand_landmarks.T[0], hand_landmarks.T[1]]
        G_magnitude_sub = G_magnitude[hand_landmarks.T[0], hand_landmarks.T[1]]
        # Step towards the nearest edge: (D / |G|) * G, guarded against /0.
        second_numerator = D_sub.reshape(-1, 1) * G_sub
        second_denomenator = G_magnitude_sub.reshape(-1, 1)
        second_term = np.divide(second_numerator, second_denomenator, where=second_denomenator != 0)
        second_term = second_term
        '''
        Calculate final correspondence and store it in x
        '''
        x = (hand_landmarks - second_term).astype(int)
        '''
        Define the A.x=b components for performing the psuedo inverse.
        A = [...[xi yi 0 1 0]
             [0 0 xi yi 0 1]...]
        x = [m1,m2,m3,m4,t1,t2]
        b = [.....xi,yi,....]
        '''
        # Two rows per landmark: one for the x equation, one for the y equation.
        A = []
        for point in hand_landmarks:
            A = A + [[point[1], point[0], 0, 0, 1, 0]]
            A = A + [[0, 0, point[1], point[0], 0, 1]]
        A = np.array(A)
        b = np.array([x.T[1],x.T[0]]).T.flatten()
        '''
        Apply psuedo inverse formula to get psi.
        '''
        # Normal-equations solution: params = (A^T A)^-1 A^T b.
        params_new = np.dot(np.dot(np.linalg.inv(np.dot(A.T, A)), A.T), b)
        '''
        Check for convergence.
        '''
        # NOTE(review): exact float equality as a stopping rule relies on the
        # integer rounding of landmarks below reaching a fixed point.
        if np.array_equal(params_new,params):
            break
        params = params_new
        '''
        Transform the points to update landmark points.
        '''
        # 65 landmark points; map back to (row, col) integer pixel coordinates.
        transformed_points = np.dot(A, params).reshape(65, 2)
        transformed_points = np.array([transformed_points.T[1], transformed_points.T[0]]).T.astype(int)
        hand_landmarks = np.round(transformed_points).astype(int)
        counter_iter += 1
    '''
    For plotting final points.
    '''
    plt.cla()
    plt.imshow(img, cmap=plt.get_cmap('gray'))
    plt.scatter(transformed_points[:, 1], transformed_points[:, 0], c='#67eb34', s=5)
    plt.show()
    # plt.pause(5)
    plt.close()
    return params_new
# Apply the function on the given image.
# Run the ICP fit on the hand image, loaded as grayscale (flag 0).
img = cv.imread('data/hand.jpg', 0)
params_new = iterartive_closest_points(img)
# The final affine transformation parameters [m1, m2, m3, m4, t1, t2].
print(params_new)
| iterative-closest-point/Iterative Closest Point.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Input/Output
#
# Often, it is important to import information from a variety of sources and output the result. A few ways of creating and saving files are demonstrated.
#
# By the end of this file you should have seen simple examples of:
# 1. Printing string output to the screen
# 2. Reading and writing string output to/from text files
# 3. Reading and writing string output to/from csv files
# 4. Reading and writing string output to/from binary files
# 5. Reading and writing string output to/from matlab files
#
# Further reading:
# http://docs.h5py.org/en/latest/index.html
#
#
# Python Imports:
import numpy as np
import scipy.io as sio
# %cd datafiles
# !ls
# ## From standard input/keyboard:
# The import of simple text files can be performed directly in python via:
# Example "keyboard" string, echoed to stdout.
kb_contents = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.'
print(kb_contents)
# ## Text (ascii) files:
# The import of simple text files can be performed directly in python by creating a file object and operating on that object:
# Read line by line:
file_obj = open('01-simpletext.txt','r')
for line in file_obj:
    print(line)
file_obj.close()
# Use the read method:
file_obj = open('01-simpletext.txt','r')
file_contents = file_obj.read()
file_obj.close()
print(file_contents)
# Python 'with' statement automatically takes care of the close for us:
with open('01-simpletext.txt','r') as file_obj:
    print(file_obj.read())
# +
# Write to ascii files:
file_obj = open('01-simpletext_write.txt','w')
file_obj.write(file_contents)
file_obj.close()
# Or, alternatively:
with open('01-simpletext_write.txt','w') as file_obj:
    file_obj.write(file_contents)
# Check that our written output is good:
with open('01-simpletext_write.txt','r') as file_obj:
    print(file_obj.read())
# -
# ## Comma Separated Values (.csv files):
# Here, we import data separated by a particular delimiter, as in tsv or csv files:
# +
# Creating a python list by splitting the raw file text on commas:
with open('01-simpledata.csv','r') as file_obj:
    file_contents = file_obj.read().split(',')
print(file_contents)
# -
# Use numpy to read an array from a file
file_contents = np.loadtxt(open('01-simpledata.csv'), delimiter=",")
file_contents = file_contents.astype('float')
print(file_contents)
# +
# Save output of numpy array to csv file
file_contents_write = file_contents*2 #Double to differentiate read vs write data
np.savetxt('01-simpledata_write.csv',file_contents_write, '%0.3f', delimiter=",")
# '%0.3f' formats fixed-point notation with 3 decimal places (not scientific).
file_contents = np.loadtxt(open('01-simpledata_write.csv'), delimiter=",")
print(file_contents)
# -
# ## Binary Files:
# Binary files store the same information as text or csv, but do so directly in bytes, rather than using ascii to encode. They have the advantage of being faster to read and smaller in size, but are not readily readable by a typical text editor (notepad, vim, sublime, etc).
#
# Note: be careful to avoid `numpy.fromfile` and `numpy.tofile` as they are not platform independent!
# Read in the csv from the previous step:
file_contents = np.loadtxt(open('01-simpledata_write.csv'), delimiter=",")
print(file_contents)
# +
# Save to a .bin file. NOTE(review): np.savetxt writes ASCII text, not true
# binary — np.save/np.load (.npy) would produce a portable binary format.
np.savetxt('01-simpledata_write.bin', file_contents_write*2) # Note the lack of delimiter
file_contents = np.loadtxt('01-simpledata_write.bin')
# The following is not recommended, as it is platform dependent:
#np.ndarray.tofile(file_contents_write, '01-simpledata_write.bin')
#file_contents = np.fromfile('01-simpledata_write.bin')
print(file_contents)
# -
# ## Matlab (.mat) files:
# Generating matlab variables via:
#
# ``testvar = magic(9)``
#
# ``save('01-simplemat.mat','testvar')``
#
# These can then be loaded via scipy.io (imported as sio here):
# +
# Use scipy to read in .mat files:
mat_contents= sio.loadmat('01-simplemat.mat')
testvar = mat_contents['testvar']
print(testvar)
# +
# Use scipy to write .mat files:
testvar_write = testvar*2 # Double to make read data different from write data
sio.savemat('01-simplemat_write.mat' ,{'testvar_write':testvar_write})
mat_contents = sio.loadmat('01-simplemat_write.mat')
testvar = mat_contents['testvar_write']
print(testvar_write)
# -
# -
# ## HDF5 files
#
# HDF5 or Hierarchical Data Format provides a file format that has a much greater amount of flexibility at the cost of a bit more complexity. HDF5 is ideal when there would otherwise have been many small files. There are two main objects:
# - Groups: folder-like containers that work like Python dictionaries
# - Datasets: NumPy-like arrays
import h5py
# +
# Gather the three data sources written earlier so they can be stored in HDF5.
# Load csv data:
data_csv = np.loadtxt(open('01-simpledata_write.csv'), delimiter=",")
# Load mat data:
data_mat = sio.loadmat('01-simplemat_write.mat')['testvar_write']
# Load text data:
with open('01-simpletext.txt','r') as file_obj:
    data_txt = file_obj.read()
# -
# Create a h5py file object:
with h5py.File("01-data_write.hdf5", "w") as file_obj:
    # Use file_obj to create data sets
    # Create a dataset object and assign the values from data:
    dataset1 = file_obj.create_dataset("data", data = data_csv)
# Check that the data has been written to the file by opening it:
with h5py.File("01-data_write.hdf5", 'r') as file_obj:
    print(file_obj["data"].name)
    # FIX: Dataset.value was deprecated in h5py 2.x and removed in 3.0;
    # ds[()] reads the full dataset and works across versions.
    print(file_obj["data"][()])
# The "Hierarchical" part of the HDF5 file format provides groups, which act like Python dictionaries or 'folders' for the various Datasets.
# Open the same h5py file object:
with h5py.File("01-data_write.hdf5", "w") as file_obj:
    # Create a group object, and create datasets underneath it:
    grp_nums = file_obj.create_group("Numbers")
    dataset_csv = grp_nums.create_dataset("CSV", data=data_csv)
    dataset_mat = grp_nums.create_dataset("MAT", data=data_mat)
    # Create a second group object, and create datasets underneath it:
    grp_txt = file_obj.create_group("Text")
    txt_hf5 = np.asarray(data_txt, dtype="S") # Convert to NumPy S dtype:
    dataset_txt = grp_txt.create_dataset("lorem", data=txt_hf5)
# After saving this data, check the file structure:
# +
def print_attrs(name, obj): # Function that prints the name and object
    print(name)
    print(obj)
with h5py.File("01-data_write.hdf5", 'r') as file_obj:
    file_obj.visititems(print_attrs) # Use .visititems to get info
# -
with h5py.File("01-data_write.hdf5", 'r') as file_obj:
    print(file_obj["/Numbers/CSV"].name)
    print(file_obj["/Numbers/CSV"][()])
    print(file_obj["/Numbers/MAT"].name)
    print(file_obj["/Numbers/MAT"][()])
    print(file_obj["/Text/lorem"].name)
    print(file_obj["/Text/lorem"][()])
# For convenience, it's possible to print all of the information using `.visititems`:
# +
def print_attrs(name, obj):
    # Print the object's path, group objects as-is, and full dataset contents.
    print(name)
    if isinstance(obj, h5py.Group):
        print(obj)
    if isinstance(obj, h5py.Dataset):
        print(obj[()])
with h5py.File("01-data_write.hdf5", 'r') as file_obj:
    file_obj.visititems(print_attrs)
# -
# -
# h5py also allows storing of metadata relating to data - check the h5py documentation for more info: http://docs.h5py.org/en/latest/index.html
| 02 - Input Output (IO) - Numpy, Scipy, h5py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
import wave
import numpy as np
import matplotlib.pyplot as plt
# Open the stereo recording and read one second of interleaved frames.
wr = wave.open('recfile/file.wav', 'r')
sz = 48000 # Read and process 1 second at a time.
# FIX: np.frombuffer replaces np.fromstring, which is deprecated for binary
# data (DeprecationWarning since NumPy 1.14) and behaves identically here.
# NOTE(review): dtype=np.int32 implies 32-bit samples — confirm against
# wr.getsampwidth(); the plotting cells below assume a 16-bit range.
da = np.frombuffer(wr.readframes(sz), dtype=np.int32)
# De-interleave: even indices are the left channel, odd the right.
left, right = da[0::2], da[1::2]
# -
# Real FFT of each channel for frequency-domain filtering.
lf, rf = np.fft.rfft(left), np.fft.rfft(right)
# +
import matplotlib.pyplot as plt
plt.figure(1)
a = plt.subplot(211)
# NOTE(review): this y-range assumes 16-bit samples, but the data was read
# as int32 above — confirm which sample width the recording actually uses.
r = 2**16/2
a.set_ylim([-r, r])
a.set_xlabel('time [s]')
a.set_ylabel('sample value [-]')
x = np.arange(48000)/48000
plt.plot(x, left)
b = plt.subplot(212)
b.set_xscale('log')
b.set_xlabel('frequency [Hz]')
b.set_ylabel('|amplitude|')
plt.plot(abs(lf))
plt.savefig('sample-graph.png')
# +
lowpass = 100 # Remove lower frequencies.
highpass = 500 # Remove higher frequencies.
lf[:lowpass], rf[:lowpass] = 0, 0 # zero bins below `lowpass` (acts as a high-pass) (1)
lf[55:66], rf[55:66] = 0, 0 # line noise filter (2)
lf[highpass:], rf[highpass:] = 0,0 # zero bins above `highpass` (acts as a low-pass) (3)
nl, nr = np.fft.irfft(lf), np.fft.irfft(rf) # back to the time domain (4)
# Re-interleave the channels into a single int16 sample stream.
ns = np.column_stack((nl,nr)).ravel().astype(np.int16)
# -
# Same plot as above, now showing the filtered spectrum.
plt.figure(2)
a = plt.subplot(211)
r = 2**16/2
a.set_ylim([-r, r])
a.set_xlabel('time [s]')
a.set_ylabel('sample value [-]')
x = np.arange(48000)/48000
plt.plot(x, left)
b = plt.subplot(212)
b.set_xscale('log')
b.set_xlabel('frequency [Hz]')
b.set_ylabel('|amplitude|')
plt.plot(abs(lf))
plt.savefig('sample-graph.png')
# Inspect the WAV container metadata.
import wave
obj = wave.open('recfile/file.wav','r')
print( "Number of channels",obj.getnchannels())
print ( "Sample width",obj.getsampwidth())
print ( "Frame rate.",obj.getframerate())
print ("Number of frames",obj.getnframes())
print ( "parameters:",obj.getparams())
obj.close()
| backup/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''tnpsav'': conda)'
# name: python3
# ---
# # Usage
# In this tutorial we use [Braess network](https://en.wikipedia.org/wiki/Braess%27s_paradox#Mathematical_approach)
# as an example.
# ## Import Modules
# Import grapharray module.
import grapharray as ga
# ## Define Variables
# First, create a BaseGraph object, which describes the network on that variables are defined. It plays two roles when we define arrays:
#
# * Identifying on which network the variable is defined
# * Memory of the network structure.
# The BaseGraph describes the network on which all variables are defined.
BG = ga.BaseGraph()
# BaseGraph class is a subclass of [networkx.DiGraph](https://networkx.org/documentation/stable/reference/classes/digraph.html#networkx.DiGraph) so you can add edges and nodes to BaseGraph instance in the same way as to DiGraph. Note that GraphArray accepts any hashable objects as nodes, as does NetworkX.
BG.add_edges_from([
    ('start', 'A'),
    ('start', 'B') ,
    ('A', 'B'),
    ('A', 'end'),
    ('B', 'end')
])
# Then, freeze BaseGraph object to prevent being modified after defining variables.
BG.freeze()
# Finally, create NodeArray instance to define node variables or EdgeArray instance to define edge variables. We must pass a **frozen** BaseGraph object to array classes.
od_flow = ga.NodeArray(BG)
print(repr(od_flow))
edge_cost = ga.EdgeArray(BG)
print(repr(edge_cost))
# + [markdown] pycharm={"name": "#%% md\n"}
# These codes make variables defined on all nodes or edges of BG,
# all of whose values are zero.
# -
# You can set initial values of variables as you want by giving a
# keyword argument ```init_val```.
# The argument ```init_val``` accepts several types of variables.
# If you want to set all initial values to the same value, simply give a scalar:
od_flow = ga.NodeArray(BG, init_val=10)
print(repr(od_flow))
edge_cost = ga.EdgeArray(BG, init_val=10)
print(repr(edge_cost))
# + [markdown] pycharm={"name": "#%% md\n"}
# or if you want to set each value in detail, give
# * a dictionary that has node- or edge- indexes as keys and initial values as values:
# + pycharm={"name": "#%%\n"}
od_flow = ga.NodeArray(BG, init_val={
    'start': -6,
    'A': 0,
    'B': 0,
    'end': 6
})
print(repr(od_flow))
edge_cost = ga.EdgeArray(BG, init_val={
    ('start', 'A'): 0,
    ('start', 'B'): 50 ,
    ('A', 'B'): 10,
    ('A', 'end'): 50,
    ('B', 'end'): 0
})
print(repr(edge_cost))
# + [markdown] pycharm={"name": "#%% md\n"}
# * a NodeArray or EdgeArray object (initializing by them is faster than that
# by dictionary)
# + pycharm={"name": "#%%\n"}
new_od_flow = ga.NodeArray(BG, init_val=od_flow)
print(repr(new_od_flow))
new_edge_cost = ga.EdgeArray(BG, init_val=edge_cost)
print(repr(new_edge_cost))
# -
# ## Update array values
# + [markdown] pycharm={"name": "#%% md\n"}
# You can modify values after creating instances as we show below.
# + pycharm={"name": "#%%\n"}
# Individual entries can be updated by node or edge key after creation.
new_od_flow['A'] = 100
print(repr(new_od_flow))
new_edge_cost['A', 'B'] = 100
print(repr(new_edge_cost))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Mathematical Operations
# + [markdown] pycharm={"name": "#%% md\n"}
# NodeArray and EdgeArray objects can be added to, subtracted from, multiplied by and divided
# by other objects of the same classes.
# + nbsphinx="hidden"
import warnings
warnings.filterwarnings("ignore")
# + pycharm={"name": "#%%\n"}
print(repr(new_od_flow + od_flow))
print(repr(new_od_flow - od_flow))
print(repr(new_od_flow * od_flow))
print(repr(new_od_flow / od_flow)) # this raises warnings because of the zero division
# + [markdown] pycharm={"name": "#%% md\n"}
# NodeArray and EdgeArray objects can also be operated with scalar values.
# + nbsphinx="hidden"
import warnings
warnings.filterwarnings("ignore")
# + pycharm={"name": "#%%\n"}
print(repr(new_od_flow + 5))
print(repr(new_od_flow - 5))
print(repr(new_od_flow * 5))
print(repr(new_od_flow / 5))
# -
# ## Visualizing Values
# (Coming soon...)
# ## Computational Efficiency
# + [markdown] pycharm={"name": "#%% md\n"}
# NodeArray and EdgeArray stores variables' values as np.ndarray and
# the mathematical operations shown above are operated with these arrays.
# + pycharm={"name": "#%%\n"}
print(new_od_flow.array) # You can see the array by .array property.
# .array is read-only: this assignment demonstrates that the stored values
# are not changed by it (TODO confirm against the grapharray docs).
new_od_flow.array[1] = 5 # .array is read-only
print(new_od_flow.array)
# + [markdown] pycharm={"name": "#%% md\n"}
# Thus, these operations are as fast as those of np.ndarray.
# The larger the network is, the smaller the difference between the speed of
# these two methods is.
# + pycharm={"name": "#%%\n"}
# Create a huge graph to show computational efficiency.
import random
import numpy as np
import time
import timeit
BG = ga.BaseGraph()
BG.add_nodes_from(list(range(10000)))
for i in range(20000):
    edge = random.sample(BG.nodes, 2)
    BG.add_edge(*edge)
BG.freeze()
# Shared timeit configuration: CPU time, 100k repetitions, module globals.
timeit_args = {
    'timer': time.process_time, 'number': 100000, 'globals': globals()
}
# + pycharm={"name": "#%%\n"}
print("calculation with NodeArray ============")
e1 = ga.NodeArray(BG, init_val = 1)
e2 = ga.NodeArray(BG, init_val = 2.5739)
print(timeit.timeit("e1 + e2", **timeit_args))
print("calculation with np.ndarray =========")
a1 = e1.array
a2 = e2.array
print(timeit.timeit("a1 + a2", **timeit_args))
# + pycharm={"name": "#%%\n"}
print("calculation with EdgeArray ============")
e1 = ga.EdgeArray(BG, init_val = 1)
e2 = ga.EdgeArray(BG, init_val = 2.5739)
print(timeit.timeit("e1 + e2", **timeit_args))
print("calculation with np.ndarray =========")
a1 = e1.array
a2 = e2.array
print(timeit.timeit("a1 + a2", **timeit_args))
# + pycharm={"is_executing": true, "name": "#%%\n"}
print("calculation with graphvar ============")
e = ga.EdgeArray(BG, init_val = 1)
A = ga.IncidenceMatrix(BG)
print(timeit.timeit("A @ e", **timeit_args))
print("calculation with np.ndarray =========")
e = e.array
A = A.array
print(timeit.timeit("A @ e", **timeit_args))
| docs/usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.core.interactiveshell import InteractiveShell
import seaborn as sns
# PyTorch
from torchvision import transforms, datasets, models
import torch
from torch import optim, cuda
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# Data science tools
import numpy as np
import pandas as pd
import os
# Image manipulations
from PIL import Image
# Useful for examining network
from torchsummary import summary
# Timing utility
from timeit import default_timer as timer
# Visualizations
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['font.size'] = 14
# Printing out all outputs
InteractiveShell.ast_node_interactivity = 'all'
# +
#datafolder setup
datadir = 'AugData/'
traindir = datadir + 'Train/train0217/'
testdir = datadir + 'Test/test0217/'
save_file_name = 'vgg16-0217.pt'
checkpoint_path = 'vgg16-0217.pth'
# config
batch_size = 64
train_on_gpu = cuda.is_available()
print(train_on_gpu)
# Number of gpus
# BUG FIX: multi_gpu was previously assigned only inside the GPU branch,
# so later `if multi_gpu:` checks raised NameError on CPU-only machines.
# Default it to False and flip it on when more than one GPU is present.
multi_gpu = False
if train_on_gpu:
    gpu_count = cuda.device_count()
    print(gpu_count)
    if gpu_count > 1:
        multi_gpu = True
# +
# Count train/test images per class folder and collect them into a DataFrame.
categories = []
img_categories = []
n_train = []
n_test = []
# Iterate through each category
for d in os.listdir(traindir):
    categories.append(d)
    # Number of each image
    train_imgs = os.listdir(traindir + d)
    test_imgs = os.listdir(testdir + d)
    n_train.append(len(train_imgs))
    n_test.append(len(test_imgs))
    # Find stats for train images
    for i in train_imgs:
        img_categories.append(d)
        # NOTE(review): every training image is opened and converted to an
        # array, but `img_array` is never used afterwards in this notebook
        # section — this is expensive dead work; confirm before removing.
        img = Image.open(traindir + d + '/' + i)
        img_array = np.array(img)
# Dataframe of categories
df = pd.DataFrame({'category': categories,
                   'n_train': n_train, 'n_test': n_test}).\
    sort_values('category')
# -
df
# +
# Data transforms (resize to VGG's expected 224-pixel input).
# NOTE(review): the comment below claims the ImageNet standard transform, but
# no Normalize step is applied — confirm whether normalization was intended.
image_transforms = {
    #train is set to imagenet standard transform
    'train':
    transforms.Compose([
        transforms.Resize(size=224),
        transforms.ToTensor(),
    ]),
    # no transform for test
    'test':
    transforms.Compose([
        transforms.Resize(size=224),
        transforms.ToTensor(),
    ])
}
# +
#dataloader
data = {
    'train':datasets.ImageFolder(root=traindir,transform=image_transforms['train']),
    'test':datasets.ImageFolder(root=testdir,transform=image_transforms['test'])
}
dataloaders = {
    'train': DataLoader(data['train'], batch_size=batch_size, shuffle=True),
    'test': DataLoader(data['test'], batch_size=batch_size, shuffle=True)
}
# -
# Sanity check: inspect one batch's tensor shapes.
trainiter = iter(dataloaders['train'])
features,labels = next(trainiter)
features.shape,labels.shape
# Number of output classes, taken from the per-category stats DataFrame.
n_classes = len(df)
print(n_classes)
# Load an ImageNet-pretrained VGG16 (downloads weights on first use).
model = models.vgg16(pretrained=True)
model
# +
#freeze early layers so only the replaced classifier head is trainable
for param in model.parameters():
    param.requires_grad = False
# +
n_inputs = model.classifier[6].in_features
# Replace the final classifier layer with a 2-layer head ending in
# log-probabilities (pairs with the NLLLoss criterion defined below).
model.classifier[6] = nn.Sequential(
    nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.4),
    nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))
model.classifier
# +
#move to gpu and wrap for multi-GPU training when available
if train_on_gpu:
    model = model.to('cuda')
    if multi_gpu:
        model = nn.DataParallel(model)
# -
def get_pretrained_model(model_name):
    """Retrieve a pre-trained model from torchvision
    Params
    -------
        model_name (str): name of the model (currently only accepts vgg16 and resnet50)
    Return
    --------
        model (PyTorch model): cnn
    Raises
    --------
        ValueError: if model_name is not one of the supported names.
    """
    if model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
        # Freeze early layers so only the new classifier head is trained.
        for param in model.parameters():
            param.requires_grad = False
        n_inputs = model.classifier[6].in_features
        # Add on classifier: 2-layer head ending in log-probabilities
        # (pairs with the NLLLoss criterion used by this notebook).
        model.classifier[6] = nn.Sequential(
            nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))
    elif model_name == 'resnet50':
        model = models.resnet50(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        n_inputs = model.fc.in_features
        model.fc = nn.Sequential(
            nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))
    else:
        # BUG FIX: an unknown name previously fell through and silently
        # operated on whatever global `model` happened to be in scope.
        raise ValueError(
            "Unsupported model_name: %r (use 'vgg16' or 'resnet50')" % model_name)
    # Move to gpu and parallelize
    if train_on_gpu:
        model = model.to('cuda')
        if multi_gpu:
            model = nn.DataParallel(model)
    return model
model = get_pretrained_model('vgg16')
# Print a layer-by-layer summary; DataParallel wraps the net in .module.
if multi_gpu:
    summary(
        model.module,
        input_size=(3, 224, 224),
        batch_size=batch_size,
        device='cuda')
else:
    summary(
        model, input_size=(3, 224, 224), batch_size=batch_size, device='cuda')
# +
# Attach the class <-> index mappings to the model for later inference.
model.class_to_idx = data['train'].class_to_idx
model.idx_to_class = {
    idx: class_
    for class_, idx in model.class_to_idx.items()
}
list(model.idx_to_class.items())[:10]
# -
# NLLLoss matches the LogSoftmax output of the classifier head.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters())
def train(model,
          criterion,
          optimizer,
          train_loader,
          valid_loader,
          save_file_name,
          max_epochs_stop=3,
          n_epochs=20,
          print_every=2):
    """Train a PyTorch Model with early stopping on validation loss.

    Params
    --------
        model (PyTorch model): cnn to train
        criterion (PyTorch loss): objective to minimize
        optimizer (PyTorch optimizier): optimizer to compute gradients of model parameters
        train_loader (PyTorch dataloader): training dataloader to iterate through
        valid_loader (PyTorch dataloader): validation dataloader used for early stopping
        save_file_name (str ending in '.pt'): file path to save the model state dict
        max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping
        n_epochs (int): maximum number of training epochs
        print_every (int): frequency of epochs to print training stats

    Returns
    --------
        model (PyTorch model): trained cnn with best (lowest validation loss) weights loaded
        history (DataFrame): history of train and validation loss and accuracy
    """
    # Early stopping initialization
    epochs_no_improve = 0
    valid_loss_min = np.Inf
    valid_best_acc = 0
    best_epoch = 0
    history = []

    # Number of epochs already trained (if using loaded in model weights)
    try:
        print('Model has been trained for:', model.epochs, 'epochs.\n')
    except AttributeError:
        # Fresh model: start our own epoch counter on it.
        model.epochs = 0
        print('Starting Training from Scratch.\n')

    overall_start = timer()

    # Main loop
    for epoch in range(n_epochs):

        # Keep track of training and validation loss each epoch
        train_loss = 0.0
        valid_loss = 0.0
        train_acc = 0
        valid_acc = 0

        # Set to training mode
        model.train()
        start = timer()

        # Training loop
        for data, target in train_loader:
            # Tensors to gpu
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()

            # Clear gradients
            optimizer.zero_grad()
            # Predicted outputs are log probabilities
            output = model(data)

            # Loss and backpropagation of gradients
            loss = criterion(output, target)
            loss.backward()

            # Update the parameters
            optimizer.step()

            # Track train loss: weight the batch-average loss by batch size so
            # the per-example epoch average is exact even with a ragged last batch.
            train_loss += loss.item() * data.size(0)

            # Accuracy: the predicted class is the max log probability
            _, pred = torch.max(output, dim=1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            # Need to convert correct tensor from int to float to average
            accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
            train_acc += accuracy.item() * data.size(0)

            # Track training progress
            print(
                'Epoch:', epoch, '\t', timer() - start,
                'seconds elapsed in epoch.',
                end='\r')

        # After the training loop ends, run validation
        model.epochs += 1

        # Don't need to keep track of gradients
        with torch.no_grad():
            # Set to evaluation mode
            model.eval()

            # Validation loop
            for data, target in valid_loader:
                # Tensors to gpu
                if train_on_gpu:
                    data, target = data.cuda(), target.cuda()

                # Forward pass
                output = model(data)

                # Validation loss (batch-size weighted, as above)
                loss = criterion(output, target)
                valid_loss += loss.item() * data.size(0)

                # Calculate validation accuracy
                _, pred = torch.max(output, dim=1)
                correct_tensor = pred.eq(target.data.view_as(pred))
                accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
                valid_acc += accuracy.item() * data.size(0)

        # Convert sums to per-example averages
        train_loss = train_loss / len(train_loader.dataset)
        valid_loss = valid_loss / len(valid_loader.dataset)
        train_acc = train_acc / len(train_loader.dataset)
        valid_acc = valid_acc / len(valid_loader.dataset)

        history.append([train_loss, valid_loss, train_acc, valid_acc])

        # Print training and validation results
        if (epoch + 1) % print_every == 0:
            print(
                f'\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}'
            )
            print(
                f'\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%'
            )

        # Save the model if validation loss decreases
        if valid_loss < valid_loss_min:
            torch.save(model.state_dict(), save_file_name)
            # Track improvement and reset patience counter
            epochs_no_improve = 0
            valid_loss_min = valid_loss
            valid_best_acc = valid_acc
            best_epoch = epoch

        # Otherwise increment count of epochs with no improvement
        else:
            epochs_no_improve += 1
            # Trigger early stopping
            if epochs_no_improve >= max_epochs_stop:
                # Bug fix: report the accuracy of the BEST epoch, not the
                # current (non-improving) one.
                print(
                    f'\nEarly Stopping! Total epochs: {epoch}. Best epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_best_acc:.2f}%'
                )
                print('early stop')
                break

    # Record overall time and print out stats
    total_time = timer() - overall_start
    print(
        f'\nBest epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_best_acc:.2f}%'
    )
    # Bug fix: divide by (epoch + 1) — the original `total_time / (epoch)`
    # over-reported per-epoch time and crashed with ZeroDivisionError for n_epochs=1.
    print(
        f'{total_time:.2f} total seconds elapsed. {total_time / (epoch + 1):.2f} seconds per epoch.'
    )

    # Bug fix: always reload the checkpointed best weights. The original only
    # did this on the early-stopping path, so a run that completed all epochs
    # returned last-epoch weights despite the docstring promising the best ones.
    model.load_state_dict(torch.load(save_file_name))
    # Attach the optimizer
    model.optimizer = optimizer

    # Format history
    history = pd.DataFrame(
        history,
        columns=['train_loss', 'valid_loss', 'train_acc', 'valid_acc'])
    return model, history
# Train with early stopping (patience of 5 epochs on validation loss).
# NOTE(review): the *test* dataloader is passed as the validation loader —
# this leaks the test set into model selection; confirm this is intended.
model, history = train(
    model,
    criterion,
    optimizer,
    dataloaders['train'],
    dataloaders['test'],
    save_file_name=save_file_name,
    max_epochs_stop=5,
    n_epochs=30,
    print_every=2)
# !python3 --version
| exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="11SUtIc04kMG"
# ### **PINN eikonal solver for a smooth v(z) model**
# + id="pXvXSBNbqSjT"
# Mount Google Drive so the notebook can read/write project files in Colab.
from google.colab import drive
drive.mount('/content/gdrive')
# + id="GmVs1hdxkf0g"
# cd "/content/gdrive/My Drive/Colab Notebooks/Codes/PINN_isotropic_eikonal_R1"
# + id="WQ5-HMWpt6H6"
# !pip install sciann==0.5.4.0
# !pip install tensorflow==2.2.0
# #!pip install keras==2.3.1
# + id="FPKGPC2LumD_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1614583401961, "user_tz": -180, "elapsed": 3152, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="dacb2da6-5710-4b1b-8bf1-b8a25e293365"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import tensorflow as tf
from sciann import Functional, Variable, SciModel, PDE
from sciann.utils import *
import scipy.io
import time
import random
# Restrict TensorFlow to single-threaded execution.
# NOTE(review): presumably for reproducible timings/results — confirm.
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(1)
# + id="dFHCemaau4Bv" executionInfo={"status": "ok", "timestamp": 1614583401962, "user_tz": -180, "elapsed": 1794, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
# Fix RNG seeds so the random training-point selection and the network
# initialization are reproducible.
np.random.seed(123)
tf.random.set_seed(123)
# + id="4ckO5QuSvgzd" executionInfo={"status": "ok", "timestamp": 1614583402235, "user_tz": -180, "elapsed": 483, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
#Model specifications
# Linear velocity model v(z,x) = v0 + vergrad*z + horgrad*x; with horgrad=0
# this reduces to the smooth v(z) model of the notebook title.
v0 = 2.; # Velocity at the origin of the model
vergrad = 0.5; # Vertical gradient
horgrad = 0.; # Horizontal gradient

# Grid extents (km) and spacing for depth (z) and offset (x)
zmin = 0.; zmax = 2.; deltaz = 0.02;
xmin = 0.; xmax = 2.; deltax = 0.02;

# Point-source location
sz = 1.0; sx = 1.0;

# Number of training points
num_tr_pts = 2500
# + id="PXwV6YzUYisV" executionInfo={"status": "ok", "timestamp": 1614583404918, "user_tz": -180, "elapsed": 668, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
# Creating grid, calculating reference traveltimes, and preparing the list of grid points for training (X_star)
z = np.arange(zmin,zmax+deltaz,deltaz)
nz = z.size
x = np.arange(xmin,xmax+deltax,deltax)
nx = x.size

Z,X = np.meshgrid(z,x,indexing='ij')

# Preparing velocity model
vs = v0 + vergrad*sz + horgrad*sx # Velocity at the source location
velmodel = vs + vergrad*(Z-sz) + horgrad*(X-sx);

# Traveltime solution
if vergrad==0 and horgrad==0:
    # For homogeneous velocity model: straight-ray distance / velocity
    T_data = np.sqrt((Z-sz)**2 + (X-sx)**2)/v0;
else:
    # For velocity gradient model: closed-form arccosh solution for a
    # constant-gradient medium.
    T_data = np.arccosh(1.0+0.5*(1.0/velmodel)*(1/vs)*(vergrad**2 + horgrad**2)*((X-sx)**2 + (Z-sz)**2))/np.sqrt(vergrad**2 + horgrad**2)

X_star = [Z.reshape(-1,1), X.reshape(-1,1)] # Grid points for prediction

# Randomly sample training points, then append the exact source location so
# the point-source condition is always part of the training set.
selected_pts = np.random.choice(np.arange(Z.size),num_tr_pts,replace=False)
Zf = Z.reshape(-1,1)[selected_pts]
Zf = np.append(Zf,sz)
Xf = X.reshape(-1,1)[selected_pts]
Xf = np.append(Xf,sx)

X_starf = [Zf.reshape(-1,1), Xf.reshape(-1,1)] # Grid points for training
# + id="n4wY9U-aJ2xz" colab={"base_uri": "https://localhost:8080/", "height": 369} executionInfo={"status": "ok", "timestamp": 1614583409537, "user_tz": -180, "elapsed": 1696, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="f637e26c-a64b-411b-8a29-16bf6d8f38ac"
# Plot the velocity model with the source location
plt.style.use('default')
plt.figure(figsize=(4,4))
ax = plt.gca()

im = ax.imshow(velmodel, extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet")

# Mark the point source with a black star.
ax.plot(sx,sz,'k*',markersize=8)

plt.xlabel('Offset (km)', fontsize=14)
plt.xticks(fontsize=10)
plt.ylabel('Depth (km)', fontsize=14)
plt.yticks(fontsize=10)

ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.5))

# Colorbar axes sized to match the image.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="6%", pad=0.15)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('km/s',size=10)
cbar.ax.tick_params(labelsize=10)

plt.savefig("./figs/vofz/velmodel.pdf", format='pdf', bbox_inches="tight")
# + id="m_uAMRGewCmc" executionInfo={"status": "ok", "timestamp": 1614583412288, "user_tz": -180, "elapsed": 660, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
# Analytical solution for the known traveltime part
vel = velmodel[int(round(sz/deltaz)),int(round(sx/deltax))] # Velocity at the source location
T0 = np.sqrt((Z-sz)**2 + (X-sx)**2)/vel;

# Spatial derivatives of T0; np.divide with where=T0!=0 avoids the 0/0
# singularity exactly at the source (the derivative is set to 0 there).
px0 = np.divide(X-sx, T0*vel**2, out=np.zeros_like(T0), where=T0!=0)
pz0 = np.divide(Z-sz, T0*vel**2, out=np.zeros_like(T0), where=T0!=0)
# + id="bKWNNkwZxVC_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1614583414348, "user_tz": -180, "elapsed": 713, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="fda36ef9-b00b-47f2-b561-49b8af6c734d"
# Find source location id in X_starf
# Tolerance-based match because the source coordinates were appended as floats.
TOLX = 1e-6
TOLZ = 1e-6

sids,_ = np.where(np.logical_and(np.abs(X_starf[0]-sz)<TOLZ , np.abs(X_starf[1]-sx)<TOLX))

# Sanity checks: should print exactly the appended source point(s).
print(sids)
print(sids.shape)
print(X_starf[0][sids,0])
print(X_starf[1][sids,0])
# + id="iLqwKuV9xY5t" executionInfo={"status": "ok", "timestamp": 1614583423748, "user_tz": -180, "elapsed": 1719, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
# Preparing the Sciann model object
K.clear_session()    # K comes from `from sciann.utils import *` (Keras backend)

layers = [20]*10     # 10 hidden layers of 20 neurons each

# Appending source values (velocity, derivatives and T0 at the source point)
velmodelf = velmodel.reshape(-1,1)[selected_pts]; velmodelf = np.append(velmodelf,vs)
px0f = px0.reshape(-1,1)[selected_pts]; px0f = np.append(px0f,0.)
pz0f = pz0.reshape(-1,1)[selected_pts]; pz0f = np.append(pz0f,0.)
T0f = T0.reshape(-1,1)[selected_pts]; T0f = np.append(T0f,0.)

xt = Variable("xt",dtype='float64')
zt = Variable("zt",dtype='float64')
vt = Variable("vt",dtype='float64')
px0t = Variable("px0t",dtype='float64')
pz0t = Variable("pz0t",dtype='float64')
T0t = Variable("T0t",dtype='float64')

# tau is the multiplicative correction network of the factored form T = tau*T0.
tau = Functional("tau", [zt, xt], layers, 'l-atan')

# Loss function based on the factored isotropic eikonal equation
L = (T0t*diff(tau, xt) + tau*px0t)**2 + (T0t*diff(tau, zt) + tau*pz0t)**2 - 1.0/vt**2

# Targets: tau pinned to 1 at the source sample (sids), the PDE residual
# driven to zero, and a penalty on negative tau*T0 values.
targets = [tau, PDE(L), (1-sign(tau*T0t))*abs(tau*T0t)]
target_vals = [(sids, np.ones(sids.shape).reshape(-1,1)), 'zeros', 'zeros']

model = SciModel(
    [zt, xt, vt, pz0t, px0t, T0t],
    targets
)
# + id="HEBMghwbx8V9" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1614583597759, "user_tz": -180, "elapsed": 169680, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="5089f0e6-a855-4331-e90b-52f90d4ebf10"
#Model training
start_time = time.time()

hist = model.train(
    X_starf + [velmodelf,pz0f,px0f,T0f],
    target_vals,
    batch_size = X_starf[0].size,   # full-batch training (one batch = all points)
    epochs = 10000,
    adaptive_weights=True,
    learning_rate = 0.0002,
    verbose=0,
    save_weights_to='models/vofz_model',
    save_weights_freq=1e8           # NOTE(review): huge freq presumably disables periodic saving — confirm
)

elapsed = time.time() - start_time
print('Training time: %.2f minutes' %(elapsed/60.))
# + id="pbcc3qYFyG3I" colab={"base_uri": "https://localhost:8080/", "height": 321} executionInfo={"status": "ok", "timestamp": 1614583598510, "user_tz": -180, "elapsed": 163181, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="9c5c06c6-1288-4b1f-f020-91807e3f46dc"
# Convergence history plot for verification
fig = plt.figure(figsize=(5,3))
ax = plt.axes()

# Bug fix: `LineWidth` relied on matplotlib's removed case-insensitive
# MATLAB-style kwarg handling; the canonical property name is `linewidth`.
ax.semilogy(hist.history['loss'], linewidth=2)

ax.set_xlabel('Epochs',fontsize=16)
plt.xticks(fontsize=12)
ax.xaxis.set_major_locator(plt.MultipleLocator(2500))
ax.set_ylabel('Loss',fontsize=16)
plt.yticks(fontsize=12);
plt.grid()
# + id="K1rHDMAF2Dgp" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1614583599009, "user_tz": -180, "elapsed": 138062, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="844be580-350b-45eb-e04a-969666e224c1"
# Predicting traveltime solution from the trained model
L_pred = L.eval(model, X_star + [velmodel,pz0,px0,T0])
tau_pred = tau.eval(model, X_star + [velmodel,pz0,px0,T0])
tau_pred = tau_pred.reshape(Z.shape)

# Factored eikonal: total traveltime = tau * analytic background T0.
T_pred = tau_pred*T0

# Sanity check: tau at the source should be ~1 (so that T_pred(source) = 0).
print('Time at source: %.4f'%(tau_pred[int(round(sz/deltaz)),int(round(sx/deltax))]))
# + id="_fpP-s3tZIdt" colab={"base_uri": "https://localhost:8080/", "height": 368} executionInfo={"status": "ok", "timestamp": 1613979973708, "user_tz": -180, "elapsed": 135480, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="2645c3b5-677e-49d8-ea13-b55958a19f9d"
# Plot the PINN solution error
plt.style.use('default')
plt.figure(figsize=(4,4))
ax = plt.gca()

# Absolute traveltime error |T_pred - T_data| over the whole grid.
im = ax.imshow(np.abs(T_pred-T_data), extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet")

plt.xlabel('Offset (km)', fontsize=14)
plt.xticks(fontsize=10)
plt.ylabel('Depth (km)', fontsize=14)
plt.yticks(fontsize=10)

ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.5))

divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="6%", pad=0.15)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('seconds',size=10)
cbar.ax.tick_params(labelsize=10)

plt.savefig("./figs/vofz/pinnerror.pdf", format='pdf', bbox_inches="tight")
# + id="dNZyabGTP-sh" executionInfo={"status": "ok", "timestamp": 1614583602757, "user_tz": -180, "elapsed": 1455, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}}
# Load fast sweeping traveltimes (precomputed) for comparison
T_fsm = np.load('./inputs/vofz/traveltimes/Tcomp.npy')
# + id="l5joPvLCU6nP" colab={"base_uri": "https://localhost:8080/", "height": 369} executionInfo={"status": "ok", "timestamp": 1614583605292, "user_tz": -180, "elapsed": 1735, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="d6990edd-e5d6-42ca-b5d4-ec437c9184a4"
# Plot the first order FSM solution error
plt.style.use('default')
plt.figure(figsize=(4,4))
ax = plt.gca()

# Same error plot as above, but for the fast-sweeping reference solution.
im = ax.imshow(np.abs(T_fsm-T_data), extent=[xmin,xmax,zmax,zmin], aspect=1, cmap="jet")

plt.xlabel('Offset (km)', fontsize=14)
plt.xticks(fontsize=10)
plt.ylabel('Depth (km)', fontsize=14)
plt.yticks(fontsize=10)

ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.5))

divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="6%", pad=0.15)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('seconds',size=10)
cbar.ax.tick_params(labelsize=10)

plt.savefig("./figs/vofz/fsmerror.pdf", format='pdf', bbox_inches="tight")
# + id="LhNyVPo3kF0g" colab={"base_uri": "https://localhost:8080/", "height": 474} executionInfo={"status": "ok", "timestamp": 1613979976875, "user_tz": -180, "elapsed": 138625, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="88baac1f-5da2-445e-88f3-5faa6cb3020f"
# Traveltime contour plots
plt.figure(figsize=(5,5))
ax = plt.gca()

# Overlay: analytical (red solid), PINN (black dashed), fast sweeping (blue dotted).
im1 = ax.contour(T_data, 6, extent=[xmin,xmax,zmin,zmax], colors='r')
im2 = ax.contour(T_pred, 6, extent=[xmin,xmax,zmin,zmax], colors='k',linestyles = 'dashed')
im3 = ax.contour(T_fsm, 6, extent=[xmin,xmax,zmin,zmax], colors='b',linestyles = 'dotted')

ax.plot(sx,sz,'k*',markersize=8)
plt.xlabel('Offset (km)', fontsize=14)
plt.ylabel('Depth (km)', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=8)

# Depth axis increases downward.
plt.gca().invert_yaxis()

h1,_ = im1.legend_elements()
h2,_ = im2.legend_elements()
h3,_ = im3.legend_elements()
ax.legend([h1[0], h2[0], h3[0]], ['Analytical', 'PINN', 'Fast sweeping'],fontsize=12)

ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.5))
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)

plt.savefig("./figs/vofz/contours.pdf", format='pdf', bbox_inches="tight")
# + id="E-M9aLdTadoa" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613979976876, "user_tz": -180, "elapsed": 138619, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjqim6lA6I3m073KKV1QGaclFO3cGZY8egrRo3a=s64", "userId": "00809387776046207752"}} outputId="8abb0646-f863-4614-c7be-bc0f04a40560"
# Relative and absolute L2 errors of the PINN solution vs the analytical one.
print(np.linalg.norm(T_pred-T_data)/np.linalg.norm(T_data))
print(np.linalg.norm(T_pred-T_data))
| codes/script1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# <p></p>
# <p style="text-align:center"><font size="20">BRAIN IMAGING</font></p>
# <p style="text-align:center"><font size="20">DATA STRUCTURE</font></p>
# The dataset for this tutorial is structured according to the [Brain Imaging Data Structure (BIDS)](http://bids.neuroimaging.io/). BIDS is a simple and intuitive way to organize and describe your neuroimaging and behavioral data. Neuroimaging experiments result in complicated data that can be arranged in many different ways. So far there is no consensus on how to organize and share data obtained in neuroimaging experiments. BIDS tackles this problem by suggesting a new standard for the arrangement of neuroimaging datasets.
# The idea of BIDS is that the file and folder names follow a strict set of rules:
#
# 
#
# Using the same structure for all of your studies will allow you to easily reuse all of your scripts between studies. But additionally, it also has the advantage that sharing code with and using scripts from other researchers will be much easier.
# # Tutorial Dataset
#
# For this tutorial, we will be using a subset of the [fMRI dataset (ds000114)](https://openfmri.org/dataset/ds000114/) publicly available on [openfmri.org](https://openfmri.org). **If you're using the suggested Docker image you probably have all data needed to run the tutorial within the Docker container.**
# If you want to have data locally you can use [Datalad](http://datalad.org/) to download a subset of the dataset, via the [datalad repository](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds000114). In order to install dataset with all subrepositories you can run:
# + language="bash"
# cd /data
# datalad install -r ///workshops/nih-2017/ds000114
# -
# In order to download data, you can use ``datalad get foldername`` command, to download all files in the folder ``foldername``. For this tutorial we only want to download part of the dataset, i.e. the anatomical and the functional `fingerfootlips` images:
# + language="bash"
# cd /data/ds000114
# datalad get -J 4 /data/ds000114/derivatives/fmriprep/sub-*/anat/*preproc.nii.gz \
# /data/ds000114/sub-*/ses-test/func/*fingerfootlips*
# -
# So let's have a look at the tutorial dataset.
# !tree -L 4 /data/ds000114/
# As you can, for every subject we have one anatomical T1w image, five functional images, and one diffusion weighted image.
#
# **Note**: If you used `datalad` or `git annex` to get the dataset, you can see symlinks for the image files.
# # Behavioral Task
#
# Subject from the ds000114 dataset did five behavioral tasks. In our dataset two of them are included.
#
# The **motor task** consisted of ***finger tapping***, ***foot twitching*** and ***lip poaching*** interleaved with fixation at a cross.
#
# The **landmark task** was designed to mimic the ***line bisection task*** used in neurological practice to diagnose spatial hemineglect. Two conditions were contrasted, specifically judging if a horizontal line had been bisected exactly in the middle, versus judging if a horizontal line was bisected at all. More about the dataset and studies you can find [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3641991/).
#
# To each of the functional images above, we therefore also have a tab-separated values file (``tsv``), containing information such as stimuli onset, duration, type, etc. So let's have a look at one of them:
# + language="bash"
# cd /data/ds000114
# datalad get /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv
# -
# !cat /data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-linebisection_events.tsv
| notebooks/introduction_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VacationPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
from pprint import pprint
# Import API key
from config import g_key
# -
# ### Store Part I results into DataFrame
# * Load the csv exported in Part I to a DataFrame
# Load the city weather data exported by Part I (WeatherPy).
cityWeatherDF = pd.read_csv("../output_data/city_weather.csv")
cityWeatherDF.head()
# ### Humidity Heatmap
# * Configure gmaps.
# * Use the Lat and Lng as locations and Humidity as the weight.
# * Add Heatmap layer to map.
# Access maps with unique API key
gmaps.configure(api_key = g_key)

cities = cityWeatherDF['City']
# Lat/Lng pairs are the heatmap locations; humidity values are the weights.
locations = cityWeatherDF[['Lat', 'Lng']]
weights = cityWeatherDF['Humidity']
# +
# Customize the size of the figure
figure_layout = {
    'width': '1500px',
    'height': '800px',
    'border': '1px solid black',
    'padding': '1px',
    'margin': '0 auto 0 auto'
}
# Center the map roughly on New York City at a continental zoom level.
fig = gmaps.figure(layout=figure_layout, center = (40.71427, -74.00597), zoom_level = 4)

# Create heat layer; max_intensity=100 because humidity is a percentage.
heat_layer = gmaps.heatmap_layer(locations, weights=weights,
                                 dissipating=False, max_intensity=100,
                                 point_radius=1)

# Add layer
fig.add_layer(heat_layer)

fig
# -
# ### Create new DataFrame fitting weather criteria
# * Narrow down the cities to fit weather conditions.
# * Drop any rows will null values.
# +
# Ideal weather conditions are considered to be Max Temperature between 65F and 80F with
# Wind speed less than 10 mph and zero cloudiness.
# Fix: the original chained three `.loc[boolean]` calls, re-using masks built
# from the full DataFrame to index already-filtered frames — fragile and
# alignment-dependent. A single combined mask selects the same rows in one pass.
ideal_mask = (
    (cityWeatherDF['Max Temp'] < 80)
    & (cityWeatherDF['Max Temp'] > 65)
    & (cityWeatherDF['Wind Speed'] < 10)
    & (cityWeatherDF['Cloudiness'] == 0)
)
idealCityWeatherDF = cityWeatherDF.loc[ideal_mask].dropna()

idealCityWeatherDF
# -
# ### Hotel Map
# * Store into variable named `hotel_df`.
# * Add a "Hotel Name" column to the DataFrame.
# * Set parameters to search for hotels with 5000 meters.
# * Hit the Google Places API for each city's coordinates.
# * Store the first Hotel result into the DataFrame.
# * Plot markers on top of the heatmap.
# Work on a re-indexed copy of the ideal-weather cities and add a column
# to hold the nearest hotel for each one.
hotel_df = idealCityWeatherDF.reset_index(drop=True)
hotel_df["Hotel Name"] = ""
hotel_df

# +
# base_url
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"

# geocoordinates: search for lodging within 5000 m of each city's coordinates
target_radius = 5000
target_type = 'lodging'
target_search = 'hotel'

# Loop through each City to identify the first hotel in the vicinity
for row in hotel_df.index:
    try:
        target_coordinates = f"{hotel_df.loc[row, 'Lat']}, {hotel_df.loc[row, 'Lng']}"
        # set up a parameters dictionary
        params = {
            "key": g_key,
            "location": target_coordinates,
            "radius": target_radius,
            "type": target_type,
            "keyword": target_search
        }
        # run a request using our params dictionary
        places_data = requests.get(base_url, params=params).json()
        hotel_name = places_data['results'][0]['name']
        print(f"Identified Hotel `{hotel_name}` in the City `{hotel_df.loc[row, 'City']}`")
    # Bug fix: the original bare `except:` also swallowed KeyboardInterrupt/
    # SystemExit. Catch only the expected failures: empty/malformed API
    # responses and failed HTTP requests.
    except (IndexError, KeyError, ValueError,
            requests.exceptions.RequestException):
        hotel_name = "COULD NOT FIND A MATCH!"
        print(f"SKIPPING.. Could not find hotel in the City `{hotel_df.loc[row, 'City']}`")
    hotel_df.loc[row, "Hotel Name"] = hotel_name
    hotel_name = ""
print("Hotel Data loaded successfully!")
# -

hotel_df
# +
# NOTE: Do not change any of the code in this cell

# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# +
# Assign the marker layer to a variable
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
# Add the layer to the map (markers are overlaid on the earlier humidity heatmap `fig`)
fig.add_layer(markers)

fig
# -
| VacationPy/VacationPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Class labels in model-output index order.
# NOTE(review): this order must match the class order used at training time — confirm.
CATEGORIES=['cardboard','glass','metal','paper','plastic','trash']
model = tf.keras.models.load_model("Trashnet98.model")
def prepare(filepath):
    """Load an image as grayscale and shape it for `model.predict`.

    Returns a (1, 150, 150, 1) array: a batch of one single-channel image.
    NOTE(review): pixel values are left as raw 0-255 — confirm the model was
    trained without rescaling.
    """
    target_size = 150
    grayscale = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    resized = cv2.resize(grayscale, (target_size, target_size))
    return resized.reshape(-1, target_size, target_size, 1)
# Predict each sample image and display it alongside the model's output.
# Refactor: the original repeated the same four-statement cell seven times;
# a single loop removes the duplication while keeping the same globals
# (`file`, `img`, `prediction`) and the identical display/print order.
for file in ['cardboard64.jpg', 'trash112.jpg', 'plastic14.jpg', 'paper29.jpg',
             'glass124.jpg', 'metal100.jpg', 'plastic199.jpg']:
    img = cv2.imread(file)
    plt.imshow(img)
    plt.show()
    # prediction is a (1, n_classes) array of class scores
    prediction = model.predict([prepare(file)])
    print(prediction)
    print(CATEGORIES[np.argmax(prediction[0])])
| Notebooks/PredictTrash 40%.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Pharmacophores with OpenPharmacophore
# According to the IUPAC, a pharmacophore model is an ensemble of steric and electronic features that is necessary to ensure the optimal supramolecular interactions with a specific biological target and to trigger (or block) its biological response [1]. In pharmacophore modeling, the fundamental premise is that related chemical groups, such as hydrogen bond donors/ acceptors and aromatic/hydrophobic centers, if oriented in spatially and geometrically similar arrangements, can facilitate comparable intramolecular interactions with a target receptor, thereby conferring similar biological activity [3].
#
# Pharmacophore models are mainly used in virtual screening, which involves computationally screening libraries of molecules to discover new hits, that is, molecules that bind to a therapeutic target of interest.
#
# A pharmacophore model can be established either in a ligand based manner, by superposing a set of active molecules and extracting common chemical features that are essential for their bioactivity, or in a structure based manner, by probing possible interaction points between the macromolecular target and ligands [2].
#
# OpenPharmacophore is a python library designed to work with pharmacophore models. It can derive pharmacophore models from ligand-based, structured-based and from molecular dynamics simulations and use them for virtual screening. In this notebook we will explore how pharmacophores work in OpenPharmacophore.
# +
from openpharmacophore import Pharmacophore
from openpharmacophore import PharmacophoricPoint
import pyunitwizard as puw
import os
# -
# ## Creating a pharmacophore model manually
#
# In OpenPharmacophore, a pharmacophore is an object that contains a list of pharmacophoric points. Pharmacophores can be obtained via ligand-based or structure-based methods. However, the simplest way to create a pharmacophore is from a list of pharmacophoric points. Although this is not really useful in practice, it will help us understand how pharmacophore objects work in OpenPharmacophore.
# ### Pharmacophoric Points
#
# In OpenPharmacophore, a pharmacophoric point is a chemical feature in 3D space, with a tolerance radius. A pharmacophoric point can be created from the PharmacophoricPoint class. To create a new pharmacophoric point, the feature type, the center and radius are required. A direction vector can be optionally passed to create a pharmacophoric point with directionality.
#
# To begin, we will create a hydrogen bond donor, an aromatic ring and a positive charge pharmacophoric point. The constructor expects the center and radius to be quantities, for which we are using the pyunitwizard library.
# +
donor = PharmacophoricPoint(feat_type="hb donor",
center=puw.quantity([1.0, 1.0, 1.0], "angstroms"),
radius=puw.quantity(1.5, "angstroms"),
direction=[0.5, 0.8, 0.2]
)
aromatic = PharmacophoricPoint(feat_type="aromatic ring",
center=puw.quantity([-1.0, 0.0, 2.0], "angstroms"),
radius=puw.quantity(1.0, "angstroms"),
direction=[0.5, 0.8, 0.2]
)
pos_charge = PharmacophoricPoint(feat_type="positive charge",
center=puw.quantity([0.0, -1.5, 2.0], "angstroms"),
radius=puw.quantity(1.75, "angstroms"),
)
print(donor, "\n")
print(aromatic, "\n")
print(pos_charge, "\n")
# -
# Pharmacophoric points can be of different types including: hydrogen bond donors and acceptors, aromatic rings, hydrophobic areas, positive and negative charges, and excluded and included volumes.
#
# To see the list of accepted chemical features we can call the static method get_valid_features of the PharmacophoricPoint class
PharmacophoricPoint.get_valid_features()
# Let's inspect a pharmacophoric point
print(f"Center is {donor.center}")
print(f"Radius is {donor.radius}")
print(f"Direction is {donor.direction}")
print(f"Feature name is {donor.feature_name}")
# Pharmacophoric points can be modified after they have been created.
new_center = puw.quantity([0.25, -1.15, 2.34], "angstroms")
donor.center = new_center
print(donor)
# ### Create the Pharmacophore
#
# We are now ready to create a pharmacophore from the previously defined points.
pharma_points = [donor, aromatic, pos_charge]
pharmacophore = Pharmacophore(pharma_points)
print(pharmacophore)
print(pharmacophore.elements)
# Once we have created a pharmacophore, new pharmacophoric points can be added or removed
# +
# Create a new hb acceptor and add it to the pharmacophore
acceptor = PharmacophoricPoint(
feat_type="hb acceptor",
center=puw.quantity([-1.20, -2.3, 0.5], "angstroms"),
radius=puw.quantity(1.0, "angstroms")
)
pharmacophore.add_element(acceptor)
# Remove the positive charge
pos_charge_index = 2
pharmacophore.remove_elements(pos_charge_index)
print(f"{pharmacophore.n_elements} pharmacophoric points\n")
print(pharmacophore.elements, "\n")
# -
# We can visualize the pharmacophore using the show method, this will return an nglview widget. Pharmacophoric points are visualized as spheres and vectors if they have directionality. If a pharmacophore is associated with a molecular system it will also be shown. Nevertheless, in this case the pharmacophore does not contain a molecular system.
view = pharmacophore.show()
view
view.render_image()
view._display_image()
# ## Importing Pharmacophores from other software
#
# OpenPharmacophore can load pharmacophores generated with other software such as LigandScout, MOE, ZincPharmer and Pharmagist. It accepts a variety of different file formats including pml, ph4, json, and mol2.
# +
# Load a pharmacophore from LigandScout (pml format).
data_dir = "./data/pharmacophores"
pharma_file = "ligscout-pharma.pml"
file = os.path.join(data_dir, pharma_file)
ligscout_pharmacophore = Pharmacophore().from_file(file)
print(ligscout_pharmacophore)
print(ligscout_pharmacophore.elements)
# +
# Load a pharmacophore from pharmer (json format).
pharma_file = "1M70.json"
file = os.path.join(data_dir, pharma_file)
pharmer_pharmacophore = Pharmacophore().from_file(file)
print(pharmer_pharmacophore)
print(pharmer_pharmacophore.elements)
# -
view = pharmer_pharmacophore.show()
view
view.render_image()
# +
# Load a pharmacophore from moe (ph4 format).
pharma_file = "gmp.ph4"
file = os.path.join(data_dir, pharma_file)
moe_pharmacophore = Pharmacophore().from_file(file)
print(moe_pharmacophore)
print(moe_pharmacophore.elements)
# +
# Load a pharmacophore from pharmagist (mol2 format).
pharma_file = "elastase.mol2"
file = os.path.join(data_dir, pharma_file)
pharmagist_pharmacophore = Pharmacophore().from_file(file)
print(pharmagist_pharmacophore)
print(pharmagist_pharmacophore.elements)
# -
# ## Exporting Pharmacophores
# Pharmacophores can also be saved to different file formats. Let's save the pharmacophore that we created at the beginning of the tutorial.
pharmacophore.to_ligandscout("./mypharmacophore.pml")
pharmacophore.to_pharmer("./mypharmacophore.json")
pharmacophore.to_pharmagist("./mypharmacophore.mol2")
pharmacophore.to_moe("./mypharmacophore.ph4")
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# <strong>References</strong>
#
# [1] <NAME>. et al. (1998) Glossary of terms used in medicinal chemistry (IUPAC Recommendations 1997). Annu. Rep. Med. Chem. 33, 385–395
#
# [2] <NAME>. "Pharmacophore modeling and applications in drug discovery: challenges and recent advances." Drug discovery today 15, no. 11-12 (2010): 444-450.
#
# [3] Cournia, Zoe, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Rigorous free energy simulations in virtual screening." Journal of Chemical Information and Modeling 60, no. 9 (2020): 4153-4169.
| docs/contents/Pharmacophore/Pharmacophore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from platform import python_version
print(python_version())
# +
import sys
# Add the path to system, local or mounted S3 bucket, e.g. /dbfs/mnt/<path_to_bucket>
sys.path.append('./secrets.py')
import logging
import math
import os
from influxdb import DataFrameClient
import numpy as np
import matplotlib.mlab as mlab
import pandas as pd
import matplotlib.pyplot as plt
from tabulate import tabulate
from tqdm import tqdm
# %matplotlib inline
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# Need to ssh tunnel for this to work
# ssh -L 8086:localhost:8086 aq.byu.edu -N
influx = DataFrameClient(
host=HOST,
port=PORT,
username=USERNAME,
password=PASSWORD,
database=DATABASE,
)
def large_query(influx, measurement, query, total=None, limit=100_000):
    """Stream a large InfluxDB query result in pages of ``limit`` rows.

    Appends LIMIT/OFFSET clauses to ``query`` and yields one result frame per
    page until a short page signals the end of the data. ``total`` (row count,
    if known) is only used to size the tqdm progress bar.
    """
    pages = None if total is None else math.ceil(total / limit)
    with tqdm(total=pages) as progress:
        offset = 0
        while True:
            paged_query = "{} LIMIT {} OFFSET {}".format(query, limit, offset)
            page = influx.query(paged_query)[measurement]
            rows_received = len(page)
            progress.update(1)
            yield page
            if rows_received != limit:
                break
            offset += limit
def load_data(filename):
    """Return the air-quality measurements, caching them as HDF5 at ``filename``.

    On a cache hit the HDF5 file is read back directly. On a miss, the full
    Oct 2019 - Apr 2020 range is paged down from InfluxDB via the module-level
    ``influx`` client and ``large_query``, then written to ``filename``.
    """
    if os.path.exists(filename):
        LOGGER.info("Loading cached data...")
        return pd.read_hdf(filename)
    LOGGER.info("Downloading data...")
    # Fetch the row count first so large_query can size its progress bar.
    result = influx.query(
        "SELECT COUNT(sequence) FROM air_quality_sensor WHERE time > '2019-10-01' AND time <= '2020-04-30'"
    )
    count = result["air_quality_sensor"].values[0][0]
    queries = large_query(
        influx,
        "air_quality_sensor",
        "SELECT * FROM air_quality_sensor WHERE time > '2019-10-01' AND time <= '2020-04-30'",
        count,
    )
    # Concatenate all pages into one DataFrame and cache it.
    all_data = pd.concat(list(queries), sort=False)
    all_data.to_hdf(filename, "data")
    return all_data
data = load_data("aq_data.h5")
gold_data = load_data("aq_data.h5")
LOGGER.info("Done loading data...")
# +
# all_modified_gers - This is the working boxplot for all_modified_gers only Mongolia deployed sensors
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
# https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.between_time.html
# Don't include sensors: FL, IA, KS, MB, NB, NJ, NL, OR, WA, WY
# Look more closely at: NE, NS
from IPython.core.debugger import set_trace
# https://matplotlib.org/3.1.3/gallery/statistics/boxplot_color.html
print(data.index[1])
data.index = data.index.tz_convert('Asia/Ulaanbaatar')
print(data.index[1])
labels = ['All Modified Gers']
plt.xlabel('')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for All Modified Gers Sensors')
plt.grid(True)
days = ['05', '06', '07', '08', '09', '10', '11']
# Clean up data this way:
# Clamp PM2.5 readings to [0, 1000] before any further processing.
data.loc[data['pm2_5'] > 1000, 'pm2_5'] = 1000
data.loc[data['pm2_5'] < 0, 'pm2_5'] = 0
# https://www.geeksforgeeks.org/create-a-new-column-in-pandas-dataframe-based-on-the-existing-columns/
# NOTE(review): all readings are divided by a fixed correction factor; the
# constants (0.4, 1.65, 35/100) look like a sensor/humidity calibration
# correction — confirm the source of this formula.
data['pm2_5'] = data['pm2_5'] / (1 + ((0.4/1.65)/(-1+(1/(35/100)))))
# data['pm2_5'] = np.where(data['pm2_5'] >= 5000, 5000, data['pm2_5'])
data = data[data.location_name == 'Mongolia']
# start clean up data mode when in office or switched from outdoor to indoor or vice versa
# -------------------------- In Office ----------------------------------------------------------------------------------------------------------------------------
ak = data[data.index < '2020-02-15'].groupby("name").get_group('AK')
co = data[data.index < '2020-02-15'].groupby("name").get_group('CO')
ky = data[data.index < '2020-02-15'].groupby("name").get_group('KY')
# mb = data[data.index < '2020-02-15'].groupby("name").get_group('MB')
# mb = mb[(mb.index < '2020-01-26') | (mb.index >= '2020-02-04')]
# nj = data[(data.index < '2020-01-28') | (data.index >= '2020-02-04')].groupby("name").get_group('NJ')
nu = data[(data.index < '2020-01-26') | (data.index >= '2020-02-04')].groupby("name").get_group('NU')
# oregon = data[(data.index < '2020-01-26') | (data.index >= '2020-02-04')].groupby("name").get_group('OR')
pe = data[(data.index < '2020-02-11')].groupby("name").get_group('PE') #outdoor sensor we are no longer using these sensors data
# wy = data[(data.index < '2020-02-11')].groupby("name").get_group('WY')
# --------------------------------------- Switched ---------------------------------------------------------------------------------------------------------------
ab = data[(data.index > '2020-01-28') & (data.index <= '2020-02-14')].groupby("name").get_group('AB') # outdoor sensor we are no longer using these sensors data before but will use after the switch to indoor
ns = data[(data.index >= '2020-01-28')].groupby("name").get_group('NS') # outdoor sensor we are no longer using these sensors data before but will use after the switch to indoor
# oregon = oregon[(oregon.index >= '2020-01-28')] # outdoor sensor we are no longer using these sensors data before but will use after the switch to indoor
ut = data[(data.index >= '2020-01-29')].groupby("name").get_group('UT') # outdoor sensor we are no longer using these sensors data before but will use after the switch to indoor
# finish clean up data mode when in office or switched from outdoor to indoor or vice versa
# ------------------------------------------------------------------------------------------------------------------------------------------------------
modified_gers = ['AL', 'AR', 'AZ', 'CA', 'CT', 'DE', 'ID', 'IL', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MS', 'MT', 'NC', 'NH', 'NM', 'GA', 'ND', 'NE']
modified_gers_data = data[(data.name == modified_gers[0]) | (data.name == modified_gers[1]) | (data.name == modified_gers[2]) | (data.name == modified_gers[3]) | (data.name == modified_gers[4]) | (data.name == modified_gers[5]) | (data.name == modified_gers[6]) | (data.name == modified_gers[7]) | (data.name == modified_gers[8]) | (data.name == modified_gers[9]) | (data.name == modified_gers[10]) | (data.name == modified_gers[11]) | (data.name == modified_gers[12]) | (data.name == modified_gers[13]) | (data.name == modified_gers[14]) | (data.name == modified_gers[15]) | (data.name == modified_gers[16]) | (data.name == modified_gers[17]) | (data.name == modified_gers[18]) | (data.name == modified_gers[19]) | (data.name == modified_gers[20]) | (data.name == modified_gers[21])]
modified_gers_data = modified_gers_data.append(ak)
modified_gers_data = modified_gers_data.append(co)
modified_gers_data = modified_gers_data.append(ky)
# unmodified_gers = ['NJ', 'NS', 'NU', 'OK', 'OR', 'PA', 'RI', 'SD', 'UT', 'VA', 'WI']
# unmodified_gers_data = data[(data.name == unmodified_gers[0]) | (data.name == unmodified_gers[1]) | (data.name == unmodified_gers[2]) | (data.name == unmodified_gers[3]) | (data.name == unmodified_gers[4]) | (data.name == unmodified_gers[5]) | (data.name == unmodified_gers[6]) | (data.name == unmodified_gers[7]) | (data.name == unmodified_gers[8]) | (data.name == unmodified_gers[9]) | (data.name == unmodified_gers[10])]
unmodified_gers = ['OK', 'PA', 'RI', 'SD', 'VA', 'WI']
unmodified_gers_data = data[(data.name == unmodified_gers[0]) | (data.name == unmodified_gers[1]) | (data.name == unmodified_gers[2]) | (data.name == unmodified_gers[3]) | (data.name == unmodified_gers[4]) | (data.name == unmodified_gers[5])]
unmodified_gers_data = unmodified_gers_data.append(ab)
# unmodified_gers_data = unmodified_gers_data.append(mb)
# unmodified_gers_data = unmodified_gers_data.append(nj)
unmodified_gers_data = unmodified_gers_data.append(nu)
unmodified_gers_data = unmodified_gers_data.append(ns)
# unmodified_gers_data = unmodified_gers_data.append(oregon)
unmodified_gers_data = unmodified_gers_data.append(ut)
# +
labels = ['All Modified Gers']
plt.xlabel('')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for All Modified Gers Sensors')
plt.grid(True)
all_sensors_names = []
for name, sensor_data in modified_gers_data.groupby("name"):
all_sensors_names.append(name)
print("All Sensors names:", all_sensors_names)
day_names = modified_gers_data.index.day_name()
print(type(day_names))
print(day_names[:1000])
all_modified_gers = [modified_gers_data.pm2_5.dropna()]
print('{} list size: {}'.format(labels[0], len(data.pm2_5)))
results = plt.boxplot(all_modified_gers, showfliers=False, labels=labels, showmeans=True, meanline=True)
# print(results)
print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
print('caps: ', [item.get_ydata()[1] for item in results['caps']])
print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
print('medians: ', [item.get_ydata()[1] for item in results['medians']])
print('means: ', [item.get_ydata()[1] for item in results['means']])
print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
# +
labels = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
plt.xlabel('Dates')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
all_sensors_names = []
for name, sensor_data in modified_gers_data.groupby("name"):
all_sensors_names.append(name)
print("All Sensors names:", all_sensors_names)
day_names = modified_gers_data.index.day_name()
print(type(day_names))
print(day_names[:10])
monday = modified_gers_data[(modified_gers_data.index.day_name() == 'Monday')]['pm2_5'].dropna()
tuesday = modified_gers_data[modified_gers_data.index.day_name() == 'Tuesday']['pm2_5'].dropna()
wednesday = modified_gers_data[modified_gers_data.index.day_name() == 'Wednesday']['pm2_5'].dropna()
thursday = modified_gers_data[modified_gers_data.index.day_name() == 'Thursday']['pm2_5'].dropna()
friday = modified_gers_data[modified_gers_data.index.day_name() == 'Friday']['pm2_5'].dropna()
saturday = modified_gers_data[modified_gers_data.index.day_name() == 'Saturday']['pm2_5'].dropna()
sunday = modified_gers_data[modified_gers_data.index.day_name() == 'Sunday']['pm2_5'].dropna()
all_days = [monday, tuesday, wednesday, thursday, friday, saturday, sunday]
results = plt.boxplot(all_days, showfliers=False, labels=labels, showmeans=True, meanline=True)
i = 0
cumulative = 0
for day in all_days:
print('{} list size: {}'.format(labels[i], len(day)))
cumulative += len(day)
i+= 1
print('total for all days: {}'.format(cumulative))
print()
# print(results)
print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
print('caps: ', [item.get_ydata()[1] for item in results['caps']])
print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
print('medians: ', [item.get_ydata()[1] for item in results['medians']])
print('means: ', [item.get_ydata()[1] for item in results['means']])
print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
# +
# modified_gers each sensors - This is the working boxplot for modified_gers only Mongolia deployed sensors
plt.xlabel('Sensors')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
all_sensors_names = []
for name, sensor_data in modified_gers_data.groupby("name"):
all_sensors_names.append(name)
print("All Sensors names:", all_sensors_names)
day_names = modified_gers_data.index.day_name()
print(type(day_names))
print(day_names[:10])
for day in days_of_week:
plt.xlabel('Sensors for {0}'.format(day))
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors for {0}'.format(day))
plt.grid(True)
sensors_data = list()
sensors_name = list()
temp = modified_gers_data[modified_gers_data.index.day_name() == day]
# temp = temp.groupby("name")
cumulative = 0
for name, sensor_data in temp.groupby("name"):
sensors_name.append(name)
sensors_data.append(temp.groupby("name").get_group(name)["pm2_5"].dropna().to_numpy().tolist())
cumulative += len(sensor_data)
print('{} - {} list size: {}'.format(day, name, len(sensor_data)))
print('{} - {} list size of data above 1000: {}'.format(day, name, len(sensor_data[sensor_data.pm2_5 > 1000])))
print ('{} total: {}'.format(day, cumulative))
print()
results = plt.boxplot(sensors_data, showfliers=False, labels=sensors_name, showmeans=True, meanline=True)
print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
print('caps: ', [item.get_ydata()[1] for item in results['caps']])
print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
print('medians: ', [item.get_ydata()[1] for item in results['medians']])
print('means: ', [item.get_ydata()[1] for item in results['means']])
print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
plt.show()
# -
| all_modified/all_modified.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import sklearn
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
from sklearn import preprocessing
from scipy.stats import skew
import functools
import operator
from numpy import inf
# %matplotlib inline
# -
# С долната функция ще получаваме статистика за липсващите данни по колони.
def missing(set_to_check):
    """Summarize missing values per column: absolute count and ratio, descending.

    Columns with no missing values are omitted from the result.
    """
    null_counts = set_to_check.isnull().sum().sort_values(ascending=False)
    null_ratios = (set_to_check.isnull().sum() / set_to_check.isnull().count()).sort_values(ascending=False)
    summary = pd.concat(
        [null_counts[null_counts > 0], null_ratios[null_ratios > 0]],
        axis=1,
        keys=['Total Missing', 'Percentage of Missing'],
    )
    return summary
# Нека заредим train и test set-овете и да видим статистика за липсващите данни по колони.
train_set = pd.read_csv('data/train.csv', index_col=['Id'])
test_set = pd.read_csv('data/test.csv', index_col=['Id'])
missing(train_set)
missing(test_set)
# Забелязваме, че почти липсва информация за ***PoolQC***, ***MiscFeature***, ***Alley*** и ***Fence***. Също така липсва информация за ***FireplaceQu*** в близо половината от данните, за това ще премахнем всички тези колони.
def drop_highly_missing_columns(set_to_drop_from):
    """Drop the columns that were found to be mostly (or half) empty."""
    mostly_empty = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
    return set_to_drop_from.drop(labels=mostly_empty, axis=1)
train_set = drop_highly_missing_columns(train_set)
test_set = drop_highly_missing_columns(test_set)
# Нека отново видим статистика за липсващите колони.
missing(train_set)
missing(test_set)
# От описанието на данните забелязваме, че ако липсва информация в следните колони: ***GarageType***, ***GarageFinish***, ***GarageQual*** и ***GarageCond***, то е защото вероятно липсва гараж!
# За това ще попълним липсващата информация така, че тя да отговаря на липсата на гараж!
def fill_info_for_missing_garage(set_to_fill):
    """Treat rows missing any garage descriptor as having no garage at all.

    Categorical garage columns become 'No' and numeric ones become 0 for
    every row where at least one of the descriptor columns is null.
    Mutates and returns ``set_to_fill``.
    """
    categorical_cols = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']
    numeric_cols = ['GarageYrBlt', 'GarageCars', 'GarageArea']
    no_garage = set_to_fill[categorical_cols].isnull().any(axis=1)
    rows = set_to_fill.index[no_garage]
    set_to_fill.loc[rows, categorical_cols] = 'No'
    set_to_fill.loc[rows, numeric_cols] = 0
    return set_to_fill
train_set = fill_info_for_missing_garage(train_set)
test_set = fill_info_for_missing_garage(test_set)
# Нека отново видим статистика за липсващите колони.
missing(train_set)
missing(test_set)
# След търсене в описанието на данните за *Bsmt* разбраме, че Bsmt е съкращение за Basement и там положението е сходно на това за колони за Garage. Така че ще решим проблема с липсващите дани в тези колони по аналогичен начин.
def fill_info_for_missing_basement(set_to_fill):
    """Treat rows missing any basement descriptor as having no basement.

    Mirrors fill_info_for_missing_garage: categorical basement columns become
    'No' and numeric ones become 0 wherever at least one descriptor is null.
    Mutates and returns ``set_to_fill``.
    """
    categorical_cols = ['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']
    numeric_cols = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath']
    no_basement = set_to_fill[categorical_cols].isnull().any(axis=1)
    rows = set_to_fill.index[no_basement]
    set_to_fill.loc[rows, categorical_cols] = 'No'
    set_to_fill.loc[rows, numeric_cols] = 0
    return set_to_fill
train_set = fill_info_for_missing_basement(train_set)
test_set = fill_info_for_missing_basement(test_set)
# Нека отново видим статистика за липсващите колони.
missing(train_set)
missing(test_set)
# От описанието разбираме, че има само две колони свързани с *MasVnr* и не получаваме информация, че можем директно да попълним тези две колони.
# За това нека разгледаме малко графики.
train_set.MasVnrType.value_counts().plot(kind='bar');
test_set.MasVnrType.value_counts().plot(kind='bar');
# Нека погледнем как стоят нещата спрямо колоната, която трябва да предсказваме - ***SalePrice*** и да видим дали не можем да вземем решение за попълването ѝ.
color_map = {'None': 'Blue', 'BrkFace': 'Orange', 'Stone': 'Green', 'BrkCmn': 'Red', 'NA': 'Black'}
colors = [color_map[point] for point in train_set.MasVnrType.fillna('NA')]
plt.scatter(x=range(0, len(colors)), y=train_set.SalePrice, c=colors)
del color_map, colors
# Не се вижда лесно спрямо коя група липсващите са по-близки. Нека разлгедаме отделни графики.
color_map = {'None': 'Blue', 'NA': 'Black'}
indexes = train_set.MasVnrType[(train_set.MasVnrType.isnull() | (train_set.MasVnrType == 'None'))].index
points = train_set.MasVnrType[indexes].fillna('NA')
colors = [color_map[point] for point in points]
plt.scatter(x=range(0, len(colors)), y=train_set.SalePrice[indexes], c=colors)
del color_map, indexes, colors, points
color_map = {'BrkFace': 'Orange', 'NA': 'Black'}
indexes = train_set.MasVnrType[(train_set.MasVnrType.isnull() | (train_set.MasVnrType == 'BrkFace'))].index
points = train_set.MasVnrType[indexes].fillna('NA')
colors = [color_map[point] for point in points]
plt.scatter(x=range(0, len(colors)), y=train_set.SalePrice[indexes], c=colors)
del color_map, indexes, colors, points
color_map = {'Stone': 'Green', 'BrkCmn': 'Red', 'NA': 'Black'}
indexes = train_set.MasVnrType[train_set.MasVnrType.isnull() | (train_set.MasVnrType == 'Stone') | (train_set.MasVnrType == 'BrkCmn')].index
points = train_set.MasVnrType[indexes].fillna('NA')
colors = [color_map[point] for point in points]
plt.scatter(x=range(0, len(colors)), y=train_set.SalePrice[indexes], c=colors)
del color_map, indexes, colors, points
# Нека разгледаме и статистическа информация!
train_set.SalePrice[train_set.MasVnrType.isnull()].describe()
train_set.SalePrice[train_set.MasVnrType == 'None'].describe()
train_set.SalePrice[train_set.MasVnrType == 'BrkFace'].describe()
train_set.SalePrice[train_set.MasVnrType == 'Stone'].describe()
# Изглежда сякаш липсващата група е най-близка до ***Stone*** групата, разбира се няма как да знаем, че това е в сила за test set-a. Но там липсва информация в малко над 1% от данните, което не е чак толкова. За това ще предположим, че липсващите данни са от тази група и ще попълним данните. Ако не сме доволни от крайния резултат можем да пробваме нещо друго, но за сега ще пробваме с това.
# Нека видим средната стойност за двата set-а.
train_set.MasVnrArea[train_set.MasVnrType == 'Stone'].mean(), test_set.MasVnrArea[test_set.MasVnrType == 'Stone'].mean()
# Двете са много близки числа, така че ще попълним всеки от двата set-а с неговата.
def fill_missing_for_masvnr(set_to_fill):
    """Assume rows with missing masonry-veneer info belong to the 'Stone' group.

    Fills MasVnrType with 'Stone' first, then fills MasVnrArea with the mean
    area over the (now enlarged) 'Stone' group. Mutates and returns the frame.
    """
    set_to_fill.MasVnrType = set_to_fill.MasVnrType.fillna('Stone')
    stone_mean_area = set_to_fill.MasVnrArea[set_to_fill.MasVnrType == 'Stone'].mean()
    set_to_fill.MasVnrArea = set_to_fill.MasVnrArea.fillna(stone_mean_area)
    return set_to_fill
train_set = fill_missing_for_masvnr(train_set)
test_set = fill_missing_for_masvnr(test_set)
# Нека си припомним каква информация липсва.
missing(train_set)
missing(test_set)
# Ще попълним стойността на реда за който липсва информация в колоната ***Electrical***
# Нека видим как стоят нещата в тази колона, от описанието се вижда,че тя е категорийна.
train_set.Electrical.value_counts().plot(kind='bar');
# Напрактика няма голямо значение за това ще я попълним с доминантната стойност.
train_set.Electrical = train_set.Electrical.fillna('SBrkr')
# И така до тук липсва информация за:
missing(train_set)
missing(test_set)
# В не малка част от данните липсва информация в колоната ***LotFrontage***, така че за момента ще я премахнем от данните и ако не сме доволни от крайния резултат бихме могли да я попълним със средна стойност, занулим или дори да пробваме да я предскажем. Но за сега ще я пренебрегнем!
def drop_lot_frontage_column(set_to_drop_from):
    """Remove LotFrontage, which is missing in a sizeable share of rows."""
    return set_to_drop_from.drop(columns=['LotFrontage'])
train_set = drop_lot_frontage_column(train_set)
test_set = drop_lot_frontage_column(test_set)
# Вече нямаме липсваща информация в train set-а, но все още има липсваща информация в test set-a.
missing(train_set)
missing(test_set)
# Остава да попълним и всички останали липсващи данни. Това ще направим по следния начин:
#
# - Ако колоната е числова, то ще попълним липсващите в колоната с медианата.
# - Ако колоната е категорийна, то ще попълним липсващите в колоната с модата.
#
# Но преди това ще разделим колоните на числови и категорийни.
categorial_columns = test_set.select_dtypes(include = ["object"]).columns
numeric_columns = test_set.select_dtypes(exclude = ["object"]).columns
# Сега ще попълним данните липсващи в числовите колони.
def fillna_numeric_columns(set_to_fill, numeric_columns):
    """Fill missing values in the numeric columns with each column's median.

    Bug fix: the previous version assigned the column mean to EVERY row
    (``set_to_fill[col] = set_to_fill[col].mean()``), wiping out all observed
    values instead of filling only the gaps — which is why the subsequent
    ``missing()`` check still reported the same columns. We now fill only the
    missing entries, using the median as stated in the accompanying text.
    Mutates and returns ``set_to_fill``.
    """
    for col in numeric_columns:
        set_to_fill[col] = set_to_fill[col].fillna(set_to_fill[col].median())
    return set_to_fill
test_set = fillna_numeric_columns(test_set, numeric_columns)
# Нека видим дали сме попълнили някоя колона:
missing(test_set)
# Явно не сме :(
# +
def encode_categorical(data, columns, encoders):
    """Integer-encode the given categorical columns with the fitted encoders.

    NaNs are first replaced with '' so the encoders (fitted with '' among
    their classes) accept formerly-missing values.
    """
    data = data.fillna('')
    return pd.DataFrame({col: encoders[col].transform(data[col]) for col in columns}, index = data.index)
def fill_missing_from_categorical(data, columns, encoders):
    """Replace the encoded missing marker ('') with each column's mode.

    Only columns whose encoder saw '' during fitting are touched; the mode is
    computed over the rows that were not missing.
    """
    for col in columns:
        if '' in encoders[col].classes_:
            # The integer code that '' (former NaN) was mapped to.
            mapped_na = (encoders[col].transform(['']))[0]
            # NOTE(review): int(Series.mode()) relies on the mode being unique;
            # with ties this raises/behaves unexpectedly — confirm acceptable.
            data.loc[data[data[col] == mapped_na].index, col] = int(data[col][data[col] != mapped_na].mode())
    return data
def decode_categorical(data, columns, encoders):
    """Map integer codes back to the original string labels per column."""
    return pd.DataFrame({col: encoders[col].inverse_transform(data[col]) for col in columns}, index = data.index)
def fillna_catecorial_columns(data, columns):
    """Fill missing values in the categorical ``columns`` with each column's mode.

    Encoders are fitted on the union of the module-level train and test sets
    (with '' standing in for NaN in the test set) so both share one label
    space; the data is encoded, the missing marker replaced by the mode, and
    the result decoded back to strings.

    Bug fix: the encoders dict previously iterated over the module-level
    ``categorial_columns`` instead of the ``columns`` parameter, silently
    ignoring the caller's column selection. Behavior is unchanged for the
    existing call site, which passes ``categorial_columns``.
    """
    encoders = {col: LabelEncoder().fit(pd.concat([train_set[col], test_set[col].fillna('')])) for col in columns}
    return decode_categorical(fill_missing_from_categorical(encode_categorical(data, columns, encoders), columns, encoders), columns, encoders)
# -
test_set = fillna_catecorial_columns(test_set, categorial_columns)
# Нека се уверим, че вече нямаме липсващи данни:
missing(train_set)
missing(test_set)
# + active=""
# Нека видим какво постигаме с тези данни.
# +
def one_hot_encode_categorical(data, train, test, categorial_columns):
    """One-hot encode the categorical columns, fitting on the full data set
    so train and test share the same encoded feature space."""
    encoder = OneHotEncoder().fit(data[categorial_columns])
    train_encoded = encoder.transform(train[categorial_columns])
    test_encoded = encoder.transform(test[categorial_columns])
    return train_encoded, test_encoded
def map_predicrted(pred, y_mean):
    """Replace degenerate predictions (exactly 0 or +inf) with the mean target.

    Mutates ``pred`` in place and returns it.
    """
    degenerate = (pred == 0.0) | (pred == inf)
    pred[degenerate] = y_mean
    return pred
def best_score_of_LR(train):
    """Fit a baseline LinearRegression on log10(SalePrice) and grid-search a
    Ridge alpha over [0.01, 10].

    Returns ``(best_alpha, train_r2, test_r2)``; ``best_alpha`` is None when
    plain linear regression was never beaten on the test split.

    Bug fixes:
    - The Ridge model inside the search loop was built with a hard-coded
      ``alpha=0.6`` instead of the loop variable ``r``, so all 100 iterations
      evaluated the same model and the "search" was a no-op.
    - ``r2_score`` takes ``(y_true, y_pred)`` in that order (it is not
      symmetric); the arguments were reversed.
    - ``pd.np`` (removed in pandas 2.0) replaced with ``np``.
    """
    X, y = train.drop(labels=['SalePrice'], axis=1), train.SalePrice
    y_mean = float(y.mean())
    categorial_columns = X.select_dtypes(include = ["object"]).columns
    numeric_columns = X.select_dtypes(exclude = ["object"]).columns
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    one_hot_x_train, one_hot_x_test = one_hot_encode_categorical(X, x_train, x_test, categorial_columns)
    new_x_train = np.concatenate([one_hot_x_train.todense(), x_train[numeric_columns]], axis=1)
    new_x_test = np.concatenate([one_hot_x_test.todense(), x_test[numeric_columns]], axis=1)
    a = None
    # Baseline: ordinary least squares on log10(price).
    reg = LinearRegression().fit(new_x_train, np.log10(y_train))
    max_train_score = r2_score(y_train, 10**reg.predict(new_x_train))
    max_test_score = r2_score(y_test, map_predicrted(10**reg.predict(new_x_test), y_mean))
    for r in np.linspace(0.01, 10, 100):
        ridge = Ridge(alpha=r).fit(new_x_train, np.log10(y_train))
        score = r2_score(y_test, 10**ridge.predict(new_x_test))
        if score > max_test_score:
            a = r
            max_test_score = score
            max_train_score = r2_score(y_train, 10**ridge.predict(new_x_train))
    return a, max_train_score, max_test_score
# -
best_score_of_LR(train_set)
# Нека премахнем outlier-ите от train set-a
def cor_matrix(data):
    """Show the full correlation heatmap plus a zoomed heatmap of the 10
    columns most correlated with SalePrice.

    Returns (the 9 top-correlated columns excluding SalePrice itself,
    the first row of their correlation matrix).
    """
    correlations = data.corr()
    sns.heatmap(correlations, vmax=.8, square=True)
    top_cols = correlations.nlargest(10, 'SalePrice')['SalePrice'].index
    top_corr = np.corrcoef(data[top_cols].values.T)
    sns.heatmap(top_corr, square=True, yticklabels=top_cols.values, xticklabels=top_cols.values)
    plt.show()
    return top_cols[1:], top_corr[0]
cols, cor = cor_matrix(train_set)
cor
# Нека видим малко графики:
for col in cols:
plt.scatter(train_set[col], train_set.SalePrice)
plt.xlabel(col)
plt.ylabel('SalePrice')
plt.show()
train_set.SalePrice.describe()
def drop_outliers(train_set):
    """Remove the hand-picked outlier rows spotted on the scatter plots."""
    is_outlier = (
        ((train_set.GrLivArea > 4000) & (train_set.SalePrice < 300000))
        | (train_set.TotalBsmtSF > 6000)
        | (train_set.TotRmsAbvGrd > 13)
        | (train_set.GarageArea > 1230)
    )
    return train_set.drop(index=train_set[is_outlier].index)
train_set = drop_outliers(train_set)
# Нека видим графиките след премахването
for col in cols:
plt.scatter(train_set[col], train_set.SalePrice)
plt.xlabel(col)
plt.ylabel('SalePrice')
plt.show()
best_score_of_LR(train_set)
# И с този модел подобрихме и двата r2score-а от лекции :) Но има какво още да се желае
| hw1/house_prices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from Bio import Entrez
from urllib.error import HTTPError,URLError
import textwrap
import time
pd.set_option('display.max_columns', 500)
def swmatrix(a, b, match_score=3, gap_cost=2):
    """Build the Smith-Waterman scoring matrix for local alignment of a and b.

    Parameters
    ----------
    a, b : sequences (strings) to align locally
    match_score : score added for a character match; the same value is
        subtracted for a mismatch
    gap_cost : penalty subtracted for an insertion or deletion

    Returns
    -------
    H : (len(a)+1, len(b)+1) integer matrix; cell scores are clamped at 0
        as required by the Smith-Waterman local-alignment recurrence.
    """
    # np.int was removed in NumPy 1.24; the builtin int dtype is the
    # documented replacement and yields the same platform integer type.
    H = np.zeros((len(a) + 1, len(b) + 1), int)
    for i, j in itertools.product(range(1, H.shape[0]), range(1, H.shape[1])):
        match = H[i - 1, j - 1] + (match_score if a[i - 1] == b[j - 1] else -match_score)
        delete = H[i - 1, j] - gap_cost
        insert = H[i, j - 1] - gap_cost
        # Local alignment: never let a cell go negative.
        H[i, j] = max(match, delete, insert, 0)
    return H
def swtraceback(H, b, b_='', old_i=0):
    """Recursively trace back through Smith-Waterman matrix H to recover the
    locally aligned substring of b.

    Parameters
    ----------
    H : score matrix as produced by swmatrix
    b : the second (searched) sequence
    b_ : accumulator for the aligned substring, built front-to-back across
        recursive calls
    old_i : row index of the previous traceback step; a jump of more than
        one row indicates a gap, encoded as '-' in the alignment

    Returns
    -------
    (b_, j) : the aligned substring of b and the column index (position in
        b) where the alignment starts.
    """
    # flip H to get index of **last** occurrence of H.max() with np.argmax()
    H_flip = np.flip(np.flip(H, 0), 1)
    i_, j_ = np.unravel_index(H_flip.argmax(), H_flip.shape)
    i, j = np.subtract(H.shape, (i_ + 1, j_ + 1)) # (i, j) are **last** indexes of H.max()
    # A zero score marks the start of the local alignment: stop recursing.
    if H[i, j] == 0:
        return b_, j
    # Prepend the matched character; insert a gap marker when the row index
    # skipped more than one step since the previous call.
    b_ = b[j - 1] + '-' + b_ if old_i - i > 1 else b[j - 1] + b_
    # Recurse on the submatrix strictly above/left of the current cell.
    return swtraceback(H[0:i, 0:j], b, b_, i)
def smith_waterman(a, b, match_score=3, gap_cost=2):
    """Locally align b against a and return the (start, end) span in b.

    Sequences are upper-cased before alignment. The end position is the
    start plus the length of the aligned substring (gap markers included).
    """
    upper_a = a.upper()
    upper_b = b.upper()
    score_matrix = swmatrix(upper_a, upper_b, match_score, gap_cost)
    aligned, start = swtraceback(score_matrix, upper_b)
    return start, start + len(aligned)
s1='FARGNYPAL'
s2 = 'TTFLAHSLDTDK'
smith_waterman(s1,s2)
Entrez.email ="<EMAIL>"
IEDB = pd.read_csv('/Volumes/Maxtor/h2kball1.csv')
IEDB2 = pd.read_csv('/Volumes/Maxtor/h2kball2.csv')
IEDB = pd.concat([IEDB,IEDB2],ignore_index=True,sort=True)
IEDB = IEDB[['Epitope.2','Epitope.8','Assay.4']]
IEDB = IEDB[~(IEDB['Assay.4']=='Negative')]
IEDB = IEDB.drop_duplicates(subset='Epitope.2')
IEDB = IEDB.dropna(axis=0,subset=['Epitope.8']).reset_index(drop=True)
namprop = IEDB[['Epitope.8']].drop_duplicates(subset='Epitope.8').reset_index(drop=True)
namprop = namprop.drop(list(range(0,4089))).reset_index(drop=True)
proteins=pd.DataFrame(columns=['peptides','NB'])
cc=0
window_size = 12
# Fetch each referenced protein record from NCBI Entrez and slice it into
# fixed-size peptide windows: windows centred on known epitopes are labelled
# 1 (positive), leftover epitope-free chunks are labelled 0 (negative).
for index, row in namprop.iterrows():
    errorch = 0  # set to 1 when the protein record cannot be fetched
    if index == 0:
        pass
    else:
        line = row['Epitope.8']
        print(index,line)
        # Entrez fetch with one retry after 20 s on HTTP errors; a URL error
        # (transient network failure) gets a single unconditional retry.
        try:
            handle = Entrez.efetch(db="protein", id=line, retmode="xml")
        except HTTPError:
            time.sleep(20)
            try:
                handle = Entrez.efetch(db="protein", id=line, retmode="xml")
            except HTTPError:
                errorch = 1
                print('Protein '+line+' not found')
        except URLError:
            time.sleep(20)
            handle = Entrez.efetch(db="protein", id=line, retmode="xml")
        if errorch == 1:
            pass
        else:
            records = Entrez.read(handle)
            time.sleep(1) # to make sure not many requests go per second to ncbi
            orig = records[0]["GBSeq_sequence"].upper()
            s2 = orig  # working copy; aligned epitope spans are excised from it below
            peptss = IEDB[IEDB['Epitope.8']==line].reset_index(drop=True)
            peptss = peptss.drop_duplicates(subset='Epitope.2').reset_index(drop=True)
            # NOTE(review): this inner loop shadows the outer `index`/`row`
            # variables; harmless because the outer loop reassigns them on the
            # next iteration, but worth renaming for clarity.
            for index, row in peptss.iterrows():
                s1 = row['Epitope.2']
                p1, p2 = smith_waterman(s1,s2)
                pos1, pos2 = smith_waterman(s1,orig)
                # Centre of the epitope within the full (unmodified) sequence.
                ctr = int(np.round(np.mean([pos1,pos2])))
                cc=cc+1
                # Positive example: window_size residues centred on the epitope.
                proteins.loc[cc]=[orig[ctr-int(window_size/2):ctr+int(window_size/2)],int(1)]
                # Remove the aligned span from the working copy so the negative
                # chunks produced below do not contain the epitope.
                s2 = s2[:p1] + s2[p2+1:]
            # Negative examples: the epitope-free remainder, cut into
            # window_size chunks; range(..., len(peps)-1) skips the final,
            # possibly shorter-than-window chunk.
            peps = textwrap.wrap(s2,window_size)
            for n in range (0,len(peps)-1):
                cc=cc+1
                proteins.loc[cc]=[peps[n],int(0)]
proteins = proteins.drop_duplicates(subset='peptides')
proteins.to_csv("/Volumes/Maxtor/windowpeptides7.csv",index=False)
hola = textwrap.wrap("123456789", 2)
print(hola)
#1126
print(proteins)
proteins = proteins.drop_duplicates(subset='peptides')
proteins.to_csv("/Volumes/Maxtor/windowpeptides7.csv",index=False)
print(IEDB.loc[1,'Epitope.2'])
print(1388+474+458+293+259+1217)
IEDB = pd.read_csv('/Volumes/Maxtor/h2kball1.csv')
IEDB2 = pd.read_csv('/Volumes/Maxtor/h2kball2.csv')
IEDB = pd.concat([IEDB,IEDB2],ignore_index=True,sort=True)
IEDB = IEDB[['Epitope.2','Epitope.8','Assay.4']]
IEDB = IEDB[~(IEDB['Assay.4']=='Negative')]
IEDB = IEDB.drop_duplicates(subset='Epitope.2')
IEDB = IEDB.dropna(axis=0,subset=['Epitope.8']).reset_index(drop=True)
namprop = IEDB[['Epitope.8']].drop_duplicates(subset='Epitope.8').reset_index(drop=True)
#namprop = namprop.drop(list(range(132,180))).reset_index(drop=True)
#Q99JY0
print(namprop[namprop['Epitope.8']=='Q7ARG3'])
namprop = IEDB[['Epitope.8']].drop_duplicates(subset='Epitope.8').reset_index(drop=True)
namprop = namprop.drop(list(range(132,180))).reset_index(drop=True)
print(namprop.loc[200:230])
pt1 = pd.read_csv('/Volumes/Maxtor/windowpeptides1.csv')
pt2 = pd.read_csv('/Volumes/Maxtor/windowpeptides2.csv')
pt3 = pd.read_csv('/Volumes/Maxtor/windowpeptides3.csv')
pt4 = pd.read_csv('/Volumes/Maxtor/windowpeptides4.csv')
pt5 = pd.read_csv('/Volumes/Maxtor/windowpeptides5.csv')
pt6 = pd.read_csv('/Volumes/Maxtor/windowpeptides6.csv')
pt7 = pd.read_csv('/Volumes/Maxtor/windowpeptides7.csv')
pt = pd.concat([pt1,pt2,pt3,pt4,pt5,pt6,pt7],ignore_index=True)
pt = pt.drop_duplicates(subset='peptides')
indexes_to_drop = []
for index,row in pt.iterrows():
if type(row['peptides'])==float:
indexes_to_drop.append(index)
elif len(row['peptides'])<12:
indexes_to_drop.append(index)
else:
pass
pt = pt.drop(indexes_to_drop)
np.random.seed(123)
pt = pt.sample(frac=1).reset_index(drop=True)
print(pt)
negg = len(pt[pt['NB']==0])
poss = len(pt[pt['NB']==1])
total = negg+poss
print(negg/total)
strings = pt['peptides']
pt.to_csv("/Volumes/Maxtor/4train.csv",index=False)
Neg1 = pd.read_csv('/Volumes/Maxtor/realnegatives.csv')
print(Neg1)
IEDB = pd.read_csv('/Volumes/Maxtor/secondtrain.csv')
Neg1 = pd.read_csv('/Volumes/Maxtor/realnegatives.csv')
Neg1 = Neg1.sample(frac=0.3).reset_index(drop=True)
allp = pd.concat([IEDB,Neg1],ignore_index=True,sort=True)
allp = allp[['peptides','NB']]
allp = allp.sample(frac=1).reset_index(drop=True)
negg = len(allp[allp['NB']==0])
poss = len(allp[allp['NB']==1])
total = negg+poss
print(negg/total)
allp.to_csv("/Volumes/Maxtor/thirdtrain.csv",index=False)
IEDB = pd.read_csv('/Volumes/Maxtor/h2kball1.csv')
IEDB2 = pd.read_csv('/Volumes/Maxtor/h2kball2.csv')
IEDB = pd.concat([IEDB,IEDB2],ignore_index=True,sort=True)
IEDB = IEDB[['Epitope.2','Epitope.8','Assay.4']]
IEDB = IEDB[~(IEDB['Assay.4']=='Negative')]
IEDB = IEDB.drop_duplicates(subset='Epitope.2')
IEDB = IEDB.dropna(axis=0,subset=['Epitope.8']).reset_index(drop=True)
strings = IEDB['Epitope.2']
total_avg = sum( map(len, strings) ) / len(strings)
total_max = max(strings, key=len)
total_min = min(strings, key=len)
print(total_avg, len(total_max), len(total_min))
lens = list(map(len, strings))
plt.hist(lens)
# +
s1='LGITYDGMMTD'
s2 = 'MADSHNTQYCSLQESAQAQQELDNDQETMETSEEEEDTTTSNKVYGSGIPSPPQSPQRAYSPCVALASIPDSPSEEASIKGSGGLEDPLYLLHNAQNTKVYDLVDFLVLNYQMKAFTTKAEMLESIGREYEEYYPLIFSEASECLKMVFGLDMVEVDPSVHSYILVTALGITYDGMMTDVLGMPKTGILIAVLSVIFMKGNYVSEEIIWEMVNNIGLCGGRDPYIHKDPRKLISEEFVQEGCLKYRQVPNSDPPSYGFLWGPRAFAETSKMKVLQFFASINKTHPRAYPEKYAEALQDEIDRTKAWILNRCSNSSDLLTF'
p1,p2 = smith_waterman(s1,s2)
ctr = int(np.round(np.mean([p1,p2])))
window = 14
print(s2[ctr-int(window/2):ctr+int(window/2)])
print(len(s2[ctr-int(window/2):ctr+int(window/2)]))
# -
ctr = np.mean([p1,p2])
print(ctr)
IEDB = pd.read_csv('/Volumes/Maxtor/h2kball1.csv')
IEDB2 = pd.read_csv('/Volumes/Maxtor/h2kball2.csv')
IEDB = pd.concat([IEDB,IEDB2],ignore_index=True,sort=True)
IEDB = IEDB[['Epitope.2','Assay.4']]
opos = IEDB[~(IEDB['Assay.4']=='Negative')]
opos_f = pd.DataFrame(columns=['peptides','NB'])
cc = 0
window_size = 14
for index, row in opos.iterrows():
if index==0:
pass
else:
if len(row['Epitope.2'])>=14:
orig = row['Epitope.2']
ctr = int(np.round((len(row['Epitope.2'])-1)/2))
cc=cc+1
opos_f.loc[cc]=[orig[ctr-int(window_size/2):ctr+int(window_size/2)],int(1)]
print(opos_f)
pti = pd.read_csv("/Volumes/Maxtor/4train.csv")
strings = pti['peptides']
total_avg = sum( map(len, strings) ) / len(strings)
total_max = max(strings, key=len)
total_min = min(strings, key=len)
print(total_avg,total_max,total_min)
type(12)==int
print(len(pt.loc[4217,'peptides']))
Entrez.email ="<EMAIL>"
IEDB = pd.read_csv('/Volumes/Maxtor/h2kball1.csv')
IEDB2 = pd.read_csv('/Volumes/Maxtor/h2kball2.csv')
IEDB = pd.concat([IEDB,IEDB2],ignore_index=True,sort=True)
IEDB = IEDB[['Epitope.2','Epitope.8','Assay.4']]
#IEDB = IEDB[~(IEDB['Assay.4']=='Negative')]
IEDB = IEDB.drop_duplicates(subset='Epitope.2')
#IEDB = IEDB.dropna(axis=0,subset=['Epitope.8']).reset_index(drop=True)
namprop = IEDB[['Epitope.8']].drop_duplicates(subset='Epitope.8').reset_index(drop=True)
#namprop = namprop.drop(list(range(0,4089))).reset_index(drop=True)
print(namprop)
pti = pd.read_csv("/Volumes/Maxtor/4train.csv")
print(len(pti[pti['NB']==1]))
print(len(pti[pti['NB']==0]))
| Project/All extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import os
os.chdir("..")
from deepsvg.svglib.geom import Point, Angle
from deepsvg.svglib.svg import SVG
from deepsvg.svglib.utils import make_grid
# # SVGLib walk-through
# ## Loading & displaying
dolphin = SVG.load_svg("docs/imgs/dolphin.svg")
dolphin.draw()
# For proper displaying, SVGlib expects SVGs to be drawn in a normalized viewbox of size 24x24. Stroke-width will appear too thin if a larger viewbox is used and too thick if the viewbox is too small.
print(dolphin.viewbox)
# You can **normalize** an SVG with the `normalize` method which will properly scale the points to fit a 24x24 window.
dolphin.normalize()
print(dolphin.viewbox)
dolphin.draw()
# The `with_points` option lets you display the individual points of the path:
dolphin.draw(with_points=True)
# Let us "unzoom" a little, so that there is a little more free room on the borders of the image.
dolphin.zoom(0.9).draw(with_points=True)
# For easier training of DeepSVG, the following "canonicalization" strategy is used:
# - an SVG path is oriented clockwise and starts from the topmost-leftmost point.
# - the path is simplified, so that points lie at approximately equal distance between each other.
dolphin.canonicalize().draw(with_points=True)
# Our simplification heuristic uses a combination of Ramer-Douglas-Peucker and <NAME> algorithms.
dolphin.simplify_heuristic().draw(with_points=True)
# Finally, generating a GIF from the drawing is as easy as:
dolphin.animate(frame_duration=0.1)
# ## Distinguishing individual paths
svg = SVG.load_svg("docs/imgs/rainbow.svg")
# Using the `draw_colored` makes it easy to distinguish the individual paths of an SVG image.
svg.draw_colored()
# ## Rotation and grid displaying
# Here is a code sample showing how one can duplicate an existing icon, rotate it and display both icons side-by-side.
clock = SVG.load_svg("docs/imgs/clock.svg")
clock2 = clock.copy().rotate(Angle(45))
clock3 = clock.copy().rotate(Angle(90))
make_grid([clock, clock2, clock3]).draw()
# ## Manipulating individual paths
canvas = SVG.load_svg("docs/imgs/canvas.svg")
canvas.draw()
# Let us scale the mountain on the right and translate the sun to the left.
mountain2 = canvas[4]
center = mountain2.bbox().center
mountain2.translate(-center).scale(1.5).translate(center).draw()
sun = canvas[1]
sun.translate(Point(-10, 0)).draw()
canvas.draw()
# ## Conversion to PyTorch tensor-format
square = SVG.unit_square().normalize().zoom(0.5).rotate(Angle(45))
square.draw()
t = square.copy().numericalize().to_tensor()
t
# `to_tensor` converts the `SVG` instance to a PyTorch tensor of shape $N_C \times (1 + 13)$, representing the $N_C$ commands that constitute the SVG path. If the SVG has more than one path, commands will either be concatenated or `to_tensor` will output a list of tensors.
# - the first row represents the command type (`m`, `l`, `c`, `a`, `EOS`, `SOS` or `z`)
# - the 13 subsequent rows represent respectively: $r_x$, $r_y$, $\varphi$, $f_A$, $f_S$, $x_1$, $y_1$, $q_{x1}$, $q_{y1}$, $q_{x2}$, $q_{y2}$, $x_2$ and $y_2$. Note that the first 5 rows are for a potential future compatibility with elliptical arc commands, but aren't used in general.
#
# As described in the paper, unused arguments are encoded as -1.
#
# After numericalization, coordinates are between 0 and 255.
from deepsvg.difflib.tensor import SVGTensor
tensor = SVGTensor.from_data(t)
# `SVGTensor` is a wrapper around this PyTorch raw data, and allows for easy padding/EOS/SOS processing as well as differentiable sampling of points along the path contours. More details in the `notebooks/svgtensor.ipynb` notebook!
| notebooks/svglib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="MHtyJaj6RiR8" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
# + id="_WFSOMMVZtun" colab_type="code" colab={}
# #!pip install datadotworld
# #!pip install datadotworld[pandas]
# + id="q7a7JCtWaVgb" colab_type="code" colab={}
# #!dw configure
# + id="awPAxhKJaY9R" colab_type="code" colab={}
import datadotworld as dw
# + id="WHgYm0q7agc6" colab_type="code" colab={}
#drive.mount("/content/drive")
# + id="z8f_XeAValW7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8a5655a8-f1fd-4d33-8c77-95f16f1c3e47" executionInfo={"status": "ok", "timestamp": 1581544891866, "user_tz": -60, "elapsed": 592, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="BeDoFMISatnS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5708e22b-1881-49ee-8d24-2db242a71f4e" executionInfo={"status": "ok", "timestamp": 1581544940925, "user_tz": -60, "elapsed": 2471, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
# ls matrix_one
# + id="2evsCrzZbEwR" colab_type="code" colab={}
# !mkdir data
# + id="7x8C1WwgbPHp" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="-kKsQkj9bfp4" colab_type="code" colab={}
# !git add .gitignore
# + id="IXfbHeaKbjcX" colab_type="code" colab={}
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="aeoZiN1EbxCI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="6a284cc5-b4cc-40f8-f1a0-cf781fae1a0a" executionInfo={"status": "ok", "timestamp": 1581545157645, "user_tz": -60, "elapsed": 2183, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df = data.dataframes['7004_1']
df.shape
# + id="foVX2gr1b2mQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 615} outputId="c79032c8-d647-4174-901d-2d826b1a327e" executionInfo={"status": "ok", "timestamp": 1581545175690, "user_tz": -60, "elapsed": 933, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df.sample(5)
# + id="WIC2INILcDaA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="326b4fd9-21ca-4297-bb2e-bfb106846ae8" executionInfo={"status": "ok", "timestamp": 1581545197204, "user_tz": -60, "elapsed": 568, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df.columns
# + id="l69ZU1pTcIvo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="2a6e118b-82d8-4f7a-f7a0-3839ffb78892" executionInfo={"status": "ok", "timestamp": 1581545225655, "user_tz": -60, "elapsed": 568, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df.prices_currency.unique()
# + id="Zw6Rp0mNcPrn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="878c1d2f-d26a-4bcf-c4f3-25e05a5bc6a4" executionInfo={"status": "ok", "timestamp": 1581545280741, "user_tz": -60, "elapsed": 785, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df.prices_currency.value_counts(normalize=True)
# + id="UyKVG2gecXNP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b7ff2419-860a-40b2-a035-4eebb8ff5a12" executionInfo={"status": "ok", "timestamp": 1581545379048, "user_tz": -60, "elapsed": 613, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df_usd = df[df.prices_currency == 'USD'].copy()
df_usd.shape
# + id="uBRYL6hzcj9C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="18360683-4196-4ac5-98e4-b315d12fdd13" executionInfo={"status": "ok", "timestamp": 1581545567281, "user_tz": -60, "elapsed": 574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
# np.float was removed in NumPy 1.24; the builtin float is the documented
# replacement and produces the same float64 column dtype.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()
# + id="x_fz_1y8dAEP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9a9c78e9-0b8f-4cec-a83e-dbe9b8a0bf08" executionInfo={"status": "ok", "timestamp": 1581545897631, "user_tz": -60, "elapsed": 731, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
filter_max = np.percentile(df_usd['prices_amountmin'],99)
filter_max
# + id="gOWjvDdIdvtp" colab_type="code" colab={}
df_usd_filter = df_usd[df_usd['prices_amountmin']<filter_max]
# + id="poBUpkUoeLj2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="f65ff74b-4fd9-4469-a004-5822c7dcffd3" executionInfo={"status": "ok", "timestamp": 1581545900530, "user_tz": -60, "elapsed": 864, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="x8uErp_SeVUv" colab_type="code" colab={}
# !git add matrix_one/matrixday3.ipynb
# + id="PGHPYEQVga-B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="5fea80c9-46ab-41f5-dab9-caaa1791d615" executionInfo={"status": "ok", "timestamp": 1581546537924, "user_tz": -60, "elapsed": 3803, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
# !git commit -m "Read Men's Shoe Prices fataset from data.world"
# + id="dfezsUmEhBR1" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Bogna"
# + id="NxneBdIqhLMV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="9213472a-f79f-41bc-870f-2d0318b6030f" executionInfo={"status": "ok", "timestamp": 1581546682682, "user_tz": -60, "elapsed": 6108, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mA8r-uU-ic9bTie-HKICW7GlVtVAwaRairckHTF=s64", "userId": "16349637023315994061"}}
# !git push -u origin master
# + id="mGCh1-f1hyC9" colab_type="code" colab={}
| matrix_one/matrixday3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Matriculando alunos
# ## Preparando ambiente
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(20, 10))
# -
# !pip3 install html5lib
# !pip install lxml
import html5lib
# ## Carregando dados
# ### Nomes
nomes = pd.read_json('dados/nomes.json')
nomes
nomes.set_index('id_aluno', inplace = True)
nomes
# ### Cursos
cursos = pd.read_json('dados/cursos.json')
cursos
cursos.index.name = 'id'
cursos
# ## Matrículas
# ### Informando o número de cursos de cada aluno
total_alunos = len(nomes)
nomes['matriculas'] = np.ceil(np.random.exponential(size = total_alunos) * 1.5).astype(int)
nomes.sample(20)
sns.countplot(data = nomes, x = 'matriculas');
nomes.matriculas.describe()
nomes.matriculas.value_counts()
# ### Associando alunos aos cursos
len(cursos)
max(cursos.index)
matriculas_alunos = []
# cria uma sequencia de n valores aleatórios entre 0 e 1, sendo o n = número de cursos
x = np.random.rand(len(cursos))
# Cria uma probabilidade para que cada curso seja o curso que o aluno está fazendo
prob_curso = x / sum(x)
prob_curso
x
# +
for i, row in nomes.iterrows():
id = i
matriculas = row.matriculas
for j in range(matriculas):
mat = [id, np.random.choice(cursos.index, p = prob_curso)]
matriculas_alunos.append(mat)
matriculas_alunos
# -
matriculas = pd.DataFrame(matriculas_alunos, columns = ['id_aluno', 'id_curso'])
matriculas.sample(10)
matriculasCurso = matriculas.groupby('id_curso').count().join(cursos['nome_do_curso']).rename(columns = {'id_aluno': 'quantidade_alunos'})
matriculasCurso
# ## Salvando os dados
matriculasCurso.reset_index(inplace = True)
matriculasCurso.sample(10)
matriculasCurso.to_json('dados/matriculas_curso.json')
pd.read_json('dados/matriculas_curso.json')
matriculas.to_json('dados/matriculas.json')
nomes
nomes.to_json('dados/nomes.json')
| PandasIO/3 - Matriculando alunos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import tree
from dtreeviz.trees import *
from sklearn.datasets import load_boston
# # Cars
# +
import pandas as pd
from sklearn.model_selection import train_test_split
df_cars = pd.read_csv("data/cars.csv")
X = df_cars[['WGT']]
y = df_cars['MPG']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from matplotlib import rcParams
rcParams['font.family'] = 'Arial'
plt.scatter(X,y,marker='o', alpha=.4, c='#4575b4',
edgecolor=GREY, lw=.3)
plt.xlabel("Vehicle Weight", fontsize=14)
plt.ylabel("MPG", fontsize=14)
plt.tight_layout()
plt.savefig("/tmp/cars-wgt-vs-mpg.svg", bbox_inches=0, pad_inches=0)
# +
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
fig = plt.figure()
ax = fig.gca()
max_depth = 1
rtreeviz_univar(ax,
X.WGT, y,
max_depth=max_depth,
feature_name='Vehicle Weight',
target_name='MPG',
fontsize=14,
show={'splits'})
plt.savefig(f"/tmp/cars-dectree-depth-{max_depth}.svg", bbox_inches=0, pad_inches=0)
# +
def get_splits(x_train, y_train, max_depth):
    """Fit a depth-limited regression tree on a single feature and return
    (split thresholds of the internal nodes, predictions of the leaves).
    """
    X = x_train.reshape(-1, 1)
    fitted = tree.DecisionTreeRegressor(max_depth=max_depth)
    fitted.fit(X, y_train)
    shadow_tree = ShadowDecTree(fitted, X, y_train, feature_names=['foo'])
    splits = [node.split() for node in shadow_tree.internal]
    preds = [node.prediction() for node in shadow_tree.leaves]
    return splits, preds
splits, preds = get_splits(X.WGT.values, y, 1)
print(splits, '\n', preds)
splits, preds = get_splits(X.WGT.values, y, 2)
print(splits, '\n', preds)
# -
# ## Animate cars
# +
df_cars = pd.read_csv("data/cars.csv")
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
max_depth = 4
features=[2,1]
X = X.values[:, features]
figsize = (6,5)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
rtreeviz_bivar_3D(ax,
X, y,
max_depth=max_depth,
feature_names=['Vehicle Weight', 'Horse Power'],
target_name='MPG',
fontsize=14)
plt.savefig(f"/tmp/rtree-depth-{max_depth}.svg", bbox_inches=0, pad_inches=0)
plt.show()
# +
max_depth = 2
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
x_train = X.values[:,2]
y_train = y
feature_names = ['Vehicle Weight']
t = tree.DecisionTreeRegressor(max_depth=max_depth)
t.fit(x_train.reshape(-1,1), y_train)
viz = dtreeviz(t, x_train.reshape(-1,1), y_train, target_name='MPG',
feature_names=feature_names)
filename = f"/tmp/cars-wgt-depth-{max_depth}.svg"
plt.tight_layout()
viz.save(filename)
# -
# # Wine
# +
from sklearn.datasets import load_wine
fig = plt.figure()
ax = fig.gca()
wine = load_wine()
class_values = [0,1,2]
print(wine.feature_names)
X = wine.data[:,[12,6]]
y = wine.target
X_hist = [X[y == cl] for cl in class_values]
color_values = color_blind_friendly_colors[3]
colors = {v: color_values[i] for i, v in enumerate(class_values)}
for i, h in enumerate(X_hist):
ax.scatter(h[:,0], h[:,1], alpha=1, marker='o', s=25, c=colors[i],
edgecolors=GREY, lw=.3)
#plt.scatter(X,y,marker='o', alpha=.4, c='#4575b4', edgecolor=GREY, lw=.3)
plt.xlabel("Profline", fontsize=14)
plt.ylabel("Flavanoid", fontsize=14)
plt.tight_layout()
plt.savefig("/tmp/wine-prol-flav.svg", bbox_inches=0, pad_inches=0)
# +
from dtreeviz.trees import *
max_depth = 2
features = [12]
feature_names=['proline']
figsize = (6, 2)
fig, ax = plt.subplots(1, 1, figsize=figsize)
x_train = wine.data[:, features[0]]
y_train = wine.target
ctreeviz_univar(ax, x_train, y_train, max_depth=max_depth, feature_name=feature_names[0],
class_names=list(wine.target_names), gtype='strip', target_name='wine',
show={'splits','legend'})
filename = f"/tmp/wine-{feature_names[0]}-featspace-depth-{max_depth}.svg"
plt.tight_layout()
plt.savefig(filename, bbox_inches=0, pad_inches=0)
# +
max_depth = 2
features = [12,6]
feature_names=['proline','flavanoids']
figsize = (6, 5)
fig, ax = plt.subplots(1, 1, figsize=figsize)
X_train = wine.data
X_train = X_train[:, features]
y_train = wine.target
ctreeviz_bivar(ax, X_train, y_train, max_depth=max_depth, feature_names=feature_names,
class_names=list(wine.target_names), target_name='wine',
show={'splits'})
filename = f"/tmp/wine-{','.join(feature_names)}-featspace-depth-{max_depth}.svg"
plt.tight_layout()
plt.savefig(filename, bbox_inches=0, pad_inches=0)
# +
features = [12,6]
feature_names=['proline','flavanoids']
max_depth = 2
clf = tree.DecisionTreeClassifier(max_depth=max_depth)
wine = load_wine()
X_train = wine.data[:, features]
y_train = wine.target
clf.fit(X_train, y_train)
viz = dtreeviz(clf, X_train, y_train, target_name='wine',
feature_names=feature_names,
class_names=list(wine.target_names),
histtype='strip')
filename = f"/tmp/wine-{','.join(feature_names)}-depth-{max_depth}.svg"
plt.tight_layout()
viz.save(filename)
# -
# ## sklearn version
# ### wine
# +
from sklearn.tree import export_graphviz
wine = load_wine()
X_train = wine.data[:, features]
y_train = wine.target
max_depth = 2
clf = tree.DecisionTreeClassifier(max_depth=max_depth)
clf.fit(X_train, y_train)
dot = export_graphviz(clf, out_file=None,
filled=True, rounded=True,
feature_names=feature_names,
class_names=list(wine.target_names),
special_characters=True)
viz = graphviz.Source(dot)
viz.render(f"/tmp/wine-sklearn-depth-{max_depth}")
# -
# ### cars
# +
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
features=[2]
feature_names=['Vehicle Weight']
target_name='MPG'
X_train = X.values[:, 2]
y_train = y
max_depth = 2
regr = tree.DecisionTreeRegressor(max_depth=max_depth)
regr.fit(X_train.reshape(-1,1), y_train)
dot = export_graphviz(regr, out_file=None,
filled=True, rounded=True,
feature_names=feature_names,
special_characters=True)
viz = graphviz.Source(dot)
viz.render(f"/tmp/cars-sklearn-depth-{max_depth}")
# -
| testing/slides.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # `scabs1(Z)`
#
# Computes the sum of absolute values of the real and imaginary components of $z$.
#
# Operates on single-precision complex-valued scalars.
# ### Example usage
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(''), "..", "..")))
import numpy as np
from pyblas.level1 import scabs1
scabs1(-1 + 2j)
# ### Docstring
# + jupyter={"source_hidden": true}
help(scabs1)
# -
# ### Source code
# + jupyter={"source_hidden": true}
# scabs1??
| docs/level1/scabs1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data
import pandas as pd
import numpy as np
from collections import Counter
# ls
data_clients = pd.read_csv('clients.csv')
data_materials = pd.read_csv('materials.csv')
data_plants = pd.read_csv('plants.csv')
data_transaction = pd.read_parquet('transactions.parquet')
data_materials.set_index('material', inplace=True)
data_client_date = pd.DataFrame(data_transaction.groupby(['client_id', 'chq_date'])['material'].apply(list))
data_client_date_sum = data_transaction.groupby(['client_id', 'chq_date'])['sales_sum'].sum()
# ## Merging the tables
# * Our aim to get the list of regular purchases for each client
# * For this we merge tables Transactions and Materials in order to have information about most popular subcategories
def category(g):
    """Look up the level-4 hierarchy category for material id g.

    Relies on the module-level data_materials frame (indexed by 'material');
    a missing material id raises KeyError from the .loc lookup.
    """
    return data_materials.loc[g, 'hier_level_4']
def regular(l, k=7):
c = Counter()
for s in l:
for g in set(s):
try:
c[category(g)] += 1
except:
pass
return [g[0] for g in c.most_common(k)]
data_client_populars = data_client_date.groupby(['client_id'])['material'].apply(list).apply(regular)
sum_sales_per_material = data_transaction.groupby(['material'])['sales_sum'].apply(sum)
client_populars = pd.DataFrame(data_client_populars)
client_populars.head()
# ## Constructing the Embedding using tSNE
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
texts = [' '.join(s) for s in client_populars['material'].to_list()]
X = vectorizer.fit_transform(texts)
X.shape
from sklearn.manifold import TSNE
transformed_data = TSNE(n_components=2, n_jobs=-1).fit_transform(X)
# ## Clustering algorithms
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=10)
kmeans.fit(transformed_data)
labels = kmeans.predict(transformed_data)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.scatter(transformed_data[:, 0], transformed_data[:, 1], c=labels, s=5);
# ## Future work
# * experiments with other clustering techniques
# * training NN embedding using Jackard similarity
| Lenta/Client_embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Setup
# %load_ext sql
# %config SqlMagic.autocommit=False # avoiding the error: FAILED: IllegalStateException COMMIT is not supported yet.
# %sql hive://hadoop@localhost:10000/
# # Time It
# ## Count all rows
# %time %sql select count(*) from movielens.ratings
# %time %sql select count(*) from movielens_parquet.ratings
# %time %sql select count(*) from movielens_parquet_compressed.ratings
# ## Get max(userid)
# %time %sql select max(userid) from movielens.ratings
# %time %sql select max(userid) from movielens_parquet.ratings
# %time %sql select max(userid) from movielens_parquet_compressed.ratings
# ## Save all Ratings into a Variable
# %%time
# test_1 = %sql select userid from movielens.ratings
# %%time
# test_2 = %sql select userid from movielens_parquet.ratings
# %%time
# test_3 = %sql select userid from movielens_parquet_compressed.ratings
| V6/3_movie_lens_parquet/3_Performance_Comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bernstein-Vazirani Algorithm
# In this section, we first introduce the Bernstein-Vazirani problem, and classical and quantum algorithms to solve it. We then implement the quantum algorithm using Qiskit, and run on a simulator and device.
#
# ## Contents
#
# 1. [Introduction](#introduction)
# 1.1 [Bernstein-Vazirani Problem](#bvproblem)
# 1.2 [Bernstein-Vazirani Algorithm](#bvalgorithm)
# 2. [Example](#example)
# 3. [Qiskit Implementation](#implementation)
# 3.1 [Simulation](#simulation)
# 3.2 [Device](#device)
# 4. [Problems](#problems)
# 5. [References](#references)
# ## 1. Introduction <a id='introduction'></a>
#
# The Bernstein-Vazirani algorithm, first introduced in Reference [1], can be seen as an extension of the Deutsch-Jozsa algorithm covered in the last section. It showed that there can be advantages in using a quantum computer as a computational tool for more complex problems compared to the Deutsch-Jozsa problem.
#
# ### 1a. Bernstein-Vazirani Problem <a id='bvproblem'> </a>
#
# The Bernstein-Vazirani Problem is based around the idea of a function, $f(x)$. This takes as input a string of $n$ bits, for some given $n$, and then returns either $0$ or $1$. Specifically, it returns the bitwise product of the input with a reference string, $s$,
#
# $$
# f(x) = s \cdot x \,\, \text{mod 2} = \left( \sum_j s_j x_j \right) \,\, \text{mod 2} .
# $$
#
# Here $x_j$ and $s_j$ refer to the $j$th bit of their respective strings, where $x_0$ is the least significant bit of $x$, $x_{n-1}$ is the most significant and so on.
#
# We are given the ability to compute the output of such a function, without being told the corresponding string $s$. We could, for example, have been given a program that computes the function without specifically referencing the string. This is known as the 'oracle'.
#
# By making queries to the oracle, our job is to find the 'hidden string' $s$. We would like to do this using as few queries as possible.
# ### 1b. Bernstein-Vazirani Algorithm <a id='bvalgorithm'> </a>
#
# #### Classical Solution
#
# When the oracle is run classically, each query simply returns an output $s \cdot x \,\, \text{mod 2}$ for a given $x$. To acquire the $n$ bits of information that make up $s$, we need to run $n$ times. The most straightforward method is to query the oracle with each of the $n$ input strings for which $n-1$ of the bits are $\texttt{0}$ and a single bit is $\texttt{1}$. For example, for $n=3$, $f(\texttt{001})=s_0$, $f(\texttt{010})=s_1$ and $f(\texttt{100})=s_2$.
#
# #### Quantum Solution
#
# Even if we are given $f(x)$ as a classical program, we can still implement it on a quantum computer. The problem can then be solved with 100% confidence using only a single quantum query.
#
# Before we look at the quantum Bernstein-Vazirani algorithm, let's start by working backwards from the solution. Take the string $s$ and write it as a bit string on $n$ qubits. For example, $\texttt{001}$ would become $\left|001\right\rangle$.
#
# Now what would happen if we were to take this state $\left|s\right\rangle$, and apply a Hadamard to every qubit? This would replace each $\left|0\right\rangle$ in our string with $\left|+\right\rangle$, and each $\left|1\right\rangle$ with $\left|-\right\rangle$. More concisely, we can say that it acts on each bit $s_j$ as
#
# $$
# H \left| s_j \right\rangle = \frac{1}{\sqrt{2}} \sum_{x_j \in {0,1}} (-1)^{s_j x_j} \left| x_j \right\rangle.
# $$
#
# The state of all $n$ qubits will then become
#
# $$
# \left| \tilde s \right\rangle = H^{\otimes n} \left| s \right\rangle = \frac{1}{\sqrt{2^n}} \sum_x (-1)^{s\cdot x} \left|{x}\right\rangle
# $$
#
# Here $s \cdot x = \sum_j s_j x_j$, as before. Also, since this quantity is the exponent of $-1$, the property $(-1)^2=1$ means that we could equivalently use the exponent $s \cdot x \,\, \text{mod 2}$. This is exactly the function $f(x)$ that we are interested in.
#
# Since the Hadamard gate is its own inverse, it follows that $H^{\otimes n} \left| \tilde s \right\rangle = \left| s \right\rangle$. This gives us a hint about how to solve the problem: if we can find some alternative way to prepare $\left| \tilde s \right\rangle$, simply applying Hadamard gates will reveal $s$.
#
# The alternative method is to use the oracle. With our knowledge of how to apply the function classically, we can implement a unitary $Q_f$.
#
# $$
# Q_f \left| x \right\rangle = (-1)^{f(x)} \left| x \right\rangle.
# $$
#
# This could be done, for example, by implementing our program to calculate $f(x)$ using $n+1$ qubits, where an `x` gate is applied to the extra qubit if and only if $f(x)=1$. If the extra qubit is initialized as $\left| 0 \right\rangle$, this would mean that the result would be written onto this qubit. If it were initialized as $\left| - \right\rangle$, however, the effect would simply be that the state would acquire a phase of $(-1)^{f(x)}$. The state of the extra qubit would be otherwise unaffected, and so could subsequently be ignored.
#
# Once we have implemented the unitary $Q_f$, we would find that
#
# $$
# \left| \tilde s \right\rangle = Q_f H^{\otimes n} \left| 0 \right\rangle ^{\otimes n}.
# $$
#
# Here the $H$ gates create a superposition of all bit strings. Since the initial state is $\left| 0 \right\rangle$ on each qubit, the phase for each string is $+1$. The effect of $Q_f$ is then to inject the phases required for the state $\left| \tilde s \right\rangle$.
#
# Putting all this together, the quantum Bernstein-Vazirani algorithm to find the hidden string becomes very simple: (1) start from a $\left|0\right\rangle^{\otimes n}$ state, (2) apply Hadamard gates, (3) query the oracle, (4) apply Hadamard gates, and (5) measure, generically illustrated below:
#
# 
#
# The final state is simply $\left| \tilde s \right\rangle$, so with the measurement we read out the hidden bit string $s$.
# ## 2. Example <a id='example'></a>
#
# Let's go through a specific example for $n=2$ qubits and a secret string $s=11$. Note that we are following the formulation in Reference [2] that generates a circuit for the Bernstein-Vazirani quantum oracle using only one register.
#
# <ol>
# <li> The register of two qubits is initialized to zero:
#
#
# $$\lvert \psi_0 \rangle = \lvert 0 0 \rangle$$
#
#
# </li>
#
# <li> Apply a Hadamard gate to both qubits:
#
#
# $$\lvert \psi_1 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle + \lvert 0 1 \rangle + \lvert 1 0 \rangle + \lvert 1 1 \rangle \right) $$
#
#
# </li>
#
# <li> For the string $s=11$, the quantum oracle can be implemented as $\text{Q}_f = Z_{1}Z_{2}$:
#
#
# $$\lvert \psi_2 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle - \lvert 0 1 \rangle - \lvert 1 0 \rangle + \lvert 1 1 \rangle \right)$$
#
#
# </li>
#
# <li> Apply a Hadamard gate to both qubits:
#
#
# $$\lvert \psi_3 \rangle = \lvert 1 1 \rangle$$
#
#
# </li>
#
# <li> Measure to find the secret string $s=11$
# </li>
#
#
# </ol>
#
#
# ## 3. Qiskit Implementation <a id='implementation'></a>
# We now implement the Bernstein-Vazirani algorithm with Qiskit for a two bit function with $s=11$.
# +
# initialization
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg' # Makes the images look nice
import numpy as np
# importing Qiskit
from qiskit import IBMQ, BasicAer
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.visualization import plot_histogram
# -
# We first set the number of qubits used in the experiment, and the hidden integer $s$ to be found by the algorithm. The hidden integer $s$ determines the circuit for the quantum oracle.
# +
nQubits = 2 # number of physical qubits used to represent s
s = 3 # the hidden integer
# make sure that a can be represented with nqubits
s = s % 2**(nQubits)
# -
# We then use Qiskit to program the Bernstein-Vazirani algorithm.
# +
# Creating registers
# qubits for querying the oracle and finding the hidden integer
qr = QuantumRegister(nQubits)
# bits for recording the measurement on qr
cr = ClassicalRegister(nQubits)
bvCircuit = QuantumCircuit(qr, cr)
barriers = True
# Apply Hadamard gates before querying the oracle
for i in range(nQubits):
bvCircuit.h(qr[i])
# Apply barrier
if barriers:
bvCircuit.barrier()
# Apply the inner-product oracle: a Z gate on qubit i injects the phase
# (-1)^(s_i x_i) wherever bit i of the hidden string s is 1
for i in range(nQubits):
    if (s & (1 << i)):
        bvCircuit.z(qr[i])
    else:
        # NOTE(review): `iden` was deprecated and later removed from Qiskit;
        # newer releases spell this `bvCircuit.i(qr[i])` — confirm the
        # installed Qiskit version before running
        bvCircuit.iden(qr[i])
# Apply barrier
if barriers:
bvCircuit.barrier()
#Apply Hadamard gates after querying the oracle
for i in range(nQubits):
bvCircuit.h(qr[i])
# Apply barrier
if barriers:
bvCircuit.barrier()
# Measurement
bvCircuit.measure(qr, cr)
# -
bvCircuit.draw(output='mpl')
# ### 3a. Experiment with Simulators <a id='simulation'></a>
#
# We can run the above circuit on the simulator.
# +
# use local simulator
backend = BasicAer.get_backend('qasm_simulator')
shots = 1024
results = execute(bvCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
plot_histogram(answer)
# -
# We can see that the result of the measurement is the binary representation of the hidden integer $3$ $(11)$.
# ### 3b. Experiment with Real Devices <a id='device'></a>
#
# We can run the circuit on the real device as below.
# Load our saved IBMQ accounts and get the least busy backend device with less than or equal to 5 qubits
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits <= 5 and
x.configuration().n_qubits >= 2 and
not x.configuration().simulator and x.status().operational==True))
print("least busy backend: ", backend)
# +
# Run our circuit on the least busy backend. Monitor the execution of the job in the queue
from qiskit.tools.monitor import job_monitor
shots = 1024
job = execute(bvCircuit, backend=backend, shots=shots)
job_monitor(job, interval = 2)
# +
# Get the results from the computation
results = job.result()
answer = results.get_counts()
plot_histogram(answer)
# -
# As we can see, most of the results are $11$. The other results are due to errors in the quantum computation.
# ## 4. Problems <a id='problems'></a>
#
# 1. The above [implementation](#implementation) of Bernstein-Vazirani is for a secret bit string of $s = 11$. Modify the implementation for a secret string of $s = 1011$. Are the results what you expect? Explain.
# 2. The above [implementation](#implementation) of Bernstein-Vazirani is for a secret bit string of $s = 11$. Modify the implementation for a secret string of $s = 1110110101$. Are the results what you expect? Explain.
#
# ## 5. References <a id='references'></a>
# 1. <NAME> and <NAME> (1997) "Quantum Complexity Theory" SIAM Journal on Computing, Vol. 26, No. 5: 1411-1473, [doi:10.1137/S0097539796300921](https://doi.org/10.1137/S0097539796300921).
# 2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2001) "Implementation of a quantum algorithm to solve the Bernstein-Vazirani parity problem without entanglement on an ensemble quantum computer", Phys. Rev. A 64, 042306, [10.1103/PhysRevA.64.042306](https://doi.org/10.1103/PhysRevA.64.042306), [arXiv:quant-ph/0012114](https://arxiv.org/abs/quant-ph/0012114).
import qiskit
qiskit.__qiskit_version__
| qiskit-textbook/content/ch-algorithms/bernstein-vazirani.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K Nearest Neighbors com Python
#
# Você recebeu um conjunto de dados classificados de uma empresa. Eles ocultaram a coluna de parâmetros, mas lhe deram os dados e a classe de destino.
#
# Vamos tentar usar o KNN para criar um modelo que possa predizer diretamente a classe para um novo ponto de dados baseado nos parâmetros.
#
# Vamos pegar e usá-lo!
# ## Import Libraries
#
#
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# ## Obter dados
#
# Defina index_col = 0 para usar a primeira coluna como índice.
df = pd.read_csv("Classified Data",index_col=0)
df.head()
# ## Normalizar as variáveis
#
# Como o classificador KNN prediz a classe de uma determinada observação ao identificar as observações mais próximas, a escala da variável é importante. Todas as variáveis que estão em grande escala terão um efeito muito maior na distância entre as observações e, portanto, sobre o classificador KNN, do que as variáveis em pequena escala.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df.drop('TARGET CLASS',axis=1))
scaled_features = scaler.transform(df.drop('TARGET CLASS',axis=1))
df_feat = pd.DataFrame(scaled_features,columns=df.columns[:-1])
df_feat.head()
# ## Divisão treino-teste
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(scaled_features,df['TARGET CLASS'],
test_size=0.30)
# ## Usando o KNN
#
# Lembre-se de que estamos tentando encontrar um modelo para prever se alguém estará na TARGET CLASS ou não. Começaremos com k = 1
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
# ## Previsões e avaliações
#
# Vamos avaliar o nosso modelo KNN!
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(y_test,pred))
print(classification_report(y_test,pred))
# # Escolhendo um valor K
#
# Vamos em frente e usar o método do cotovelo para escolher um bom Valor K:
# +
error_rate = []
# Elbow method: fit a KNN classifier for each K in 1..39 and record the
# test-set misclassification rate (this will take some time)
for i in range(1,40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train,y_train)
    pred_i = knn.predict(X_test)
    # fraction of test samples misclassified at this K
    error_rate.append(np.mean(pred_i != y_test))
# -
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed', marker='o',
markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# Aqui podemos ver que, após cerca de K > 23, a taxa de erro tende a girar em torno de 0,06-0,05. Vamos treinar novamente o modelo com isso e verificar o relatório de classificação!
# +
# PRIMEIRA COMPARAÇÃO RÁPIDA PARA O NOSSO ORIGINAL K = 1
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
print('WITH K=1')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
# +
# Agora com K = 23
knn = KNeighborsClassifier(n_neighbors=23)
knn.fit(X_train,y_train)
pred = knn.predict(X_test)
print('WITH K=23')
print('\n')
print(confusion_matrix(y_test,pred))
print('\n')
print(classification_report(y_test,pred))
# -
# Conseguimos extrair mais algum desempenho do nosso modelo, ajustando-nos para um melhor valor K!
| coursera/Python-Data-Science-and-Machine-Learning-Bootcamp/5. Machine Learning/K-Nearest-Neighbors/K Nearest Neighbors com Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:variants]
# language: python
# name: conda-env-variants-py
# ---
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import statsmodels.discrete.count_model as reg_models
# Load all GPCR missense variants
missense_variants = pd.read_csv('../data/gnomAD_population_variants/gnomad_v2.1.1_gpcr_variants_missense.csv',index_col=0)
# Load all GPCR synonymous variants
synonymous_variants = pd.read_csv('../data/gnomAD_population_variants/gnomad_v2.1.1_gpcr_variants_synonymous.csv',index_col=0)
# Load all GPCR LoF variants
pLoF_variants = pd.read_csv('../data/gnomAD_population_variants/gnomad_v2.1.1_gpcr_variants_plof.csv',index_col=0)
# Load pre-computed constraint metrics
summary_constraint = pd.read_csv('../data/gnomAD_population_variants/gnomad_v2.1.1_gpcr_precomputed_constraint.csv',index_col=0)
# Load residue labels for class A GPCRs
residue_labels_classA = pd.read_csv('../data/protein_sequences/GPCRdb_generic_numbers_by_gpcr_residue_ClassA.csv')
# Load classification of GPCRs
gpcr_classes = pd.read_csv('../data/gene_annotations/GPCRdb_class_by_gpcr.csv')
# Merge missense variants with class A residue labels and filter for non-matching reference AAs
missense_variants_classA = residue_labels_classA.merge(missense_variants,
left_on=['HGNC Symbol','sequence_position'],
right_on=['HGNC symbol','protein_pos'],
how='left')
missense_variants_classA = missense_variants_classA.drop([
'HGNC symbol',
'ensembl_gene',
'ensembl_transcript',
'swissprot_match',
'protein_pos'
],axis=1)
missense_variants_classA = missense_variants_classA[missense_variants_classA['amino_acid']==missense_variants_classA['reference_amino_acid']]
missense_variants_classA.to_csv('../data/gnomad_v2.1.1_gpcr_variants_missense_labelled_classA.csv')
# missense_variants_classA = pd.read_csv('../data/gnomad_v2.1.1_gpcr_variants_missense_labelled_classA.csv',index_col=0)
# Count total synonymous variants per gene
synonymous_counts_by_gene = synonymous_variants[['HGNC symbol']].value_counts().reset_index()
synonymous_counts_by_gene.columns = ['HGNC symbol','obs_synonymous_protein']
# Count total missense variants per gene
missense_counts_by_gene = missense_variants_classA[['HGNC Symbol']].value_counts().reset_index()
missense_counts_by_gene.columns = ['HGNC symbol','obs_missense_protein']
# Count missense variants by segment and protein
missense_counts_by_segment = missense_variants_classA[['HGNC Symbol','protein_segment']].value_counts().reset_index()
missense_counts_by_segment.columns = ['HGNC symbol','protein_segment','obs_missense_segment']
# Count missense variants by structural position and protein
missense_counts_by_position = missense_variants_classA[['HGNC Symbol','GPCRdb_alignment_number']].value_counts().reset_index()
missense_counts_by_position.columns = ['HGNC symbol','GPCRdb_alignment_number','obs_missense_position']
# Count protein lengths
protein_length = residue_labels_classA[['HGNC Symbol']].value_counts().reset_index()
protein_length.columns = ['HGNC symbol','protein_length']
# Count length of segments by protein
segment_length_by_protein = residue_labels_classA[['HGNC Symbol','protein_segment']].value_counts().reset_index()
segment_length_by_protein.columns = ['HGNC symbol','protein_segment','segment_length']
# Presence of structural positions by protein
position_present_by_protein = residue_labels_classA[['HGNC Symbol','GPCRdb_alignment_number']].value_counts().reset_index()
position_present_by_protein.columns = ['HGNC symbol','GPCRdb_alignment_number','position_present']
oe_protein = synonymous_counts_by_gene.merge(missense_counts_by_gene,on='HGNC symbol')
oe_protein['exp_missense_protein'] = 2 * oe_protein['obs_synonymous_protein']
oe_protein['oe_missense_protein'] = oe_protein['obs_missense_protein'] / oe_protein['exp_missense_protein']
oe_segment = (missense_counts_by_segment
.merge(synonymous_counts_by_gene,on='HGNC symbol')
.merge(segment_length_by_protein,on=['HGNC symbol','protein_segment'])
.merge(protein_length,on=['HGNC symbol'])
.merge(missense_counts_by_gene,on='HGNC symbol'))
oe_segment['rate_synonymous_protein'] = oe_segment['obs_synonymous_protein'] / oe_segment['protein_length']
oe_segment['exp_missense_segment'] = (2 * oe_segment['rate_synonymous_protein'] * oe_segment['segment_length'] )
oe_segment['oe_missense_segment'] = oe_segment['obs_missense_segment'] / oe_segment['exp_missense_segment']
oe_segment
oe_position = (position_present_by_protein
.merge(missense_counts_by_position,on=['HGNC symbol','GPCRdb_alignment_number'],how='left')
.merge(synonymous_counts_by_gene,on='HGNC symbol')
.merge(protein_length,on='HGNC symbol'))
oe_position['rate_synonymous_protein'] = oe_position['obs_synonymous_protein'] / oe_position['protein_length']
oe_position['exp_missense_position'] = 2 *oe_position['rate_synonymous_protein']
oe_position['obs_missense_position'] = oe_position['obs_missense_position'].fillna(0)
oe_position['oe_missense_position'] = oe_position['obs_missense_position'] / oe_position['exp_missense_position']
oe_position['segment'] = oe_position['GPCRdb_alignment_number'].str.split(pat='.').apply(lambda x: x[0])
oe_position
sns.displot(oe_position,x='oe_missense_position')
plt.savefig('../plots/position_constraint_distribution.png')
oe_position_average = (oe_position
.groupby('GPCRdb_alignment_number')
.agg({'oe_missense_position':[np.mean,np.std,'count']})
.reset_index())
oe_position_average.columns = [
'GPCRdb_alignment_number',
'oe_missense_position_mean',
'oe_missense_position_std',
'position_count'
]
oe_position_average['oe_missense_position_sem'] = oe_position_average['oe_missense_position_std'] / oe_position_average['position_count']
oe_position_average['segment'] = oe_position_average['GPCRdb_alignment_number'].str.split(pat='.').apply(lambda x: x[0])
sns.histplot(oe_position.GPCRdb_alignment_number.value_counts(),bins=20)
sns.displot(oe_position_average, x='oe_missense_position_sem',kind='hist')
plt.yscale('log')
oe_position_average_filter = oe_position_average[
(oe_position_average['oe_missense_position_sem']<0.1) & \
(oe_position_average['position_count']>50)
].reset_index()
sns.displot(
oe_position_average_filter,
x='oe_missense_position_mean',kind='kde',rug=True,
)
plt.savefig('../plots/average_position_constraint_distribution.png')
oe_position_average_filter['oe_missense_position_mean'].mean()
oe_position_average_filter['oe_missense_position_mean'].std()
# +
sns.set_context('poster')
fig, ax = plt.subplots(figsize=(8,5))
g = sns.lineplot(
data=oe_position_average_filter,
x='GPCRdb_alignment_number',
y='oe_missense_position_mean',
hue='segment',
ax=ax,
palette='deep'
)
plt.annotate('3x50',('3.51x51',2.20))
# plt.fill_between(
# x = oe_position_average_filter['GPCRdb_alignment_number'],
# y1 = oe_position_average_filter['oe_missense_position_mean'] + oe_position_average_filter['oe_missense_position_sem'],
# y2 = oe_position_average_filter['oe_missense_position_mean'] - oe_position_average_filter['oe_missense_position_sem']
# )
plt.hlines(1,'1.27x27','8.63x63',linestyles='dashed',colors='k')
plt.xticks([])
plt.ylabel('Mean obs/exp \n missense ')
plt.xlabel('Position (GPCRdb)')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
g.legend_.remove()
plt.tight_layout()
plt.savefig('../plots/region_constraint/Missense_constraint_by_position_lineplot.png',dpi=500)
# -
highly_unconstrained = oe_position_average_filter.sort_values('oe_missense_position_mean',ascending=False)[:5]
highly_unconstrained
# Obs/exp missense constraint at the conserved DRY-motif arginine (3.50x50)
oe_dRy = oe_position[oe_position['GPCRdb_alignment_number']=='3.50x50'][['HGNC symbol','oe_missense_position']]
oe_dRy.columns = ['HGNC symbol','oe_missense_dRy']
# Fix: oe_protein_dRy was displayed one line before it was assigned, which
# raises NameError on a clean top-to-bottom run; assign first, then inspect.
oe_protein_dRy = oe_protein.merge(oe_dRy,on='HGNC symbol')
oe_protein_dRy
sns.scatterplot(data=oe_protein_dRy,x='oe_missense_protein',y='oe_missense_dRy')
dRy_allele_counts = missense_variants_classA[
(missense_variants_classA['GPCRdb_alignment_number']=='3.50x50')
].groupby('HGNC Symbol').agg({'allele_count':sum}).reset_index()
plof_variants = pd.read_csv('../data/gnomad_v2.1.1_gpcr_variants_plof.csv',index_col=0)
plof_allele_counts = plof_variants.groupby('HGNC symbol').agg({'allele_count':sum}).reset_index()
plof_variants.columns
plof_dry_allele_counts = plof_allele_counts.merge(dRy_allele_counts,left_on='HGNC symbol',right_on='HGNC Symbol')
missense_variants_classA[
(missense_variants_classA['GPCRdb_alignment_number']=='3.50x50') & \
(missense_variants_classA['allele_count'] > 10000)
]
# Count missense variants by structural position and protein
missense_allele_counts_by_position = (missense_variants_classA
.groupby(['HGNC Symbol','GPCRdb_alignment_number'])
.agg({'allele_count':sum}).reset_index())
missense_allele_counts_by_position.columns = ['HGNC symbol','GPCRdb_alignment_number','total_allele_count']
missense_allele_counts_by_position = position_present_by_protein.merge(missense_allele_counts_by_position,how='left',on=['HGNC symbol','GPCRdb_alignment_number'])
missense_allele_counts_by_position['total_allele_count'] = missense_allele_counts_by_position['total_allele_count'].fillna(0)
missense_allele_counts_by_position
# NOTE(review): `y` and `x` are not defined anywhere in this notebook as
# shown — running this cell raises NameError. Presumably y is a vector of
# observed variant counts and x a design matrix built above; TODO confirm
# and define them before fitting.
out=reg_models.ZeroInflatedNegativeBinomialP(y,x,x, inflation='logit')
fit=out.fit(method='bfgs', maxiter = 2000) # May need more than the default 35 iterations, very small number!
fit.summary()
missense_variants_classA[missense_variants_classA['HGNC Symbol'] == 'CXCR4']
gene_missense_variants = missense_variants_classA[missense_variants_classA['Uniprot_name'] == 'CXCR4_HUMAN']
colors = {True:'blue',False:'orange'}
fig, ax1 = plt.subplots(nrows=1,figsize=(8,3))
ax1.bar(
x=gene_missense_variants['sequence_position'],
height=gene_missense_variants['allele_count'],
color=(gene_missense_variants['protein_segment']=='C-term').map(colors)
)
ax1.semilogy()
gene_missense_variants = missense_variants_classA[missense_variants_classA['Uniprot_name'] == '5HT2B_HUMAN']
colors = {True:'blue',False:'orange'}
fig, ax1 = plt.subplots(nrows=1,figsize=(8,3))
ax1.bar(
x=gene_missense_variants['sequence_position'],
height=gene_missense_variants['allele_count'],
color=(gene_missense_variants['protein_segment']=='C-term').map(colors)
)
ax1.semilogy()
missense_variants['PolyPhen'].str.split('(').apply(lambda x: x[0]).value_counts()
fig, ax1 = plt.subplots(nrows=1,figsize=(8,3))
sns.scatterplot(data=missense_variants_classA[missense_variants_classA['Uniprot_name'] == 'CXCR4_HUMAN'],
x='sequence_position',y='allele_count',hue='protein_segment',ax=ax1)
#ax1.semilogy()
colors = {'North America':'red', 'Europe':'green', 'Asia':'blue', 'Australia':'yellow'}
plt.legend(title='Segment',bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
| scripts/old_analysis_scripts/.ipynb_checkpoints/gnomad_constraint_by_region_analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from numpy import loadtxt
file_url = 'https://raw.githubusercontent.com/Develop-Packt/Introduction-to-Deep-Learning-and-Neural-Networks/master/Datasets/german_scaled.csv'
data = loadtxt(file_url, delimiter=',')
data
label = data[:, 0]
features = data[:, 1:]
from sklearn.model_selection import train_test_split
features_train, features_test, label_train, label_test = train_test_split(features, label, test_size=0.2, random_state=7)
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
np.random.seed(1)
tf.random.set_seed(1)
model = tf.keras.Sequential()
layer1 = layers.Dense(16, activation='relu', input_shape=[19])
final_layer = layers.Dense(1, activation='sigmoid')
model.add(layer1)
model.add(final_layer)
optimizer = tf.keras.optimizers.Adam(0.001)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()
model.fit(features_train, label_train, epochs=10)
| Exercise03/Exercise03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={}
# # Clustering HCA Mouse Atlas with scVI and ScanPy
# -
# Disclaimer: some of the code in this notebook was taken from Scanpy's Clustering tutorial (https://scanpy-tutorials.readthedocs.io/en/latest/pbmc3k.html) which is itself based on SEURAT's clustering tutorial in R.
#
# This notebook is designed as a demonstration of scVI's potency on the tasks considered in the Scanpy PBMC 3K Clustering notebook.
# In order to do so, we follow the same workflow adopted by scanpy in their clustering tutorial while performing the analysis using scVI as often as possible.
# Specifically, we use scVI's latent representation and differential expression analysis (which computes a Bayes Factor on imputed values).
# For visualisation, pre-processing and for some canonical analysis, we use the Scanpy package directly.
#
# When useful, we provide high-level wrappers around scVI's analysis tools. These functions are designed to make standard use of scVI as easy as possible.
# For specific use cases, we encourage the reader to take a closer look at those functions and modify them according to his needs.
# cd ../../
# ## Automated testing configuration
# +
# This is for notebook automated testing purpose
def allow_notebook_for_test():
    """Hook checked by the automated notebook-testing pipeline; logs a marker line."""
    marker = "Testing the annotation notebook"
    print(marker)
import sys, os
sys.path.append(os.path.abspath("../.."))
n_epochs_all = None
test_mode = False
def if_not_test_else(x, y):
    """Select between two values based on the module-level `test_mode` flag.

    Returns `x` for a normal run and `y` when running under the automated
    test harness (i.e. when `test_mode` is truthy).
    """
    return y if test_mode else x
save_path = "data/"
# End of configuration
# -
# ## Initialization
# + pycharm={}
# Uncomment to download the data (only works on Unix system)
# # !mkdir data
# # !wget http://cf.10xgenomics.com/samples/cell-exp/1.1.0/pbmc3k/pbmc3k_filtered_gene_bc_matrices.tar.gz -O data/pbmc3k_filtered_gene_bc_matrices.tar.gz
# # !cd data; tar -xzf pbmc3k_filtered_gene_bc_matrices.tar.gz
# + pycharm={"is_executing": false}
# Seed for reproducability
import torch
import numpy as np
torch.manual_seed(0)
np.random.seed(0)
# + pycharm={}
import pandas as pd
import scanpy as sc
sc.settings.verbosity = 0 # verbosity: errors (0), warnings (1), info (2), hints (3)
# + pycharm={}
if not test_mode:
# %matplotlib inline
sc.settings.set_figure_params(dpi=60)
# -
test_mode
# + [markdown] pycharm={}
# # Load the data
# adata = sc.read_10x_mtx(
# os.path.join(
# save_path, "filtered_gene_bc_matrices/hg19/"
# ), # the directory with the `.mtx` file
# var_names="gene_symbols", # use gene symbols for the variable names (variables-axis index)
# )
# adata.var_names_make_unique()
# -
save_path = "/lustre/scratch117/cellgen/team205/tpcg/backup/backup_20190401/sc_sclassification/CellTypist/data_repo/MouseAtlas/MouseAtlas.total.h5ad"
adata = sc.read_h5ad(save_path)
adata.raw.X.shape
adata.var_names_make_unique()
adata.obs_names_make_unique()
from collections import Counter
Counter(adata.obs['Organ'])
Counter(adata.obs['Dataset'])
adata.X.shape
# +
save_path2 = "/lustre/scratch117/cellgen/team205/tpcg/human_data/HumanAtlas.h5ad"
adata_human = sc.read_h5ad(save_path2)
# -
pd.crosstab(adata_human.obs['Dataset'], adata_human.obs['Tissue'])
adata_human.obs["CellType"].value_counts()
adata_human.obs.head()
adata_human.raw.var.head()
sc.tl.leiden(adata, resolution = 1.5)
adata_human.raw.X
# + [markdown] pycharm={}
# ## Preprocessing
# + [markdown] pycharm={}
# In the following section, we reproduce the preprocessing steps adopted in the scanpy notebook.
#
#
# Basic filtering: we remove cells with a low number of genes expressed and genes which are expressed in a low number of cells.
# -
# QC thresholds are disabled (set to 0) in automated-test mode so tiny fixtures pass
min_genes = if_not_test_else(200, 0)
min_cells = if_not_test_else(20, 0)
# + pycharm={}
sc.settings.verbosity = 2
# Basic QC: drop low-complexity cells and rarely-expressed genes
sc.pp.filter_cells(adata, min_genes=min_genes)
sc.pp.filter_genes(adata, min_cells=min_cells)
# Second pass with min_genes=1 removes cells left with zero detected genes
# after the gene filtering above
sc.pp.filter_cells(adata, min_genes=1)
# -
adata.shape
# + [markdown] pycharm={}
# As in the scanpy notebook, we then look for high levels of mitochondrial genes and high number of expressed genes which are indicators of poor quality cells.
# + [markdown] pycharm={}
# #### Non applicable step
# mito_genes = adata.var_names.str.startswith("MT-")
# adata.obs["percent_mito"] = (
# np.sum(adata[:, mito_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
# )
# adata.obs["n_counts"] = adata.X.sum(axis=1).A1
# + pycharm={}
adata = adata[adata.obs["n_genes"] < 2500, :]
# -
# adata = adata[adata.obs["percent_mito"] < 0.05, :]
# + [markdown] pycharm={}
# ## ⚠ scVI uses non normalized data so we keep the original data in a separate `AnnData` object, then the normalization steps are performed
# + [markdown] pycharm={}
# ##### Normalization and more filtering
#
# We only keep highly variable genes
# + pycharm={}
# Keep a copy of the raw counts: scVI must be trained on non-normalized data
adata_original = adata.copy()
# Standard ScanPy normalization for the PCA / t-test pipeline
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
sc.pp.log1p(adata)
# -
adata_original.X
# +
# HVG thresholds are disabled (infinite bounds) in automated-test mode
min_mean = if_not_test_else(0.0125, -np.inf)
max_mean = if_not_test_else(3, np.inf)
min_disp = if_not_test_else(0.5, -np.inf)
max_disp = if_not_test_else(None, np.inf)
sc.pp.highly_variable_genes(
    adata,
    min_mean=min_mean,
    max_mean=max_mean,
    min_disp=min_disp,
    max_disp=max_disp
    # n_top_genes=500
)
# +
# NOTE(review): this assignment is overwritten by `adata.raw = adata_original` below
adata.raw = adata
# Restrict the working object to the highly variable genes
highly_variable_genes = adata.var["highly_variable"]
adata = adata[:, highly_variable_genes]
# NOTE(review): regress_out requires obs columns "n_counts" and "percent_mito",
# but the cell computing them is commented out above ("Non applicable step") —
# confirm these columns already exist in the loaded object or this call will fail.
sc.pp.regress_out(adata, ["n_counts", "percent_mito"])
sc.pp.scale(adata, max_value=10)
# Also filter the original adata genes
adata_original = adata_original[:, highly_variable_genes]
print(highly_variable_genes.sum())
# We also store adata_original into adata.raw
# (which was designed for this purpose but actually has limited functionality)
adata.raw = adata_original
# + [markdown] pycharm={}
# ## Compute the scVI latent space
# + [markdown] pycharm={}
# Below we provide then use a wrapper function designed to compute scVI's latent representation of the non-normalized data. Specifically, we train scVI's VAE, compute and store the latent representation then return the posterior which will later be used for further inference.
# +
from scvi.dataset.anndata import AnnDataset
from scvi.inference import UnsupervisedTrainer
from scvi.models.vae import VAE
from typing import Tuple
# + pycharm={}
def compute_scvi_latent(
    adata: sc.AnnData,
    n_latent: int = 5,
    n_epochs: int = 100,
    lr: float = 1e-3,
    use_batches: bool = False,
    use_cuda: bool = False,
) -> Tuple["scvi.inference.Posterior", np.ndarray]:
    """Train scVI's VAE on non-normalized counts and sample a latent space.

    The return annotation is a string on purpose: the notebook only imports
    scVI submodules (AnnDataset, UnsupervisedTrainer, VAE), so a bare
    ``scvi.inference.Posterior`` annotation would raise ``NameError`` at
    function-definition time.

    :param adata: sc.AnnData object, non-normalized counts
    :param n_latent: dimension of the latent space
    :param n_epochs: number of training epochs
    :param lr: learning rate
    :param use_batches: whether to condition the model on batch labels
    :param use_cuda: whether to train on the GPU
    :return: (scvi.Posterior, latent_space)
    """
    # Convert easily to scvi dataset
    scvi_dataset = AnnDataset(adata)

    # Train a model; n_batch is 0 when use_batches is False (False * n == 0)
    vae = VAE(
        scvi_dataset.nb_genes,
        n_batch=scvi_dataset.n_batches * use_batches,
        n_latent=n_latent,
    )
    trainer = UnsupervisedTrainer(vae, scvi_dataset, train_size=1.0, use_cuda=use_cuda)
    trainer.train(n_epochs=n_epochs, lr=lr)

    # Extract the latent representation for every cell, in dataset order
    posterior = trainer.create_posterior(
        trainer.model, scvi_dataset, indices=np.arange(len(scvi_dataset))
    ).sequential()
    latent, _, _ = posterior.get_latent()
    return posterior, latent
# + pycharm={}
n_epochs = 10 if n_epochs_all is None else n_epochs_all
scvi_posterior, scvi_latent = compute_scvi_latent(
adata_original, n_epochs=n_epochs, n_latent=6, use_cuda = True
)
adata.obsm["X_scvi"] = scvi_latent
# + [markdown] pycharm={}
# ## Principal component analysis to reproduce ScanPy results and compare them against scVI's
#
# Below, we reproduce exactly scanpy's PCA on normalized data.
# + pycharm={}
sc.tl.pca(adata, svd_solver="arpack")
# + pycharm={}
sc.pl.pca(adata, color="CST3")
# + pycharm={}
sc.pl.pca_variance_ratio(adata, log=True)
# + [markdown] pycharm={}
# ## Computing, embedding and clustering the neighborhood graph
# + [markdown] pycharm={}
# The Scanpy API computes a neighborhood graph with `sc.pp.neighbors` which can be called to work on a specific representation `use_rep='your rep'`.
# Once the neighbors graph has been computed, all Scanpy algorithms working on it can be called as usual (that is *louvain*, *paga*, *umap* ...)
# + [markdown] pycharm={}
# ### Using PCA representation (Scanpy tutorial)
# + pycharm={}
sc.pp.neighbors(adata, n_neighbors=10, n_pcs=40)
sc.tl.louvain(adata, key_added="louvain_pca")
sc.tl.umap(adata)
# + pycharm={}
sc.pl.umap(adata, color=["louvain_pca", "CST3", "NKG7", "MS4A1"], ncols=4)
# + [markdown] pycharm={}
# ### Using scVI latent space representation
# + pycharm={}
sc.pp.neighbors(adata, n_neighbors=20, n_pcs=40, use_rep="X_scvi")
sc.tl.umap(adata)
# + pycharm={}
sc.tl.louvain(adata, key_added="louvain_scvi", resolution=0.7)
# + pycharm={}
sc.pl.umap(adata, color=["louvain_scvi", "CST3", "NKG7", "MS4A1"], ncols=4)
# + [markdown] pycharm={}
# ## Finding marker genes
# + [markdown] pycharm={}
# ScanPy tries to determine marker genes using a *t-test* and a *Wilcoxon* test.
#
# For the same task, from scVI's trained VAE model we can sample the gene expression rate for each gene in each cell. For the two populations of interest, we can then randomly sample pairs of cells, one from each population to compare their expression rate for a gene. The degree of **differential expression** is measured by logit($\frac{p}{1-p}$) (Bayes Factor) where $p$ is the probability of a cell from population $A$ having a higher expression than a cell from population $B$. We can form the null distribution of the DE values by sampling pairs randomly from the combined population.
#
# Below, we provide a wrapper around scVI's differential expression process. Specifically, it computes the average of the Bayes factor where population $A$ covers each cluster in `adata.obs[label_name]` and is compared with the aggregate formed by all the other clusters.
# + hideOutput=true pycharm={}
def rank_genes_groups_bayes(
    adata: sc.AnnData,
    scvi_posterior: "scvi.inference.Posterior",
    n_samples: int = None,
    M_permutation: int = None,
    n_genes: int = 25,
    label_name: str = "louvain_scvi",
) -> pd.DataFrame:
    """
    Rank genes for characterizing groups.

    Computes a Bayes factor for each cluster against all the others to test for
    differential expression, then stores the result in ScanPy's
    ``rank_genes_groups`` format under ``adata.uns["rank_genes_groups_scvi"]``.
    See Nature article (https://rdcu.be/bdHYQ).

    The ``scvi_posterior`` annotation is a string because the notebook never
    imports the top-level ``scvi`` package (only its submodules); a bare
    annotation would raise ``NameError`` when the function is defined.

    :param adata: sc.AnnData object, non-normalized
    :param scvi_posterior: trained scVI posterior used for sampling expression rates
    :param n_samples: posterior samples per cell (scVI default when None)
    :param M_permutation: cell pairs sampled per comparison (scVI default when None)
    :param n_genes: number of top genes kept per cluster in the returned table
    :param label_name: the groups tested are taken from adata.obs[label_name], which
        can be computed with clustering such as Louvain
        (e.g. sc.tl.louvain(adata, key_added=label_name))
    :return: summary of Bayes factors per gene, per cluster
    """
    # One-vs-rest differential expression: one result table per cluster
    per_cluster_de, cluster_id = scvi_posterior.one_vs_all_degenes(
        cell_labels=np.asarray(adata.obs[label_name].values).astype(int).ravel(),
        min_cells=1,
        n_samples=n_samples,
        M_permutation=M_permutation,
    )

    # Convert to ScanPy's layout so sc.pl.rank_genes_groups can read the result
    markers = []
    scores = []
    names = []
    for x in per_cluster_de:
        subset_de = x[:n_genes]
        markers.append(subset_de)
        scores.append(tuple(subset_de["bayes1"].values))
        names.append(tuple(subset_de.index.values))

    markers = pd.concat(markers)
    # ScanPy stores scores/names as record arrays with one field per cluster
    dtypes_scores = [(str(i), "<f4") for i in range(len(scores))]
    dtypes_names = [(str(i), "<U50") for i in range(len(names))]
    scores = np.array([tuple(row) for row in np.array(scores).T], dtype=dtypes_scores)
    scores = scores.view(np.recarray)
    names = np.array([tuple(row) for row in np.array(names).T], dtype=dtypes_names)
    names = names.view(np.recarray)

    adata.uns["rank_genes_groups_scvi"] = {
        "params": {
            "groupby": "",
            "reference": "rest",
            "method": "",
            "use_raw": True,
            "corr_method": "",
        },
        "scores": scores,
        "names": names,
    }
    return markers
# + [markdown] pycharm={}
# ### Use a t-test on scvi_clusters like in the ScanPy tutorial
# + pycharm={}
n_genes = 20
sc.tl.rank_genes_groups(
adata,
"louvain_scvi",
method="t-test",
use_raw=False,
key_added="rank_genes_groups_ttest",
n_genes=n_genes,
)
sc.tl.rank_genes_groups(
adata,
"louvain_scvi",
method="wilcoxon",
use_raw=False,
key_added="rank_genes_groups_wilcox",
n_genes=n_genes,
)
sc.pl.rank_genes_groups(
adata, key="rank_genes_groups_ttest", sharey=False, n_genes=n_genes
)
sc.pl.rank_genes_groups(
adata, key="rank_genes_groups_wilcox", sharey=False, n_genes=n_genes
)
# + [markdown] pycharm={}
# ### Use differential expression from the scVI posterior
# + pycharm={}
rank_genes_groups_bayes(
adata, scvi_posterior, label_name="louvain_scvi", n_genes=n_genes
)
sc.pl.rank_genes_groups(
adata, key="rank_genes_groups_scvi", sharey=False, n_genes=n_genes
)
# + [markdown] pycharm={}
# ### Measure similarity between *scVI differential expression*, *t-test* and *wilcoxon-test*
# + pycharm={}
# We compute the rank of every gene to perform analysis after
all_genes = len(adata.var_names)
sc.tl.rank_genes_groups(adata, 'louvain_scvi', method='t-test', use_raw=False, key_added='rank_genes_groups_ttest', n_genes=all_genes)
sc.tl.rank_genes_groups(adata, 'louvain_scvi', method='wilcoxon', use_raw=False, key_added='rank_genes_groups_wilcox', n_genes=all_genes)
differential_expression = rank_genes_groups_bayes(adata, scvi_posterior, label_name='louvain_scvi', n_genes=all_genes)
# + pycharm={}
def ratio(A, B):
    """Percentage of distinct elements of ``A`` that also appear in ``B``.

    Both arguments are coerced to sets, so duplicates are ignored.
    Returns 0.0 for an empty ``A`` instead of raising ZeroDivisionError.
    """
    A, B = set(A), set(B)
    if not A:
        return 0.0
    return len(A & B) / len(A) * 100
# + pycharm={}
cluster_distrib = adata.obs.groupby("louvain_scvi").count()["n_genes"]
# -
# For each cluster, we compute the percentage of genes which are in the `n_genes` most expressed genes of both Scanpy's and scVI's differential expression tests.
# + pycharm={}
n_genes = 25
sc.pl.umap(adata, color=["louvain_scvi"], ncols=1)
for c in cluster_distrib.index:
print(
"Cluster %s (%d cells): t-test / wilcox %6.2f %% & t-test / scvi %6.2f %%"
% (
c,
cluster_distrib[c],
ratio(
adata.uns["rank_genes_groups_ttest"]["names"][c][:n_genes],
adata.uns["rank_genes_groups_wilcox"]["names"][c][:n_genes],
),
ratio(
adata.uns["rank_genes_groups_ttest"]["names"][c][:n_genes],
adata.uns["rank_genes_groups_scvi"]["names"][c][:n_genes],
),
)
)
# + [markdown] pycharm={}
# ## Plot px_scale for most expressed genes and less expressed genes by cluster
# + [markdown] pycharm={}
# Sample the scale for all the data (all genes, cells), average on multiple samples
#
# ``` python
# scale = scvi_posterior.get_sample_scale()
# for _ in range(9):
# scale += scvi_posterior.get_sample_scale()
# scale /= 10
#
# for gene, gene_scale in zip(adata.var.index, np.squeeze(scale).T):
# adata.obs["scale_" + gene] = gene_scale
#
# ```
#
# This is not tractable for large dataset so we provide another function below
# -
# ### The code below doesn't work
# + active=""
# from typing import List
#
#
# def get_scales_per_gene(
# gene_names: List[str],
# adata: sc.AnnData,
# scvi_posterior: scvi.inference.Posterior,
# n_samples: int = 10,
# batchsize: int = 32,
# ):
# """Get imputed values for each gene in gene_names - for each cell in adata. Performed inplace.
# Scales are added in adata.obs under the alias 'scale_' + gene_name.
#
# This function handles very large dataset thanks to batch size control
#
# Args:
# gene_names: list of gene names
# adata: scRNAseq dataset
# posterior: scVI Posterior object
# n_samples: number of samples to average on
# batchsize: for computation: number of cells to query in each iteration
# """
# all_gene_names = list(scvi_posterior.gene_dataset.gene_names)
# gene_idx = [all_gene_names.index(g) for g in gene_names]
# ashape = scvi_posterior.gene_dataset.X.shape
# px_scales = np.zeros((len(gene_names), n_samples, ashape[0]))
# for idx in range(int(ashape[0] / batchsize)):
# current_slice = slice(idx * batchsize, (idx + 1) * batchsize)
# px_scales_batch = scvi_posterior.model.get_sample_scale(
# torch.from_numpy(scvi_posterior.gene_dataset.X[current_slice]),
# n_samples=n_samples,
# )
# px_scales[:, :, current_slice] = np.transpose(
# px_scales_batch.detach().numpy()[:, :, gene_idx], (2, 0, 1)
# )
# for name, scales in zip(gene_names, px_scales):
# adata.obs["scale_" + name] = scales.mean(axis=0)
# +
from typing import List
def get_scales_per_gene(
    gene_names: List[str],
    adata: sc.AnnData,
    scvi_posterior: "scvi.inference.Posterior",
    n_samples: int = 10,
    batchsize: int = 32,
):
    """Get imputed expression scales for each gene in gene_names, for each cell
    in adata. Performed inplace: scales are added to adata.obs under the alias
    'scale_' + gene_name.

    This function handles very large datasets thanks to batch-size control.

    Args:
        gene_names: list of gene names
        adata: scRNAseq dataset
        scvi_posterior: trained scVI Posterior object
        n_samples: number of posterior samples to average over
        batchsize: number of cells to query in each iteration
    """
    all_gene_names = list(scvi_posterior.gene_dataset.gene_names)
    gene_idx = [all_gene_names.index(g) for g in gene_names]
    n_cells = scvi_posterior.gene_dataset.X.shape[0]
    px_scales = np.zeros((len(gene_names), n_samples, n_cells))
    # Ceil division so the trailing partial batch is processed as well
    # (int(n_cells / batchsize) silently dropped up to batchsize-1 cells,
    # leaving zeros in px_scales for those cells).
    n_batches = (n_cells + batchsize - 1) // batchsize
    for idx in range(n_batches):
        # numpy slicing clips the upper bound, so the last slice may be shorter
        current_slice = slice(idx * batchsize, (idx + 1) * batchsize)
        # NOTE(review): hard-coded CUDA device — fails on CPU-only hosts; confirm
        x = torch.tensor(scvi_posterior.gene_dataset.X[current_slice], device="cuda")
        px_scales_batch = scvi_posterior.model.get_sample_scale(
            x,
            n_samples=n_samples
        )
        # Reorder (cells, samples, genes) -> (genes, samples, cells)
        px_scales[:, :, current_slice] = np.transpose(
            px_scales_batch.detach().to("cpu").numpy()[:, :, gene_idx], (2, 0, 1)
        )
    # Average over posterior samples and store per-gene columns in adata.obs
    # (the original wrote this loop twice; once is enough).
    for name, scales in zip(gene_names, px_scales):
        adata.obs["scale_" + name] = scales.mean(axis=0)
# -
# #### Most differentially expressed genes
# + pycharm={}
cluster_id = 2
n_best_genes = 10
gene_names = differential_expression[
differential_expression["clusters"] == cluster_id
].index.tolist()[:n_best_genes]
gene_names
# -
get_scales_per_gene(gene_names, adata, scvi_posterior)
# + pycharm={}
print("Top genes for cluster %d" % cluster_id)
sc.pl.umap(adata, color=["louvain_scvi"] + ["scale_" + g for g in gene_names], ncols=3)
# -
# #### Least differentially expressed genes
# + pycharm={}
cluster_id = 2
n_best_genes = 10
gene_names = differential_expression[
differential_expression["clusters"] == cluster_id
].index.tolist()[-n_best_genes:]
gene_names
# -
get_scales_per_gene(gene_names, adata, scvi_posterior)
# + pycharm={}
print("Top down regulated genes for cluster %d" % cluster_id)
sc.pl.umap(adata, color=["louvain_scvi"] + ["scale_" + g for g in gene_names], ncols=3)
# + [markdown] pycharm={}
# ### Analyze ranking difference between **t-test** and **scVI**
# + pycharm={}
cluster_id = if_not_test_else("2", "0")
# + pycharm={}
from collections import defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
def plot_ranking(method_1, method_2):
    """Scatter each gene's rank under ``method_1`` (x) vs ``method_2`` (y).

    Reads the module-level ``adata``, ``cluster_id`` and ``all_genes``.
    ``method_*`` are suffixes of keys in ``adata.uns`` (e.g. "scvi", "ttest",
    "wilcox"), as stored by the ranking calls above.
    """
    # gene -> [rank under method_1, rank under method_2]
    mapping = defaultdict(list)
    for rank, gene in enumerate(
        adata.uns["rank_genes_groups_" + method_1]["names"][cluster_id]
    ):
        mapping[gene].append(rank)
    for rank, gene in enumerate(
        adata.uns["rank_genes_groups_" + method_2]["names"][cluster_id]
    ):
        mapping[gene].append(rank)
    x, y = np.array(list(mapping.values())).T
    n_genes = all_genes
    plt.figure(figsize=(8, 8))
    # Keyword arguments: positional x/y were deprecated in seaborn 0.12
    # and removed later, so sns.scatterplot(x, y) breaks on modern seaborn.
    sns.scatterplot(x=x, y=y, s=10)
    plt.axhline(100, c="red")
    plt.axvline(100, c="red")
    plt.xlim(0, n_genes)
    plt.ylim(0, n_genes)
    plt.xlabel(method_1 + " ranking")
    plt.ylabel(method_2 + " ranking")
# + pycharm={}
plot_ranking("scvi", "ttest")
plot_ranking("wilcox", "ttest")
# + [markdown] pycharm={}
# ### Investigating discrepancies
# + [markdown] pycharm={}
# Cluster 4 top genes of t-test and scvi are totally different, but when we look closer at the data one can notice:
# - The Bayes factor (or t-test score) are all very low for the cluster (no genes are significant)
# - Plots confirm the latter point: the top genes are not specific to the cluster and are either noise or overlapping with other clusters
#
# Specifically, we plot first the expression levels of genes selected by scVI, then of genes selected by the t-test. In both cases, genes seem irrelevant.
# + pycharm={}
n_genes = 10
cluster_id = if_not_test_else(2, 0)
genes = differential_expression[
differential_expression["clusters"] == cluster_id
].index.tolist()
sc.pl.umap(
adata,
color=["louvain_scvi"]
+ adata.uns["rank_genes_groups_scvi"]["names"][str(cluster_id)].tolist()[:n_genes],
ncols=3,
)
sc.pl.umap(
adata,
color=["louvain_scvi"]
+ adata.uns["rank_genes_groups_ttest"]["names"][str(cluster_id)].tolist()[:n_genes],
ncols=3,
)
# + [markdown] pycharm={}
# scVI tends to select genes that are not expressed outside the cluster, whereas the t-test tends to select genes that are highly expressed in the cluster even if they are also expressed everywhere.
# -
# ### Store differential expression scores
def store_de_scores(
    adata: sc.AnnData, differential_expression: pd.DataFrame, save_path: str = None
):
    """Creates, returns and optionally writes a DataFrame gathering all the
    differential-expression scores used in this notebook (scVI Bayes factor,
    t-test score, Wilcoxon score) into one table indexed by gene.

    Args:
        adata: scRNAseq dataset with "rank_genes_groups_ttest" and
            "rank_genes_groups_wilcox" stored in adata.uns
        differential_expression: pandas DataFrame containing the Bayes factor
            for all genes and clusters (columns include "bayes1", "clusters")
        save_path: optional CSV file path for writing the resulting table
    Returns:
        pandas.DataFrame containing the scores of each differential expression test.
    Raises:
        ValueError: if the scVI table does not cover every gene.
    """
    # get shapes for array initialisation
    n_genes_de = differential_expression[
        differential_expression["clusters"] == 0
    ].shape[0]
    all_genes = adata.shape[1]
    # check that all genes have been used: joining partial tables would
    # silently produce NaNs, so fail fast instead
    if n_genes_de != all_genes:
        raise ValueError(
            "scvi differential expression has to have been run with n_genes=all_genes"
        )
    # get tests results from AnnData unstructured annotations
    # (recarrays with one field per cluster, as stored by sc.tl.rank_genes_groups)
    rec_scores = []
    rec_names = []
    test_types = ["ttest", "wilcox"]
    for test_type in test_types:
        res = adata.uns["rank_genes_groups_" + test_type]
        rec_scores.append(res["scores"])
        rec_names.append(res["names"])
    # restrict scvi table to bayes factor
    res = differential_expression[["bayes1", "clusters"]]
    # for each cluster: join the per-test columns on gene name, then append all
    dfs_cluster = []
    groups = res.groupby("clusters")
    for cluster, df in groups:
        for rec_score, rec_name, test_type in zip(rec_scores, rec_names, test_types):
            # recarray fields are keyed by the cluster id as a string
            temp = pd.DataFrame(
                rec_score[str(cluster)],
                index=rec_name[str(cluster)],
                columns=[test_type],
            )
            df = df.join(temp)
        dfs_cluster.append(df)
    res = pd.concat(dfs_cluster)
    if save_path:
        res.to_csv(save_path)
    return res
de_table = store_de_scores(adata, differential_expression, save_path=None)
de_table.head()
# + [markdown] pycharm={}
# # Running other ScanPy algorithms is easy, binding the index keys
# -
# ### PAGA
# + pycharm={}
sc.tl.paga(adata, groups="louvain_scvi")
sc.pl.paga(adata)
# + [markdown] pycharm={}
# ### HeatMap
# + pycharm={}
marker_genes = gene_names[1:10]
# + pycharm={}
# sc.pl.heatmap(adata, marker_genes, groupby="louvain_scvi", dendrogram=True)
# -
| tests/notebooks/scanpy_MouseAtlas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 驾驶员状态检测
#
# 根据汽车安全部门的调查显示,五分之一的交通事故都是由于驾驶员分心(distracted)造成的。每年,distracted driving会造成约42500人受伤,3000人死亡。这个数字非常惊人。
#
# [State Farm](https://www.statefarm.com/)希望通过车载的dashboard cameras来检测用户是否处于distracted driving的状态,从而发出警告。
#
# </br>
# <font color=red size=3 face=“黑体”>这是我实际的开发过程,我不会上来就写出最优的解决方案,而是把我所遇到的“坑”都给写出来,这些“坑”真的很经典</font>
#
# 由于训练量比较大,所以本项目的所有训练均是通过租用Amazon EC2进行训练,价格适中,下文有介绍。
# ## 1.1 数据集获取
#
# [数据集](https://www.kaggle.com/c/state-farm-distracted-driver-detection/data)来自于kaggle,总共有三个文件需要下载,如下所示。其中imgs.zip是通过摄像头来抓取的驾驶员的状态的标记数据集。该数据集的大小有4G。
#
# * imgs.zip - 所有训练/测试图片打包的zip文件<font color=red size=3 face=“黑体”>(你需要自行下载)</font>
# * sample_submission.csv - 提交kaggle时候的格式
# * driver_imgs_list.csv - 文件的信息,文件名对应的图像中的司机ID以及图像中司机的状态ID。
#
# 现在解压imgs.zip来观察
#
# +
# Standard library
import os
import zipfile
from os.path import isfile, isdir
from urllib.request import urlretrieve

# Third-party
import h5py
import numpy as np
from tqdm import tqdm

# Keras (resnet50 provides ResNet50 and preprocess_input, used below)
from keras.applications import resnet50
from keras.layers import Input
from keras.layers.core import Lambda
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
# File and directory names used throughout the notebook
zip_name = 'imgs.zip'
train_dir_name = 'train'
test_dir_name = 'test'
# Symlink directories used for the train/validation split
link_path = 'train_link'
link_train_path = 'train_link/train'
link_valid_path = 'train_link/validation'
test_link = 'test_link'
test_link_path = 'test_link/data'
# File names for saved fine-tuned models
resnet_50_model_save_name = 'model_resnet50.h5'
inceptionv3_model_save_name = 'model_inceptionv3.h5'
xception_model_save_name = 'model_xception.h5'
## Check that the train/test image directories exist; extract imgs.zip if not
if not isdir(train_dir_name) or not isdir(test_dir_name):
    if not isfile(zip_name):
        print ("Please download imgs.zip from kaggle!")
        # abort the notebook: the dataset must be downloaded manually
        assert(False)
    else:
        with zipfile.ZipFile(zip_name) as azip:
            print ("Now to extract %s " % (zip_name))
            azip.extractall()
print ("Data is ready!")
# -
# 我们将检测驾驶员10种驾驶员的驾驶状态,如下
#
# * c0: 安全驾驶
# * c1: 右手打字
# * c2: 右手打电话
# * c3: 左手打字
# * c4: 左手打电话
# * c5: 调收音机
# * c6: 喝饮料
# * c7: 拿后面的东西
# * c8: 整理头发和化妆
# * c9: 和其他乘客说话
#
# 其中,关于每一种驾驶状态的图片都分开存放,也就是有c0-c9文件夹存放各自状态的图片。此时文件夹的目录结构大概如下
#
# |----imgs.zip
# |----train
# |-----c0
# |-----c1
# |-----c2
# |-----c3
# |-----c4
# |-----c5
# |-----c6
# |-----c7
# |-----c8
# |-----c9
# |----test
#
# ### 1.2 数据集基本信息
#
# 接下来就是了解数据集的基本信息:
#
# 1. 统计训练测试样本数量
# 2. 每一类训练数据的数量分布
#
# +
import os
## get train and file nums
train_class_dir_names = os.listdir(train_dir_name)
test_size = len(os.listdir(test_dir_name))
train_size = 0
train_class_size = {}
for dname in train_class_dir_names:
file_names = os.listdir(train_dir_name + '/' + dname)
train_class_size[dname] = len(file_names)
train_size = train_class_size[dname] + train_size
print ("Test file numbers: ", test_size)
print ("Train file numbers: ", train_size)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
fig = plt.figure()
plt.bar(train_class_size.keys(), train_class_size.values(), 0.4, color="green")
plt.xlabel("Classes")
plt.ylabel("File nums")
plt.title("Classes distribution")
plt.show()
# -
# 从上面的结果可以看出,我们总共有79726张测试图片,22424张测试图片。在训练图片中,每一个状态(类)包含大约2000张图片,分布还是比较均匀的。上面的可视化是根据状态来显示的,现在我们从另一个角度,看看每个司机大约包含了多少张图片。
#
# 接下来就要解压文件driver_imgs_list.csv.zip
# +
# get driver_imgs_list_file
driver_imgs_list_zip = 'driver_imgs_list.csv.zip'
driver_imgs_list_file = 'driver_imgs_list.csv'
if not isfile(driver_imgs_list_file):
if not isfile(driver_imgs_list_zip):
print ("Please download river_imgs_list.csv.zip from kaggle!")
assert(False)
else:
with zipfile.ZipFile(driver_imgs_list_zip) as azip:
print ("Now to extract %s " % (driver_imgs_list_zip))
azip.extractall()
# +
import pandas as pd
df = pd.read_csv(driver_imgs_list_file)
df.describe()
# -
ts = df['subject'].value_counts()
print (ts)
fig = plt.figure(figsize=(30,10))
plt.bar(ts.index.tolist(), ts.iloc[:].tolist(), 0.4, color="green")
plt.xlabel("Driver ID")
plt.ylabel("File nums")
plt.title("Driver ID distribution")
plt.show()
# 可以看出,我们的训练集的数据来自于26个不同的司机的状态,每个司机都是都拥有不同状态的图片,其中346号司机拥有的图片数量最少,为346张。21号司机拥有的图片数量最多,为1237张。可以看出,如果按照司机ID来观察数据,数据分布并不均匀。但是这并不影响我们的训练,因为我们主要关心的是每一个状态所拥有的图片是否均匀,而不是每一个司机所拥有的图片是否均匀。接下来我们来简单可视化一下每一类的样本
# +
import cv2
import numpy as np
state_des = {'c0':'safe driving','c1':'texting - right hand','c2':'talking on the phone - right','c3':'texting - left hand', \
'c4':'talking on the phone - left hand','c5':'operating the radio','c6':'drinking','c7':'reaching behind','c8':'hair and makeup', \
'c9':'talking to passenger'};
## class that you want to display
c = 0
## random choose the filenames of the class
dis_dir = train_dir_name + '/c' + str(c)
dis_filenames = os.listdir(dis_dir)
dis_list = np.random.randint(len(dis_filenames), size=(6))
dis_list = [dis_filenames[index] for index in dis_list]
plt.figure(1, figsize=(13, 13))
for i,filename in enumerate(dis_list):
image = cv2.imread(dis_dir + '/' + str(filename))
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
ax1=plt.subplot(3,3,i+1)
plt.imshow(image)
plt.axis("off")
plt.title(state_des['c'+str(c)] + "\n" + str(image.shape))
plt.show()
# -
# ## 2.单模型的迁移学习
#
# 在做过了几个kaggle项目之后,对计算机视觉类的项目有了一个大概的直觉,如果要采用当前流行的CNN模型来完成项目,那么总是可以尝试迁移学习的,因为对于CNN来说,一些图片的“底层信息”(直线,边缘,眼睛,耳朵,猫脸,狗脸)是可以共享的,所以我们就可以利用各大模型在大型计算机视觉数据集中学习到的通用的“知识”,将其迁移到我们的项目的学习中,从而帮助我们更好地提取通用的特征。并且
#
# * 如果我们只拥有少量的数据集,那么就可以只训练top layer(全连接层,输出层)的权重,不学习top layer以外的层的权重。如果数据集很少的情况下执意要对top layer以外的层进行训练,极有可能破坏预训练模型在庞大数据集中学习的知识,反而造成不好的效果。
# * 如果我们拥有中等数量的数据集,那么我们可以开放少量的卷积层进行权重的fine-tune,而且为了避免大的权重的更新对以前学习到的知识造成破坏,建议采用小的learning rate。
# * 如果我们拥有庞大的数据集,并且项目拥有足够的时间来进行训练,那么此时依然可以采用迁移学习,但是这时候我们会开放比第二种情况更多的层,甚至所有的层
#
# 之前我们已经对数据集进行了一些基本的了解,发现训练集的数量为2万多,所以我将我们现在的情况定位为第一种(毕竟2万多相对于百万级别的数据还是太少)。所以现在我打算在单模型上面进行迁移学习,<font color=red size=3 face=“黑体”> 并且只更新top layer的权重 </font>(为什么这里标记为红色,后面你就知道了)
# ### 2.1 resnet-50的单模型迁移学习(Train top layer only)
#
# 我第一个尝试的模型是在ILSVRC 2015比赛中获得了冠军resnet-50,该模型利用residual block,解决了当网络深度增加是所产生的Degradation问题,即准确率会先上升然后达到饱和,再持续增加深度则会导致准确率下降(注意哟,这是训练集的准确率哟,不是验证集哟,所以不是过拟合问题)。我相信50层神经网络能够轻松应付本项目,并且不用担心会出现Degradation问题,何乐而不为呢?废话不多说,加载预训练权重并开始调参过程吧!
#
# 这里说明一下:我的电脑的显卡是Gtx 960,这个显卡用于训练小的神经网络还可以,但是用于训练resnet这种规模的网络就有点儿捉襟见肘了。所以我租用的亚马逊AWS云主机,p3.2 xlarge,其竞价实例的价格大约是1美元每小时,如果没有接触过AWS的可以通过这篇文章来学习[如何利用AWS来进行深度学习](https://zhuanlan.zhihu.com/p/33173963?utm_source=wechat_session&utm_medium=social)(不知不觉又给亚马逊打了一下广告),还有,如果你想用AWS的话,你需要一把梯子(翻墙)。如果不想翻墙的话,阿里巴巴也有类似的云主机,只不过就是价格贵了点儿,自己衡量吧!
# 上面说了,我只会训练top layer的权重,所以为了方便之后的调参过程,这里采用bottleneck feature的方式来减少重复的前向传播过程,也就是提取所有样本在top layer之前的输出结果(对于resnet,该输出结果的维度是1*1*2048,以后统一叫做特征向量),将其保存起来,从而将其作为top layer的输入来达到加速调参的过程。也算是一种用空间换取时间的策略吧!
# +
## load pretrained resnet
resNet_input_shape = (224,224,3)
res_x = Input(shape=resNet_input_shape)
res_x = Lambda(resnet50.preprocess_input)(res_x)
res_model = resnet50.ResNet50(include_top=False, weights='imagenet', input_tensor=res_x, input_shape=resNet_input_shape)
res_model.summary()
# -
# 我们使用的在ImageNet上训练的resnet-50模型,并且,我们没有加载top layer,因为接下来是要提取bottleneck feature,无需top layer。从模型的打印信息可以知道,该模型的输出维度为1*1*2048,但是我要保存的是1维的特征向量,所以需要将其进行类似flatten的操作
out = GlobalAveragePooling2D()(res_model.output)
res_vec_model = Model(inputs=res_model.input, outputs=out)
# 在得到了提取bottleneck feature的模型之后,就可以开始着手提取特征了,考虑到之后也可能使用类似的操作来提取其他模型的bottleneck feature,所以我就写了一个函数(所以上面两个cell只是用来说明如何构建提取feature的模型的,真正使用的是下面这个函数)。该函数先构造一个提取bottleneck feature的模型,做的事情和上面两个cell一样。后面使用了image data generator的方式来来进行bottleneck feature提取,为什么采用这种方式呢?因为这种方式并不会一次性地把所有图片都加载到内存中,而是采用类似队列一样的边使用边加载的方式进行数据的读取,这样可以大大地减少内存的使用。我们可以来算一算,如果将所有的数据一次性加载到内存中会消耗多少的内存?假设一张图片的大小为224*224*3(resnet-50需要的大小),那么我们有22424张训练集(还没有包含庞大的测试集),加入我们以uint8的data type来加载数据,那么训练集所占用的内存大小就是224*224*3*22424 = 3375439872,达到了惊人的3GB。所以测试集将占用超过9GB,总共就是12GB。这还没开始训练呢,就占用了如此多的内存,即使我使用的AWS主机的Tesla v100显卡有16GB内存,但是考虑到之后留给训练的余量,还是不要以这种方式来加载数据了。
def model_vector_catch(MODEL, image_size, vect_file_name, vec_dir, train_dir, test_dir, preprocessing=None):
    """
    Extract and cache bottleneck features for the whole training set.

    Builds MODEL without its top layer (ImageNet weights), appends global
    average pooling, runs every training image through it and stores the
    resulting vectors plus their class labels in an HDF5 file. Skipped when
    the cache file already exists.

    MODEL: Keras application constructor (e.g. resnet50.ResNet50)
    image_size: MODEL input size (h, w) — called with shape[:2] below
    vect_file_name: HDF5 file name used to cache the vectors
    vec_dir: directory where the cache file is written
    train_dir / test_dir: image directories (test extraction is currently
        commented out, so test_dir is unused)
    preprocessing: optional model-specific preprocessing function, applied
        through a Lambda layer when given
    """
    if isfile(vec_dir + '/' + vect_file_name):
        print ("%s already OK!" % (vect_file_name))
        return
    input_tensor = Input(shape=(image_size[0], image_size[1], 3))
    if preprocessing:
        ## check if need preprocessing
        # NOTE(review): input_tensor is rebound to a Lambda output and then
        # passed as MODEL's input_tensor; Keras documents input_tensor as an
        # Input-layer output — confirm this works on the Keras version in use.
        input_tensor = Lambda(preprocessing)(input_tensor)
    model_no_top = MODEL(include_top=False, weights='imagenet', input_tensor=input_tensor, input_shape=(image_size[0], image_size[1], 3))
    ## flatten the output shape and generate model
    out = GlobalAveragePooling2D()(model_no_top.output)
    new_model = Model(inputs=model_no_top.input, outputs=out)
    ## get image generators (test_gen is unused while test extraction is commented out)
    gen = ImageDataGenerator()
    test_gen = ImageDataGenerator()
    # NOTE(review): the note below mentions cats/dogs — leftover from a previous project
    """
    classes = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', ] -- cat is 0, dog is 1, so we need write this
    class_mode = None -- i will not use like 'fit_fitgenerator', so i do not need labels
    shuffle = False -- it is unneccssary
    batch_size = 64
    """
    class_list = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', ]
    train_generator = gen.flow_from_directory(train_dir, image_size, color_mode='rgb', \
                                              classes=class_list, class_mode=None, shuffle=False, batch_size=64)
    #test_generator = test_gen.flow_from_directory(test_dir, image_size, color_mode='rgb', \
                                              #class_mode=None, shuffle=False, batch_size=64)
    """
    steps = None, by default, the steps = len(generator)
    """
    train_vector = new_model.predict_generator(train_generator)
    #test_vector = new_model.predict_generator(test_generator)
    # Cache feature vectors and labels; generator order matches .classes
    # because shuffle=False above
    with h5py.File(vec_dir + "/" + (vect_file_name), 'w') as f:
        f.create_dataset('x_train', data=train_vector)
        f.create_dataset("y_train", data=train_generator.classes)
        #f.create_dataset("test", data=test_vector)
    print ("Model %s vector cached complete!" % (vect_file_name))
# +
vec_dir = 'vect'
if not isdir(vec_dir):
os.mkdir(vec_dir)
# +
res_vect_file_name = 'resnet50_vect.h5'
model_vector_catch(resnet50.ResNet50, resNet_input_shape[:2], res_vect_file_name, vec_dir, train_dir_name, test_dir_name, resnet50.preprocess_input)
# -
# 现在我们已经提取好了训练集(暂时不提取测试集的bottleneck feature,等我先验证了这种方法是否好用之后,再做测试集的bottleneck feature的提取,因为训练模型的时候用不到测试集,所以不着急提取)的bottleneck feature,下面就是搭建模型的top layer来进行训练了,那么top layer要怎么搭建呢?在原始的resnet-50中,只有一个含有1000个隐藏单元的输出层,所以这里我们参考resnet-50的设计,改成一个含有10个隐藏单元的输出层(因为我们的类别是10),激活函数是softmax。
# +
# Number of driver-state classes (c0-c9). The original line here was `v = 10`,
# an apparent typo that left `driver_classes` — used in the Dense layer below
# and in the one-hot encoding of the labels later — undefined (NameError).
driver_classes = 10
# Top layer operating on the 2048-dim ResNet-50 bottleneck feature vectors
input_tensor = Input(shape=(2048,))
x = Dropout(0.5)(input_tensor)  # regularization on the bottleneck vector
x = Dense(driver_classes, activation='softmax', name='res_dense_1')(x)
resnet50_model = Model(inputs=input_tensor, outputs=x)
# -
# 在训练模型之前,需要先把保存在文件中的bottleneck feature提取到内存中来,便于之后的fit。进一步地,因为我们之前保存的label是0-9的数字,现在我们要使用one-hot的方式来表示。
# +
import numpy as np
def convert_to_one_hot(Y, C):
    """Convert an integer label array ``Y`` into an (n, C) one-hot matrix.

    ``Y`` is flattened first, so any input shape yields one row per label.
    """
    flat_labels = Y.reshape(-1)
    identity = np.eye(C)
    return identity[flat_labels]
# +
from sklearn.utils import shuffle
# Load the cached bottleneck features back into memory, one-hot encode the
# 0-9 integer labels, then shuffle.  The features were written class-by-class,
# so without shuffling every minibatch would contain a single class and the
# model could not converge.
x_train = []
y_train = []
with h5py.File(vec_dir + '/' + res_vect_file_name, 'r') as f:
    x_train = np.array(f['x_train'])
    y_train = np.array(f['y_train'])
# one-hot encode the labels
y_train = convert_to_one_hot(y_train, driver_classes)
x_train, y_train = shuffle(x_train, y_train, random_state=0)
# -
# <font color=red size=3 face=“黑体”>注意上面的代码!!!这里有一个坑,是我之前项目中犯过的,导致我的模型无论如何都不收敛,而且还花了很长时间去调试。就是shuffle操作,一定要shuffle啊,如果你的数据是每一类单独存放在一起的,如果不shuffle的话,那么每一个batch的数据都将是同一类的数据,导致模型根本学不到东西,因为这个batch我学习到了这一类的特点,另一个batch我就要学习另一类完全不同的特点,从而抛弃之前学习的内容,导致这样的恶性循环,无法收敛。因为这个错误是之前犯的,所以现在也就没有必要再专门调试说明了。</font>
# 接下来的过程就是设置编译参数,然后调试了。这里optimizer采用Adam,参数为默认参数。batch_size采用64,epoch先运行10代看看情况,验证集划分为0.2.
# +
# Train the bottleneck-feature classifier.
# NOTE(review): the markdown above says batch_size=64 and 10 epochs, but the
# code uses batch_size=32 and 30 epochs -- confirm which is intended.
resnet50_model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist = resnet50_model.fit(x_train, y_train, batch_size=32, epochs=30, validation_split=0.2)
# -
# 我尝试过了很多优化算法,Adam,SGD等,包括设置了低的学习速率,以及学习速率衰减,但是现象都和上面的训练结果惊人的相似,主要现象分为两方面:
#
# * (训练集loss居高不下)训练集的loss一直在0.35左右摆动,无论如何也无法有效降低(参考kaggle排行榜上的前10名的loss,均小于0.13)
# * (验证集loss远远低于训练集loss)val_loss远远低于train_loss,经过30个epoch后,train_loss为0.35,而val_loss为0.09,并且val_loss还有继续下降的趋势。
#
# <font color=red size=3 face=“黑体”>大家记住,上面就是我遇到的两个最大的“坑”</font>
#
# 下面我们就来一一分析这两个“坑”,并给出解决方案。
# ### 2.1.1 针对“训练集loss高居不下”的现象
#
# 先暂且不管模型的验证集的loss是如何如何低,如何如何完美。现在只关心训练集的loss,其值高达0.35,按照正常思维,这就是模型欠拟合了。一般解决欠拟合的套路如下:
# * 训练集loss反映了偏差(bias)的大小,bias反映了模型本的拟合能力,所以在模型拟合能力不够的情况下可以适当增加模型的复杂度,加入更多的隐藏层或者隐藏单元。
#
# <font color=red size=3 face=“黑体”>结果:我尝试了在输出层之前中加入了两个含有1024个隐藏单元的全连接层(这里就不给出代码了,读者可以自行尝试),但是根本没效果</font>
#
# * 减小正则项,其实这也算是提高模型复杂度。对于本项目来说就是减小dropout的drop rate(等效于增大keep probability)
#
# <font color=red size=3 face=“黑体”>结果:无效!</font>
#
# 在上述两种方法都尝试失败了之后,说明正常的套路已经没有用了,是不是我的思考方向错误了呢?是否不应该单纯地从模型的角度分析问题?是否应该尝试转换思路?所以我就开始尝试从数据集本身开始分析。
#
# 因为我们采用的策略是迁移学习,迁移学习的权重是来自于resnet-50在ImageNet上进行预训练的权重,也就是说我们靠这些ImageNet训练出来权重进行bottleneck feature的提取,但是结果大家都看到了,目前这样的方法并不能适用于我们的问题,那就是说提取的bottleneck feature不能帮助我们很好地进行学习,如果是这样的话,那就说明一个问题,我们的司机数据集和ImageNet数据集是有点儿“不一样”的。接下来我们就来分析分析这个不一样在哪里?
#
# 1. 首先ImageNet数据集包含有1000种类别的图片,其中的数据大都是来自于不同的场景,拥有复杂的背景。但是我们的司机数据均来自于同一个场景(一个司机坐在车里,前面有一个方向盘),而且同一个司机的数据来自于同一个视频流,也就是说我们司机数据之间本身很相似!!!!
#
# 那么预训练的resnet-50(不含有top layer)对非常相似的司机数据进行提取时,会发生什么呢?当然就是会提取出一些非常相似的bottleneck feature!!(猜想预训练模型会这么‘干’:‘我’提取出了这张图片的信息,这张图片有脸,有手,手方向盘。。。但是我们需要的似乎不仅仅是这些信息),也就是不同的司机状态的数据也会提取出非常相似的feature的话,那么对于司机问题来说,这些feature就是无用的feature(当然‘无用’说得太过分了,毕竟训练的模型也有88%的准确率呢!只是说提取的特征信息还不能够满足本项目的需求),当然也就不能获得很好的训练效果了。
#
# 好了,原因知道了那么解决方法也就明朗了。解决方法就是开放更多地层,因为我们的模型需要学习更多的关于司机数据集的‘知识’。既然要开放更多的层,那么bottleneck feature的方法对于我们来说就不太使用了,因为模型中间的层一般维度都比较大,比如14*14*256,在20000个样本的情况下要占用4G的内存,如果再加上测试集的话,内存就消耗完了(而且如果AWS中选择K80显卡的话,只有11G的内存,根本不够)。所以现在我们就要对网络进行整体的训练。
# ### 2.1.1.1 resnet-50在更多的卷积层上面训练
#
# 接下来的大部分的操作和之前相似,不一样的是我们现在要开放更多的卷积层进行学习。在这之前,让我们先来准备用于训练的数据,这次我还是打算采用imagedataGenerator来产生训练的数据,同时用fitgenerator来进行训练。
# 此时的验证集的数据需要单独地列出来(因为待会儿训练的时候我使用的是fit_generator,而这个接口是不支持validation_split参数的,所以需要手动地将验证集分出来),我决定和之前一样采用总样本数量的20%的样本进行训练,所以我需要在每一个类别中提取出20%的样本作为验证集。这里为了节约磁盘空间,我将采用软链接的形式来建立train_link数据集。
# +
# Build a symlinked train/validation split (first 20% of each class folder
# becomes validation) so the images are not duplicated on disk.
# NOTE(review): splitting by file order puts frames of the SAME driver into
# both train and validation -- this is the leak analysed later in the
# notebook and fixed by the driver-ID split below.
# test_link_path / link_path / link_train_path / link_valid_path /
# train_dir_name / test_dir_name are defined elsewhere -- TODO confirm they
# exist before this cell runs.
classes = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
if not isdir(test_link_path):
    os.makedirs(test_link_path)
    c_filenames = os.listdir(test_dir_name)
    for file in c_filenames:
        os.symlink('../../' + test_dir_name + '/' + file, test_link_path + '/' + file)
if not isdir(link_path):
    os.makedirs(link_train_path)
    os.makedirs(link_valid_path)
    # make c0-c9 class subdirectories in both splits
    for c in classes:
        os.makedirs(link_train_path + '/' + c)
        os.makedirs(link_valid_path + '/' + c)
    # create links from the real train dir
    for c in classes:
        # get path names
        c_path = train_dir_name + '/' + c
        train_dst_path = link_train_path + '/' + c
        valid_dst_path = link_valid_path + '/' + c
        # list all file names of this class
        c_filenames = os.listdir(c_path)
        valid_size = int (len(c_filenames)*0.2)
        # first 20% of the listing -> validation
        for file in c_filenames[:valid_size]:
            os.symlink('../../../' + c_path + '/' + file, valid_dst_path + '/' + file)
        # remaining 80% -> training
        for file in c_filenames[valid_size:]:
            os.symlink('../../../' + c_path + '/' + file, train_dst_path + '/' + file)
# -
def get_data_generator(train_dir, valid_dir, test_dir, image_size):
    """Create Keras directory iterators for the train/validation/test sets.

    train_dir, valid_dir: directories laid out as <dir>/c0 .. <dir>/c9.
    test_dir: directory of unlabeled test images.
    image_size: target (height, width) tuple for the generated batches.
    Returns (train_generator, valid_generator, test_generator).
    """
    label_names = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
    # Plain generators -- no augmentation on any of the three streams.
    train_source = ImageDataGenerator()
    valid_source = ImageDataGenerator()
    test_source = ImageDataGenerator()
    # Only the training stream is shuffled; validation and test keep file
    # order so predictions can be matched back to filenames.
    train_generator = train_source.flow_from_directory(
        train_dir, image_size, color_mode='rgb', classes=label_names,
        class_mode='categorical', shuffle=True, batch_size=32)
    valid_generator = valid_source.flow_from_directory(
        valid_dir, image_size, color_mode='rgb', classes=label_names,
        class_mode='categorical', shuffle=False, batch_size=32)
    test_generator = test_source.flow_from_directory(
        test_dir, image_size, color_mode='rgb', class_mode=None,
        shuffle=False, batch_size=32)
    return train_generator, valid_generator, test_generator
#, test_generator
def model_built(MODEL, input_shape, preprocess_input, classes, last_frozen_layer_name):
    """Build a fine-tunable classifier on top of an ImageNet-pretrained base.

    MODEL: a keras.applications constructor (e.g. resnet50.ResNet50).
    input_shape: input shape of the pretrained model, e.g. (224, 224, 3).
    preprocess_input: the model's preprocessing function, applied in-graph
        via a Lambda layer (pass None/falsy to skip).
    classes: number of output classes for the softmax head.
    last_frozen_layer_name: currently unused -- layer freezing is disabled,
        so every layer stays trainable.
    """
    tensor = Input(shape=input_shape)
    if preprocess_input:
        # Bake the model-specific preprocessing into the graph itself.
        tensor = Lambda(preprocess_input)(tensor)
    base = MODEL(include_top=False, weights='imagenet', input_tensor=tensor, input_shape=input_shape)
    # Classification head: global pooling -> dropout -> softmax.
    head = GlobalAveragePooling2D()(base.output)
    head = Dropout(0.5, name='dropout_1')(head)
    head = Dense(classes, activation='softmax', name='dense_1')(head)
    return Model(inputs=base.input, outputs=head)
# +
import pandas as pd
"""
def get_test_result(model_obj, test_generator, model_name="default"):
pred_test = model_obj.predict_generator(test_generator, len(test_generator))
pred_test = np.array(pred_test)
pred_test = pred_test.clip(min=0.005, max=0.995)
df = pd.read_csv("sample_submission.csv")
for i, fname in enumerate(test_generator.filenames):
df.loc[df["img"] == fname] = [fname] + list(pred_test[i])
df.to_csv('%s.csv' % (model_name), index=None)
print ('test result file %s.csv generated!' % (model_name))
df.head(10)
"""
def get_test_result(model_obj, test_generator, model_name="default"):
    """Predict the test set and write a submission file '<model_name>.csv'.

    Probabilities are clipped to [0.005, 0.995] to bound the log-loss
    penalty of a confidently wrong prediction.
    """
    print("Now to predict")
    probs = model_obj.predict_generator(test_generator, len(test_generator), verbose=1)
    probs = np.array(probs).clip(min=0.005, max=0.995)
    print("create datasheet")
    label_names = ['c0', 'c1', 'c2', 'c3',
                   'c4', 'c5', 'c6', 'c7',
                   'c8', 'c9']
    result = pd.DataFrame(probs, columns=label_names)
    # Generators yield paths relative to their root; the submission wants
    # bare filenames.
    basenames = [os.path.basename(f) for f in test_generator.filenames]
    result.loc[:, 'img'] = pd.Series(basenames, index=result.index)
    result.to_csv('%s.csv' % (model_name), index=None)
    print ('test result file %s.csv generated!' % (model_name))
# -
# NOTE(review): `test_link` is not defined in this chunk (the path variable
# defined later is `test_link_path`) -- confirm where test_link is set.
resnet50_train_generator, resnet50_valid_generator, resnet50_test_generator = get_data_generator(link_train_path, link_valid_path, test_link, resNet_input_shape[:2])
# Data is ready; build the model with every layer open for training.
resnet50_model = model_built(resnet50.ResNet50, resNet_input_shape, resnet50.preprocess_input, 10, None)
# +
from keras import optimizers
# SGD with momentum and a very small lr so the pretrained weights are only
# nudged, not destroyed.
sgd = optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
resnet50_model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
hist = resnet50_model.fit_generator(resnet50_train_generator, len(resnet50_train_generator), epochs=3,\
    validation_data=resnet50_valid_generator, validation_steps=len(resnet50_valid_generator))
# -
# Continue training for five more epochs from the current weights.
hist = resnet50_model.fit_generator(resnet50_train_generator, len(resnet50_train_generator), epochs=5,\
    validation_data=resnet50_valid_generator, validation_steps=len(resnet50_valid_generator))
# 由于时间关系我就不继续往下训练了,可以看到我们已经将<font color=red size=3 face=“黑体”>“训练集loss高居不下”</font>的问题<font color=green size=3 face=“黑体”>解决</font>了,也就是之前列出来的两个问题中的第一个。看起来一切都很完美,完美得我们都已经忽略存在的第二个问题,因为从上面的训练结果不太明显(其实也能看出来,因为每次epoch之后,训练loss都明显高于验证loss),如果你执意认为没问题,那好,让我们将该模型在测试集上运行,然后将运行结果提交kaggle就知道了。
# Generate the submission for this single model ('resnet-50.csv').
get_test_result(resnet50_model, resnet50_test_generator, model_name="resnet-50")
# 不出所料,提交到kaggle之后,loss是惊人的2.3。下一小节开始分析此问题
# ### 2.1.2 针对“验证集loss远远低于训练集loss”问题
#
# 现在就来分析一下哪里有问题吧!从之前的训练过程可以看出,每一次验证集的loss都是要低于测试集的loss,这很不正常。说明我们验证集直接或者间接的过拟合了。为什么会过拟合呢?其实问题原因还是在数据上。我们之前说过,关于同一个司机的数据是由同一个摄像头采集于同一场景,也就是说每一个司机的同一个状态的数据会非常相似(相当于视频流的连续帧),如果这些数据有一些在训练集上,有一些在测试集上,那么模型就相当于已经“看过”了验证集的数据再来做验证,那么此时就造成了验证集的过拟合(相当于用训练数据来做验证),所以呢,验证集的数据不应该出现在测试集上,所以我们应该按照司机的ID来划分验证集和测试集,将一个司机的所有图片用来当做验证集。为了让验证集达到训练集的20%,我这里选择了四个司机(p021,p022,p024,p026)的图片(总共:4800多张)。
# +
import matplotlib.pyplot as plt
def show_loss(hist, title='loss'):
    """Plot training vs. validation loss curves from a Keras History object."""
    curves = [('val_loss', "validation loss"), ('loss', "train loss")]
    for key, label in curves:
        plt.plot(hist.history[key], label=label)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.title(title)
    plt.legend()
    plt.show()
# +
import pandas as pd
# Re-split train/validation BY DRIVER ID instead of by file order: all images
# of drivers p021/p022/p024/p026 (~4800 images, ~20% of the data) become the
# validation set, so no driver appears in both splits.  This removes the
# near-duplicate-frame leak that inflated the validation score.
link_path = 'train_link'
link_train_path = 'train_link/train'
link_valid_path = 'train_link/validation'
test_link_path = 'test_link/data'
classes = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
validation_drivers = ["p021","p022","p024","p026"]
if not isdir(test_link_path):
    os.makedirs(test_link_path)
    c_filenames = os.listdir(test_dir_name)
    for file in c_filenames:
        os.symlink('../../' + test_dir_name + '/' + file, test_link_path + '/' + file)
if not isdir(link_path):
    os.makedirs(link_train_path)
    os.makedirs(link_valid_path)
    # get validation file names from the driver->image manifest
    df = pd.read_csv(driver_imgs_list_file)
    # NOTE(review): `file in validation_files` below is an O(n) list scan
    # per file; a set() here would make it O(1).
    validation_files = list(df[df['subject'].isin(validation_drivers)]['img'])
    # make c0-c9 class subdirectories in both splits
    for c in classes:
        os.makedirs(link_train_path + '/' + c)
        os.makedirs(link_valid_path + '/' + c)
    # create links from the real train dir
    for c in classes:
        # get path names
        c_path = train_dir_name + '/' + c
        train_dst_path = link_train_path + '/' + c
        valid_dst_path = link_valid_path + '/' + c
        # list all file names of this class
        c_filenames = os.listdir(c_path)
        # route each file to validation or train depending on its driver
        for file in c_filenames:
            if file in validation_files:
                os.symlink('../../../' + c_path + '/' + file, valid_dst_path + '/' + file)
            else:
                os.symlink('../../../' + c_path + '/' + file, train_dst_path + '/' + file)
# -
def model_built(MODEL, input_shape, preprocess_input, classes, last_frozen_layer_name):
    """
    Build a fine-tunable classifier on an ImageNet-pretrained backbone.

    MODEL: pretrained model constructor (keras.applications)
    input_shape: pre-trained model's input shape
    preprocess_input: pre-trained model's preprocessing function (None skips)
    classes: number of output classes of the softmax head
    last_frozen_layer_name: currently unused -- the freezing loop below is
        commented out, so every layer stays trainable

    NOTE(review): this redefines the identical model_built from an earlier
    cell; consider keeping a single definition.
    """
    ## get pretrained model
    x = Input(shape=input_shape)
    if preprocess_input:
        # bake the model-specific preprocessing into the graph
        x = Lambda(preprocess_input)(x)
    notop_model = MODEL(include_top=False, weights='imagenet', input_tensor=x, input_shape=input_shape)
    x = GlobalAveragePooling2D()(notop_model.output)
    ## build top layer: dropout + softmax
    x = Dropout(0.5, name='dropout_1')(x)
    out = Dense(classes, activation='softmax', name='dense_1')(x)
    ret_model = Model(inputs=notop_model.input, outputs=out)
    ## Freeze some layers (intentionally disabled -- whole network fine-tunes)
    #for layer in ret_model.layers:
        #layer.trainable = False
        #if layer.name == last_frozen_layer_name:
            #break
    return ret_model
# +
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.applications import xception, resnet50
# Build generators for the driver-ID split.
resNet_input_shape = (224,224,3)
# NOTE(review): `test_link` is not defined in this chunk (the variable above
# is `test_link_path`) -- confirm where test_link is set.
resnet50_train_generator, resnet50_valid_generator, resnet50_test_generator = get_data_generator(link_train_path, link_valid_path, test_link, resNet_input_shape[:2])
##xception_train_generator, xception_valid_generator, xception_test_generator = get_data_generator(link_train_path, link_valid_path, test_link, (299, 299))
# Build the model (all layers trainable).
resnet50_model = model_built(resnet50.ResNet50, resNet_input_shape, resnet50.preprocess_input, 10, None)
#xception_model = model_built(xception.Xception, (299, 299, 3), xception.preprocess_input, 10, None)
# Train: the checkpoint keeps only the weights of the best-val_loss epoch.
ckpt = ModelCheckpoint('resnet50.weights.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
resnet50_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
hist = resnet50_model.fit_generator(resnet50_train_generator, len(resnet50_train_generator), epochs=6,\
    validation_data=resnet50_valid_generator, validation_steps=len(resnet50_valid_generator), callbacks=[ckpt])
#ckpt = ModelCheckpoint('xception.weights.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=True, save_weights_only=True)
#adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
#hist = xception_model.fit_generator(xception_train_generator, len(xception_train_generator), epochs=6,\
    #validation_data=xception_valid_generator, validation_steps=len(xception_valid_generator), callbacks=[ckpt])
# +
#adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
#xception_model.load_weights('weights.06-0.25.hdf5')
#hist = xception_model.fit_generator(xception_train_generator, len(xception_train_generator), epochs=6,\
    #validation_data=xception_valid_generator, validation_steps=len(xception_valid_generator), \
    #callbacks=[ckpt])
# Resume fine-tuning with a 10x smaller learning rate (1e-5) so the
# pretrained weights are only nudged once the model is near convergence.
adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
resnet50_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
hist = resnet50_model.fit_generator(resnet50_train_generator, len(resnet50_train_generator), epochs=6,\
    validation_data=resnet50_valid_generator, validation_steps=len(resnet50_valid_generator), \
    callbacks=[ckpt], initial_epoch=4)
# -
# One more round of training at the same settings.
hist = resnet50_model.fit_generator(resnet50_train_generator, len(resnet50_train_generator), epochs=6,\
    validation_data=resnet50_valid_generator, validation_steps=len(resnet50_valid_generator), \
    callbacks=[ckpt])
# +
#get_test_result(xception_model, xception_test_generator, model_name="xception_test_result")
# -
# 可以看到,经过了上面的训练,验证集loss收敛于一个比较“真实”的loss,所以解决了我们所说的第二个问题。
#
# 上面的训练过程折腾了挺长时间,主要在于学习速率的选择,学习速率如果选择过大,或者过小,都无法收敛到一个合适的值,学习速率太大会破坏预训练的权重,也就是我们说的“跑炸了”,学习速率太小收敛速度慢到无法忍受。
#
# 最终resnet-50模型收敛到了验证集loss为0.27,提交到kaggle之后,loss为0.34左右,在kaggle排名为前10%左右。但是我还不满足于这个结果,所以要提高模型的分数,自然就是要用到模型融合了。
# ## 3. 模型融合提高kaggle分数
#
# 模型融合的对于比赛的重要性不言而喻,观察kaggle的各个竞赛项目,排名最高的各位大神们不出意外地都是用了各种各样的模型融合方法,我把这种所谓的模型融合方法叫做集成学习方法(或者他们两个很像)。所以接下来我将使用集成学习方法中的bagging方法来进行模型的集成。
#
# 集成学习的核心思想就是三个臭皮匠订个诸葛亮,举个例子,比如你想知道一支股票的涨跌,你仅仅去询问你的一个朋友,得知预测结果后你可能还是不太方放心,如果你陆续询问多个朋友,然后把他们的建议综合起来,那么现在是不是放心多了呢?这就是集成学习的思想。如果想了解什么是集成学习,可以参考我的学习比较中关于[集成学习](https://github.com/rikichou/note_link_summary)的章节。集成学习比较重要的一个前提是diversity,也就是差异性,如果我们集成了几个没有差异性的模型,这样的集成是无效的,什么意思呢?和刚才那个例子一样,如果对于你询问的股票涨跌问题,你的所有朋友总是给出一致的答案,那么他们集成和不集成又有什么区别呢?所以我们要考虑的是差异性。
#
# 差异性的来源有两个:
#
# * 一个是数据本身的差异性,也就是对于同一个模型,如果放在两个不同的数据集上分别进行训练,那么一般会得到两个不同的g(x)。
# * 一个是模型的本身带来的差异性,也就是不同的模型在同样的数据上面进行训练, 一般也会得到两个不同的g(x)。
#
# 所以我们接下来的方案就是利用数据本身的差异性来得到几个拥有diversity的g(x),然后再将这些g(x)的预测结果进行uniform blending(其实就是所有的g(x)的输出值加起来然后取平均值),得到最终的预测结果。那么这里的数据差异性如何产生呢?这里我们可以借鉴K折交叉验证的方法来进行数据的划分和模型的训练,不同的是K折交叉验证的目的是为了得出一个模型的客观的分数,而我们的目的是为了产生不同的g(x)来进行融合,所以我们只是借鉴K折交叉验证的方法而不是使用K折交叉验证哟,想要了解什么是K折交叉验证请参考培神的[这篇文章](https://zhuanlan.zhihu.com/p/25637642)
# ### 3.1 通用的接口函数
# #### 3.1.1 数据相关的接口函数
# 生成训练集和测试集的接口函数,传入验证集的司机ID的list。注意这个接口会删除之前创建的链接文件
# +
import pandas as pd
import os
import shutil
classes = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
def train_valid_split(validation_drivers):
    """Rebuild the symlinked train/validation split, grouped by driver ID.

    validation_drivers: driver id list, like ["p021","p022","p024","p026"].
    All images belonging to those drivers become the validation set; the
    rest become the training set.  The test symlink dir is also recreated.
    Warning: this function removes the previously created link directories.

    Relies on module-level names: test_link_path, test_dir_name, link_path,
    link_train_path, link_valid_path, train_dir_name, driver_imgs_list_file,
    classes.
    """
    # Recreate the test-set symlink directory from scratch.
    if isdir(test_link_path):
        shutil.rmtree(test_link_path)
    if not isdir(test_link_path):
        os.makedirs(test_link_path)
        c_filenames = os.listdir(test_dir_name)
        for file in c_filenames:
            os.symlink('../../' + test_dir_name + '/' + file, test_link_path + '/' + file)
    ## check and remove the old train link dir
    if isdir(link_path):
        shutil.rmtree(link_path)
    if not isdir(link_path):
        os.makedirs(link_train_path)
        os.makedirs(link_valid_path)
        # get validation file names from the driver->image manifest;
        # a set makes the per-file membership test below O(1) instead of
        # scanning a multi-thousand-element list for every image
        df = pd.read_csv(driver_imgs_list_file)
        validation_files = set(df[df['subject'].isin(validation_drivers)]['img'])
        # make c0-c9 class subdirectories in both splits
        for c in classes:
            os.makedirs(link_train_path + '/' + c)
            os.makedirs(link_valid_path + '/' + c)
        # create links from the real train dir
        for c in classes:
            # get path names
            c_path = train_dir_name + '/' + c
            train_dst_path = link_train_path + '/' + c
            valid_dst_path = link_valid_path + '/' + c
            # list all file names of this class
            c_filenames = os.listdir(c_path)
            # route each file to validation or train depending on its driver
            for file in c_filenames:
                if file in validation_files:
                    os.symlink('../../../' + c_path + '/' + file, valid_dst_path + '/' + file)
                else:
                    os.symlink('../../../' + c_path + '/' + file, train_dst_path + '/' + file)
# -
# data generator,根据前面产生的训练集和验证集,我们来生成generator
def data_generator(train_dir, valid_dir, test_dir, image_size):
    """Directory iterators for train/validation/test.

    Light geometric augmentation (rotation/shift/shear/zoom) is applied to
    the training stream only; validation and test are fed unmodified and in
    file order so predictions line up with filenames.
    image_size: target (height, width), e.g. (224, 224).
    Uses the module-level `classes` label list.
    """
    augmented = ImageDataGenerator(rotation_range=10.,
                                   width_shift_range=0.05,
                                   height_shift_range=0.05,
                                   shear_range=0.1,
                                   zoom_range=0.1)
    plain_valid = ImageDataGenerator()
    plain_test = ImageDataGenerator()
    train_generator = augmented.flow_from_directory(
        train_dir, image_size, color_mode='rgb', classes=classes,
        class_mode='categorical', shuffle=True, batch_size=32)
    valid_generator = plain_valid.flow_from_directory(
        valid_dir, image_size, color_mode='rgb', classes=classes,
        class_mode='categorical', shuffle=False, batch_size=32)
    test_generator = plain_test.flow_from_directory(
        test_dir, image_size, color_mode='rgb', class_mode=None,
        shuffle=False, batch_size=32)
    return train_generator, valid_generator, test_generator
# #### 3.1.2 模型相关的接口函数
def get_model(MODEL, input_shape, preprocess_input, output_num):
    """Attach a Dropout + softmax head to an ImageNet-pretrained backbone.

    MODEL: keras.applications constructor.
    input_shape: the backbone's input shape.
    preprocess_input: the backbone's preprocessing function (None/falsy skips).
    output_num: number of output classes.
    Every layer is left trainable.
    """
    net = Input(shape=input_shape)
    if preprocess_input:
        # Apply model-specific preprocessing inside the graph.
        net = Lambda(preprocess_input)(net)
    backbone = MODEL(include_top=False, weights='imagenet', input_tensor=net, input_shape=input_shape)
    # Head: global average pooling -> dropout -> softmax.
    pooled = GlobalAveragePooling2D()(backbone.output)
    pooled = Dropout(0.5, name='dropout_1')(pooled)
    out = Dense(output_num, activation='softmax', name='dense_1')(pooled)
    return Model(inputs=backbone.input, outputs=out)
# #### 3.1.3 测试文件生成接口函数
def get_test_result(model_obj, generator, result_file_name="default"):
    """Predict `generator` with `model_obj` and write a submission CSV.

    result_file_name: output CSV path (written via DataFrame.to_csv).
    Probabilities are clipped to [0.005, 0.995] to cap the log-loss of a
    confidently wrong prediction.  Uses the module-level `classes` list
    for the column names.
    """
    print("Now to predict result!")
    pred_test = model_obj.predict_generator(generator, len(generator), verbose=1)
    pred_test = np.array(pred_test)
    pred_test = pred_test.clip(min=0.005, max=0.995)
    print("Creating datasheet!")
    result = pd.DataFrame(pred_test, columns=classes)
    test_filenames = []
    # BUG FIX: iterate the `generator` argument, not the module-level
    # `test_generator`, so filenames always match the predicted rows even
    # when this function is called with a different generator.
    for f in generator.filenames:
        test_filenames.append(os.path.basename(f))
    result.loc[:, 'img'] = pd.Series(test_filenames, index=result.index)
    result.to_csv(result_file_name, index=None)
    print ('Test result file %s generated!' % (result_file_name))
# +
import pandas as pd
def merge_test_results(file_name_list, result_file_name='result'):
    """Uniform-blend several submission CSVs into one.

    file_name_list: paths of submission CSVs; each must contain the same
        rows in the same order, with an 'img' column plus one probability
        column per class.
    result_file_name: path of the blended CSV to write.
    Raises ValueError if file_name_list is empty (the original code would
    fail with an opaque error on `None / 0`).
    The probability columns are averaged; the 'img' column is taken from
    the first file.
    """
    if not file_name_list:
        raise ValueError('file_name_list must contain at least one file')
    result = None
    file_num = len(file_name_list)
    img = []
    for i, name in enumerate(file_name_list):
        df = pd.read_csv(name)
        if i == 0:
            # Row identity (filenames) comes from the first file.
            img = df['img']
            result = df.drop('img', axis=1)
        else:
            result = result + df.drop('img', axis=1)
    result = result / float(file_num)
    result['img'] = img
    result.to_csv(result_file_name, index=None)
    print ("Final result file: " + result_file_name)
# -
# ### 3.2 resnet-50的模型融合
#
# 我打算融合resnet-50训练出来的四个模型,其中每个模型是由不同的训练集划分出来的,通过上面的过程我们了解到,需要根据司机ID来划分训练集和验证集,在这里我假设每个验证集包含4个司机的数据,现在我们就来划分一下这四个模型需要的训练集和验证集
# #### 3.2.1 训练模型
# +
from keras.applications import resnet50
from keras import optimizers
from keras.callbacks import ModelCheckpoint
# Bagging: train four ResNet-50 models, each validated on a different group
# of four held-out drivers, then average their test predictions later.
res_drivers_id_list = [["p039","p047","p052","p066"],
                       ["p015","p041","p049","p081"],
                       ["p002","p016","p035","p050"],
                       ["p024","p041","p049","p052"]]
res_image_size = (224, 224)
res_input_shape = (224, 224, 3)
csv_names_list = []
for i in range(4):
    print ('------------------------------------------------------------------------------------------------------------------')
    """ 1. about data """
    # rebuild the symlinked split for this fold (removes the old links)
    train_valid_split(res_drivers_id_list[i])
    # get generators for the new split
    train_generator, valid_generator, test_generator = data_generator(link_train_path, link_valid_path, test_link, res_image_size)
    """ 2. about model """
    # fresh model per fold
    resnet50_model = get_model(resnet50.ResNet50, res_input_shape, resnet50.preprocess_input, 10)
    # compile; checkpoint keeps only this fold's best-val_loss weights
    weights_file_name = 'resnet50.'+ str(i) + '.weights.hdf5'
    ckpt = ModelCheckpoint(weights_file_name, verbose=1, save_best_only=True, save_weights_only=True)
    adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    resnet50_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # one warm-up epoch at lr=1e-4
    hist_1 = resnet50_model.fit_generator(train_generator, len(train_generator), epochs=1,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # recompile with a 10x smaller learning rate for gentle fine-tuning
    adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    resnet50_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # fit using the small learning rate
    hist_2 = resnet50_model.fit_generator(train_generator, len(train_generator), epochs=3,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # restore the best checkpointed weights before predicting
    resnet50_model.load_weights(weights_file_name)
    """ 3. about result """
    # NOTE(review): the file written here is a CSV despite the .hdf5 suffix
    # (get_test_result uses to_csv); later cells reference these exact names,
    # so any rename must be applied everywhere.
    result_file_name = 'resnet50.'+ str(i) + '.test.result.hdf5'
    get_test_result(resnet50_model, test_generator, result_file_name=result_file_name)
    csv_names_list.append(result_file_name)
# -
# #### 3.2.2 融合测试结果
# Average the four ResNet-50 fold submissions into one blended file.
merge_test_results(csv_names_list, 'resnet50.finall.result.csv')
# 可以看出在做出了resnet50的模型融合以后,提交kaggle发现loss由0.35下降到了0.23,效果明显提升了。
# ### 3.3 xception的模型融合
#
# 我打算融合xception训练出来的四个模型,其中每个模型是由不同的训练集划分出来的,除了模型不同外,其他均和resnet50的操作一致。
# #### 3.3.1 训练模型
# +
from keras.applications import xception
from keras import optimizers
from keras.callbacks import ModelCheckpoint
# Bagging for Xception: four folds, each validated on four held-out drivers.
xcp_drivers_id_list = [['p016', 'p072', 'p026', 'p066'],
                       ['p042', 'p022', 'p045', 'p075'],
                       ['p064', 'p047', 'p056', 'p061'],
                       ['p039', 'p012', 'p015', 'p052']]
xcp_image_size = (299, 299)
xcp_input_shape = (299, 299, 3)
xcp_csv_names_list = []
for i in range(4):
    print ('------------------------------------------------------------------------------------------------------------------')
    """ 1. about data """
    # rebuild the symlinked split for this fold (removes the old links)
    train_valid_split(xcp_drivers_id_list[i])
    # get generators for the new split
    train_generator, valid_generator, test_generator = data_generator(link_train_path, link_valid_path, test_link, xcp_image_size)
    """ 2. about model """
    # fresh model per fold
    xception_model = get_model(xception.Xception, xcp_input_shape, xception.preprocess_input, 10)
    # compile; checkpoint keeps only this fold's best-val_loss weights
    weights_file_name = 'xception.'+ str(i) + '.weights.hdf5'
    ckpt = ModelCheckpoint(weights_file_name, verbose=1, save_best_only=True, save_weights_only=True)
    adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # one warm-up epoch at lr=1e-4
    hist_1 = xception_model.fit_generator(train_generator, len(train_generator), epochs=1,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # recompile with a 10x smaller learning rate for fine-tuning
    adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # fit using the small learning rate
    hist_2 = xception_model.fit_generator(train_generator, len(train_generator), epochs=3,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # restore the best checkpointed weights before predicting
    xception_model.load_weights(weights_file_name)
    """ 3. about result """
    # NOTE(review): CSV content despite the .hdf5 suffix
    result_file_name = 'xception.'+ str(i) + '.test.result.hdf5'
    get_test_result(xception_model, test_generator, result_file_name=result_file_name)
    xcp_csv_names_list.append(result_file_name)
# -
# Average the four Xception fold submissions into one blended file.
merge_test_results(xcp_csv_names_list, 'xception.finall.result.csv')
# xception的模型的融合结果提交kaggle之后,loss为:0.27290
# ### 3.3 inceptionv3的模型融合
#
# 我打算融合inceptionv3训练出来的四个模型,其中每个模型是由不同的训练集划分出来的,除了模型不同外,其他均和resnet50的操作一致。
# +
from keras.applications import inception_v3
from keras import optimizers
from keras.callbacks import ModelCheckpoint
# Bagging for InceptionV3: four folds, each validated on four held-out drivers.
inc_drivers_id_list = [['p064', 'p056', 'p047', 'p041'],
                       ['p051', 'p072', 'p052', 'p049'],
                       ['p002', 'p050', 'p014', 'p075'],
                       ['p035', 'p039', 'p012', 'p081']]
inc_image_size = (299, 299)
inc_input_shape = (299, 299, 3)
inc_csv_names_list = []
for i in range(4):
    print ('------------------------------------------------------------------------------------------------------------------')
    """ 1. about data """
    # rebuild the symlinked split for this fold (removes the old links)
    train_valid_split(inc_drivers_id_list[i])
    # get generators for the new split
    train_generator, valid_generator, test_generator = data_generator(link_train_path, link_valid_path, test_link, inc_image_size)
    """ 2. about model """
    # fresh model per fold
    inceptionv3_model = get_model(inception_v3.InceptionV3, inc_input_shape, inception_v3.preprocess_input, 10)
    # compile; checkpoint keeps only this fold's best-val_loss weights
    weights_file_name = 'inceptionv3.'+ str(i) + '.weights.hdf5'
    ckpt = ModelCheckpoint(weights_file_name, verbose=1, save_best_only=True, save_weights_only=True)
    adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    inceptionv3_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # one warm-up epoch at lr=1e-4
    hist_1 = inceptionv3_model.fit_generator(train_generator, len(train_generator), epochs=1,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # recompile with a 10x smaller learning rate for fine-tuning
    adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    inceptionv3_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # fit using the small learning rate (only 2 epochs here, vs 3 elsewhere)
    hist_2 = inceptionv3_model.fit_generator(train_generator, len(train_generator), epochs=2,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # restore the best checkpointed weights before predicting
    inceptionv3_model.load_weights(weights_file_name)
    """ 3. about result """
    # NOTE(review): CSV content despite the .hdf5 suffix
    result_file_name = 'inceptionv3.'+ str(i) + '.test.result.hdf5'
    get_test_result(inceptionv3_model, test_generator, result_file_name=result_file_name)
    inc_csv_names_list.append(result_file_name)
# -
# Average the four InceptionV3 fold submissions into one blended file.
merge_test_results(inc_csv_names_list, 'inceptionv3.finall.result.csv')
# 提交kaggle之后分数为0.31467
# ### 3.4 inception_resnet_v2的模型融合
#
# 我打算融合inception_resnet_v2训练出来的四个模型,其中每个模型是由不同的训练集划分出来的,除了模型不同外,其他均和resnet50的操作一致。我不打算再单独调参了。
# +
from keras.applications import inception_resnet_v2
from keras import optimizers
from keras.callbacks import ModelCheckpoint
# Bagging for InceptionResNetV2: four folds, each validated on four drivers.
ire_drivers_id_list = [['p041', 'p026', 'p022', 'p002'],
                       ['p061', 'p081', 'p015', 'p075'],
                       ['p042', 'p045', 'p047', 'p012'],
                       ['p064', 'p051', 'p072', 'p021']]
ire_image_size = (299, 299)
ire_input_shape = (299, 299, 3)
ire_csv_names_list = []
for i in range(4):
    print ('------------------------------------------------------------------------------------------------------------------')
    """ 1. about data """
    # rebuild the symlinked split for this fold (removes the old links)
    train_valid_split(ire_drivers_id_list[i])
    # get generators for the new split
    train_generator, valid_generator, test_generator = data_generator(link_train_path, link_valid_path, test_link, ire_image_size)
    """ 2. about model """
    # fresh model per fold
    inception_resnet_v2_model = get_model(inception_resnet_v2.InceptionResNetV2, ire_input_shape, inception_resnet_v2.preprocess_input, 10)
    # compile; checkpoint keeps only this fold's best-val_loss weights
    weights_file_name = 'inception_resnet_v2.'+ str(i) + '.weights.hdf5'
    ckpt = ModelCheckpoint(weights_file_name, verbose=1, save_best_only=True, save_weights_only=True)
    adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    inception_resnet_v2_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # one warm-up epoch at lr=1e-4
    hist_1 = inception_resnet_v2_model.fit_generator(train_generator, len(train_generator), epochs=1,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # recompile with a 10x smaller learning rate for fine-tuning
    adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    inception_resnet_v2_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # fit using the small learning rate
    hist_2 = inception_resnet_v2_model.fit_generator(train_generator, len(train_generator), epochs=3,\
        validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
    # restore the best checkpointed weights before predicting
    inception_resnet_v2_model.load_weights(weights_file_name)
    """ 3. about result """
    # NOTE(review): CSV content despite the .hdf5 suffix
    result_file_name = 'inception_resnet_v2.'+ str(i) + '.test.result.hdf5'
    get_test_result(inception_resnet_v2_model, test_generator, result_file_name=result_file_name)
    ire_csv_names_list.append(result_file_name)
# +
# Re-list the four fold result files explicitly (handy when re-running this
# cell after a kernel restart) and blend them.
ire_csv_names_list = ['inception_resnet_v2.0.test.result.hdf5',
                      'inception_resnet_v2.1.test.result.hdf5',
                      'inception_resnet_v2.2.test.result.hdf5',
                      'inception_resnet_v2.3.test.result.hdf5']
merge_test_results(ire_csv_names_list, 'inception_resnet_v2.finall.result.csv')
# +
# Blend the per-architecture blended submissions into one four-model result.
all_name = ['inception_resnet_v2.finall.result.csv', 'inceptionv3.finall.result.csv', 'resnet50.finall.result.csv', 'xception.finall.result.csv']
merge_test_results(all_name, 'four.model.finall.result.csv')
# -
# 提交kaggle之后,loss为:0.29589
# ### 3.5 四个模型选择性的融合
#
# 根据上面的结果,如果利用单独的模型来进行融合,resnet-50效果是最好的,可能是由于其他模型没有经过babysitting调试的过程,效果不是很理想,如果现在将四个模型都融合的话,有些模型可能会拖后腿,所以我就手工选择几个验证集表现最好的模型来进行融合
# +
# Hand-picked blend of the five folds with the best validation loss.
name_list = ['resnet50.3.test.result.hdf5',
             'resnet50.1.test.result.hdf5',
             'xception.1.test.result.hdf5',
             'xception.3.test.result.hdf5',
             'inception_resnet_v2.0.test.result.hdf5']
merge_test_results(name_list, 'choose_top_5_model_finall.result.csv')
# -
# 提交kaggle之后,发现loss为0.23,已经进入了kaggle排行榜的前%10了,达到了初步估计的目标。由于时间关系(训练一个模型几乎都要差不多2个小时,4个模型就是8个小时),我不再继续优化下去了。
#
# 下面再增加几个可能的提升点:
# * 出了resnet50模型外,其他模型都没能够很好地收敛,原因是我使用了resnet-50的优化算法来对其他模型进行训练。所以一个提升点就是针对每个模型精心调试
#
# * 训练集增加更多的data augmentation,借鉴kaggle排名靠前的大神的方法。将同一个类别的图片进行左右拼接,这样可以提高模型的泛化能力,让模型知道他应该关注哪些地方,不应该关注哪些地方。
#
# * 增加更多的模型,我们知道,模型融合一个方向之一就是使用更多的模型,正常情况下这样总能得到比较好的分数(除非某个模型非常拖后腿)
# ## 4.CAM可视化
# 上面我们通过对模型的集成达到了不错的预测效果,那么如果我们想看看用于集成的模型到底在关注些什么,要怎么做呢?在周博磊博士的论文[Learning Deep Features for Discriminative Localization](http://cnnlocalization.csail.mit.edu/)中,提出了一种通过在CNN加入全局平均池化(global average pooling)层来获得class activation map,对于某一个特定的类,该类对应的class activation map表示CNN在预测该类对象的时候所关注的图像区域。产生某一个类的class activation map的方法也很简单,其实就是全局平均池化层的上一个层的各个feature map的线性组合,而线性组合的系数来自于输出层的该类对应的神经元的权重。
#
# 比如说resnet50,全局平均池化层的前一个层的输出维度为7x7x2048,那么class activation map就是这2048个7x7的feature map线性组合的结果,而线性组合的权重就是(在本项目中输出维度是10,所以权重维度就是2048x10,某一类别的神经元对应的权重就是2048个)某个类别对应的输出层神经元的权重了。
#
# 那么有了class activation map之后如何将其反映在图片中呢?现在有了一个关于某个类别的7x7的CAM,这个CAM中的某个位置的值就对应了原始图片中对应位置(比如CAM右下角的元素对应原始图片中右下角的内容)的内容对预测某一类别的重要性,所以我们就可以利用cv2的热力图的接口来进行热力图的绘制,通过热力图就可以看出模型在关注些什么。
# ### 4.1 resnet50 CAM可视化
# #### 4.1.1 加载模型
#
# 因为我们要通过获取输出层的权重和卷积层(也就是global average pooling的前一层)的激活值来计算class activation maps,所以在搭建模型的时候也需要将卷积层作为输出。
def cam_model(MODEL, input_shape, preprocess_input, output_num, weights_file_name):
    """
    Build a CAM (Class Activation Map) variant of a fine-tuned classifier.

    MODEL: pretrained model constructor (e.g. keras.applications.resnet50.ResNet50)
    input_shape: pre-trained model's input shape, e.g. (224, 224, 3)
    preprocess_input: pre-trained model's preprocessing function; falsy to skip
    output_num: number of output classes of the fine-tuned softmax head
    weights_file_name: weights trained on the driver dataset

    Returns (model, weights):
      model   - a Model with TWO outputs: the softmax prediction and the output
                of `notop_model.layers[-2]`, used as the feature maps for CAM
      weights - kernel of the final Dense layer as a numpy array, i.e. the
                per-class linear-combination coefficients of the feature maps

    NOTE(review): cells below rebind the name `cam_model` to the returned Model
    (`cam_model, cam_weights = cam_model(...)`), shadowing this function — it
    can only be called once per kernel session when running cells in order.
    """
    ## get pretrained model
    x = Input(shape=input_shape)
    if preprocess_input:
        # bake the architecture's preprocessing into the graph so raw RGB
        # images can be fed directly
        x = Lambda(preprocess_input)(x)
    notop_model = MODEL(include_top=False, weights=None, input_tensor=x, input_shape=input_shape)
    x = GlobalAveragePooling2D(name='global_average_2d_1')(notop_model.output)
    ## build top layer
    x = Dropout(0.5, name='dropout_1')(x)
    out = Dense(output_num, activation='softmax', name='dense_1')(x)
    # second output exposes the pre-GAP feature maps needed to compute the CAM
    # NOTE(review): layers[-2] assumes a specific layer ordering inside the
    # notop model — confirm it is the last conv feature map for each MODEL used.
    ret_model = Model(inputs=notop_model.input, outputs=[out, notop_model.layers[-2].output])
    ## load weights
    ret_model.load_weights(weights_file_name)
    ## get the output layer weights
    weights = ret_model.layers[-1].get_weights()
    return ret_model, np.array(weights[0])
# +
from keras.applications import resnet50

res_image_size = (224, 224)
res_input_shape = (224, 224, 3)
# Build the ResNet50 CAM model from the saved 'resnet50.3' weights.
# NOTE(review): this rebinds the name `cam_model` from the builder function to
# the returned Model, so the builder cannot be called again later without
# restarting the kernel (the xception cell below hits exactly this).
cam_model, cam_weights = cam_model(resnet50.ResNet50, res_input_shape, resnet50.preprocess_input, 10, 'resnet50.3.weights.hdf5')
# per the notes above, for ResNet50 this should print (2048, 10)
print (cam_weights.shape)
# -
# #### 4.1.2 绘制热力图
# +
import cv2
import numpy as np

# Sample image (from the class-c3 training folder) to visualize.
image_path = 'train/c3/img_537.jpg'
resNet_input_shape = (224,224,3)

# read image (OpenCV loads BGR; convert to RGB for matplotlib display)
image = cv2.imread(image_path)
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
image_input = cv2.resize(image, (resNet_input_shape[0], resNet_input_shape[1]))
# add a batch dimension: the model expects (1, H, W, 3)
image_input_m = np.expand_dims(image_input,axis=0)

# predict and get feature maps
# NOTE(review): `cam_model` here is the two-output Model built above, so
# predict returns [predictions, feature_maps].
predict_m, feature_maps_m = cam_model.predict(image_input_m)
# +
# Drop the batch dimension: one image in, one prediction / feature-map stack out.
predict = predict_m[0]
feature_maps = feature_maps_m[0]
# get the class result
class_index = np.argmax(predict)
# get the class_index unit's weights (Dense kernel column for that class)
cam_weights_c = cam_weights[:, class_index]
# class activation map = per-class weighted sum of the feature maps
cam = np.matmul(feature_maps, cam_weights_c)
# Min-max normalize the CAM into [0, 1].
# BUG FIX: the original divided by cam.max() instead of the value range
# (cam.max() - cam.min()); when the CAM has negative entries that pushes
# values above 1 and makes the later np.uint8(255*cam) cast wrap around.
cam -= cam.min()
cam_range = cam.max()
if cam_range > 0:  # guard against a constant (all-equal) map
    cam = cam / cam_range
# suppress weak activations so the heat map highlights only salient regions
cam[np.where(cam < 0.2)] = 0
cam = cv2.resize(cam, (resNet_input_shape[0], resNet_input_shape[1]))
cam = np.uint8(255 * cam)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Human-readable label for the predicted class.
# NOTE(review): `state_des` is defined earlier in the notebook — presumably a
# dict mapping 'c0'..'c9' to descriptions; confirm before running standalone.
des = state_des['c'+str(class_index)]
# draw the hotmap
hotmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
# linear combine the picture with cam
# NOTE(review): applyColorMap yields BGR while image_input was converted to
# RGB, so the blended colors are channel-swapped (high activations render
# blue rather than red — consistent with the observation in the notes below).
dis = cv2.addWeighted(image_input, 0.8, hotmap, 0.4, 0)
plt.title("Predict C" + str(class_index) + ':' + des)
plt.imshow(dis)
plt.axis("off")
# +
import cv2
import numpy as np
def show_hot_map(image_path, model, cam_weights, input_shape):
    """
    Predict the driver state for one image and display the image blended with
    its Class Activation Map (CAM).

    image_path: path of the image file to visualize
    model: CAM model returned by the builder (two outputs: softmax prediction
        and pre-GAP feature maps)
    cam_weights: output-layer kernel, shape (n_features, n_classes)
    input_shape: model input shape, e.g. (224, 224, 3)

    Shows a matplotlib figure; returns None.
    """
    """ 1. predict """
    # read image (OpenCV loads BGR; convert to RGB for display)
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_input = cv2.resize(image, (input_shape[0], input_shape[1]))
    image_input_m = np.expand_dims(image_input, axis=0)
    # predict and get feature maps
    predict_m, feature_maps_m = model.predict(image_input_m)

    """ 2. get the class activation maps """
    predict = predict_m[0]
    feature_maps = feature_maps_m[0]
    # get the class result
    class_index = np.argmax(predict)
    # get the class_index unit's weights
    cam_weights_c = cam_weights[:, class_index]
    # get the class activation map
    cam = np.matmul(feature_maps, cam_weights_c)
    # Min-max normalize the CAM to [0, 1].
    # BUG FIX: the original divided by cam.max() rather than the range
    # (max - min); negative CAM values then map above 1 and wrap around in
    # the later np.uint8(255*cam) cast.
    cam -= cam.min()
    cam_range = cam.max()
    if cam_range > 0:  # guard against a constant map (division by zero)
        cam = cam / cam_range
    # do not care the low values
    cam[np.where(cam < 0.2)] = 0
    cam = cv2.resize(cam, (input_shape[0], input_shape[1]))
    cam = np.uint8(255 * cam)

    """ 3. show the hot map """
    import matplotlib.pyplot as plt
    des = state_des['c' + str(class_index)]
    # draw the hotmap
    hotmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
    # linear combine the picture with cam
    dis = cv2.addWeighted(image_input, 0.8, hotmap, 0.4, 0)
    plt.title("Predict C" + str(class_index) + ':' + des)
    plt.imshow(dis)
    plt.axis("off")
# +
image_path = 'train/c3/img_537.jpg'
resNet_input_shape = (224,224,3)
show_hot_map(image_path, cam_model, cam_weights, res_input_shape)
# -
# 可以通过更换文件名来查看每一张图片模型都在关注什么,看的出来,绝大部分情况下,模型都在做正确的事情。
#
# CAM图中,蓝色部分代表CAM值比较大的区域,也就是模型用来进行分类的重点关注区域。
# ## 5. 视频演示
# 接下来是一段视频演示,该视频有两个子窗口,分别为正常视频和热力图的视频。照着官方文档使用OpenCV来做视频是很方便的
# ### 5.1 关于生成热力图像的接口
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
def generate_hot_map(frame, cam_model, model_input_size, cam_weights, cam_size):
    """
    Classify one video frame and return it blended with its Class Activation Map.

    frame: frame as delivered by cv2.VideoCapture (BGR)
    cam_model: CAM model (two outputs: softmax prediction, pre-GAP feature maps)
    model_input_size: the model's expected (width, height)
    cam_weights: output-layer kernel, shape (n_features, n_classes)
    cam_size: (width, height) of the returned picture

    Returns (blended_image, prediction_vector).
    """
    # resize frame for predict
    img_for_model = cv2.resize(frame, model_input_size)
    img_for_model = np.expand_dims(img_for_model, axis=0)
    """ 1. predict """
    # predict and get feature maps
    predict_m, feature_maps_m = cam_model.predict(img_for_model)
    """ 2. get the class activation maps """
    predict = predict_m[0]
    feature_maps = feature_maps_m[0]
    # get the class result
    class_index = np.argmax(predict)
    # get the class_index unit's weights
    cam_weights_c = cam_weights[:, class_index]
    # get the class activation map
    cam = np.matmul(feature_maps, cam_weights_c)
    # Min-max normalize the CAM to [0, 1].
    # BUG FIX: the original divided by cam.max() rather than the range
    # (max - min); negative CAM values then map above 1 and wrap around in
    # the np.uint8(255*cam) cast below.
    cam -= cam.min()
    cam_range = cam.max()
    if cam_range > 0:  # guard against a constant map (division by zero)
        cam = cam / cam_range
    # do not care the low values
    cam[np.where(cam < 0.2)] = 0
    cam = cv2.resize(cam, cam_size)
    cam = np.uint8(255 * cam)
    """ 3. build the heat-map overlay """
    # (removed an unused `des = state_des[...]` lookup that was dead code here)
    # draw the hotmap
    hotmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
    # linear combine the picture with cam
    image_input = cv2.resize(frame, cam_size)
    dis = cv2.addWeighted(image_input, 0.8, hotmap, 0.4, 0)
    return dis, predict
# -
# ### 5.2 关于视频的接口
# +
import cv2
import numpy as np
## output video canvas dimensions (pixels)
main_video_width = 1280
main_video_high = 720
## each sub-window takes half the width and 60% of the height
sub_video_width = main_video_width // 2
sub_video_high = int(main_video_high * 0.6)
## sub-window 1 (left): vertical band centred in the canvas
sub1_coord_1 = (main_video_high - sub_video_high) // 2
sub1_coord_2 = sub1_coord_1 + sub_video_high
sub1_coord_3 = 0
sub1_coord_4 = sub1_coord_3 + sub_video_width
## sub-window 2 (right): same vertical band, right half of the canvas
sub2_coord_1 = sub1_coord_1
sub2_coord_2 = sub1_coord_2
sub2_coord_3 = sub1_coord_4
sub2_coord_4 = main_video_width
def generate_video_with_classfication(model, model_input_size, video_name_or_camera, cam_weights, generate_video_name='output.avi'):
    """
    Read a video (file or camera), classify every frame, and write an annotated
    video showing the raw frame (left) next to its CAM heat map (right).

    model: CAM model used to predict each frame
    model_input_size: input image size of the model, e.g. (299, 299)
    video_name_or_camera: a file name, or a camera index for live capture
    cam_weights: output-layer weights used to build the CAM
    generate_video_name: name of the video file to write

    Press 'q' in the preview window to stop early. Returns None.
    """
    """ 0. create the video reader and query the stream properties """
    cap = cv2.VideoCapture(video_name_or_camera)
    # BUG FIX: bail out before querying properties or allocating the writer;
    # the original only checked isOpened() after creating the VideoWriter.
    # str() also keeps this print working for an int camera index.
    if cap.isOpened() == False:
        print("Failed to open " + str(video_name_or_camera))
        return
    video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_high = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    print("video size:({width}, {high}) fps:{fps}".format(width=video_width, high=video_high, fps=video_fps))
    ''' 1. output canvas and video writer '''
    showBigImage = np.zeros((int(main_video_high), int(main_video_width), 3), np.uint8)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter(generate_video_name, fourcc, 20.0, (main_video_width, main_video_high))
    try:
        while True:
            """ 2. grab the next frame and predict """
            ret, frame = cap.read()
            # check if the video is over
            if ret != True:
                print("Ending!")
                break
            # get hot map
            sub_frame_2, predict = generate_hot_map(frame, model, model_input_size, cam_weights, (sub_video_width, sub_video_high))
            """ 3. add the predicted label text to the canvas """
            class_index = np.argmax(predict)
            text = 'Predicted: C{} {}'.format(class_index, state_des['c'+str(class_index)])
            font = cv2.FONT_HERSHEY_SIMPLEX
            showBigImage[:] = 0
            cv2.putText(showBigImage, text, (10, sub1_coord_1-10), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
            """ 4. resize and fill the two sub-windows """
            frame = cv2.resize(frame, (sub_video_width, sub_video_high))
            showBigImage[sub1_coord_1:sub1_coord_2, sub1_coord_3:sub1_coord_4] = frame
            showBigImage[sub2_coord_1:sub2_coord_2, sub2_coord_3:sub2_coord_4] = sub_frame_2
            """ 5. preview and save """
            cv2.imshow('image', showBigImage)
            writer.write(showBigImage)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # BUG FIX: the original never released the VideoWriter, which can leave
        # the output AVI unfinalized; release everything even on error.
        writer.release()
        cap.release()
        cv2.destroyAllWindows()
# -
# ### 5.3 关于模型的接口
# +
from keras.applications import xception
xce_image_size = (299, 299)
xce_input_shape = (299, 299, 3)
# NOTE(review): by this point `cam_model` is no longer the builder function —
# the ResNet50 cell above rebound the name to the returned Model — so running
# the notebook top-to-bottom makes this call fail. Restart the kernel (or
# rename the result variables in both cells) before executing this cell.
cam_model, cam_weights = cam_model(xception.Xception, xce_input_shape, xception.preprocess_input, 10, 'xception.fixed.weights.hdf5')
# -
# ### 5.4 生成视频
generate_video_with_classfication(cam_model, xce_image_size, 'real_fei.mp4', cam_weights, generate_video_name='output.avi')
# 我自己拍摄了一段视频,利用xception的单模型进行预测,但是效果不理想,估计是因为:
#
# * 拍摄角度和训练集和测试集不一致,我的角度是平拍,测试集和训练集的角度是俯拍
#
# * 用于训练模型的视频中涉及到的车辆的佩饰和我拍摄视频的车辆的佩饰不一致,模型的泛化能力比较差
#
# 所以视频就暂时不发出来演示了。
# ## 6.一点儿改进的尝试
# 之前训练的几个模型所用的数据都是经过了data augmentation的,但是也许我们可以做更“激进”一点儿的data augmentation,使得我们训练出来的模型具有更强大的泛化能力。
#
# 通过浏览各个驾驶状态的驾驶员以及CAM可以发现,对区分驾驶状态有帮助的图片区域基本上都位于左上部分:头,和右下部分:手,那么我们是否可以这样进行组合,将一张图的左半部分和另一张同类型的图的右半部分进行组合(后来我也加入了图片的上下两部分的结合),形成一张新的图片,这样既保留了用于区分状态的关键区域,又使得非关键区域(以下均称为背景)不具有主导地位(因为背景会时常变化)。
#
# 所以我就简单地修改了一下keras自带的data augmentation(其中标有红色注释的部分就是修改的部分),使其对我们的同类数据进行拼接。代码如下所示
#
# 接下来我就利用修改之后的data augmentation来产生数据,对xception进行改进,看看效果如何。
# +
from keras.preprocessing.image import *
from keras.preprocessing.image import _count_valid_files_in_directory
from keras.preprocessing.image import _list_valid_filenames_in_directory
import numpy as np
# new class, MergeImageDataGenerator, to generate the merge image
# ImageDataGenerator variant whose directory iterator pastes together two
# random images of the same class (see MergeDirectoryIterator below).
class MergeImageDataGenerator(ImageDataGenerator):

    # Same signature and semantics as the parent method, but hands the work to
    # MergeDirectoryIterator instead of the stock DirectoryIterator.
    def flow_from_directory(self, directory,
                            target_size=(256, 256), color_mode='rgb',
                            classes=None, class_mode='categorical',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest'):
        iterator_kwargs = dict(
            target_size=target_size, color_mode=color_mode,
            classes=classes, class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation)
        return MergeDirectoryIterator(directory, self, **iterator_kwargs)
# redefine the iterator of the generator
# DirectoryIterator clone (adapted from the Keras source) whose behavioral
# change lives in _get_batches_of_transformed_samples: with 80% probability a
# loaded image has half of it (bottom or right) replaced by the matching half
# of another randomly chosen image of the SAME class, as extra augmentation.
# __init__ additionally records, per class, the index range of that class's
# files (self.classes_range) so a same-class partner can be drawn cheaply.
class MergeDirectoryIterator(Iterator):
    """Iterator capable of reading images from a directory on disk.

    # Arguments
        directory: Path to the directory to read images from.
            Each subdirectory in this directory will be
            considered to contain images from one class,
            or alternatively you could specify class subdirectories
            via the `classes` argument.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        target_size: tuple of integers, dimensions to resize input images to.
        color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of subdirectories
            containing images from each class (e.g. `["dogs", "cats"]`).
            It will be computed automatically if not set.
        class_mode: Mode for yielding the targets:
            `"binary"`: binary targets (if there are only two classes),
            `"categorical"`: categorical targets,
            `"sparse"`: integer targets,
            `"input"`: targets are images identical to input images (mainly
                used to work with autoencoders),
            `None`: no targets get yielded (only input images are yielded).
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
        subset: Subset of data (`"training"` or `"validation"`) if
            validation_split is set in ImageDataGenerator.
        interpolation: Interpolation method used to resample the image if the
            target size is different from that of the loaded image.
            Supported methods are "nearest", "bilinear", and "bicubic".
            If PIL version 1.1.3 or newer is installed, "lanczos" is also
            supported. If PIL version 3.4.0 or newer is installed, "box" and
            "hamming" are also supported. By default, "nearest" is used.
    """

    def __init__(self, directory, image_data_generator,
                 target_size=(256, 256), color_mode='rgb',
                 classes=None, class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest'):
        if data_format is None:
            data_format = K.image_data_format()
        self.directory = directory
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.data_format = data_format
        if self.color_mode == 'rgb':
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        self.classes = classes
        if class_mode not in {'categorical', 'binary', 'sparse',
                              'input', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of "categorical", '
                             '"binary", "sparse", "input"'
                             ' or None.')
        self.class_mode = class_mode
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.interpolation = interpolation
        if subset is not None:
            validation_split = self.image_data_generator._validation_split
            if subset == 'validation':
                split = (0, validation_split)
            elif subset == 'training':
                split = (validation_split, 1)
            else:
                raise ValueError('Invalid subset name: ', subset,
                                 '; expected "training" or "validation"')
        else:
            split = None
        self.subset = subset

        white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff'}

        # first, count the number of samples and classes
        self.samples = 0

        if not classes:
            classes = []
            for subdir in sorted(os.listdir(directory)):
                if os.path.isdir(os.path.join(directory, subdir)):
                    classes.append(subdir)
        self.num_classes = len(classes)
        self.class_indices = dict(zip(classes, range(len(classes))))

        # count files of all classes in parallel on a thread pool
        pool = multiprocessing.pool.ThreadPool()
        function_partial = partial(_count_valid_files_in_directory,
                                   white_list_formats=white_list_formats,
                                   follow_links=follow_links,
                                   split=split)
        self.samples = sum(pool.map(function_partial,
                                    (os.path.join(directory, subdir)
                                     for subdir in classes)))

        print('!!!!!!!Found %d images belonging to %d classes.' % (self.samples, self.num_classes))

        # second, build an index of the images in the different class subfolders
        results = []
        self.filenames = []
        self.classes = np.zeros((self.samples,), dtype='int32')
        i = 0
        for dirpath in (os.path.join(directory, subdir) for subdir in classes):
            results.append(pool.apply_async(_list_valid_filenames_in_directory,
                                            (dirpath, white_list_formats, split,
                                             self.class_indices, follow_links)))
        """
        author: rikichou 2018,5,1 21:03:31
        record the index range of each class
        """
        # Start Code
        classes_range = []
        # End Code
        for res in results:
            # NOTE: this rebinds the loop-local name `classes` (previously the
            # class-name list) to the per-directory label array, exactly as the
            # upstream Keras code does.
            classes, filenames = res.get()
            """
            author: rikichou 2018,5,1 21:04:01
            """
            # Start Code
            # inclusive [start, end] index range of this class's files
            start = i
            end = i + len(classes) - 1
            classes_range.append({'start':start, 'end':end})
            # End Code
            self.classes[i:i + len(classes)] = classes
            self.filenames += filenames
            i += len(classes)
        """
        author: rikichou
        """
        # Start Code
        self.classes_range = classes_range
        # End Code

        pool.close()
        pool.join()
        super(MergeDirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)

    def _get_batches_of_transformed_samples(self, index_array):
        batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            fname = self.filenames[j]
            img = load_img(os.path.join(self.directory, fname),
                           grayscale=grayscale,
                           target_size=self.target_size,
                           interpolation=self.interpolation)
            # with 80% probability, paste half of another same-class image over
            # this one before the standard augmentation pipeline runs
            if np.random.rand() < 0.8:
                """
                author: rikichou 2018,5,1 21:03:31
                before random transform and standardize
                """
                # Start Code
                """ 1. which image to merge with """
                class_merge = self.classes[j]
                class_range = self.classes_range[class_merge]
                # BUG FIX: np.random.randint's upper bound is exclusive, so the
                # original could never select the last image of a class and
                # raised ValueError for a class containing a single image.
                # `end` is an inclusive index, so sample from [start, end].
                target_index = np.random.randint(class_range['start'], class_range['end'] + 1)
                """ 2. load target image """
                target_name = self.filenames[target_index]
                target_img = load_img(os.path.join(self.directory, target_name),
                                      grayscale=grayscale,
                                      target_size=self.target_size,
                                      interpolation=self.interpolation)
                """ 3. get the target image's start and end coordinate """
                # randomly take either the bottom half or the right half
                if np.random.rand() < 0.5:
                    start_x = 0
                    start_y = self.target_size[0]//2
                    end_x = self.target_size[1]
                    end_y = self.target_size[0]
                else:
                    start_x = self.target_size[1]//2
                    start_y = 0
                    end_x = self.target_size[1]
                    end_y = self.target_size[0]
                from_copy = (start_x, start_y, end_x, end_y)
                to_paste = (start_x, start_y)
                """ 4. copy and paste """
                region = target_img.crop(from_copy)
                img.paste(region, to_paste)
                #EndCode
            x = img_to_array(img, data_format=self.data_format)
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                                  index=j,
                                                                  hash=np.random.randint(1e7),
                                                                  format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'input':
            batch_y = batch_x.copy()
        elif self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(K.floatx())
        elif self.class_mode == 'categorical':
            batch_y = np.zeros((len(batch_x), self.num_classes), dtype=K.floatx())
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            return batch_x
        return batch_x, batch_y

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
# -
def fixed_data_generator(train_dir, valid_dir, test_dir, image_size):
    """
    Build the train / validation / test generators, applying the merge-based
    augmentation to the training data only.

    image_size: the output image size, like (224, 224)

    Returns (train_generator, valid_generator, test_generator); the test
    generator yields images only (class_mode=None).
    """
    # Training data: same-class merged images plus mild geometric jitter.
    augmenting_gen = MergeImageDataGenerator(rotation_range=10.,
                                             width_shift_range=0.05,
                                             height_shift_range=0.05,
                                             shear_range=0.1,
                                             zoom_range=0.1)
    # Validation and test data must stay untouched: plain generators.
    plain_valid_gen = ImageDataGenerator()
    plain_test_gen = ImageDataGenerator()

    # `classes` is the module-level list of class names defined earlier.
    train_generator = augmenting_gen.flow_from_directory(
        train_dir, image_size, color_mode='rgb',
        classes=classes, class_mode='categorical', shuffle=True, batch_size=32)
    valid_generator = plain_valid_gen.flow_from_directory(
        valid_dir, image_size, color_mode='rgb',
        classes=classes, class_mode='categorical', shuffle=False, batch_size=32)
    test_generator = plain_test_gen.flow_from_directory(
        test_dir, image_size, color_mode='rgb',
        class_mode=None, shuffle=False, batch_size=32)
    return train_generator, valid_generator, test_generator
# +
from keras.applications import xception
from keras import optimizers
from keras.callbacks import ModelCheckpoint
# Driver IDs held out as the validation fold (split by driver, not by state,
# per the notes at the end of this notebook).
xcp_drivers_id_list = [['p016', 'p072', 'p026', 'p066', 'p075']]
xcp_image_size = (299, 299)
xcp_input_shape = (299, 299, 3)
xcp_csv_names_list = []
# Only one fold is trained here; the loop is kept commented out for reference.
#for i in range(2):
i = 0
print ('------------------------------------------------------------------------------------------------------------------')
""" 1. about data """
# create link and remove the old link
# NOTE(review): train_valid_split and the link_* paths are defined earlier in
# the notebook — confirm before running this cell standalone.
train_valid_split(xcp_drivers_id_list[i])
# get generator
train_generator, valid_generator, test_generator = fixed_data_generator(link_train_path, link_valid_path, test_link, xcp_image_size)
# +
""" 2. about model """
# get model
xception_model = get_model(xception.Xception, xcp_input_shape, xception.preprocess_input, 10)
# +
from keras.optimizers import Adam
# compile
# NOTE(review): the `Adam` import above is unused — the optimizer is created
# via `optimizers.Adam` below.
weights_file_name = 'xception.fixed.weights.hdf5'
# keep only the checkpoint with the best validation loss
ckpt = ModelCheckpoint(weights_file_name, verbose=1, save_best_only=True, save_weights_only=True)
# first epoch at lr = 1e-4
adam = optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
# fit
hist_1 = xception_model.fit_generator(train_generator, len(train_generator), epochs=1,\
validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
# +
# fine-tune for one more epoch with the learning rate lowered to 1e-5
adam = optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
xception_model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
hist_1 = xception_model.fit_generator(train_generator, len(train_generator), epochs=1,\
validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
# -
# one extra epoch at the same (lowered) learning rate
hist_1 = xception_model.fit_generator(train_generator, len(train_generator), epochs=1,\
validation_data=valid_generator, validation_steps=len(valid_generator), callbacks=[ckpt])
# 可以看出来,xception的单模型表现明显提升,由于时间关系我就不再训练更多的模型进行融合了,但是这肯定是一条可行的道路。
# # 7. 总结
# 通过上述的过程走下来,我们得到了一个不错的模型,在kaggle的排行榜中可以排到top 10%。该项目有多个重点,如下:
#
# * 验证集训练集划分的时候,因为图像数据的特殊性(相似性),所以需要根据司机ID来进行划分,而不应该根据司机状态
#
# * 由于同一个司机的不同状态的图像很相似,所以应该开放尽可能多的层进行训练(本项目中我没有lock任何层)
#
# * 由于是kaggle竞赛,衡量标准仅仅是softmax cross entropy,对模型预测时间没有要求,所以为了提高分数,总是可以采用模型融合的策略
#
# * 为了让模型具有更强的泛化能力,应该加入一些特殊的data augmentation。对于同一状态的图片,将随机的两张图片的左半部分和右半部分或者上半部分和下半部分组合成一张新的图片。这样做确实可以提高单模型的泛化能力,如果加上模型融合的话,分数会进一步提高
| state_farm_distracted_driver_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Sample paths of stochastic processes.
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Ornstein–Uhlenbeck process parameters.
sigma = 1.  # Standard deviation.
mu = 10.  # Mean the process reverts to.
tau = .05  # Time constant (mean-reversion speed).

dt = .001  # Time step.
T = 1.  # Total time.
n = int(T / dt)  # Number of time steps.
t = np.linspace(0., T, n)  # Vector of times.

# Constants of the Euler–Maruyama update, hoisted out of the loop.
sigma_bis = sigma * np.sqrt(2. / tau)
sqrtdt = np.sqrt(dt)

# Simulate one sample path: deterministic drift toward mu plus Gaussian noise.
x = np.zeros(n)
for step in range(1, n):
    drift = dt * (-(x[step - 1] - mu) / tau)
    diffusion = sigma_bis * sqrtdt * np.random.randn()
    x[step] = x[step - 1] + drift + diffusion
# Plot the simulated Ornstein–Uhlenbeck path against time.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(t, x, lw=2)
# Evolve many independent trials in parallel (vectorized Euler–Maruyama) and
# snapshot the empirical distribution at a few times.
ntrials = 10000
X = np.zeros(ntrials)
# We create bins for the histograms.
bins = np.linspace(-2., 14., 100)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
for i in range(n):
    # We update the process independently for
    # all trials
    X += dt * (-(X - mu) / tau) + \
        sigma_bis * sqrtdt * np.random.randn(ntrials)
    # We display the histogram for a few points in
    # time (t = 0.005, 0.05 and 0.90 given dt = 0.001)
    if i in (5, 50, 900):
        hist, _ = np.histogram(X, bins=bins)
        ax.plot((bins[1:] + bins[:-1]) / 2, hist,
                {5: '-', 50: '.', 900: '-.', }[i],
                label=f"t={i * dt:.2f}")
ax.legend()
# ### Black-Scholes Equation.
# +
# Geometric Brownian motion parameters (reuses n, dt, sqrtdt, t from above).
sigma = 0.5  # Volatility (standard deviation).
mu = 0.05  # Drift (mean).
x = np.zeros(n)
x[0] = 1
# -
# Euler–Maruyama discretization of GBM: dx = mu*x*dt + sigma*x*dW.
for i in range(n - 1):
    x[i + 1] = x[i] + dt * (mu*x[i]) + (sigma*x[i]) * sqrtdt * np.random.randn()
# Plot the single simulated GBM path.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(t, x, lw=2)

# Overlay ten independent GBM sample paths on one axis.
# NOTE: each pass overwrites x in place starting from the same x[0] = 1.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
for samples in range(10):
    for i in range(n - 1):
        x[i + 1] = x[i] + dt * (mu*x[i]) + (sigma*x[i]) * sqrtdt * np.random.randn()
    ax.plot(t, x, lw=2)
def wiener_process(x0, n):
    """Simulate one Wiener-process path of length ``n`` starting at ``x0``.

    Each step adds an independent N(0, 1/n) increment, so over the whole
    path the accumulated variance is approximately 1.
    """
    scale = np.sqrt(n)
    path = np.ones(n) * x0
    for step in range(1, n):
        # sample the Gaussian increment for this step
        increment = np.random.normal()
        path[step] = path[step - 1] + (increment / scale)
    return path
# +
x0 = 1

fig, ax = plt.subplots(1, 1, figsize=(8, 4))
# Draw 100 GBM paths from the exact (closed-form) solution
# x(t) = x0 * exp((mu - sigma^2/2) t + sigma W(t)) driven by sampled Wiener
# paths, instead of the Euler discretization used above.
for samples in range(100):
    #xa = np.zeros(n)
    W = wiener_process(x0, n)
    #for i in range(n):
    # NOTE(review): mu and sigma are the Black–Scholes values set above
    # (0.05, 0.5); wiener_process is started at x0 rather than at 0 —
    # confirm that offset is intended.
    xa = x0*np.exp((mu - sigma**2/2)*t+sigma*W)
    ax.plot(t, xa, lw=2)
# -
| Simulation/Stochastic_differential_equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transfer Learning on TPUs
#
# In the <a href="3_tf_hub_transfer_learning.ipynb">previous notebook</a>, we learned how to do transfer learning with [TensorFlow Hub](https://www.tensorflow.org/hub). In this notebook, we're going to kick up our training speed with [TPUs](https://www.tensorflow.org/guide/tpu).
#
# ## Learning Objectives
# 1. Know how to set up a [TPU strategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy?version=nightly) for training
# 2. Know how to use a TensorFlow Hub Module when training on a TPU
# 3. Know how to create and specify a TPU for training
#
# First things first. Configure the parameters below to match your own Google Cloud project details.
import os
# Replace with your own GCS bucket name; later cells read $BUCKET via shell.
os.environ["BUCKET"] = "your-bucket-here"
# ## Packaging the Model
# In order to train on a TPU, we'll need to set up a python module for training. The skeleton for this has already been built out in `tpu_models` with the data processing functions from the previous lab copied into <a href="tpu_models/trainer/util.py">util.py</a>.
#
# Similarly, the model building and training functions are pulled into <a href="tpu_models/trainer/model.py">model.py</a>. This is almost entirely the same as before, except the hub module path is now a variable to be provided by the user. We'll get into why in a bit, but first, let's take a look at the new `task.py` file.
#
# We've added five command line arguments which are standard for cloud training of a TensorFlow model: `epochs`, `steps_per_epoch`, `train_path`, `eval_path`, and `job-dir`. There are two new arguments for TPU training: `tpu_address` and `hub_path`
#
# `tpu_address` is going to be our TPU name as it appears in [Compute Engine Instances](https://console.cloud.google.com/compute/instances). We can specify this name with the [ctpu up](https://cloud.google.com/tpu/docs/ctpu-reference#up) command.
#
# `hub_path` is going to be a Google Cloud Storage path to a downloaded TensorFlow Hub module.
#
# The other big difference is some code to deploy our model on a TPU. To begin, we'll set up a [TPU Cluster Resolver](https://www.tensorflow.org/api_docs/python/tf/distribute/cluster_resolver/TPUClusterResolver), which will help tensorflow communicate with the hardware to set up workers for training ([more on TensorFlow Cluster Resolvers](https://www.tensorflow.org/api_docs/python/tf/distribute/cluster_resolver/ClusterResolver)). Once the resolver [connects to](https://www.tensorflow.org/api_docs/python/tf/config/experimental_connect_to_cluster) and [initializes](https://www.tensorflow.org/api_docs/python/tf/tpu/experimental/initialize_tpu_system) the TPU system, our Tensorflow Graphs can be initialized within a [TPU distribution strategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy), allowing our TensorFlow code to take full advantage of the TPU hardware capabilities.
# **TODO**: Complete the code below to setup the `resolver` and define the TPU training strategy.
# +
# %%writefile tpu_models/trainer/task.py
import argparse
import json
import os
import sys
import tensorflow as tf
from . import model
from . import util
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--epochs',
help='The number of epochs to train',
type=int, default=5)
parser.add_argument(
'--steps_per_epoch',
help='The number of steps per epoch to train',
type=int, default=500)
parser.add_argument(
'--train_path',
help='The path to the training data',
type=str, default="gs://cloud-ml-data/img/flower_photos/train_set.csv")
parser.add_argument(
'--eval_path',
help='The path to the evaluation data',
type=str, default="gs://cloud-ml-data/img/flower_photos/eval_set.csv")
parser.add_argument(
'--tpu_address',
help='The path to the evaluation data',
type=str, required=True)
parser.add_argument(
'--hub_path',
help='The path to TF Hub module to use in GCS',
type=str, required=True)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str, required=True)
return parser.parse_known_args(argv)
def main():
    """Parses command line arguments and kicks off model training.

    NOTE(review): the two TODO lines below are intentionally left blank for
    the learner (this is a lab notebook); as written, this function — and the
    module — will not run until they are filled in with a TPU cluster
    resolver and a TPU distribution strategy.
    """
    args = _parse_arguments(sys.argv[1:])[0]

    # TODO: define a TPU strategy
    resolver = # TODO: Your code goes here
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = # TODO: Your code goes here

    # Build the datasets and the model inside the strategy scope so variables
    # are placed on the TPU workers.
    with strategy.scope():
        train_data = util.load_dataset(args.train_path)
        eval_data = util.load_dataset(args.eval_path, training=False)
        image_model = model.build_model(args.job_dir, args.hub_path)
        model_history = model.train_and_evaluate(
            image_model, args.epochs, args.steps_per_epoch,
            train_data, eval_data, args.job_dir)


if __name__ == '__main__':
    main()
# -
# ## The TPU server
# Before we can start training with this code, we need a way to pull in [MobileNet](https://tfhub.dev/google/imagenet/mobilenet_v2_035_224/feature_vector/4). When working with TPUs in the cloud, the TPU will [not have access to the VM's local file directory](https://cloud.google.com/tpu/docs/troubleshooting#cannot_use_local_filesystem) since the TPU worker acts as a server. Because of this **all data used by our model must be hosted on an outside storage system** such as Google Cloud Storage. This makes [caching](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) our dataset especially critical in order to speed up training time.
#
# To access MobileNet with these restrictions, we can download a compressed [saved version](https://www.tensorflow.org/hub/tf2_saved_model) of the model by using the [wget](https://www.gnu.org/software/wget/manual/wget.html) command. Adding `?tf-hub-format=compressed` at the end of our module handle gives us a download URL.
# !wget https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4?tf-hub-format=compressed
# This model is still compressed, so lets uncompress it with the `tar` command below and place it in our `tpu_models` directory.
# + language="bash"
# rm -r tpu_models/hub
# mkdir tpu_models/hub
# tar xvzf 4?tf-hub-format=compressed -C tpu_models/hub/
# -
# Finally, we need to transfer our materials to the TPU. We'll use GCS as a go-between, using [gsutil cp](https://cloud.google.com/storage/docs/gsutil/commands/cp) to copy everything.
# !gsutil rm -r gs://$BUCKET/tpu_models
# !gsutil cp -r tpu_models gs://$BUCKET/tpu_models
# ## Spinning up a TPU
# Time to wake up a TPU! Open the [Google Cloud Shell](https://console.cloud.google.com/home/dashboard?cloudshell=true) and copy the [ctpu up](https://cloud.google.com/tpu/docs/ctpu-reference#up) command below. Say 'Yes' to the prompts to spin up the TPU.
#
# `ctpu up --zone=us-central1-b --tf-version=2.1 --name=my-tpu`
#
# It will take about five minutes to wake up. Then, it should automatically SSH into the TPU, but alternatively [Compute Engine Interface](https://console.cloud.google.com/compute/instances) can be used to SSH in. You'll know you're running on a TPU when the command line starts with `your-username@your-tpu-name`.
#
# This is a fresh TPU and still needs our code. Run the below cell and copy the output into your TPU terminal to copy your model from your GCS bucket. Don't forget to include the `.` at the end as it tells gsutil to copy data into the current directory.
# !echo "gsutil cp -r gs://$BUCKET/tpu_models ."
# Time to shine, TPU! Run the below cell and copy the output into your TPU terminal. Training will be slow at first, but it will pick up speed after a few minutes once the Tensorflow graph has been built out.
# **TODO**: Complete the code below by adding flags for `tpu_address` and the `hub_path`. Have another look at `task.py` to see how these flags are used. The `tpu_address` denotes the TPU you created above and `hub_path` should denote the location of the TFHub module.
# !echo "python3 -m tpu_models.trainer.task
# TODO: Your code goes here
# TODO: Your code goes here
--job-dir=gs://$BUCKET/flowers_tpu_$(date -u +%y%m%d_%H%M%S)"
# How did it go? In the previous lab, it took about 2-3 minutes to get through 25 images. On the TPU, it took 5-6 minutes to get through 2500. That's more than 40x faster! And now our accuracy is over 90%! Congratulations!
#
# Time to pack up shop. Run `exit` in the TPU terminal to close the SSH connection, and `ctpu delete --zone=us-central1-b --name=my-tpu` in the [Cloud Shell](https://console.cloud.google.com/home/dashboard?cloudshell=true) to delete the TPU instance. Alternatively, it can be deleted through the [Compute Engine Interface](https://console.cloud.google.com/compute/instances).
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive2/image_classification/labs/4_tpu_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # APEX Demo
#
# ## Develop and Deploy
#
# Load environment variables:
#
# Load the demo environment variables (PROJECT_ROOT, APP_NAME, FN_NAME,
# SUBNET_OCID) into the current shell.
. demo-apex.env

# Create the Fn build directory for the app and move into it.
mkdir -p $PROJECT_ROOT/build/$APP_NAME && \
cd $PROJECT_ROOT/build/$APP_NAME

# Copy the sample function code into the Fn project directory.
cp -r $PROJECT_ROOT/sample-code/$APP_NAME/$FN_NAME $PROJECT_ROOT/build/$APP_NAME/

# Create the Fn application on the subnet given by SUBNET_OCID.
# NOTE(review): fails if an application with this name already exists.
fn create app $APP_NAME --annotation oracle.com/oci/subnetIds=[\"$SUBNET_OCID\"]

# Build and deploy the function into the application (verbose).
# + tags=[]
cd $PROJECT_ROOT/build/$APP_NAME/$FN_NAME && \
fn deploy -v --app $APP_NAME
# -

# Smoke-test: invoke the deployed function with a sample JSON payload.
echo "{ \"first_addend\": 215, \"second_addend\": 751 }" | fn invoke calculator add
| 04-apex-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Best Model Selection
# Now that we have trained all the models, let's select the one we'll use for the application. We'll need to get the performance metrics generated in every model:
import pickle
import pandas as pd
# +
# Directory containing one pickled metrics DataFrame per trained model.
path_pickles = "/home/lnc/0. Latest News Classifier/04. Model Training/Models/"

list_pickles = [
    "df_models_gbc.pickle",
    "df_models_knnc.pickle",
    "df_models_lrc.pickle",
    "df_models_mnbc.pickle",
    "df_models_rfc.pickle",
    "df_models_svc.pickle"
]

# Load every model's metrics DataFrame and concatenate them into one summary.
# DataFrame.append was deprecated and removed in pandas 2.0; collecting the
# frames and calling pd.concat(ignore_index=True) is the supported equivalent
# (it also replaces the reset_index().drop('index', ...) dance).
frames = []
for pickle_ in list_pickles:
    path = path_pickles + pickle_
    with open(path, 'rb') as data:
        frames.append(pickle.load(data))

df_summary = pd.concat(frames, ignore_index=True)
# -
# Let's see the summary:
df_summary
# And sort it by **Test Set Accuracy**:
df_summary.sort_values('Test Set Accuracy', ascending=False)
# The Gradient Boosting, Logistic Regression and Random Forest seem to be overfit, so we'll discard them. From the remaining models, we will choose the **Support Vector Machine** since it has the highest Test Set Accuracy.
| 04.Model-Training/12. Best Model Selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Finding Synonyms and Analogies
# :label:`sec_synonyms`
#
# In :numref:`sec_word2vec_pretraining` we trained a word2vec word embedding model
# on a small-scale dataset and searched for synonyms using the cosine similarity
# of word vectors. In practice, word vectors pretrained on a large-scale corpus
# can often be applied to downstream natural language processing tasks. This
# section will demonstrate how to use these pretrained word vectors to find
# synonyms and analogies. We will continue to apply pretrained word vectors in
# subsequent sections.
#
# + origin_pos=1 tab=["mxnet"]
import os
from mxnet import np, npx
from d2l import mxnet as d2l
npx.set_np()
# + [markdown] origin_pos=3
# ## Using Pretrained Word Vectors
#
# Below lists pretrained GloVe embeddings of dimensions 50, 100, and 300,
# which can be downloaded from the [GloVe website](https://nlp.stanford.edu/projects/glove/).
# The pretrained fastText embeddings are available in multiple languages.
# Here we consider one English version (300-dimensional "wiki.en") that can be downloaded from the
# [fastText website](https://fasttext.cc/).
#
# + origin_pos=4 tab=["mxnet"]
#@save
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
'0b8703943ccdb6eb788e6f091b8946e82231bc4d')
#@save
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
#@save
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
'b5116e234e9eb9076672cfeabf5469f3eec904fa')
#@save
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
'c1816da3821ae9f43899be655002f6c723e91b88')
# + [markdown] origin_pos=5
# We define the following `TokenEmbedding` class to load the above pretrained Glove and fastText embeddings.
#
# + origin_pos=6 tab=["mxnet"]
#@save
class TokenEmbedding:
    """Token embedding loaded from a pretrained GloVe/fastText archive.

    Index 0 is reserved for the '<unk>' token, whose vector is all zeros;
    unknown tokens are mapped to it on lookup.
    """
    def __init__(self, embedding_name):
        # embedding_name is a d2l.DATA_HUB key, e.g. 'glove.6b.50d'.
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name)
        self.unknown_idx = 0
        # Reverse mapping for O(1) token -> row-index lookup.
        self.token_to_idx = {
            token: idx for idx, token in enumerate(self.idx_to_token)}
    def _load_embedding(self, embedding_name):
        """Download/extract the archive and parse 'vec.txt' into a token list
        and an (n_tokens, dim) vector matrix, with '<unk>' prepended at row 0."""
        idx_to_token, idx_to_vec = ['<unk>'], []
        data_dir = d2l.download_extract(embedding_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        # Each line is: token followed by its space-separated float components.
        with open(os.path.join(data_dir, 'vec.txt'), 'r') as f:
            for line in f:
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header information, such as the top row in fastText
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # Prepend the zero vector for '<unk>' (same dimension as the rest).
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, np.array(idx_to_vec)
    def __getitem__(self, tokens):
        """Return the stacked vectors for an iterable of tokens; unknown
        tokens map to the zero '<unk>' vector."""
        indices = [
            self.token_to_idx.get(token, self.unknown_idx)
            for token in tokens]
        vecs = self.idx_to_vec[np.array(indices)]
        return vecs
    def __len__(self):
        # Vocabulary size, including the '<unk>' token.
        return len(self.idx_to_token)
# + [markdown] origin_pos=7
# Next, we use 50-dimensional GloVe embeddings pretrained on a subset of the Wikipedia. The corresponding word embedding is automatically downloaded the first time we create a pretrained word embedding instance.
#
# + origin_pos=8 tab=["mxnet"]
glove_6b50d = TokenEmbedding('glove.6b.50d')
# + [markdown] origin_pos=9
# Output the dictionary size. The dictionary contains $400,000$ words and a special unknown token.
#
# + origin_pos=10 tab=["mxnet"]
len(glove_6b50d)
# + [markdown] origin_pos=11
# We can use a word to get its index in the dictionary, or we can get the word from its index.
#
# + origin_pos=12 tab=["mxnet"]
glove_6b50d.token_to_idx['beautiful'], glove_6b50d.idx_to_token[3367]
# + [markdown] origin_pos=13
# ## Applying Pretrained Word Vectors
#
# Below, we demonstrate the application of pretrained word vectors, using GloVe as an example.
#
# ### Finding Synonyms
#
# Here, we re-implement the algorithm used to search for synonyms by cosine
# similarity introduced in :numref:`sec_word2vec`
#
# In order to reuse the logic for seeking the $k$ nearest neighbors when
# seeking analogies, we encapsulate this part of the logic separately in the `knn`
# ($k$-nearest neighbors) function.
#
# + origin_pos=14 tab=["mxnet"]
def knn(W, x, k):
    """Return (indices, similarities) of the k rows of W with the highest
    cosine similarity to the query vector x."""
    query = x.reshape(-1,)
    # 1e-9 keeps the division stable for (near-)zero rows.
    row_norms = np.sqrt(np.sum(W * W, axis=1) + 1e-9)
    query_norm = np.sqrt((x * x).sum())
    cos = np.dot(W, query) / (row_norms * query_norm)
    topk = npx.topk(cos, k=k, ret_typ='indices')
    return topk, [cos[int(i)] for i in topk]
# + [markdown] origin_pos=16
# Then, we define a function that searches for synonyms using a pretrained word vector instance `embed`.
#
# + origin_pos=17 tab=["mxnet"]
def get_similar_tokens(query_token, k, embed):
    """Print the k tokens most similar to query_token by cosine similarity."""
    # Request k + 1 neighbors because the nearest neighbor is the query itself.
    topk, cos = knn(embed.idx_to_vec, embed[[query_token]], k + 1)
    # Drop index 0 (the query token) before reporting.
    for i, c in zip(topk[1:], cos[1:]):
        print(f'cosine sim={float(c):.3f}: {embed.idx_to_token[int(i)]}')
# + [markdown] origin_pos=18
# The dictionary of pretrained word vector instance `glove_6b50d` already created contains 400,000 words and a special unknown token. Excluding input words and unknown words, we search for the three words that are the most similar in meaning to "chip".
#
# + origin_pos=19 tab=["mxnet"]
get_similar_tokens('chip', 3, glove_6b50d)
# + [markdown] origin_pos=20
# Next, we search for the synonyms of "baby" and "beautiful".
#
# + origin_pos=21 tab=["mxnet"]
get_similar_tokens('baby', 3, glove_6b50d)
# + origin_pos=22 tab=["mxnet"]
get_similar_tokens('beautiful', 3, glove_6b50d)
# + [markdown] origin_pos=23
# ### Finding Analogies
#
# In addition to seeking synonyms, we can also use the pretrained word vector to seek the analogies between words. For example, “man”:“woman”::“son”:“daughter” is an example of analogy, “man” is to “woman” as “son” is to “daughter”. The problem of seeking analogies can be defined as follows: for four words in the analogical relationship $a : b :: c : d$, given the first three words, $a$, $b$ and $c$, we want to find $d$. Assume the word vector for the word $w$ is $\text{vec}(w)$. To solve the analogy problem, we need to find the word vector that is most similar to the result vector of $\text{vec}(c)+\text{vec}(b)-\text{vec}(a)$.
#
# + origin_pos=24 tab=["mxnet"]
def get_analogy(token_a, token_b, token_c, embed):
    """Complete the analogy a : b :: c : ?, returning the token whose vector
    is closest to vec(b) - vec(a) + vec(c)."""
    vecs = embed[[token_a, token_b, token_c]]
    query = vecs[1] - vecs[0] + vecs[2]
    indices, _ = knn(embed.idx_to_vec, query, 1)
    return embed.idx_to_token[int(indices[0])]
# + [markdown] origin_pos=25
# Verify the "male-female" analogy.
#
# + origin_pos=26 tab=["mxnet"]
get_analogy('man', 'woman', 'son', glove_6b50d)
# + [markdown] origin_pos=27
# “Capital-country” analogy: "beijing" is to "china" as "tokyo" is to what? The answer should be "japan".
#
# + origin_pos=28 tab=["mxnet"]
get_analogy('beijing', 'china', 'tokyo', glove_6b50d)
# + [markdown] origin_pos=29
# "Adjective-superlative adjective" analogy: "bad" is to "worst" as "big" is to what? The answer should be "biggest".
#
# + origin_pos=30 tab=["mxnet"]
get_analogy('bad', 'worst', 'big', glove_6b50d)
# + [markdown] origin_pos=31
# "Present tense verb-past tense verb" analogy: "do" is to "did" as "go" is to what? The answer should be "went".
#
# + origin_pos=32 tab=["mxnet"]
get_analogy('do', 'did', 'go', glove_6b50d)
# + [markdown] origin_pos=33
# ## Summary
#
# * Word vectors pre-trained on a large-scale corpus can often be applied to downstream natural language processing tasks.
# * We can use pre-trained word vectors to seek synonyms and analogies.
#
#
# ## Exercises
#
# 1. Test the fastText results using `TokenEmbedding('wiki.en')`.
# 1. If the dictionary is extremely large, how can we accelerate finding synonyms and analogies?
#
# + [markdown] origin_pos=34 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/387)
#
| scripts/d21-en/mxnet/chapter_natural-language-processing-pretraining/similarity-analogy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('..')
from src.analyse_utils_pytorch import plot_history
from src.resnet_pytorch import ResNet
from src.model_utils_pytorch import train_model, save_model
from torch.optim import Adam
# Instantiate the project's ResNet and train it for 800 epochs with Adam.
# NOTE(review): ResNet/train_model come from src/* (not visible here);
# the unpacking shows train_model returns (trained_model, history).
model = ResNet()
model, history = train_model(model=model, optimizer=Adam(model.parameters(), lr=1e-4), epochs=800)
| code/experiment/.ipynb_checkpoints/AsianResnetPytorch-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
"""
Alignes images and pads them to get a cuboidal stack
Requires opencv3, rh-logger, rh-config as well"""
import aligner
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Set parameters to run Affine Alignment.
# imgs_dir: h5 file contaning
# conf_fname: file contaning hyperparameter values for feature detector and matcher.
imgs_dir = 'imgs_small.h5'
conf_fname = 'conf.yaml'
process_num = 8
# +
# Except for the helper function, we simply use Adi's code
_, imgs, transforms = aligner.get_transforms(imgs_dir, conf_fname, process_num)
# -
print("imgs.shape: ", imgs.shape)
print("transforms.shape: ", transforms.shape)
# +
import padding
# Offset each transform and pad each image so the aligned stack shares one
# non-negative coordinate frame (see the explanation string below).
n_imgs, nTransforms = padding.compute_padding(imgs, transforms)
'''
If we apply transforms[i] on imgs[i], we get the aligned image.
But some pixels fall in the negative coordinate space. Some of them are are mapped
to locations beyond the height/width of imgs[i].
To overcome this, we compute minimum and maximum possible coordinate any image of the stack is
is mapped on and add an offset to the transforms[i] to get ntrasnforms[i].
Applying ntrasnforms[i] on imgs[i]
'''
print("n_imgs.shape: ", n_imgs.shape)
print("nTransforms.shape: ", nTransforms.shape)
# -
""" Show original image"""
plt.imshow(imgs[0], cmap='gray')
""" Shoe transformed image"""
plt.imshow(n_imgs[0], cmap='gray')
plt.imshow(n_imgs[-1], cmap='gray')
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from platform import python_version
from typing import List
from collections import defaultdict
print(python_version())
# """
# On Twitter, the algorithmic timeline is sorted such that the tweet ranked higher has a higher likelihood of getting user engagement so as to maximize overall engagement in the app.
#
# The authors of tweets provide one of the key signals in determining users’ likelihood of engaging with a particular tweet. For example, <NAME> tends to generally like tweets authored by <NAME> -- so serving Biz’s tweets to Jack would ensure higher engagement. At the same time, we also have to make sure we are serving a more “diverse” timeline -- showing tweets for different authors -- to improve user experience.
#
# The question is: Given a list of tweets sorted by scores descendingly with their corresponding scores and authors, transform the list such that consecutive tweets cannot be from the same author whenever possible. Always prefer the author whose tweets have the highest score if there are multiple possible authors to be considered.
#
# Conditions:
# 0.0 < Score <= 1.0
# 0 < N (number of tweets) <= 1000
# 0 < K (number of distinct authors) <= 100
#
# Example IO
#
# Each tuple (score, authorId) represents a tweet. Input is a list of tweets with authors ranked in some initial ordering. The output is a list of tweets such that tweets by the same author are not together.
#
# Example 1
# Input: rankedTweetList = [(.6, "A"), (.5, "A"), (.4, "B"), (.3, "B"), (.2, "C"), (.1, "C")]
# Output:rankedTweetListAfterDiversity = [(.6, "A"), (.4, "B"), (.5, "A"), (.3, "B"), (.2, "C"), (.1, "C")]
#
# output = []
# dict = {"A":[0.6, 0.4], "B": []}
#
# Time Complexity
# N - number of tweets, K - number of authors
# - create dict - O(N)
# - loop the list - O(N)
# - first element is the largest O(1)
#
# - build a dictionary which key = author, value = tweet scores -> O(N)
# - loop the list
# -- find the *largest value* for each of the user AND tweet from a different author - O(K)
#
# - output = [A, B]
#
# O(N + N*K)
#
#
# input: AABBCC
#
# Example 2
# Input: rankedTweetList = [(.5, "A"), (.4, "A"), (.3, "A"), (.2, "B"), (.1, "A")]
# Output: rankedTweetListAfterDiversity = [(.5, "A"), (.2, "B"), (.4, "A"), (.3, "A"), (.1, "A")]
#
#
# [(0.9, "A"), (0.8, "A"), (0.2, "B"), (0.1, "C")] - > [(0.9, "A"), (0.2, "B"), (0.8, "A"), (0.1, "C")]
#
# """
#
def re_ranking(rankedTweetList: List[tuple]) -> List[tuple]:
    """Reorder tweets (already sorted by score, descending) so consecutive
    tweets are by different authors whenever possible.

    Among eligible authors, the one whose best remaining tweet has the
    highest score is always chosen next.

    Args:
        rankedTweetList: list of (score, author) tuples, sorted by score desc.

    Returns:
        (output, leftover): the re-ranked list of (score, author) tuples and
        the leftover score dictionary (empty when all tweets are placed).

    Fixes over the original version:
      * raised IndexError when the seeded author's list became empty but was
        not deleted before the loop (e.g. single-tweet input);
      * raised ValueError (max of empty dict) on empty input;
      * the first entry was a list while the rest were tuples;
      * max() compared whole score lists instead of each author's best
        remaining score, making tie-breaks depend on list length.
    """
    if not rankedTweetList:
        return [], {}

    # 1. Group scores by author; input order keeps each author's list
    #    sorted in descending score order, so index 0 is their best tweet.
    lookup = defaultdict(list)
    for score, author in rankedTweetList:
        lookup[author].append(score)

    def take_best(author):
        """Pop the author's best remaining score; drop exhausted authors."""
        score = lookup[author].pop(0)
        if not lookup[author]:
            del lookup[author]
        return (score, author)

    # 2. Seed the output with the single best tweet overall.
    output = [take_best(max(lookup, key=lambda a: lookup[a][0]))]

    # 3. Repeatedly pick the best tweet from any author other than the one
    #    just used; fall back to the same author only when no alternative
    #    remains ("whenever possible" clause).
    while lookup:
        if len(lookup) > 1:
            candidates = [a for a in lookup if a != output[-1][1]]
        else:
            candidates = list(lookup)
        output.append(take_best(max(candidates, key=lambda a: lookup[a][0])))

    return output, lookup
re_ranking([(.6, "A"), (.5, "A"), (.4, "B"), (.3, "B"), (.2, "C"), (.1, "C")])
re_ranking([(.9, "A"), (.8, "A"), (.2, "B"), (.1, "C")])
re_ranking([(.5, "A"), (.4, "A"), (.3, "A"), (.2, "B"), (.1, "A")])
| notebooks/interviews/Twitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## callbacks.mem
# Memory profiling callbacks.
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.callbacks.mem import *
# + hide_input=true
show_doc(PeakMemMetric)
# -
# [`PeakMemMetric`](/callbacks.mem.html#PeakMemMetric) is a memory profiling callback.
#
# Here is how you can use it:
#
# ```
# learn = cnn_learner(data, model, metrics=[accuracy], callback_fns=PeakMemMetric)
# learn.fit_one_cycle(3, max_lr=1e-2)
# ```
#
# and a sample output:
# ```
# Total time: 00:59
# epoch train_loss valid_loss accuracy cpu used peak gpu used peak
# 1 0.325806 0.070334 0.978800 0 2 80 6220
# 2 0.093147 0.038905 0.987700 0 2 2 914
# 3 0.047818 0.027617 0.990600 0 2 0 912
# ```
#
# The last four columns are deltas memory usage for CPU and GPU (in MBs).
#
# * The "used memory" columns show the difference between memory usage before and after each epoch.
# * The "peaked memory" columns how much memory overhead the epoch used on top of used memory. With the rare exception of gpu measurements, where if "used memory" delta is negative, then it's calculated as a straight difference between the peak memory and the used memory at the beginning of the epoch. Also see
#
# For example in the first row of the above sample example it shows `used=80`, `peak=6220`. It means that during the execution of this thread the application used a maximum of 6300 MBs (`80+6220`), but then most of that memory was released, keeping only 80 MBs tied up. You can then see in the following epochs that while the application still uses temporary memory while execution, but it releases almost all of it at the end of its work.
#
# Also, it's very important to know that pytorch's memory allocator can work with less memory, so it doesn't mean that it needs 6300 MB to be able to run the first epoch. It will do with less, but it will just be slightly slower on the first epoch. For more details please see [this explanation](dev/gpu.html#peak-memory-usage).
#
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
| docs_src/callbacks.mem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supercritical Steam Cycle Example
#
# This example uses Jupyter Lab or Jupyter notebook, and demonstrates a supercritical pulverized coal (SCPC) steam cycle model. See the ```supercritical_steam_cycle.py``` to see more information on how to assemble a power plant model flowsheet. Code comments in that file will guide you through the process.
# ## Model Description
#
# The example model doesn't represent any particular power plant, but should be a reasonable approximation of a typical plant. The gross power output is about 620 MW. The process flow diagram (PFD) can be shown using the code below. The initial PFD contains spaces for model results, to be filled in later.
#
# To get a more detailed look at the model structure, you may find it useful to review ```supercritical_steam_cycle.py``` first. Although there is no detailed boiler model, there are constraints in the model to complete the steam loop through the boiler and calculate boiler heat input to the steam cycle. The efficiency calculation for the steam cycle doesn't account for heat loss in the boiler, which would be a result of a more detailed boiler model.
# +
# pkg_resources is used here to get the svg information from the
# installed IDAES package
import pkg_resources
from IPython.display import SVG, display
# Get the contents of the PFD (which is an svg file)
init_pfd = pkg_resources.resource_string(
"idaes.power_generation.flowsheets.supercritical_steam_cycle",
"supercritical_steam_cycle.svg"
)
# Make the svg contents into an SVG object and display it.
display(SVG(init_pfd))
# -
# ## Initialize the steam cycle flowsheet
#
# This example is part of the ```idaes``` package, which you should have installed. To run the example, the example flowsheet is imported from the ```idaes``` package. When you write your own model, you can import and run it in whatever way is appropriate for you. The Pyomo environment is also imported as ```pyo```, providing easy access to Pyomo functions and classes.
#
# The supercritical flowsheet example main function returns a Pyomo concrete mode (m) and a solver object (solver). The model is also initialized by the ```main()``` function.
# +
import pyomo.environ as pyo
from idaes.power_generation.flowsheets.supercritical_steam_cycle import (
main,
create_stream_table_dataframe,
pfd_result,
)
m, solver = main()
# -
# Inside the model, there is a subblock ```fs```. This is an IDAES flowsheet model, which contains the supercritical steam cycle model. In the flowsheet, the model called ```turb``` is a multistage turbine model. The turbine model contains an expression for total power, ```power```. In this case the model is steady-state, but all IDAES models allow for dynamic simulation, and contain time indexes. Power is indexed by time, and only the "0" time point exists. By convention, in the IDAES framework, power going into a model is positive, so power produced by the turbine is negative.
#
# The property package used for this model uses SI (mks) units of measure, so the power is in Watts. Here a function is defined which can be used to report power output in MW.
# +
# Define a function to report gross power output in MW
def gross_power_mw(model):
    """Gross electric power produced by the turbine, in MW.

    model.fs.turb.power[0] is the power consumed in Watts (IDAES sign
    convention: power into a model is positive), so production is its
    negation; dividing by 1e6 converts W to MW.
    """
    watts_consumed = pyo.value(model.fs.turb.power[0])
    return -watts_consumed / 1e6
# Show the gross power
gross_power_mw(m)
# -
# ## Change the model inputs
#
# The turbine in this example simulates partial arc admission with four arcs, so there are four throttle valves. For this example, we will close one of the valves to 25% open, and observe the result.
m.fs.turb.throttle_valve[1].valve_opening[:].value = 0.25
# Next, we re-solve the model using the solver created by the ```supercritical_steam_cycle.py``` script.
solver.solve(m, tee=True)
# Now we can check the gross power output again.
gross_power_mw(m)
# ## Creating a PFD with results and a stream table
#
# A more detailed look at the model results can be obtained by creating a stream table and putting key results on the PFD. Of course, any unit model or stream result can be obtained from the model.
# +
# Create a Pandas dataframe with stream results
df = create_stream_table_dataframe(streams=m._streams, orient="index")
# Create a new PFD with simulation results
res_pfd = pfd_result(m, df, svg=init_pfd)
# -
# Display PFD with results.
display(SVG(res_pfd))
# Display the stream table.
df
| idaes/power_generation/flowsheets/supercritical_steam_cycle/supercritical_steam_cycle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Reading XRD data
# +
from XRDXRFutils import DataXRD
from numpy import arange,linspace
from matplotlib.pyplot import plot,subplots,xlim,ylim,hlines,vlines,xlabel,ylabel,imshow,figure
# -
data = DataXRD()
path = '/home/zdenek/Projects/pyMaXRDXRF/M491/ProfiloXRD/'
data.calibrate_from_file('/home/zdenek/Projects/pyMaXRDXRF/M491/ProfiloXRD/calibration.ini')
data.calibration.fce
data.calibration.plot()
# %%time
data.read_params(path + 'Scanning_Parameters.txt').read(path)
data.params
data.save_h5()
data.load_h5(path + 'xrd.h5')
# Per-pixel summary maps of the scan, collapsing the last axis of data.data
# (the spectral/angle axis, per the plot of data.data[50,50] vs data.x below):
# mean, std, min and max counts at each scan position.
fig,ax = subplots(2,2,figsize=(15,9))
ax[0,0].imshow(data.data.mean(-1))
ax[0,1].imshow(data.data.std(-1))
ax[1,0].imshow(data.data.min(-1))
ax[1,1].imshow(data.data.max(-1))
# +
figure(figsize=(12,4))
plot(data.x,data.data[50,50])
xlim(data.x[0],data.x[-1])
xlabel(r'angle $\theta [^\circ]$')
ylabel(r'counts')
| read_data_XRD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Run script to set up environment pip install packages*
# + tags=[]
# # !pip install --upgrade azureml-sdk azureml-widgets
# -
# # Automated ML
#
# TODO: Import Dependencies. In the cell below, import all the dependencies that you will need to complete the project.
# + gather={"logged": 1598423888013} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.core.dataset import Dataset
from azureml.pipeline.steps import AutoMLStep
# -
# ## Initialise the workspace
# have to initialise first in order to get and upload the data if not already created
ws = Workspace.from_config()
# ## Dataset
# +
# Try to load the dataset from the Workspace. Otherwise, create it from the file
# NOTE: update the key to match the dataset name
# Load the dataset from the Workspace if it is already registered; otherwise
# upload the local CSV extract and register it.
# NOTE: update `key` to match the dataset name.
found = False
key = "job-leaver-aug-small"
local_path = './data_blob_upload/job_leaver_aug_small.csv'
target_path = "job-leaver-aug-small-automl-data"
description_text = "This is an extract from the original Kaggle source: https://www.kaggle.com/arashnic/hr-analytics-job-change-of-data-scientists."

if key in ws.datasets.keys():
    found = True
    dataset = ws.datasets[key]
    print("dataset found")

if not found:
    # BUG FIX: pandas was only imported much further down this notebook, so
    # a fresh run raised NameError here; import it locally instead.
    import pandas as pd
    # Importing into a dataframe first avoids the issues seen when uploading
    # the csv conventionally (see original author's note).
    df = pd.read_csv(local_path)
    # Upload the prepared data to the workspace's default datastore.
    datastore = ws.get_default_datastore()
    dataset = Dataset.Tabular.register_pandas_dataframe(name=key, dataframe=df, target=(datastore, target_path), show_progress=True)
    # Create a dataset referencing the cloud location and register it.
    dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, (target_path))], encoding='utf8', header=True)
    dataset = dataset.register(
        workspace=ws,
        name=key,
        description=description_text)
    print("dataset uploaded and registered")
# -
# ## Experiment
# set up the experiment
# + gather={"logged": 1598423890461} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
experiment_name = 'data-science-job-leavers-small-automl'
project_folder = './project_automl'
experiment=Experiment(ws, experiment_name)
# +
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.core.compute_target import ComputeTargetException
amlcompute_cluster_name = "standard-ds12-v2"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
vm_priority = 'lowpriority',
min_nodes=0,
max_nodes=5)
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True, min_node_count = 1, timeout_in_minutes = 10)
# -
# ## AutoML Configuration
#
# TODO: Explain why you chose the automl settings and cofiguration you used below.
# + gather={"logged": 1598429217746} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
automl_settings = {
"experiment_timeout_minutes": 15,
"max_concurrent_iterations": 5,
"primary_metric" : 'AUC_weighted'
}
automl_config = AutoMLConfig(compute_target=compute_target,
task = "classification",
training_data=dataset,
label_column_name="target",
path = project_folder,
enable_early_stopping= True,
featurization= 'auto',
debug_log = "automl_errors.log",
**automl_settings
)
# + gather={"logged": 1598431107951} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# TODO: Submit your experiment
remote_run = experiment.submit(automl_config)
# -
# ## Run Details
#
# OPTIONAL: Write about the different models trained and their performance. Why do you think some models did better than others?
#
# TODO: In the cell below, use the `RunDetails` widget to show the different experiments.
# + gather={"logged": 1598431121770} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
from azureml.widgets import RunDetails
# Show the status in the notebook as the experiment runs
RunDetails(remote_run).show()
remote_run.wait_for_completion()
# -
# ## Best Model
#
# TODO: In the cell below, get the best model from the automl experiments and display all the properties of the model.
#
#
best_run, fitted_model = remote_run.get_output()
best_run_metrics = best_run.get_metrics()
print(f"Best run id: {best_run.id}")
for metric_name in best_run_metrics:
metric = best_run_metrics[metric_name]
print(metric_name, metric)
# ## Model Deployment
#
# Remember you have to deploy only one of the two models you trained.. Perform the steps in the rest of this notebook only if you wish to deploy this model.
#
# TODO: In the cell below, register the model, create an inference config and deploy the model as a web service.
# I closed the notebook and used this script to allow me
# to continue the work without rerunning all pf the previous code
# <DEV>
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
run_id = "AutoML_0d1cb3c5-44c5-4bee-81a5-a3646969425a"
remote_run = ws.get_run(run_id)
run = remote_run
best_run, fitted_model = run.get_output()
# </DEV>
print(fitted_model)
run = remote_run
model_name = best_run.properties["model_name"]
model = run.register_model(model_name=model_name, description="job-leaver automl", tags=None)
# +
import os
folder_name = 'job_leaver_service'
# Create a folder for the web service files
experiment_folder = './' + folder_name
os.makedirs(experiment_folder, exist_ok=True)
print(folder_name, 'folder created.')
# -
from azureml.core import Model
from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.automl.core.shared import constants
folder_name = 'job_leaver_service'
entry_script = os.path.join(folder_name, "score.py")
env_file = os.path.join(folder_name, "env.yml")
best_run.download_file("outputs/scoring_file_v_1_0_0.py", entry_script)
best_run.download_file(constants.CONDA_ENV_FILE_PATH, env_file)
# +
env = Environment.from_conda_specification(name="env", file_path=env_file)
inference_config = InferenceConfig(entry_script=entry_script, environment=env)
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, description="job-leavers classification")
service = Model.deploy(ws,
name="job-leaver-service",
models=[model],
inference_config=inference_config,
deployment_config=aci_config)
service.wait_for_deployment(True)
# + [markdown] gather={"logged": 1598431657736} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# TODO: In the cell below, send a request to the web service you deployed to test it.
# + gather={"logged": 1598432707604} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
import re
import pandas as pd
import json
import requests
# Get the scoring URI for the deployed service.
# FIX: the original stringified the Webservice object and regex-scraped the
# URI out of its repr ("there must be a better way" — there is: the SDK
# exposes it directly as Webservice.scoring_uri). Also, the original
# dictionary lookup happened OUTSIDE the try block, so a missing service
# raised an uncaught KeyError; both failure modes now yield None.
try:
    scoring_uri = ws.webservices['job-leaver-service'].scoring_uri
except (KeyError, AttributeError):
    # service not deployed (KeyError) or URI not populated yet (AttributeError)
    scoring_uri = None
scoring_uri
# get sample data by importing the first record of the kaggle test file and
# outputting it as a json object in the {"data": [record]} shape the
# auto-generated scoring script expects
data = {"data": [{k: [i for i in v.values()][0] for (k,v) in pd.read_csv('data_archive/aug_test.csv', nrows=1).to_dict().items()}]}
# check if the scoring_uri has been found and submit a request to it,
# using the converted data extracted from the kaggle sample data
if scoring_uri:
    # Convert to JSON string
    input_data = json.dumps(data)
    # keep a copy of the payload on disk for inspection
    with open("data.json", "w") as _f:
        _f.write(input_data)
    # Set the content type
    headers = {"Content-Type": "application/json"}
    # Make the request and display the response
    resp = requests.post(scoring_uri, input_data, headers=headers)
    print(resp.json())
# + [markdown] gather={"logged": 1598432765711} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# TODO: In the cell below, print the logs of the web service and delete the service
# + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
print(service.get_logs())
# -
# Tear down the ACI service and unregister the model.
service.delete()
model.delete()
| automl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="nPI5opqcP-d9" outputId="f313ff0b-cd56-402c-bc3e-91078ee4baf4" colab={"base_uri": "https://localhost:8080/"}
# Colab setup: clone the training framework and install albumentations.
# !git clone https://github.com/millermuttu/torch_soft.git
# !pip install -q -U albumentations
# !echo "$(pip freeze | grep albumentations) is successfully installed"
# + id="EheSSt0lQItS" outputId="aabe7f84-b7c3-4a85-895a-b94ba155374c" colab={"base_uri": "https://localhost:8080/"}
# Download and unpack Tiny ImageNet (200 classes, 64x64 images).
# !wget http://cs231n.stanford.edu/tiny-imagenet-200.zip
# !unzip /content/tiny-imagenet-200.zip
# + id="GJDz44os_FJK" outputId="0b746949-6984-45b6-8b93-38e74f44ad07" colab={"base_uri": "https://localhost:8080/"}
# cd '/content/torch_soft'
# + id="kzpPZ43DQg2K"
import torch
import dataset
import models
import utils
import visual
import gradcam
import numpy as np
from torchvision import transforms
from train_test import train, test, train_ocp
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torchvision.datasets as datasets
import torch.utils.data as data
from torch.utils.data import DataLoader
# + id="HlE06ApaRHqK" outputId="5ec5f3a6-f6d5-4d69-e367-117912ae686c" colab={"base_uri": "https://localhost:8080/"}
# Pick CPU/GPU and seed the RNGs via the project helper.
cuda, device = utils.misc.initialize_device(utils.config.SEED)
train_path = "/content/tiny-imagenet-200/train"
val_path = "/content/tiny-imagenet-200/val"
# + id="0OXz6boHRXaN"
# setting the configs
utils.config.BATCH_SIZE = 512
utils.config.EPOCHS = 5
utils.config.NUM_WORKERS = 16
# + id="GQOdNy3aSTsE"
# normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
# # [0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
# Train-time augmentation: with probability 0.8 apply horizontal flip,
# small rotation and a random resized crop back to 64x64.
augmentation = transforms.RandomApply([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.RandomResizedCrop(64)], p=.8)
training_transform = transforms.Compose([
    transforms.Lambda(lambda x: x.convert("RGB")),
    augmentation,
    transforms.ToTensor()])
# Validation: only RGB conversion + tensor conversion (no augmentation).
valid_transform = transforms.Compose([
    transforms.Lambda(lambda x: x.convert("RGB")),
    transforms.ToTensor()])
# + id="5VsIDAhRSXXY"
# If True the dataset class keeps all images in RAM.
in_memory = False
# + id="2WSoRHPAWYWW"
root = '/content/tiny-imagenet-200'
training_set = dataset.TinyImageNet(root, 'train', transform=training_transform, in_memory=in_memory)
valid_set = dataset.TinyImageNet(root, 'val', transform=valid_transform, in_memory=in_memory)
# + id="_bWFqQ-5WfTb"
trainloader = DataLoader(training_set, batch_size=utils.config.BATCH_SIZE, shuffle=True, num_workers=utils.config.NUM_WORKERS)
validloader = DataLoader(valid_set, batch_size=utils.config.BATCH_SIZE, num_workers=utils.config.NUM_WORKERS)
# + id="Q1cbsStPzOhC" outputId="fda88b26-f432-4b58-d620-b177aff67890" colab={"base_uri": "https://localhost:8080/"}
# ResNet18 with one output per Tiny ImageNet class.
model = models.ResNet18(num_classes=len(training_set.label_texts)).to(device)
utils.misc.summary(model, (3, 64, 64))
# + id="pNGF4sv2bQ-F"
# model = models.NaiveResNet(num_classes=200)
# model = model.to(device)
# + id="kkBSgU_AXxls"
# LR-range test setup: tiny starting LR, swept up to 100 below.
loss_fn = utils.misc.cross_entropy_loss_fn()
optimizer = utils.misc.sgd_optimizer(model,lr=1E-6)
# + id="p-jZ7lcMYJ6Q" outputId="30718549-98b7-46e8-cc69-1488d32b8271" colab={"base_uri": "https://localhost:8080/", "height": 82, "referenced_widgets": ["25a43ec76b2a4a8e981c178e341ca761", "e38fdc2562c5477bac48e29ba3c309fd", "ec22002a96cc4782824cbd30dfabfa2f", "ff570f9c16ac4263922ddb8c99293c1c", "5ba2eb52d3244c1bb0aaed8d1ff3f8a1", "361c06a7300c411cb079bcf853c96472", "<KEY>", "3b2c6039ed244afaa16f72a03eab7373"]}
lr_finder = utils.lr_finder.LRFinder(model, optimizer, loss_fn, device=device)
lr_finder.range_test(trainloader, end_lr=100, num_iter=100)
# + id="yZzPejqZYqjj" outputId="45476597-e0c5-46f9-a537-a59d78e162ff" colab={"base_uri": "https://localhost:8080/", "height": 342}
lr_finder.plot()
# + id="bRwQvNjhjCeF"
# reset the lr graph
lr_finder.reset()
# + id="abGl4RFujIOH" outputId="818366c4-0fa9-472a-8656-533c0891f6e5" colab={"base_uri": "https://localhost:8080/"}
# Real training run: SGD + one-cycle LR schedule (peak 0.5).
loss_fn = utils.misc.cross_entropy_loss_fn()
optimizer = utils.misc.sgd_optimizer(model,lr=1)
scheduler = utils.misc.one_cycle_lr(optimizer=optimizer,max_lr=5E-1,epochs=utils.config.EPOCHS,steps_per_epoch=len(trainloader),pct_start=0.5,div_factor=10,final_div_factor=5)
# scheduler = utils.misc.ReduceLROnPlateau(optimizer=optimizer,patience=2,min_lr=1E-6,verbose=True)
# scheduler = utils.misc.StepLR_scheduler(optimizer,10)
# NOTE(review): DEBUG mode RAISES the epoch count from 5 to 15 — debug modes
# usually shorten runs; confirm this is intended.
if utils.config.DEBUG == True:
    utils.config.EPOCHS = 15
test_loss = []
test_accuracy = []
train_losses = []
train_accuracy = []
misclassified_imgs = []
for epoch in range(utils.config.EPOCHS):
    print(f"Running Epoch {epoch+1}/{utils.config.EPOCHS}\n")
    # one-cycle training step (scheduler stepped per batch inside train_ocp)
    train_ocp(model, trainloader, optimizer,scheduler, loss_fn, device, train_losses, train_accuracy)
    # train(model, trainloader, optimizer, loss_fn, device, train_losses, train_accuracy)
    # scheduler.step()
    tl = test(model, validloader, loss_fn, device, 25, test_loss, test_accuracy, misclassified_imgs)
    # scheduler.step(test_loss[epoch])
# + id="yikpfeqRjOE8"
# Plot training vs validation accuracy curves.
visual.plot_metrics([{'metric':test_accuracy, 'label':'Validation Accuracy'},
                     {'metric':train_accuracy, 'label':'Training Accuracy'}], "Accuracy")
# + id="RG2tcUiuflKF"
| examples/tiny_imagenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
#matrix math
import numpy as np
#graphing
import matplotlib.pyplot as plt
#graphing animation
#import matplotlib.animation as animation
import pandas as pd
#from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
#from collections import Counter
# + deletable=true editable=true
# Load the UCI E.coli dataset: whitespace-separated, no header row.
attr = ['sequence_name','Mcg','Gvh','Lip','Chg','Aac','Alm1','Alm2','Site']
data = pd.read_csv('ecoli.txt',header=None,delim_whitespace=True,names=attr)
#data = data.drop('sequence_name',1)
data.head()
# + deletable=true editable=true
# Target column: protein localization site (categorical).
ecoli_site = data['Site']
# + deletable=true editable=true
# Distinct class labels.
# NOTE(review): set() iteration order is arbitrary, so the label->int
# mapping built below is not reproducible across runs.
tmp_ecoli=list(set(ecoli_site))
tmp_ecoli
# + deletable=true editable=true
# Integer codes 0..n_classes-1.
# NOTE: `features` is re-bound to a matrix accessor later in this notebook.
features = [i for i in range(len(tmp_ecoli))]
features
# + deletable=true editable=true
# Map each class label to its integer code.
map_features = {v:k for k,v in zip(features,tmp_ecoli)}
map_features
# + deletable=true editable=true
tmp2_ecoli=list(ecoli_site)
# + deletable=true editable=true
# Encode the whole Site column as integers.
tmp = []
for i in tmp2_ecoli:
    if i in map_features:
        tmp.append(map_features[i])
# + deletable=true editable=true
data['Site'] = tmp
# + deletable=true editable=true
data.head()
# + deletable=true editable=true
# Round-trip through disk, then drop the id column and split off the target.
data.to_json('tm.json',date_format='dd-mm-yy')
data.to_csv('tm.txt')
dataset = pd.read_csv('tm.txt',index_col=0)
dataset.drop(['sequence_name'],axis=1,inplace=True)
dataset.to_csv('finalecoli.txt')
class_ = dataset['Site']
dataset.drop(['Site'],axis=1,inplace=True)
dataset
# -
dataset.shape
# FIX: DataFrame.as_matrix was deprecated in pandas 0.23 and removed in
# pandas 1.0; to_numpy is the supported replacement. The bound method is
# assigned (not called) so the later `features()` calls keep working.
features = dataset.to_numpy
features()
# + deletable=true editable=true
dataset
# -
class_
def euclidian(a, b):
    """Return the Euclidean (L2) distance between points ``a`` and ``b``."""
    difference = a - b
    return np.linalg.norm(difference)
def kmeans(k, dataset, epsilon=0, distance='euclidian'):
    """Cluster `dataset` (num_instances x num_features) into `k` groups (Lloyd's algorithm).

    Iterates until the centroids move by at most `epsilon` (L2 norm of the
    stacked centroid matrix difference) between two consecutive iterations.

    Returns:
        prototypes: (k, num_features) array of final centroids.
        history_centroids: list with the centroid array of every iteration.
        belongs_to: (num_instances, 1) array with each instance's cluster index.

    Raises:
        ValueError: for an unsupported `distance` or k > number of instances.
    """
    print("Working...")
    # FIX: the original left dist_method unbound for any other value of
    # `distance`, crashing later with UnboundLocalError; fail fast instead.
    if distance != 'euclidian':
        raise ValueError("unsupported distance: {!r}".format(distance))
    dist_method = lambda a, b: np.linalg.norm(a - b)

    history_centroids = []
    num_instances, num_features = dataset.shape
    if k > num_instances:
        raise ValueError("k ({}) cannot exceed the number of instances ({})".format(k, num_instances))
    # FIX: np.random.randint's upper bound is exclusive, so the original
    # (0, num_instances - 1) could never select the last instance as an
    # initial centroid. Sampling without replacement also avoids duplicate
    # initial centroids (which produced empty clusters and NaN means).
    initial_indices = np.random.choice(num_instances, size=k, replace=False)
    prototypes = dataset[initial_indices]
    history_centroids.append(prototypes)
    prototypes_old = np.zeros(prototypes.shape)
    belongs_to = np.zeros((num_instances, 1))
    norm = dist_method(prototypes, prototypes_old)
    iteration = 0
    while norm > epsilon:
        iteration += 1
        norm = dist_method(prototypes, prototypes_old)
        prototypes_old = prototypes
        # Assignment step: each instance joins its nearest centroid.
        for index_instance, instance in enumerate(dataset):
            dist_vec = np.zeros((k, 1))
            for index_prototype, prototype in enumerate(prototypes):
                dist_vec[index_prototype] = dist_method(prototype, instance)
            belongs_to[index_instance, 0] = np.argmin(dist_vec)
        # Update step: move each centroid to the mean of its members.
        tmp_prototypes = np.zeros((k, num_features))
        for index in range(len(prototypes)):
            instances_close = [i for i in range(len(belongs_to)) if belongs_to[i] == index]
            if instances_close:
                tmp_prototypes[index, :] = np.mean(dataset[instances_close], axis=0)
            else:
                # FIX: an empty cluster previously produced np.mean([]) = NaN,
                # poisoning every later iteration; keep the old centroid instead.
                tmp_prototypes[index, :] = prototypes[index]
        prototypes = tmp_prototypes
        history_centroids.append(tmp_prototypes)
    print(prototypes)
    print("DONE!")
    return prototypes, history_centroids, belongs_to
# Cluster the 7 sensor features into k=8 groups (one per localization class).
k=8
centroids, history_centroids, belongs_to = kmeans(k,features())
# Show the final centroid of each cluster.
for index,centroid in enumerate(centroids):
    print("#{} C({})".format(index,centroid))
def predict(centroids, vector_test):
    """Return (index, centroid) of the centroid closest to `vector_test`.

    Fixes over the original implementation:
    - drops the unused `dist_vec` buffer, which silently depended on the
      global `k`
    - replaces the magic sentinel distance 9999 with a direct argmin, so
      arbitrarily large distances are handled correctly
    - the returned index and centroid are now computed from the same argmin,
      so they can no longer disagree on exact-tie inputs
    """
    squared_distances = np.sum((np.asarray(centroids) - np.asarray(vector_test)) ** 2, axis=1)
    best_index = int(np.argmin(squared_distances))
    return best_index, centroids[best_index]
# Classify a new 7-feature measurement against the learned centroids.
trying = [0.31,0.36,0.48,0.5,0.58,0.94,0.94]
belong_to,closest_centroid=predict(centroids,trying)
belong_to,closest_centroid
# Recall the label->code mapping and the per-instance cluster assignments.
map_features
belongs_to
features()
| ecoli_kmeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import psycopg2
# Connect to the "anyway" Postgres instance.
# NOTE(review): "<PASSWORD>" is a scrubbed placeholder — credentials should
# come from configuration/environment, not source.
conn = psycopg2.connect(dbname="anyway", user="anyway", password="<PASSWORD>", host="db")
cur = conn.cursor()
# Pull every Waze alert, oldest first.
query = "SELECT * FROM waze_alerts ORDER BY created_at ASC"
cur.execute(query)
waze_alerts = cur.fetchall()
# Column names come from the cursor description of the last query.
column_names = [desc[0] for desc in cur.description]
for column in column_names:
    print(column)
import pandas as pd
df_waze_alerts = pd.DataFrame(waze_alerts, columns = column_names)
df_waze_alerts.head()
df_waze_alerts.tail()
df_waze_alerts.set_index('id',inplace=True)
# Keep only accident alerts.
df_waze_accidents = df_waze_alerts[df_waze_alerts["alert_type"] == 'ACCIDENT']
df_waze_accidents.shape
df_waze_accidents.head()
# Sample one timestamp to check the column's type.
# NOTE(review): hard-coded row id 23 assumes that id exists — confirm.
timestamp = df_waze_accidents.at[23,'created_at']
timestamp.date()
# Calendar dates on which accidents were reported.
waze_accident_dates = df_waze_accidents['created_at'].apply(lambda created_at: created_at.date())
waze_accident_dates
waze_accident_dates.unique()
# All alert types present in the table.
query = "SELECT DISTINCT alert_type FROM waze_alerts"
cur.execute(query)
alert_types = cur.fetchall()
alert_types
# Distribution of the main quality/metadata columns.
df_waze_accidents['confidence'].value_counts()
df_waze_accidents['number_thumbs_up'].value_counts()
df_waze_accidents['report_rating'].value_counts()
df_waze_accidents['reliability'].value_counts()
df_waze_accidents['street'].value_counts()
df_waze_accidents['road_type'].value_counts()
df_waze_accidents['city'].unique()
df_waze_accidents.at[23,'city']
# accidents from 31.08.2019
import datetime
date = datetime.date(2019, 8, 31)
is_from_31_08_2019 = df_waze_accidents['created_at'].apply(lambda created_at: created_at.date()) == date
is_from_31_08_2019
accidents_from_31_08_2019 = df_waze_accidents[is_from_31_08_2019]
accidents_from_31_08_2019
# accidents from 01.09.2019
date = datetime.date(2019, 9, 1)
is_from_01_09_2019 = df_waze_accidents['created_at'].apply(lambda created_at: created_at.date()) == date
accidents_from_01_09_2019 = df_waze_accidents[is_from_01_09_2019]
accidents_from_01_09_2019.drop_duplicates()
accidents_from_01_09_2019['street'].unique()
# FIX: `is_time_between_00_03` was used below but never defined, so this
# cell raised NameError. Build the mask here: True for alerts created from
# midnight (inclusive) up to 03:00 (exclusive) on that day.
# NOTE(review): the 00:00-03:00 window is inferred from the variable name —
# confirm the intended bounds.
is_time_between_00_03 = accidents_from_01_09_2019['created_at'].apply(lambda created_at: 0 <= created_at.hour < 3)
accidents_between_00_03 = accidents_from_01_09_2019[is_time_between_00_03]
accidents_between_00_03.head()
accidents_between_00_03
accidents_between_00_03['street'].unique()
# NOTE(review): hard-coded row id 9216 assumes that id exists — confirm.
accidents_between_00_03.loc[9216]
# accidents from 02.09.2019
date = datetime.date(2019, 9, 2)
is_from_02_09_2019 = df_waze_accidents['created_at'].apply(lambda created_at: created_at.date()) == date
accidents_from_02_09_2019 = df_waze_accidents[is_from_02_09_2019]
accidents_from_02_09_2019.drop_duplicates()
accidents_from_02_09_2019['street'].unique()
accidents_from_02_09_2019['city'].unique()
accidents_from_02_09_2019.drop_duplicates()['city'].value_counts()
accidents_from_02_09_2019_no_duplicates = accidents_from_02_09_2019.drop_duplicates()
# Filter to Tel Aviv-Yafo (city names are stored in Hebrew in the DB).
accidents_from_02_09_2019_tel_aviv = accidents_from_02_09_2019_no_duplicates[accidents_from_02_09_2019_no_duplicates['city'] == 'תל אביב - יפו']
accidents_from_02_09_2019_tel_aviv
accidents_from_02_09_2019_tel_aviv['street'].unique()
accidents_from_02_09_2019_tel_aviv['street'].value_counts()
# Accidents on Ibn Gabirol street that day.
accidents_from_02_09_2019_Ibn_Gabirol_accidents = accidents_from_02_09_2019_tel_aviv[accidents_from_02_09_2019_tel_aviv['street'] == 'אבן גבירול']
accidents_from_02_09_2019_Ibn_Gabirol_accidents
# Traffic-jam alerts on the same street during the morning of those accidents.
df_waze_jams = df_waze_alerts[(df_waze_alerts["alert_type"] == 'JAM') & (df_waze_alerts['city'] == 'תל אביב - יפו') & (df_waze_alerts['street'] == 'אבן גבירול') & (df_waze_alerts['created_at'] > '2019-09-02 08:02:06') & (df_waze_alerts['created_at'] < '2019-09-02 12:00:00')]
df_waze_jams
| jupyter/work/waze_accidents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ORIGINAL DATASET : https://archive.ics.uci.edu/ml/datasets/Activity+Recognition+system+based+on+Multisensor+data+fusion+(AReM)
#
#
import pandas as pd
file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter04/Dataset/activity.csv'
df = pd.read_csv(file_url)
df.head()
# Each row represents an activity performed by a person; the activity name is in
# the 'Activity' column, and the other 6 features are readings from different sensors.
#
# The goal is to predict which activity was performed from the sensor readings.
# pop() removes the column from the DataFrame and returns it
target = df.pop('Activity')
# With the target separated, generate the train and test sets,
# holding out 33% of the rows for the test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df, target, test_size=0.33, random_state=42)
# Import the class, create a model instance and initialise it
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state=1)
# Fit the model to the training data; this is where the model learns the relationship
# between the response variable and the independent variables and stores the learned parameters.
rf_model.fit(X_train, y_train)
# We can look at the predictions for the training features
preds = rf_model.predict(X_train)
preds
# Training the model is one of the easier parts; the real challenge of the Machine
# Learning process is evaluating the model's performance and tuning it
# ### Evaluate model performance
# As a first metric we use accuracy: #correct predictions / #total predictions
from sklearn.metrics import accuracy_score
accuracy_score(y_train, preds)
# This says the model reached 99% accuracy on the training data — almost perfect —
# but we do not know whether the same result will hold on new data.
#
# How do we know whether the model will perform well on unseen data?
# We can use our test set for that
test_preds = rf_model.predict(X_test)
accuracy_score(y_test, test_preds)
# There is roughly a 20% performance gap, which means the model is overfitting the
# training data and not generalising well to arbitrary data; tuning the model's
# hyperparameters could help with this.
| Chapter-04.MultiClass_RandomForest/RandomForestExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# load dataset
df = pd.read_csv("data.csv")
df.head(20)
# independent features (all but the last column) and target (4th column)
x = df.iloc[:, :-1].values  # iloc is used to split the data
y = df.iloc[:, 3].values
# handling the missing values using scikit-learn.
# FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer is the supported replacement (it imputes column-wise,
# matching the old axis=0 behaviour).
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
x[:, 1:3] = imputer.fit_transform(x[:, 1:3])
x
# handling categorical features
# categorical variables
df.head()
# +
# Encode categorical values.
# FIX: OneHotEncoder's `categorical_features` argument was removed in
# scikit-learn 0.22; encode column 0 explicitly and stitch the dummy
# columns back in front of the remaining features (same layout as before).
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_x = LabelEncoder()
x[:, 0] = labelencoder_x.fit_transform(x[:, 0])
onehotencoder_x = OneHotEncoder()
encoded_col = onehotencoder_x.fit_transform(x[:, [0]]).toarray()
x = np.concatenate([encoded_col, x[:, 1:]], axis=1).astype(float)
x
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
y
# +
# splitting data into training and test (80/20, fixed seed)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# -
df.head()
# +
# feature scaling
from sklearn.preprocessing import StandardScaler
SS_x = StandardScaler()
x_train = SS_x.fit_transform(x_train)
# FIX: the original called fit_transform on x_test, re-fitting the scaler
# on test data (test-set leakage); transform applies the statistics learned
# from the training set instead.
x_test = SS_x.transform(x_test)
x_train
# what we learn yet from this?
# independent and dependent variable
# handling missing values
# encode categorical variables
# split data into training and test
# feature scaler
# -
| ML by ittools/Data_Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ws]
# language: python
# name: conda-env-ws-py
# ---
# +
# %matplotlib inline
import sys
# make the package in the parent directory importable from this notebook
sys.path.append("..")
import matplotlib.pyplot as plt
from pytrackmate import trackmate_peak_import
# -
# Parse the TrackMate XML file into a DataFrame of detected spots.
fname = "FakeTracks.xml"
spots = trackmate_peak_import(fname)
spots.head()
# Scatter the spot positions, marker size scaled by the "w" column.
plt.scatter(spots["x"], spots["y"], s=spots["w"] * 50)
| notebooks/Trackmate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is code for
# 1. running our fMRI scans through an autoencoder
# 2. encoding our fMRI data from 4-D scans to 1-D arrays with a length of 1000
# 3. predicting schizophrenia with the encoded scans
#
# Right now the predictions are still no better than random chance, but this is a template for how the autoencoder could work. We still need to work on preprocessing the fMRI data before it even reaches the autoencoder: the data is either too noisy or too large for the autoencoder to accurately condense into something that can accurately predict schizophrenia. One option would be to make the autoencoder much larger, but since the input layer has a vector length of over 3 million, even small increases grow the network quickly — this one already has 6 billion total weight and bias parameters. Our best option at the moment is to improve the preprocessing before the autoencoder.
# +
import os
import numpy as np
from nibabel.testing import data_path
import nibabel as nib
import nilearn
import keras
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
# NOTE(review): numpy is imported twice in this cell
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
# %matplotlib inline
from IPython.display import display
# -
# To load these data files, look at other files in repository,
# Pre-extracted arrays: XData holds the fMRI scans, YData the labels.
XData = np.load('/Users/KJP/Desktop/neural nets/Codes/XData.npy')
YData = np.load('/Users/KJP/Desktop/neural nets/Codes/YData.npy')
# fetch dataset
dataset = nilearn.datasets.fetch_cobre(n_subjects=146, data_dir="/Users/KJP/Desktop/neural nets/Cobre Dataset2", url=None, verbose=1)
file_paths = dataset["func"]
confounds = dataset["confounds"]
file_paths.sort() #sort file names by alphabetical order, which will result in sorting by patient number
confounds.sort()
del file_paths[74] # number 74 is missing samples so it needs to be removed
del confounds[74]
# #fmri scans, l, h, w, time stamps for each scan
XData.shape
# standardize XData into X_scaled: flatten each 4-D scan to one long vector,
# then centre by the global mean and divide by the global max.
# NOTE(review): this is mean-centering + max-scaling, not z-scoring — confirm intended.
X_scaled = XData.astype('float32').reshape(XData.shape[0],XData.shape[1]*XData.shape[2]*XData.shape[3]*XData.shape[4])
X_scaled = X_scaled - X_scaled.mean()
X_scaled = X_scaled / X_scaled.max()
#Split X_scaled and YData into testing and training (first 115 subjects train)
x_train = X_scaled[:115]
x_test = X_scaled[115:]
y_train = YData[:115]
y_test = YData[115:]
display(x_train.shape)
display(x_test.shape)
display(y_train.shape)
display(y_test.shape)
# +
#Make autoencoder network
# Symmetric dense autoencoder:
#   input -> 1000 -> 500 -> 200 -> 200 (code) -> 200 -> 500 -> 1000 -> input
input_dim = x_train.shape[1]
encoding_dim = 200
autoencoder = keras.Sequential()
# Encoder Layers
autoencoder.add(Dense(1000, input_shape=(input_dim,), activation='relu'))
autoencoder.add(Dense(500, activation='relu'))
autoencoder.add(Dense(200, activation='relu'))
autoencoder.add(Dense(encoding_dim, activation='relu'))
# Decoder Layers
autoencoder.add(Dense(200, activation='relu'))
autoencoder.add(Dense(500, activation='relu'))
autoencoder.add(Dense(1000, activation='relu'))
# sigmoid output to reconstruct the scaled input
autoencoder.add(Dense(input_dim, activation='sigmoid'))
autoencoder.summary()
# +
# Make separate encoder network from the autoencoder.
# It reuses (shares weights with) the first four autoencoder layers, so
# training the autoencoder also trains this encoder.
input_img = Input(shape=(input_dim,))
encoder_layer1 = autoencoder.layers[0]
encoder_layer2 = autoencoder.layers[1]
encoder_layer3 = autoencoder.layers[2]
encoder_layer4 = autoencoder.layers[3]
encoder = Model(input_img, encoder_layer4(encoder_layer3(encoder_layer2(encoder_layer1(input_img)))))
encoder.summary()
# +
from keras.callbacks import EarlyStopping
# stop when validation loss stops improving by at least 3e-4 for 10 epochs
early_stopping = EarlyStopping(monitor='val_loss', min_delta=.0003, patience=10)
autoencoder.compile(optimizer=keras.optimizers.Adam(lr=.0005), loss='mean_squared_error')
# NOTE(review): trains on only 10 samples (x_train[10:20]) and validates on
# 15 — looks like a memory-constrained smoke test; confirm before scaling up.
history = autoencoder.fit(x_train[10:20, :], x_train[10:20, :],
                epochs=100,
                batch_size=10,
                validation_data=(x_test[:15,:], x_test[:15,:]),
                verbose = 1,
                callbacks=[early_stopping])
plt.figure()
# summarize history for accuracy (accuracy plots are disabled because the
# autoencoder is compiled without an accuracy metric)
plt.subplot(211)
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.tight_layout()
plt.show()
# reconstruction MSE on the full held-out set
score = autoencoder.evaluate(x_test, x_test, verbose=0)
print('Test loss:', score)
# -
# encode all of our x data with the trained encoder
# NOTE(review): the comment in the header says length 1000, but encoding_dim
# above is 200 — confirm which is intended.
X_encoded = encoder.predict(X_scaled)
# standardize X (centre by global mean, scale by global max)
# NOTE(review): statistics are computed over ALL subjects (train + test),
# which leaks test information into the training features.
X_encoded = X_encoded - X_encoded.mean()
X_encoded = X_encoded / X_encoded.max()
x_train_encoded = X_encoded[:115, :]
x_test_encoded = X_encoded[115:, :]
display(x_train_encoded.shape)
display(x_test_encoded.shape)
# +
#train with X_encoded on a neural net
# Multi-layer classifier on top of the encoded features:
# 10000 -> 1000 -> 100 ReLU hidden layers, bias weights initialised to 0.1.
model = keras.models.Sequential()
model.add(keras.layers.Dense(10000,input_dim=len(X_encoded[0]),activation='relu',bias_initializer=keras.initializers.Constant(0.1)))
model.add(keras.layers.Dense(1000,activation='relu',bias_initializer=keras.initializers.Constant(0.1)))
model.add(keras.layers.Dense(100,activation='relu',bias_initializer=keras.initializers.Constant(0.1)))
# Output layer: one softmax unit per class in YData.
model.add(keras.layers.Dense(len(YData[0]),activation='softmax'))
# Compile with a reduced Adam learning rate and track accuracy.
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(lr=.0001),
              metrics=['accuracy'])
# Display the model
print(model.summary())
# -
# Train it! (20% of the training set held out as validation)
history = model.fit(x_train_encoded, y_train,
          batch_size=100,
          epochs=200,
          verbose=1,
          validation_split = 0.2)
plt.figure()
# summarize history for accuracy
# NOTE(review): in tf.keras >= 2 the History keys are 'accuracy'/'val_accuracy',
# not 'acc'/'val_acc' — this cell raises KeyError on newer Keras; confirm the
# Keras version in use.
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.tight_layout()
plt.show()
# Final evaluation on the held-out subjects.
score = model.evaluate(x_test_encoded, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# +
#Overall we still have not achieved better than random chance. The problem at the moment
#is that we still need more preprocessing before the autoencoder
#Either the data is too noisy or too big for the autoencoder to accurately encode useful
#data for predicting schizophrenia
# -
| FMRI Preprocessing/AutoEncoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %config IPCompleter.greedy = True
# # pyEPR Calculating Disipative Participation Ratios
# <NAME>
#
# **Summary:** Following Appendix E of the energy-participation-ratio (EPR) paper, here we demonstrate how to calcualte the dielectric EPR of a chip substrate in a qubit eigen mode.
#
# We use the following definitions for the RMS energy stored in a volume $V$,
# \begin{align}
# \mathcal{E}_{\mathrm{elec}}&=&\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}\;,\\\mathcal{E}_{\mathrm{mag}}&=&\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{H}_{\text{max}}^{*}\overleftrightarrow{\mu}\vec{H}_{\text{max}}\;,
# \end{align}
# ## The simple way
# Following the first tutorial, let's loads the `pyEPR` package under the shorthand name `epr`.
import pyEPR as epr
# #### Load Ansys HFSS tutorial file
# As we did in the previous tutorial, let us first determine where the example file is stored.
# For tutorial, let us get the path to the tutorial folder.
# Load Path temporarily just to find where the tutorial folder is
# (the example files ship next to the installed pyEPR package).
# return path_to_project
from pathlib import Path
path_to_project = Path(epr.__file__).parent.parent / '_example_files'
# FIX: the original message read "We will the example project" (missing verb).
print(f'We will use the example project located in\n {path_to_project}')
# Now, we will open Ansys Desktop, connect to a specific project and design, and create the distributed analysis object `eprh`
# Connect to the Ansys project/design and build the distributed EPR analysis.
pinfo = epr.ProjectInfo(project_path = path_to_project,
                        project_name = 'pyEPR_tutorial1',
                        design_name = '1. single_transmon')
eprh = epr.DistributedAnalysis(pinfo)
# # Calculate participation of the substrate for mode 1
# First, select which eigenmode to work on. Here the fundamental mode, mode 0, is the qubit.
# ```python
# eprh.set_mode(0)
# ```
#
# Let us now calculate the dielectric energy-participatio ratio
# of the substrate relative to the dielectric energy of all objects, using the function
# ```python
# eprh.calc_p_electric_volume
# ```
# Note that when all objects are specified, this does not include any energy
# that might be stored in any lumped elements or lumped capacitors.
#
# Returns:
# ---------
# ℰ_object/ℰ_total, (ℰ_object, _total)
# +
# Work on the fundamental eigenmode (mode 0, the qubit mode).
eprh.set_mode(0)
# Calculate the EPR p_dielectic: substrate electric energy relative to the
# electric energy of all objects.
p_dielectic, (ℰ_substr, ℰ_total) = eprh.calc_p_electric_volume('substrate', 'AllObjects')
print(f'Energy in silicon substrate = {100*p_dielectic:.1f}%')
# -
# Now, compute the electric energy stored in the vacuum
# Use the calculated total energy in all objects
# so that we don't have to recompute it, since we
# computed it above
# +
# Here we will pass in the precomputed E_total=ℰ_total
p_vac, (ℰ_vac, ℰ_total) = eprh.calc_p_electric_volume('cavity_enclosure', E_total=ℰ_total)
print(f'''Energy in vacuum = {100*p_vac:.1f}%
Since there are no other volumes,
the two energies should sum to one: {p_dielectic + p_vac}''')
# Let's find outmore about the functuion signature
# ? eprh.calc_p_electric_volume
# ## Calculating the energies directly
#
# Using lower level functions
# Same participation ratio computed from the raw energies.
ℰ_total = eprh.calc_energy_electric(volume='AllObjects')
ℰ_substr = eprh.calc_energy_electric(volume='substrate')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
# ?eprh.calc_energy_electric
# # Using the Fields calculator in HFSS directly
# We will do the same calculation again, but now using the internals of `eprh.calc_energy_electric` to demonstrate how the fields calcualtor object can be used for custom integrals and how the internals work.
#
# #### Using the HFSS Fields Calculator
# The Fields calculator enables you to perform computations using basic field quantities. The calculator will compute derived quantities from the general electric field solution; write field quantities to files, locate maximum and minimum field values, and perform other operations on the field solution.
#
# The calculator does not perform the computations until a value is needed or is forced for a result. This makes it more efficient, saving computing resources and time; you can do all the calculations without regard to data storage of all the calculated points of the field. It is generally easier to do all the calculations first, then plot the results.
#
# #### Direct calculation of
#
# \begin{align}
# \mathcal{E}_{\mathrm{elec}}&=&\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}\;.
# \end{align}
#
#
# +
from pyEPR.core import *
# NOTE(review): redundant — the wildcard import above already brings in CalcObject
from pyEPR.core import CalcObject
self, volume = eprh, 'AllObjects'
# Build the calculator expression Re ∫_V E* · ε · E dv over all objects.
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E").smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
E_total = A.evaluate(lv=self._get_lv())
# This command numerically evaluates and displays the
# results of calculator operations
E_total
# +
# Same expression restricted to the substrate volume.
# NOTE(review): duplicated cell — could be factored into a helper taking the
# volume name, kept as-is for the tutorial's step-by-step exposition.
from pyEPR.core import *
self, volume = eprh, 'substrate'
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E").smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
A = A.integrate_vol(name=volume)
E_subs = A.evaluate(lv=self._get_lv())
# This command numerically evaluates and displays the
# results of calculator operations
E_subs
# -
print(f'Energy in substrate: {100*E_subs/E_total:.1f}%')
# # Summary
# We showed three levels of how to obtain the same result, using either the
# 1. `pyeprh.calc_p_electric_volume`
# 2. `pyeprh.calc_energy_electric`
# 3. Or the fields calcualtor
# **NEXT:** Please see *Part III* of the tutorial to continue. You will find it in the tutorial folder as a jupyter notebook.
| _tutorial_notebooks/Tutorial 2. Field calculations - dielectric energy participation ratios (EPRs).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pymongo
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# +
#dir(pymongo)
# -
# Connect to a local MongoDB server (default port 27017).
myclient = pymongo.MongoClient("mongodb://localhost:27017")
myclient
# Database (created lazily on first insert) that holds the loan data.
mydb = myclient["loaninfo"]
mydb
# +
#dir(mydb)
# -
# First collection/table.
mycol = mydb["LoanTest"]
mycol
# Second collection/table.
collection = mydb["LoanTrain"]
collection
# +
# Mirror the CSV files into the MongoDB collections.
# NOTE(review): the *train* rows are inserted into "LoanTest" and the *test*
# rows into "LoanTrain" — the collection names look swapped; confirm intent.
train= pd.read_csv('train.csv')
test= pd.read_csv('test.csv')
df= train
data = df.to_dict('records')
# ordered=False: keep inserting the remaining documents even if one fails.
x = mycol.insert_many(data, ordered=False)
# +
#x =mycol.find_one()
# +
dataframe=test
data = dataframe.to_dict('records')
y = collection.insert_many(data, ordered=False)
# +
#y=mycol.find_one()
# -
train.head(10)
# Check for missing values: any column with a count > 0 has missing values.
train.isna().sum()
# Impute missing values identically in both frames (the original repeated the
# same seven fillna lines for train and for test):
#   categorical / discrete columns -> most frequent value (mode)
#   LoanAmount                     -> median (robust to the outliers seen below)
mode_cols = ['Gender', 'Dependents', 'Self_Employed',
             'Loan_Amount_Term', 'Credit_History', 'Married']
for frame in (train, test):
    for col in mode_cols:
        frame[col].fillna(frame[col].mode()[0], inplace=True)
    frame['LoanAmount'].fillna(frame['LoanAmount'].median(), inplace=True)
# Re-check: every column should now report zero missing values.
train.isna().sum()
# Target distribution: approvals vs rejections in the training data.
train['Loan_Status'].value_counts().plot.bar()
# Box plots to eyeball outliers in the numeric columns.
train.boxplot(column ='ApplicantIncome')
train.boxplot(column ='LoanAmount')
# Categorical variables as proportion bars (normalize=True -> fractions).
plt.figure(1)
plt.subplot(221)
train['Married'].value_counts(normalize=True).plot.bar(figsize=(20,10),title="Married")
plt.subplot(222)
train['Gender'].value_counts(normalize=True).plot.bar(title="Gender")
plt.subplot(223)
train['Self_Employed'].value_counts(normalize=True).plot.bar(title="Self_Employed")
plt.subplot(224)
train['Credit_History'].value_counts(normalize=True).plot.bar(title="Credit_History")
plt.show()
# +
# Ordinal variables: the same proportion bars.
plt.figure(1)
plt.subplot(131)
train['Dependents'].value_counts(normalize=True).plot.bar(figsize=(20,6),title="Dependents")
plt.subplot(132)
train['Education'].value_counts(normalize=True).plot.bar(title="Education")
plt.subplot(133)
train['Property_Area'].value_counts(normalize=True).plot.bar(title="Property_Area")
plt.show()
# -
# Bivariate view: row-normalized stacked bars of Loan_Status per category.
married= pd.crosstab(train['Married'],train['Loan_Status'])
married.div(married.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
gender= pd.crosstab(train['Gender'],train['Loan_Status'])
gender.div(gender.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
dependents= pd.crosstab(train['Dependents'],train['Loan_Status'])
dependents.div(dependents.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
train.columns
education= pd.crosstab(train['Education'],train['Loan_Status'])
education.div(education.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
# +
# Loan status by self-employment (row-normalized stacked bars).
# Renamed the variable from `self`: that name is conventionally reserved for
# instance references in Python, and shadowing it invites confusion.
self_employed = pd.crosstab(train['Self_Employed'],train['Loan_Status'])
self_employed.div(self_employed.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
# -
# Self_Employed:
# Normalize the skewed loan amount with a log transform.
train['LoanAmount_log']=np.log(train['LoanAmount'])
test['LoanAmount_log'] = np.log(test['LoanAmount'])
# Drop the identifier column; it carries no predictive signal.
train=train.drop('Loan_ID',axis=1)
test=test.drop('Loan_ID',axis=1)
# Bug fix: pass axis as a keyword — the positional form `drop(label, 1)`
# was deprecated in pandas 1.0 and removed in pandas 2.0.
X = train.drop('Loan_Status', axis=1)
y = train.Loan_Status
# One-hot encode the categorical columns (0/1 dummies).
X= pd.get_dummies(X)
train=pd.get_dummies(train)
test=pd.get_dummies(test)
# Hold out 30% of the labeled data for validation.
from sklearn.model_selection import train_test_split
x_train, x_vl, y_train, y_vl= train_test_split(X,y, test_size=0.3,random_state=42)
from sklearn.linear_model import LogisticRegression
# Baseline model: logistic regression.
model= LogisticRegression()
model.fit(x_train,y_train)
# Predict on the validation split.
pred= model.predict(x_vl)
pred
# Validation accuracy.
from sklearn.metrics import accuracy_score
accuracy_score(y_vl,pred)
# Predictions for the unlabeled test set.
# NOTE(review): `test` was dummified independently of `X`; if the test CSV
# lacks any category level, its dummy columns will not line up with the
# training columns — confirm the column sets match.
pred_test= model.predict(test)
pred_test
# Random forest with 1000 trees; fixed seed for reproducibility.
from sklearn.ensemble import RandomForestClassifier
rf= RandomForestClassifier(n_estimators=1000,random_state=42)
rf.fit(x_train, y_train)
rfpred=rf.predict(x_vl)
rfpred
accuracy_score(rfpred,y_vl)
pred_test=rf.predict(test)
pred_test
# Feature-importance ranking from the fitted forest.
important= pd.Series(rf.feature_importances_,index=X.columns)
important.plot(kind='barh',figsize=(12,8))
# +
# Refit the random forest on a reduced, hand-picked feature set.
X=train[['ApplicantIncome', 'CoapplicantIncome','Credit_History', 'LoanAmount_log']]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=5)
rf.fit(X_train,y_train)
k=rf.predict(X_test)
accuracy_score(k,y_test)
# +
# Refit the logistic regression on the same reduced feature set.
X=train[['ApplicantIncome', 'CoapplicantIncome','Credit_History', 'LoanAmount_log']]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=5)
model.fit(X_train,y_train)
k=model.predict(X_test)
accuracy_score(k,y_test)
# -
# Distribution of predicted labels per credit-history / loan-term bucket.
test_loan= pd.crosstab(test['Credit_History'],pred_test)
test_loan.div(test_loan.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(4,4))
test_loan_amount_term= pd.crosstab(test['Loan_Amount_Term'],pred_test)
test_loan_amount_term.div(test_loan_amount_term.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(10,4))
# +
#test_gender= pd.crosstab(test['LoanAmount'],pred_test)
#test_gender.div(test_gender.sum(1).astype(float),axis=0).plot(kind='bar',stacked=True,figsize=(100,4))
# -
# Allowing new users to apply for a loan using our model to determine if they are loan eligible or not
# +
def details():
    """Prompt on the console for every loan-application field.

    Results are stored in module-level globals (read later when the record
    is inserted into MongoDB and scored by the model). Only Loan_ID is
    converted to int; every other answer is kept as the raw input string.
    """
    global Loan_ID,Gender,Married,Dependents,Education,Self_Employed,ApplicantIncome,CoapplicantIncome,Loan_Amount,Loan_Amount_Term,Credit_History,Property_Area
    Loan_ID = int(input("Enter the Loan_ID: "))
    Gender = input("Enter your Gender: ")
    Married = input("Enter your Marital status yes/no: ")
    Dependents = input("Enter your Dependents: ")
    Education = input("Are you a Graduate/Not: ")
    Self_Employed = input("Are you Self_Employed Yes/No : ")
    ApplicantIncome = input("Enter your Income: ")
    CoapplicantIncome = input("Enter your Coapplicantincome : ")
    Loan_Amount = input("Enter the Loan_Amount: ")
    Loan_Amount_Term = input("Enter the Loan_Amount_Term : ")
    Credit_History = input("Enter your Credit_History 0/1 : ")
    Property_Area = input("Enter your Property_Area Urban/Semi_Urban/Rural: ")
    #return (Loan_ID)
# +
def main():
    """Console menu for the loan-automation flow: 1 collects details, 2 quits.

    Bug fixes versus the original:
      * option 2 previously fell into the "Invalid details" branch, recursed
        into main() and then broke out — it now exits as the prompt promises;
      * non-numeric input previously crashed with ValueError from int();
        it is now treated as invalid and the menu re-prompts.
    """
    print('Welcome to Loan Automation System')
    while True:
        try:
            checker = int(input('1 Enter the required details: 2 to quit: '))
        except ValueError:
            print('Invalid details, try again')
            continue
        if checker == 1:
            # Collect the applicant's data (stored in module-level globals).
            details()
        elif checker == 2:
            print('========================')
            break
        else:
            print('Invalid details, try again')
main()
# -
Loan_ID
# +
# Persist the newly captured application.
# Bug fix: "Property_Area" previously stored the literal string
# "Property_Area" instead of the value the user typed in.
Newdata = {"Loan_ID": Loan_ID,"Gender":Gender,"Married":Married,"Dependents":Dependents,"Education":Education,"Self_Employed":Self_Employed,"ApplicantIncome":ApplicantIncome, "CoapplicantIncome":CoapplicantIncome, "Loan_Amount":Loan_Amount,"Loan_Amount_Term":Loan_Amount_Term,"Credit_History":Credit_History,"Property_Area":Property_Area}
x = collection.insert_one(Newdata)
# +
# Bug fix: find_one() without a filter returns an arbitrary document (this
# collection also holds the test-CSV rows inserted earlier); fetch the
# record we just inserted instead.
y = collection.find_one({"_id": x.inserted_id})
y
# +
Inputdf = pd.DataFrame(y, index=[0])
Inputtrain=Inputdf.drop('_id',axis=1)
Inputtrain
# -
Inputtest=Inputtrain.drop('Loan_ID',axis=1)
Inputtest
#turning categorical data into 0 and 1s
X=pd.get_dummies(Inputtest)
X
# Bug fix: the model was last fitted on ['ApplicantIncome', 'CoapplicantIncome',
# 'Credit_History', 'LoanAmount_log'], but this cell selected a non-existent
# 'LoanAmount' column. The captured record stores the amount under
# 'Loan_Amount', and every input() value is a string, so cast the numeric
# fields and derive LoanAmount_log before predicting.
numeric = Inputtest[['ApplicantIncome', 'CoapplicantIncome', 'Credit_History', 'Loan_Amount']].astype(float)
numeric['LoanAmount_log'] = np.log(numeric['Loan_Amount'])
Y = numeric[['ApplicantIncome', 'CoapplicantIncome', 'Credit_History', 'LoanAmount_log']]
# +
InputPred = model.predict(Y)
InputPred
# -
#Print the prediction result (typo "ELLIGIBLE" fixed).
print("ARE YOU ELIGIBLE FOR THE LOAN?",InputPred[0])
# +
# -
| Beta signal loan prediction/project 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import Data
# +
# %matplotlib notebook
import locale
from locale import atof
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# NOTE(review): matplotlib.finance was deprecated and later removed from
# matplotlib; this notebook needs an old matplotlib (or a port to mplfinance).
import matplotlib.finance as fin
import matplotlib.ticker as mticker
import datetime as dt
# Use the system locale so atof() can parse thousands-separated numbers.
locale.setlocale(locale.LC_NUMERIC, '')
# Tab-separated CoinMarketCap exports; thousands=',' strips separators on read.
BTC_df = pd.read_csv("BitcoinHistoricalData_coinmarketcap.csv", sep = '\t', thousands=',')
ETH_df = pd.read_csv("EthereumHistoricalData_coinmarketcap.csv", sep = '\t', thousands=',')
BC_Events_df = pd.read_csv("BlockchainEvents_2016_May.csv", sep = '\t', thousands=',')
# Index BTC by calendar date, oldest first.
BTC_df['Date'] = pd.to_datetime(BTC_df['Date'])
BTC_df['Date'] = BTC_df['Date'].dt.date
BTC_df.set_index(['Date'],inplace=True)
BTC_df.sort_index(inplace=True)
# '-' marks missing volume; drop those rows and parse the rest as integers.
BTC_df['Volume'] = BTC_df['Volume'].replace('-', np.NaN)
BTC_df.dropna(inplace=True)
BTC_df['Volume'] = BTC_df['Volume'].apply(atof)
BTC_df['Volume'] = BTC_df['Volume'].astype(int)
BC_Events_df['Date'] = pd.to_datetime(BC_Events_df['Date'])
# Same cleanup for ETH, where 'Market Cap' is the column with '-' gaps.
ETH_df['Date'] = pd.to_datetime(ETH_df['Date'])
ETH_df['Date'] = ETH_df['Date'].dt.date
ETH_df.set_index(['Date'], inplace=True)
ETH_df.sort_index(inplace=True)
ETH_df['Market Cap'] = ETH_df['Market Cap'].replace('-', np.NaN)
ETH_df.dropna(inplace=True)
ETH_df['Market Cap'] = ETH_df['Market Cap'].apply(atof)
# +
#ETH_df
# -
# ## PLOTTING
# +
#BTC_df.plot(y=['Volume', 'Market Cap'], figsize=(10,5))
# +
#ETH_df.plot(y=['Volume', 'Market Cap'], figsize=(10,5))
# +
# Restrict both series to a one-year analysis window.
start_dt = '2016-10-01'
end_dt = '2017-10-01'
start_date = pd.to_datetime(start_dt).date()
end_date = pd.to_datetime(end_dt).date()
BTC_df = BTC_df[(BTC_df.index >= start_date) & (BTC_df.index <= end_date)]
ETH_df = ETH_df[(ETH_df.index >= start_date) & (ETH_df.index <= end_date)]
# Daily date axes for plotting (one entry per calendar day).
Dates = np.arange(pd.to_datetime(start_date), pd.to_datetime(end_date)+ pd.DateOffset(1), dtype='datetime64[D]')
BTC_Dates = np.arange(min(BTC_df.index) - pd.DateOffset(1), max(BTC_df.index), dtype='datetime64[D]')
ETH_Dates = np.arange(min(ETH_df.index) - pd.DateOffset(1), max(ETH_df.index), dtype='datetime64[D]')
# +
fig, (ax1, ax2) = plt.subplots(2, 1, sharex = True, figsize=(10,6))
# Build (date-number, O, H, L, C, V) tuples in the form candlestick_ohlc expects.
ohlc1 = [(mdates.date2num(ind), x['Open'], x['High'], x['Low'], x['Close'], x['Volume']) for ind, x in BTC_df.iterrows()]
ohlc2 = [(mdates.date2num(ind), x['Open'], x['High'], x['Low'], x['Close'], x['Volume']) for ind, x in ETH_df.iterrows()]
# Green candles for up days, red for down days.
fin.candlestick_ohlc(ax1, ohlc1, width=0.2, colorup='g', colordown='r', alpha=1.0)
fin.candlestick_ohlc(ax2, ohlc2, width=0.2, colorup='g', colordown='r', alpha=1.0)
for label in ax2.xaxis.get_ticklabels():
    label.set_rotation(45)
# Minimalist frame: hide every spine on both axes.
for ax in fig.axes:
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # (duplicate 'left' line kept from the original; harmless no-op)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax2.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Shared x-axis: ISO dates, at most 15 major ticks.
ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax2.xaxis.set_major_locator(mticker.MaxNLocator(15))
ax1.set_ylabel('USD', color='k')
ax2.set_ylabel('USD', color='k')
ax1.set_title('BTC')
ax2.set_title('ETH')
ax1.xaxis.set_ticks_position('none')
fig.tight_layout()
# +
fig, (ax1, ax3) = plt.subplots(2, 1, sharex = True, figsize=(10,6))
# Twin y-axes: volume / market cap on the left, closing price on the right.
ax2 = ax1.twinx()
ax4 = ax3.twinx()
x = Dates
#x = list(map(pd.to_datetime, Dates))
#x = [dt.datetime.strptime(str(d),'%Y-%m-%d 00:00:00') for d in x]
#x1 = list(map(pd.to_datetime, BTC_Dates))
#x1 = [dt.datetime.strptime(str(d),'%Y-%m-%d 00:00:00') for d in x1]
#x2 = list(map(pd.to_datetime, ETH_Dates))
#x2 = [dt.datetime.strptime(str(d),'%Y-%m-%d 00:00:00') for d in x2]
y1 = BTC_df['Volume']
y2 = BTC_df['Market Cap']
y0 = BTC_df['Close']
y3 = ETH_df['Volume']
y4 = ETH_df['Market Cap']
y5 = ETH_df['Close']
#line1, = ax1.plot(x,y1, 'b', alpha =0.5)
# Top panel: volumes as lines, closing prices as scatter on the twin axis.
line2, = ax1.plot(x,y3, 'b', alpha =0.5, label='ETH Volume')
line0, = ax1.plot(x,y1, 'r', alpha =0.5, label='BTC Volume')
ax2.scatter(x, y0, color='r', s=8, label='BTC Price')
ax2.scatter(x, y5, color='b', s=8, label='ETH Price')
#line3, = ax3.plot(x,y3, 'b', alpha =0.75)
# Bottom panel: market caps as lines, closing prices as scatter.
line4, = ax3.plot(x,y4, 'b', alpha =0.5, label='ETH Market Cap')
line5, = ax3.plot(x,y2, 'r', alpha =0.5, label='BTC Market Cap')
ax4.scatter(x, y0, color='r', s=8, label='BTC Price')
ax4.scatter(x, y5, color='b', s=8, label='ETH Price')
# Shade the gap between the two series in each panel.
ax1.fill_between(x, y3, y1, facecolor='grey', alpha=0.25)
ax3.fill_between(x, y2, y4, facecolor='grey', alpha=0.25)
ax1.set_ylabel('Volume', color='k')
ax2.set_ylabel('Closing Price', color='k')
ax3.set_ylabel('Market Cap', color='k')
ax4.set_ylabel('Closing Price', color='k')
ax1.set_title('Volume and Closing price (BTC/ETH)')
ax3.set_title('Market Cap and Closing price (BTC/ETH)')
for ax in fig.axes:
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    #ax.yaxis.grid(color='k', which='major', linestyle='--', linewidth=.5)
    #ax.yaxis.grid(True, which='major')
    # Price legends sit nearer the left edge than the volume/cap legends.
    if ax == ax2 or ax == ax4:
        ax.legend(loc='upper right', bbox_to_anchor=(0.19, 0.99))
    else:
        ax.legend(loc='upper right', bbox_to_anchor=(0.4, 0.99))
ax1.xaxis.set_ticks_position('none')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax3.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
fig.tight_layout()
#for d in BC_Events_df['Date']:
#ax1.axvline(x=d, color='k')
#plt.text()
# -
# ## ML modeling
# +
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
## BITCOIN CLOSING PRICE
# NOTE(review): Open/High/Low of the *same* day are used as features for that
# day's Close, so these scores measure contemporaneous fit, not forecasting.
X = BTC_df[['Volume', 'Open', 'High', 'Low']].copy()
#X = BTC_df[['Volume', 'Market Cap']]
#y = BTC_df['Open']
y = BTC_df['Close']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
#Dummy Regression
#dr = DummyRegressor().fit(X_train, y_train)
#print ('Dummy Regression: ', 'Train: ', r2_score(y_train, dr.predict(X_train)))
#print ('Test: ', r2_score(y_test, dr.predict(X_test)))
# Linear regression (the original comment said "Logistic" — this is linear).
lrc = LinearRegression().fit(X_train, y_train)
print ('Linear Regression (BTC):')
print ('\tTrain (r2_score): ', r2_score(y_train, lrc.predict(X_train)))
print ('\tTest (r2_score): ', r2_score(y_test, lrc.predict(X_test)))
#print ('\tTrain (explained_variance_score): ', explained_variance_score(y_train, lrc.predict(X_train)))
#print ('\tTest (explained_variance_score): ', explained_variance_score(y_test, lrc.predict(X_test)))
#Random Forest Regression
rfr = RandomForestRegressor(n_estimators = 300, max_depth=15, random_state=0).fit(X_train, y_train)
print ('Random Forest Regression (BTC): ')
print('\tTrain (r2 score): ', r2_score(y_train, rfr.predict(X_train)))
print('\tTest (r2 score): ', r2_score(y_test, rfr.predict(X_test)))
### ETHEREUM PRICE
#X_ETH = ETH_df[['Volume', 'Market Cap', 'Open', 'High', 'Low']].copy()
# Derived features: mid price, and 'Nodes' = market cap / avg price
# (i.e. an implied circulating-supply estimate; the name is misleading).
ETH_df['Avg Price'] = (ETH_df['High']+ETH_df['Low'])/2
ETH_df['Nodes'] = ETH_df['Market Cap']/ETH_df['Avg Price']
X_ETH = ETH_df[['Volume', 'Market Cap']]
#X_ETH = ETH_df[['Volume', 'Nodes']]
y_ETH = ETH_df['Close']
X_ETH_train, X_ETH_test, y_ETH_train, y_ETH_test = train_test_split(X_ETH, y_ETH, random_state=0)
#Dummy Regression
#dr2 = DummyRegressor().fit(X_ETH_train, y_ETH_train)
#print ('Dummy Regression: ', 'Train: ', r2_score(y_ETH_train, dr2.predict(X_ETH_train)))
#print ('Test: ', r2_score(y_ETH_test, dr2.predict(X_ETH_test)))
# Linear regression for ETH (again linear, not logistic).
lrc2 = LinearRegression().fit(X_ETH_train, y_ETH_train)
print ('Linear Regression (ETH): ')
print('\tTrain (r2 score): ', r2_score(y_ETH_train, lrc2.predict(X_ETH_train)))
print ('\tTest (r2 score): ', r2_score(y_ETH_test, lrc2.predict(X_ETH_test)))
#Random Forest Regression
rfr2 = RandomForestRegressor(n_estimators = 50, max_depth=10, random_state=0).fit(X_ETH_train, y_ETH_train)
print ('Random Forest Regression (ETH): ')
print('\tTrain (r2 score): ', r2_score(y_ETH_train, rfr2.predict(X_ETH_train)))
print ('\tTest (r2 score): ', r2_score(y_ETH_test, rfr2.predict(X_ETH_test)))
# -
# Spot predictions for one hand-entered day of data.
cur_BTC = [[2401840000, 6006.00, 6075.59, 5732.47]]
lrc.predict(cur_BTC)
cur_ETH = [[482336000, 28111400000]]
lrc2.predict(cur_ETH)
# +
#explained_variance_score([5930.32], lrc.predict(cur_BTC))
#r2_score([5930.32], lrc.predict(cur_BTC))
# -
| CryptoAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %autosave 0
# + code_folding=[0]
#Import Packages
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import validation_curve
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
# + code_folding=[]
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as an annotated heat map.

    Parameters
    ----------
    cm : square array as returned by ``confusion_matrix``.
    classes : tick labels for both axes, in matrix order.
    normalize : if True, show per-row fractions instead of raw counts.
    title, cmap : figure title and matplotlib colormap.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell; switch text colour on dark cells for contrast.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            shade = "white" if cm[row, col] > cutoff else "black"
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color=shade)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# + code_folding=[]
def printcfm(y_test,y_pred,title='confusion matrix'):
    """Compute the confusion matrix for (y_test, y_pred) and plot it.

    Class labels are fixed to ['Sem Perda', 'Perda'] (no-loss / loss).
    """
    matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    # Draw the (non-normalized) matrix on a fresh figure.
    plt.figure()
    plot_confusion_matrix(matrix, title=title, classes=['Sem Perda','Perda'])
# + code_folding=[]
def plotRoc(y_real, y_pred_prob):
    """Plot the ROC curve for predicted probabilities, annotated with the AUC."""
    false_pos, true_pos, _ = roc_curve(y_real, y_pred_prob)
    area = roc_auc_score(y_real, y_pred_prob)
    # Diagonal reference line = performance of a random classifier.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(false_pos, true_pos)
    plt.text(1, 0.5, "AUC: %3.3f" % (area), {'color': 'C2', 'fontsize': 18}, va="center", ha="right")
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.show()
# -
# Display settings: show up to 350 rows / 60 columns when printing frames.
pd.options.display.max_rows=350
pd.options.display.max_columns=60
# + code_folding=[0]
# df=pd.read_csv('baseProjeto_over.csv', index_col=0)
# df
#df.columns
# X=df[['ATRIB_MAX1',
# 'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
# 'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# X.head()
# X.info()
# #cat=['MGP1_sim', 'MGP2_sim', 'MGP3_sim', 'MGP4_sim',
# 'MGP5_sim', 'MGP6_sim', 'MGP7_sim', 'MGP8_sim', 'MGP9_sim', 'MGP10_sim',
# 'MGP11_sim', 'MGP12_sim', 'MGP13_sim', 'MGP14_sim',]
# #X[cat] = X[cat].astype('category')
# X.info()
# y = df['Perda30']
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42, stratify=y)
#sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
#for train_index, test_index in sss.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# #y_train, y_test = y[train_index], y[test_index]
# +
# train=pd.read_csv('baseProjetoTrainOver.csv', index_col=0)
# test=pd.read_csv('baseProjetoTest.csv', index_col=0)
# -
# Phase-1 data: oversampled training set and untouched test set.
train=pd.read_csv('baseProjetoTrainOverFase1.csv', index_col=0)
test=pd.read_csv('baseProjetoTestFase1.csv', index_col=0)
# +
# Feature matrix: the ATRIB_*/DIFP attributes plus the 14 MGP columns.
X_train = train[['ATRIB_MAX1',
                 'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
                 'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
X_test = test[['ATRIB_MAX1',
               'ATRIB_DIST1', 'DIFP', 'MGP1', 'MGP2', 'MGP3', 'MGP4', 'MGP5', 'MGP6',
               'MGP7', 'MGP8', 'MGP9', 'MGP10', 'MGP11', 'MGP12', 'MGP13', 'MGP14']]
# Binary target: "Perda30" (loss within 30 days).
y_test = test['Perda30']
y_train = train['Perda30']
# -
# Class-balance check on both splits.
y_test.value_counts().plot(kind='bar', title='Count (Perda30)');
y_train.value_counts().plot(kind='bar', title='Count (Perda30)');
# <br>
# ## Neural Network - Scaled with StandardScaler
# Pipeline: standardize the features, then a 2-hidden-layer MLP (50 and 32 units).
steps = [('scaler', StandardScaler()),(('neural', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 32), random_state=42, max_iter=500, warm_start=True)))]
pipeline = Pipeline(steps)
# hidden_layer_sizes=(n1, n2,..., nx) <br>
# n1 = number of neurons in hidden layer_1 <br>
# nx = number of neurons in hidden layer_x <br>
neural_scaled = pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
accuracy_score(y_test, y_pred)
# Probability of the positive class, used for the ROC curve below.
y_pred_prob = pipeline.predict_proba(X_test)[:,1]
# +
#for i in range(len(y_pred)):
#    print(y_pred_prob[i],y_pred[i])
# -
plotRoc(y_test, y_pred_prob)
printcfm(y_test,y_pred, title='confusion matrix')
print(classification_report(y_test, y_pred))
# +
# cv_scores = cross_val_score(pipeline, X, y, cv=5)
# +
# print(cv_scores)
# print("Average 5-Fold CV Score: {}".format(np.mean(cv_scores)))
# -
# <br>
# ## Neural Network - Scaled with MinMaxScaler
# Same MLP topology, but min-max scaling and max_iter=1000.
steps = [('scaler', MinMaxScaler()),(('neural', MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 32), random_state=42, max_iter=1000, warm_start=True)))]
pipeline = Pipeline(steps)
# hidden_layer_sizes=(n1, n2,..., nx) <br>
# n1 = number of neurons in hidden layer_1 <br>
# nx = number of neurons in hidden layer_x <br>
#neural_scaled = pipeline.fit(X_train, y_train)
pipeline.fit(X_train, y_train)
# NOTE(review): `neural_scaled` still refers to the StandardScaler pipeline
# fitted above (the reassignment here is commented out), so this prints the
# previous model — confirm intent.
print(neural_scaled)
y_pred = pipeline.predict(X_train)
accuracy_score(y_train, y_pred)
y_pred_prob = pipeline.predict_proba(X_train)[:,1]
# Out-of-fold probabilities / predictions via 3-fold cross-validation.
y_scores = cross_val_predict(pipeline, X_train, y_train, cv=3, method='predict_proba' )
y_train_pred = cross_val_predict(pipeline, X_train, y_train, cv=3)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
# print(y_scores)
# print(np.mean(y_scores))
# +
# for i in range(len(y_pred)):
#     print(y_pred_prob[i],y_pred[i], y_scores[i])
# -
plotRoc(y_train, y_scores)
# NOTE(review): the arguments are (y_train_pred, y_pred) — cross-validated
# predictions compared against *in-sample* predictions rather than against
# the true labels y_train; confirm this is intentional.
printcfm(y_train_pred, y_pred, title='confusion matrix')
print(classification_report(y_train_pred, y_pred))
y_pred = pipeline.predict(X_test)
y_pred_prob = pipeline.predict_proba(X_test)[:,1]
plotRoc(y_test, y_pred_prob)
printcfm(y_test,y_pred, title='confusion matrix')
print(classification_report(y_test, y_pred))
# +
# cv_scores = cross_val_score(pipeline, X_train, y_train, cv=5)
# print(cv_scores)
# -
# ## Fine-tuning the model.
# To turn on fine-tuning: <br>
# define ft = 1
# Fine-tuning is off by default; set ft = 1 to run the grid search below.
ft = 0
# ### 2 - Grid Search
if ft == 1 :
    # Exhaustive search over solver, L2 penalty (alpha) and all 3-layer
    # topologies built from {5, 10, 20, 30, 60, 100} units per layer.
    rn = MLPClassifier(max_iter=1000, random_state=42)
    parameters = {'solver': ['lbfgs','adam','sgd'], 'alpha': 10.0 ** -np.arange(1, 7),
                  'hidden_layer_sizes': [x for x in itertools.product((5,10,20,30,60,100),repeat=3)]
                  }
    cv = GridSearchCV(rn, param_grid=parameters, verbose=3, n_jobs=-1)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    # Bug fix: transform (not fit_transform) the test set, so it is scaled
    # with the statistics learned from the training data only.
    X_test_scaled = scaler.transform(X_test)
    # rf.fit(X_train, y_train);
    cv.fit(X_train_scaled, y_train);
if ft == 1:
    print("Best params: ", cv.best_params_,)
    print("Best Score: %3.3f" %(cv.best_score_))
    y_pred = cv.predict(X_train_scaled)
    final_model =cv.best_estimator_
    print(final_model)
# ### Best Model Result (11/2018) - 38 Wells
#
# MLPClassifier(activation='relu', alpha=0.001, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(60, 10, 30), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ### Best Model Result (11/2018) - 89 Wells
# MLPClassifier(activation='relu', alpha=0.01, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(30, 100, 5), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ### Best Model Result (09/2018) - 89 Wells
# MLPClassifier(activation='relu', alpha=alpha, batch_size='auto', beta_1=0.9,
# beta_2=0.999, early_stopping=False, epsilon=1e-08,
# hidden_layer_sizes=(5, 60), learning_rate='constant',
# learning_rate_init=0.001, max_iter=1000, momentum=0.9,
# nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
# solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
# ## Regularization of the best model
# +
# alpha=1e-5
# -
# Refit the pipeline with the regularized configuration (alpha=0.001).
steps = [('scaler', StandardScaler()),(('neural', MLPClassifier(activation='relu', alpha=0.001, batch_size='auto', beta_1=0.9,
       beta_2=0.999, early_stopping=False, epsilon=1e-08,
       hidden_layer_sizes=(50, 32), learning_rate='constant',
       learning_rate_init=0.001, max_iter=1000, momentum=0.9,
       nesterovs_momentum=True, power_t=0.5, random_state=42, shuffle=True,
       solver='adam', tol=0.0001, validation_fraction=0.1, verbose=False,
       warm_start=False)))]
pipeline = Pipeline(steps)
#neural_scaled = pipeline.fit(X_train, y_train)
pipeline.fit(X_train, y_train)
# ## Predicting the Classes in Training Set
y_train_pred = pipeline.predict(X_train)
y_train_prob = pipeline.predict_proba(X_train)[:,1]
acc_train = accuracy_score(y_train, y_train_pred)
# NOTE(review): AUC computed from hard class predictions, not from the
# probabilities in y_train_prob — this understates the usual ROC AUC.
auc_train = roc_auc_score(y_train, y_train_pred)
plotRoc(y_train, y_train_prob)
printcfm(y_train, y_train_pred, title='confusion matrix')
print(classification_report(y_train, y_train_pred))
# ## Precision/Recall Tradeoff
# +
#y_scores = cross_val_predict(pipeline, X_train, y_train, cv=3, method='predict_proba' )
# print(y_scores)
# print(np.mean(y_scores))
# +
#y_pred_prob
# +
# y_scores.shape
# +
# # hack to work around issue #9589 in Scikit-Learn 0.19.0
# if y_scores.ndim == 2:
# y_scores = y_scores[:, 1]
# -
# Precision/recall at every decision threshold, from the out-of-fold scores.
precisions, recalls, thresholds = precision_recall_curve(y_train, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
    plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
# 3-fold cross-validated accuracy of the current pipeline.
cv_scores = cross_val_score(pipeline, X_train, y_train, cv=3)
print(cv_scores)
print(np.mean(cv_scores))
# +
def plot_precision_vs_recall(precisions, recalls):
    """Plot precision against recall (the PR trade-off curve)."""
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    # x-axis padded slightly past 1 so the curve's end point stays visible.
    plt.axis([0, 1.01, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
plt.show()
# +
# precisions, recalls, thresholds = precision_recall_curve(y_train, y_pred_prob)
# def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
# plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
# plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
# plt.xlabel("Threshold")
# plt.legend(loc="upper left")
# plt.ylim([0, 1])
# plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# plt.show()
# -
# ## Varying the Threshold for train set
predict_mine = np.where(y_train_prob > 0.5, 1, 0)
# NOTE(review): the name suggests a 0.90 threshold but the cutoff used is .5
# (so this matches the default decision threshold) — confirm intent.
y_train_pred_90 = (y_scores > .5)
precision = precision_score(y_train, y_train_pred_90)
recall = recall_score(y_train, y_train_pred_90)
print(precision, recall)
printcfm(y_train, predict_mine, title='confusion matrix')
print(classification_report(y_train, predict_mine))
# # Evaluating the model with Cross-Validation
y_pred_prob = pipeline.predict_proba(X_train)[:,1]
# Out-of-fold scores and predictions for the regularized pipeline.
y_scores = cross_val_predict(pipeline, X_train, y_train, cv=3, verbose=3, method='predict_proba')
y_train_pred = cross_val_predict(pipeline, X_train, y_train, cv=3, verbose=3)
# hack to work around issue #9589 in Scikit-Learn 0.19.0
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
# print(y_scores)
# print(np.mean(y_scores))
plotRoc(y_train, y_scores)
# NOTE(review): again AUC from hard predictions, not probabilities.
auc_train = roc_auc_score(y_train, y_train_pred)
printcfm(y_train, y_train_pred, title='confusion matrix')
print(classification_report(y_train, y_train_pred))
# # Predicting the Classes in Test Set
y_pred = pipeline.predict(X_test)
acc_test = accuracy_score(y_test, y_pred)
y_pred_prob = pipeline.predict_proba(X_test)[:,1]
# +
#for i in range(len(y_pred)):
#    print(y_pred_prob[i],y_pred[i])
# -
plotRoc(y_test, y_pred_prob)
auc_test = roc_auc_score(y_test, y_pred_prob)
printcfm(y_test, y_pred, title='confusion matrix')
print(classification_report(y_test, y_pred))
# ## Varying the Threshold for test set
predict_mine = np.where(y_pred_prob > .5, 1, 0)
printcfm(y_test, predict_mine, title='confusion matrix')
print(classification_report(y_test, predict_mine))
precision_score(y_test, predict_mine)
# ## Results
# print("Alpha: ", alpha)
print("AUC Train: %3.3f" % (auc_train))
print("Accuracy Train: %3.2f%%" % (acc_train*100))
print("AUC test: %3.2f" % (auc_test))
print("Accuracy Test %3.2f%%" % (acc_test*100))
# Recombine the splits for the validation-curve sweep over alpha.
X=np.concatenate((X_train,X_test),axis=0)
y=np.append(y_train,y_test)
# +
# NOTE(review): __doc__ is the module docstring (None here) — this print is
# a leftover from the scikit-learn example this cell is based on.
print(__doc__)
# param_range = 10.0 ** -np.arange(1, 7),
param_range = np.logspace(-10, 7, 10)
# Train/CV AUC as a function of the MLP's L2 penalty (alpha), 10-fold CV.
# "neural__alpha" addresses the MLPClassifier step inside the pipeline.
train_scores, test_scores = validation_curve(
    pipeline, X, y, param_name="neural__alpha", param_range=param_range,
    cv=10, scoring="roc_auc", n_jobs=-1, verbose=3)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with MLP")
plt.xlabel("alpha")
plt.ylabel("AUC")
#plt.ylim(0.0, 1.1)
#plt.xlim(-1, 22)
lw = 2
# Mean scores with +/- one standard-deviation bands, on a log-x axis.
plt.semilogx(param_range, train_scores_mean, label="Training score",
             color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2,
                 color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2,
                 color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
# -
# # Add learning curves
| Model-Study/mlModelsMLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dominikaandrzejewska/computer-vision-challenge/blob/master/cats_dogs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="HnCID_umzqkx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 64} outputId="37aaeab9-5442-4c6f-94d7-0f3b8953d5db"
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# + id="SaJxCb5e0pGl" colab_type="code" outputId="1c03dc36-49a7-4382-d50d-8ec9887575bb" colab={"base_uri": "https://localhost:8080/", "height": 208}
# !wget --no-check-certificate \
# https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
# -O /tmp/cats_and_dogs_filtered.zip
# + id="KzQpFadQ0-VY" colab_type="code" colab={}
local_zip = '/tmp/cats_and_dogs_filtered.zip'
# Unpack the dataset archive into /tmp; the context manager guarantees the
# zip handle is closed even if extraction raises.
with zipfile.ZipFile(local_zip) as archive:
    archive.extractall('/tmp')
# + id="08wLNNrO2Vqe" colab_type="code" colab={}
# Expected layout: cats_and_dogs_filtered/{train,validation}/{cats,dogs}
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_dir = os.path.join(base_dir, 'validation')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# + id="ngJOPWBx3q_0" colab_type="code" colab={}
def plot_images(item_dir, top=25):
    """Plot up to ``top`` images from ``item_dir`` in a square grid.

    Fix: the subplot grid was hard-coded to 5x5, so any ``top`` > 25 raised
    a subplot-index error; the grid side is now derived from ``top``.
    """
    all_item_dirs = os.listdir(item_dir)
    item_files = [os.path.join(item_dir, file) for file in all_item_dirs[:top]]
    # Smallest square grid large enough to hold `top` images (5 for top=25).
    grid = max(1, int(top ** 0.5))
    if grid * grid < top:
        grid += 1
    plt.figure(figsize=(10, 10))
    for idx, img_path in enumerate(item_files):
        plt.subplot(grid, grid, idx + 1)
        img = mpimg.imread(img_path)
        plt.imshow(img)
    plt.tight_layout()
# + id="E_lesxJK5IBG" colab_type="code" outputId="c10c319c-2148-4546-8d37-458d195d1093" colab={"base_uri": "https://localhost:8080/", "height": 572}
# 150x150 RGB input images.
input_shape = (150,150,3)
# Four conv/max-pool stages (32 -> 64 -> 64 -> 128 filters) followed by a
# small dense head; a single sigmoid unit for binary classification.
model = Sequential([
    Conv2D(32, (3,3), input_shape = input_shape),
    MaxPool2D((2,2)),
    Conv2D(64, (3,3)),
    MaxPool2D((2,2)),
    Conv2D(64, (3,3)),
    MaxPool2D((2,2)),
    Conv2D(128, (3,3)),
    MaxPool2D((2,2)),
    Flatten(),
    Dense(64, activation = 'relu'),
    # Dropout to reduce overfitting on this small dataset.
    Dropout(0.5),
    Dense(1, activation = 'sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.summary()
# + id="qGETo0-9-_5T" colab_type="code" colab={}
# Only rescaling (no augmentation): map pixel values from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# + id="D0L7EdGBB7BM" colab_type="code" outputId="ae0a71e1-2662-4e62-d686-de34527511c7" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Stream batches of 20 images, resized to 150x150, with 0/1 labels inferred
# from the cats/ and dogs/ subdirectory names.
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150,150),
    batch_size=20,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150,150),
    batch_size=20,
    class_mode='binary'
)
# + id="3kSr6BhXPXwH" colab_type="code" outputId="2e41935b-34f8-44d8-97bb-2bb977fcf400" colab={"base_uri": "https://localhost:8080/", "height": 277}
# NOTE(review): Model.fit_generator is deprecated and was removed in TF 2.6;
# Model.fit accepts generators directly from TF 2.1 on. Left as-is because
# this notebook may pin an older TF -- confirm the runtime version.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=5,
    validation_data=validation_generator,
    validation_steps=50,
    verbose=2
)
# + id="30PQ83mJPuvO" colab_type="code" colab={}
| cats_dogs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Setup
import sys
sys.path.append('..')
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import shutil
import warnings
from utils.data.annotations import *
from utils.data.density_maps import create_and_save_density_maps
from utils.data.data_ops import move_val_split_to_train
from utils.input_output.io import save_np_arrays, load_np_arrays, load_images
from utils.input_output.io import save_gt_counts, load_gt_counts
from utils.preprocessing.misc import gaussian_smoothing
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
warnings.filterwarnings('ignore')
# -
# ## 2. Datasets
# ### 2.1 ShanghaiTech (Part B) Dataset
# Directory layout for ShanghaiTech Part B: images plus three ground-truth
# variants (dot maps, scalar counts, precomputed density maps).
DATASET_PATH = '../datasets/shanghai_tech/part_b'
TRAIN_PATH = f'{DATASET_PATH}/train'
TRAIN_IMG_PATH = f'{TRAIN_PATH}/images'
TRAIN_GT_DOTS_PATH = f'{TRAIN_PATH}/gt_dots'
TRAIN_GT_COUNTS_PATH = f'{TRAIN_PATH}/gt_counts'
TRAIN_GT_DENSITY_MAPS_PATH = f'{TRAIN_PATH}/gt_density_maps'
# Sanity check: confirm the expected folders exist before loading anything.
print(DATASET_PATH)
print(os.listdir(DATASET_PATH))
print(TRAIN_PATH)
print(os.listdir(TRAIN_PATH))
# #### Load some train images and density maps
# Two hand-picked example images for the visualisations below.
train_img_names = ['IMG_78.jpg', 'IMG_154.jpg']
train_images = load_images(TRAIN_IMG_PATH, train_img_names, num_images=2)
print(len(train_images))
print(train_images[0].dtype)
# #### Load dots images (.png)
# +
# Dot annotations share the image basenames but use the .png extension.
train_img_names_png = [img_name.split('.')[0] + '.png' for img_name in train_img_names]
train_dots_images = load_dots_images(TRAIN_GT_DOTS_PATH, train_img_names_png)
print(len(train_dots_images), train_dots_images[0].shape, train_dots_images[0].dtype,
      train_dots_images[0].min(), train_dots_images[0].max(), train_dots_images[0].sum())
# -
# #### Create density maps (sigma=10)
# Smooth each binary dot-annotation map into a continuous density map with a
# Gaussian kernel; the sum of each map is preserved as the object count.
SIGMA = 10
train_gt_density_maps = [
    gaussian_smoothing([dot_map], sigma=SIGMA)[0]
    for dot_map in train_dots_images
]
print(train_gt_density_maps[0].shape)
# +
import skimage.morphology

# Figure layout: one row per example image; columns are
# (original image, dilated dot annotations, density map).
ROWS = 2
COLS = 3
fontsize = 16
fraction = 0.0355
pad = 0.016
plt.figure(figsize=(22, 11))
# NOTE(review): this notebook loads ShanghaiTech Part B, but the title says
# 'CARPK Dataset' -- looks like a copy-paste leftover; confirm intent.
plt.title('CARPK Dataset')
for i in range(ROWS):
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented drop-in replacement.
    count = train_dots_images[i].sum().astype(int)
    plt.subplot(ROWS, COLS, COLS * i + 1)
    if i == 0 :
        plt.title(f'Original Image\n', fontsize=fontsize)
    plt.imshow(train_images[i])
    plt.colorbar(fraction=fraction, pad=pad)
    plt.axis('off')
    plt.subplot(ROWS, COLS, COLS * i + 2)
    title = f'Dot Annotations: {count}'
    if i == 0:
        title = 'Ground Truth\n' + title
    plt.title(title, fontsize=fontsize)
    # Dilate the single-pixel dots so they are visible at this figure scale.
    plt.imshow(skimage.morphology.binary_dilation(train_dots_images[i], np.ones((3, 3))).astype(float),
               cmap='gray', interpolation='none')
    plt.colorbar(fraction=fraction, pad=pad)
    plt.axis('off')
    plt.subplot(ROWS, COLS, COLS * i + 3)
    title = f'Density Map: {train_gt_density_maps[i].sum():.0f}'
    if i == 0:
        title = 'Ground Truth\n' + title
    plt.title(title, fontsize=fontsize)
    plt.imshow(train_gt_density_maps[i], cmap='jet')
    plt.colorbar(fraction=fraction, pad=pad)
    plt.axis('off')
| playground/gt_shanghai_tech_b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="iRD9uk5uiKEo" colab_type="text"
# # Getting Started with TensorFlow 2.0 in 7 Days
# ## 2.4 Optimizers
# + id="hILTlkIbeIuC" colab_type="code" outputId="ea3b669a-20fb-45ee-8a8f-26564f271525" colab={"base_uri": "https://localhost:8080/", "height": 373}
# !pip install tensorflow==2.0.0-beta0
# + id="GE1Tx9b8eRqY" colab_type="code" colab={}
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="Pcwc7403egwZ" colab_type="code" colab={}
# Fashion-MNIST: 28x28 grayscale images in 10 clothing categories.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Scale pixel values from [0, 255] to [0, 1] before training.
train_images = train_images / 255.0
test_images = test_images / 255.0
# + [markdown] id="y7rmHMrJe3K_" colab_type="text"
# ## Stochastic Gradient Descent
# + id="gTafzpttewVA" colab_type="code" colab={}
# Experiment 1: plain SGD (tiny momentum, no Nesterov), batch size 32.
sgd = keras.optimizers.SGD(learning_rate=0.1, momentum=0.01, nesterov=False)
# + id="5i5ymslefllV" colab_type="code" outputId="269e4a56-0f1e-4639-a79e-6abbb7289851" colab={"base_uri": "https://localhost:8080/", "height": 306}
# Same MLP used throughout this section: two 2000-unit ReLU hidden layers.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=32)
# + id="o5Pn3-bjfv03" colab_type="code" outputId="1024a2f5-31ad-469f-b09b-d3564241cd2f" colab={"base_uri": "https://localhost:8080/", "height": 218}
# Experiment 2: identical model and optimizer, larger batch size (128).
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=128)
# + id="KkA94HgYj73G" colab_type="code" colab={}
# Experiment 3: smaller learning rate, more momentum, Nesterov enabled.
sgd = keras.optimizers.SGD(learning_rate=0.05, momentum=0.1, nesterov=True)
# + id="yaxDeu5xlC8G" colab_type="code" outputId="4dfe01ac-477b-40ac-e16f-67fa9723fceb" colab={"base_uri": "https://localhost:8080/", "height": 218}
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=128)
# + id="UK42cNmnlGBn" colab_type="code" colab={}
# Experiment 4: deliberately aggressive learning rate (0.9) to show divergence
# / instability compared with the settings above.
sgd = keras.optimizers.SGD(learning_rate=0.9, momentum=0.3, nesterov=True)
# + id="akbiKS5smlQq" colab_type="code" outputId="79a84a7e-c13e-4a88-e1ac-77dc44950bf6" colab={"base_uri": "https://localhost:8080/", "height": 218}
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=128)
# + [markdown] id="DVuRE8tDp_VY" colab_type="text"
# ## Adaptive Momentum
# + id="AwZ_gLsbmnN6" colab_type="code" colab={}
# Adam with an elevated learning rate (default is 0.001); other arguments are
# the keras defaults spelled out explicitly.
adam = keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False)
# + id="SNymMlKXqo2P" colab_type="code" outputId="e54280e5-7a21-4826-d921-78cdb58b1c4d" colab={"base_uri": "https://localhost:8080/", "height": 218}
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=adam, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=128)
# + [markdown] id="8ObhinrVslqE" colab_type="text"
# ## Adagrad
# + id="zWAxRk76qs6v" colab_type="code" colab={}
# Adagrad: per-parameter learning rates that shrink as gradients accumulate.
adagrad = keras.optimizers.Adagrad(learning_rate=0.1, initial_accumulator_value=0.1, epsilon=1e-7)
# + id="FrQh0oCytTl9" colab_type="code" outputId="f23299ce-5bd2-4b96-80f5-7cc20265a7a6" colab={"base_uri": "https://localhost:8080/", "height": 218}
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(2000, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=adagrad, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=128)
# + id="bnq-dizFtep7" colab_type="code" outputId="15fd209a-740e-43c2-ea2b-1fbf0c65d7b8" colab={"base_uri": "https://localhost:8080/", "height": 302}
# Architecture/parameter summary of the last model trained above.
print(model.summary())
# + id="yLuWREj-0Z3_" colab_type="code" colab={}
| Section 2/Packt_2_4_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + iooxa={"id": {"block": "8hoXeANtrV3ZUHZuBUa8", "project": "VNMrkxzChhdveZyf6lmb", "version": 5}, "outputId": {"block": "c3ct5bmxGb0kJAJA80y1", "project": "VNMrkxzChhdveZyf6lmb", "version": 3}}
from geoscilabs.inversion.LinearInversionDirect import LinearInversionDirectApp
from ipywidgets import interact, FloatSlider, ToggleButtons, IntSlider, FloatText, IntText
import matplotlib.pyplot as plt
import matplotlib
# Larger default font for every figure produced by the app below.
matplotlib.rcParams['font.size'] = 14
# + iooxa={"id": {"block": "XoBdBeGBCevcAMVqqZtM", "project": "VNMrkxzChhdveZyf6lmb", "version": 1}, "outputId": {"block": "nzHSDMZI4foQGe88I1D5", "project": "VNMrkxzChhdveZyf6lmb", "version": 1}}
# Single app instance shared by every widget/plot cell in this notebook.
app = LinearInversionDirectApp()
# + [markdown] iooxa={"id": {"block": "GAKaomYwcnkDpl2fpbAI", "project": "VNMrkxzChhdveZyf6lmb", "version": 1}}
# # Linear Inversion App
#
# This app is based upon the inversion tutorial: "INVERSION FOR APPLIED GEOPHYSICS" by Oldenburg and Li (2005).
#
# <NAME> and <NAME> (2005) 5. Inversion for Applied Geophysics: A Tutorial. Near-Surface Geophysics: pp. 89-150.
# eISBN: 978-1-56080-171-9
# print ISBN: 978-1-56080-130-6
# https://doi.org/10.1190/1.9781560801719.ch5
#
# + [markdown] iooxa={"id": {"block": "wzGxtmJjFucP0mYtZzVD", "project": "VNMrkxzChhdveZyf6lmb", "version": 4}}
# ## Purpose
#
# We want to illustrate how a generic linear inverse problem can be solved using a Tikhonov approach. The App is divided into two sections.
#
# ### Forward Problem
# - Step 1: Create a model, $\mathbf{m}$.
# - Step 2: Generate a sensitivity matrix $\mathbf{G}$.
# - Step 3: Simulate data ($\mathbf{d} = \mathbf{G}\mathbf{m}$) and add noise.
#
# These steps are explored individually, and additional text is given in CNArticleXXXX. For convenience, the widgets used to carry out all three steps are consolidated at the end of the section. A brief mathematical description is also provided.
#
# ### Inverse Problem
#
# Here we provide widgets to adjust the parameters for the inverse problem. Some basic information is provided but details about the parameters are provided in the text CNArticleXXXX.
#
# The default parameters provided for the Forward and Inverse problems generate a reasonable example for illustrating the inversion, but the learning comes when these parameters are changed and the outcomes are observed.
# + [markdown] iooxa={"id": {"block": "EwFavHjklkQaVB3x4OP8", "project": "VNMrkxzChhdveZyf6lmb", "version": 2}}
# ## Mathematical Background for Forward Problem
#
#
# Let $g_j(x)$ denote the kernel function for $j$th datum. With a given model $m(x)$, the $j$th datum can be computed by solving following integral equation:
#
# $$ d_j = \int_a^{b} g_j(x) m(x) dx $$
#
# where
#
# $$ g_j(x) = e^{p_jx} cos (2 \pi q_jx) $$
#
# is the $j^{th}$ kernel function. By integrating $g_j(x)$ over cells of width $\Delta x$ and using the midpoint rule for each cell, we obtain the sensitivities
#
# $$ \mathbf{g}_j(\mathbf{x}) = e^{p_j\mathbf{x}} cos (2 \pi q_j \mathbf{x}) \Delta x$$
#
# where
#
# - $\mathbf{g}_j$: $j$th row vector for the sensitivity matrix ($1 \times M$)
# - $\mathbf{x}$: model location ($1 \times M$)
# - $p_j$: decaying constant (<0)
# - $q_j$: oscillating constant (>0)
#
# By stacking multiple rows of $\mathbf{g}_j$, we obtain sensitivity matrix, $\mathbf{G}$:
#
# \begin{align}
# \mathbf{G} =
# \begin{bmatrix}
# \mathbf{g}_1\\
# \vdots\\
# \mathbf{g}_{N}
# \end{bmatrix}
# \end{align}
#
# Here, the size of the matrix $\mathbf{G}$ is $(N \times M)$.
# Finally data, $\mathbf{d}$, can be written as a linear equation:
#
# $$ \mathbf{d} = \mathbf{G}\mathbf{m}$$
#
# where $\mathbf{m}$ is an inversion model; this is a column vector ($M \times 1$).
#
# In real measurements, there will be various noise sources, and hence the observation, $\mathbf{d}^{obs}$, can be written as
#
# $$ \mathbf{d}^{obs} = \mathbf{G}\mathbf{m} + \mathbf{noise}$$
# + [markdown] iooxa={"id": {"block": "gJYiOuHNv1wwsW0YhJOi", "project": "VNMrkxzChhdveZyf6lmb", "version": 3}}
#
# ## Step 1: Create a model, $\mathbf{m}$
#
# The model $m$ is a function defined on the interval [0,1] and discretized into $M$ equal intervals. It is the sum of a: (a) background $m_{background}$, (b) box car $m1$ and (c) Gaussian $m2$.
#
# - `m_background` : background value
#
# The box car is defined by
# - `m1` : amplitude
# - `m1_center` : center
# - `m1_width` : width
#
# The Gaussian is defined by
# - `m2` : amplitude
# - `m2_center` : center
# - `m2_sigma` : width of Gaussian (as defined by a standard deviation $\epsilon$)
# - `M` : number of model parameters
#
# + iooxa={"id": {"block": "wqWKYP3rp97i8uOzVF0I", "project": "VNMrkxzChhdveZyf6lmb", "version": 16}, "outputId": {"block": "pHWnVVIhOCtrN9paGv8Y", "project": "VNMrkxzChhdveZyf6lmb", "version": 16}}
# Interactive widget: build the model m (background + boxcar + Gaussian).
Q_model = app.interact_plot_model()
# + iooxa={"id": {"block": "LgHPcvS6Uqvlo6TVmLsb", "project": "VNMrkxzChhdveZyf6lmb", "version": 8}, "outputId": {"block": "8WfrIYP5O9W6zakEQaa3", "project": "VNMrkxzChhdveZyf6lmb", "version": 7}}
# Static render of the default model; return_axis=True makes the app hand
# back the matplotlib axis so it can be relabelled, then the flag is restored.
app.return_axis = True
ax = app.plot_model_only(
    m_background = 0.,
    m1 = 1,
    m1_center = 0.2,
    dm1 = 0.2,
    m2 = 2,
    m2_center = 0.75,
    sigma_2 = 0.07,
    M=100
)
ax.set_xlabel("x")
app.return_axis = False
# + [markdown] iooxa={"id": {"block": "0zUv3gNqulptgg5RQABq", "project": "VNMrkxzChhdveZyf6lmb", "version": 3}}
# ## Step 2: Generate a sensitivity matrix, $\mathbf{G}$
#
# By using the following app, we explore each row vector, $\mathbf{g}_j$, of the kernel or sensitivity matrix , $\mathbf{G}$. Parameters of the apps are:
#
# - `M`: number of model parameters
# - `N`: number of data
# - `pmin`, `pmax`: minimum and maximum of the $M$-length range of decaying constant values (<0)
# - `qmin`, `qmax`: minimum and maximum of the $M$-length range of oscillating constant values (>0)
# - `ymin`, `ymax`: minimum and maximum of the y-axis
# + iooxa={"id": {"block": "x9PxRUeZbvYukM3fIzi0", "project": "VNMrkxzChhdveZyf6lmb", "version": 20}, "outputId": {"block": "xjP1IRGiznblm7UECCWU", "project": "VNMrkxzChhdveZyf6lmb", "version": 20}}
# Interactive widget: explore the rows of the sensitivity matrix G.
Q_kernel = app.interact_plot_G()
# + iooxa={"id": {"block": "Ydin77Yae3vj4VPzgonN", "project": "VNMrkxzChhdveZyf6lmb", "version": 12}, "outputId": {"block": "2btzRAPgnmItoV2GkvSV", "project": "VNMrkxzChhdveZyf6lmb", "version": 10}}
#plot for 3 kernels
# Static render of three kernel rows of G (decay constants p in [0, -2],
# oscillation constants q in [1, 3]); axis handle returned for the title.
app.return_axis = True
axs = app.plot_G(
    N=3,
    M=100,
    pmin=0,
    pmax=-2,
    qmin=1,
    qmax=3,
    scale='log',
    fixed=False,
    ymin=-0.005,
    ymax=0.011,
)
axs[0].set_title("kernel functions")
app.return_axis = False;
# + [markdown] iooxa={"id": {"block": "B3aFbgUWKTzmqttCPDu3", "project": "VNMrkxzChhdveZyf6lmb", "version": 2}}
# ## Step 3: Simulate data, $\mathbf{d}=\mathbf{Gm}$, and add noise
#
# The $j$-th datum is the inner product of the $j$-th kernel $g_j(x)$ and the model $m(x)$. In discrete form it can be written as the dot product of the vector $\mathbf{g}_j$ and the model vector $\mathbf{m}$.
#
# $$ d_j = \mathbf{g}_j \mathbf{m} $$
#
# If there are $N$ data, these data can be written as a column vector, $\mathbf{d}$:
#
# \begin{align}
# \mathbf{d} = \mathbf{G}\mathbf{m} =
# \begin{bmatrix}
# d_1\\
# \vdots\\
# d_{N}
# \end{bmatrix}
# \end{align}
#
# ### Adding Noise
#
# Observational data are always contaminated with noise. Here we add Gaussian noise $N(0,\epsilon)$ (zero mean and standard deviation $\epsilon$). Here we choose
#
# $$ \epsilon = \% |d| + \text{floor} $$
# + iooxa={"id": {"block": "Br1aHzGcS21VMQq0PxAG", "project": "VNMrkxzChhdveZyf6lmb", "version": 19}, "outputId": {"block": "BC1pTU9FgoP59URMHq1j", "project": "VNMrkxzChhdveZyf6lmb", "version": 19}}
# Restore the app's default forward-model settings, then open the data widget.
app.reset_to_defaults()
Q_data = app.interact_plot_data()
# + iooxa={"id": {"block": "ZATX9KZ0O9drhzdzEFg9", "project": "VNMrkxzChhdveZyf6lmb", "version": 9}, "outputId": {"block": "f4ZOfRjwPn87rmRQmgT3", "project": "VNMrkxzChhdveZyf6lmb", "version": 8}}
#plot accurate data
# With percentage=0 and floor=0 the added Gaussian noise is identically zero,
# so the plotted data are the exact (noise-free) forward-modelled values.
app.return_axis = True
ax = app.plot_data_only(
    add_noise=True,
    percentage=0,
    floor=0.0,
)
app.return_axis = False
# + [markdown] iooxa={"id": {"block": "Kfas1vohMX9B1w8haaeF", "project": "VNMrkxzChhdveZyf6lmb", "version": 3}}
# ## Composite Widget for Forward Modelling
# + iooxa={"id": {"block": "gpeg40wOdccGxZMMBIxU", "project": "VNMrkxzChhdveZyf6lmb", "version": 19}, "outputId": {"block": "yFInpwVxV9MsRBuL5pNr", "project": "VNMrkxzChhdveZyf6lmb", "version": 19}}
# Combined widget: model, kernels, and simulated data in one interface.
app.interact_plot_all_three_together()
# + iooxa={"id": {"block": "Kv4yD6mzwtWFn0qUtwbm", "project": "VNMrkxzChhdveZyf6lmb", "version": 15}, "outputId": {"block": "gqkXJ3NUlhOxm8lbUq2p", "project": "VNMrkxzChhdveZyf6lmb", "version": 14}}
# Default parameters: accurate data
# Full forward pipeline with zero noise (percentage=0, floor=0.0).
app.return_axis = True
axs = app.plot_model(
    m_background = 0,
    m1 = 1,
    m2 = 2,
    m1_center = 0.2,
    dm1 = 0.2,
    m2_center = 0.75,
    sigma_2 = 0.07,
    percentage = 0,
    floor = 0.0,
    pmin=-0.25,
    pmax=-3,
    qmin=0.,
    qmax=5,
)
axs[0].set_title("Model")
app.return_axis = False
# + iooxa={"id": {"block": "b6Ur3a4kwpT5Q3xRbUlN", "project": "VNMrkxzChhdveZyf6lmb", "version": 11}, "outputId": {"block": "hfAQ1kZIBBXKDAlHonfs", "project": "VNMrkxzChhdveZyf6lmb", "version": 9}}
# Default parameters: noisy data
# Same forward pipeline, now with a constant noise floor of 0.03 added
# (percentage component remains zero).
app.return_axis = True
axs = app.plot_model(
    m_background = 0,
    m1 = 1,
    m2 = 2,
    m1_center = 0.2,
    dm1 = 0.2,
    m2_center = 0.75,
    sigma_2 = 0.07,
    percentage = 0,
    floor = 0.03,
    pmin=-0.25,
    pmax=-3,
    qmin=0.,
    qmax=5,
)
axs[2].set_title("Noisy data")
app.return_axis = False
# + [markdown] iooxa={"id": {"block": "I5cRNN1g9FcrGvbH7UKL", "project": "VNMrkxzChhdveZyf6lmb", "version": 2}}
# ## Inverse Problem
#
# In the inverse problem we attempt to find the model $\mathbf{m}$ that gave rise to the observational data $\mathbf{d}^{obs}$. The inverse problem is formulated as an optimization problem:
#
#
# $$\text{minimize} \ \ \ \phi(\mathbf{m}) = \phi_d(\mathbf{m}) + \beta \phi_m(\mathbf{m}) $$
#
# where
#
# - $\phi_d$: data misfit
# - $\phi_m$: model regularization
# - $\beta$: trade-off (Tikhonov) parameter $0<\beta<\infty$
#
# Data misfit is defined as
#
# $$ \phi_d = \sum_{j=1}^{N}\Big(\frac{\mathbf{g}_j\mathbf{m}-d^{obs}_j}{\epsilon_j}\Big)^2$$
#
# where $\epsilon_j$ is an estimate of the standard deviation of the $j$th datum.
#
# The model regularization term, $\phi_m$, can be written as
#
# $$ \phi_m(\mathbf{m}) = \alpha_s \int \left(\mathbf{m}-\mathbf{m}_{ref}\right)^2 dx + \alpha_x \int \left(\frac{d \mathbf{m}}{dx}\right)^2 dx$$
#
# The first term is referred to as the "smallness" term. Minimizing this generates a model that is close to a reference model $\mathbf{m}_{ref}$. The second term penalizes roughness of the model. It is generically referred to as a "flattest" or "smoothness" term.
# + [markdown] iooxa={"id": {"block": "j7hVPYIA73jbxv0bAinV", "project": "VNMrkxzChhdveZyf6lmb", "version": 4}}
# ## Step 4: Invert the data, and explore inversion results
#
# In the inverse problem we define parameters needed to evaluate the data misfit and the model regularization terms. We then deal with parameters associated with the inversion.
#
# ### Parameters
#
# - `mode`: `Run` or `Explore`
# - `Run`: Each click of the app, will run `n_beta` inversions
# - `Explore`: Not running inversions, but explore result of the previously run inversions
#
# #### Misfit
# - `percent`: estimate uncertainty as a percentage of the data (%)
#
# - `floor`: estimate uncertainty floor
#
# - `chifact`: chi factor for stopping criteria (when $\phi_d^{\ast}=N \rightarrow$ `chifact=1`)
#
# #### Model norm
# - `mref`: reference model
#
# - `alpha_s`: $\alpha_s$ weight for smallness term
#
# - `alpha_x`: $\alpha_x$ weight for smoothness term
#
# #### Beta
# - `beta_min`: minimum $\beta$
#
# - `beta_max`: maximum $\beta$
#
# - `n_beta`: the number of $\beta$
#
# #### Plotting options
#
# - `data`: `obs & pred` or `normalized misfit`
# - `obs & pred`: show observed and predicted data
# - `normalized misfit`: show normalized misfit
#
# - `tikhonov`: `phi_d & phi_m` or `phi_d vs phi_m`
# - `phi_d & phi_m`: show $\phi_d$ and $\phi_m$ as a function of $\beta$
# - `phi_d vs phi_m`: show tikhonov curve
#
# - `i_beta`: i-th $\beta$ value
#
# - `scale`: `linear` or `log`
# - `linear`: linear scale for plotting the third panel
# - `log`: log scale for plotting the third panel
# + iooxa={"id": {"block": "zYsKlEza7On1cZcJUO1x", "project": "VNMrkxzChhdveZyf6lmb", "version": 14}, "outputId": {"block": "XHsuSTVmGPtaRtV3WO9m", "project": "VNMrkxzChhdveZyf6lmb", "version": 14}}
# Interactive widget: run/explore Tikhonov inversions over a range of beta.
app.interact_plot_inversion()
# + iooxa={"id": {"block": "Xv29hSrdiN6k8ilityUE", "project": "VNMrkxzChhdveZyf6lmb", "version": 18}, "outputId": {"block": "ejAaQ7Q2KvAmHkRE6dwa", "project": "VNMrkxzChhdveZyf6lmb", "version": 18}}
# Static inversion run: smallness-only regularization (alpha_x=0) over 81
# log-spaced beta values between 1e-3 and 1e5, stopping at chifact=1.
app.return_axis = True
axs = app.plot_inversion(
    mode="Run", #"Explore"
    mref=0.0,
    percentage=app.percentage,
    floor=app.floor,
    beta_min=1e-3,
    beta_max=1e5,
    n_beta=81,
    alpha_s=1,
    alpha_x=0,
    tikhonov="phi_d & phi_m",
    data_option="obs & pred",
    scale="log",
    i_beta=0,
    chifact=1,
)
# axs[2].set_title('Hello!')
app.return_axis = False
| linear-inversion-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -
# # Ensemble learning
#
# - Data https://archive.ics.uci.edu/ml/datasets/letter+recognition
# - URL https://www.pluralsight.com/guides/ensemble-modeling-scikit-learn
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
from sklearn import model_selection
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
# ### Load data
# NOTE(review): the UCI letter-recognition.data file ships without a header
# row, so pandas' default header=0 promotes the first sample to column names
# (which is why the label column is called 'T'), silently dropping one record.
# Confirm whether header=None with explicit column names was intended.
df = pd.read_csv('data/letter-recognition.data')
print(df.shape)
df.head(10)
# ### Create arrays for the features and the response variable
# 'T' holds the letter label; the remaining integer features are the inputs.
y = df['T'].values
x = df.drop('T', axis=1).values
# ### Logistic Regression
# +
# Baseline: a single logistic-regression classifier evaluated on a 70/30
# hold-out split before any ensembling.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
baseline = LogisticRegression(max_iter=10000)
baseline.fit(X_train, y_train)
predictions = baseline.predict(X_test)
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
# -
# ### Bagged Decision Trees for Classification
# Bagging: 100 decision trees trained on bootstrap resamples, scored with
# 10-fold cross-validation.
kfold = model_selection.KFold(n_splits=10, shuffle=True)
# Fix: pass the base tree positionally -- the keyword was renamed from
# ``base_estimator`` to ``estimator`` in scikit-learn 1.2 (old name removed
# in 1.4), and the positional form works on both sides of that change.
model_1 = BaggingClassifier(DecisionTreeClassifier(), n_estimators=100, random_state=10)
results_1 = model_selection.cross_val_score(model_1, x, y, cv=kfold)
print(results_1.mean())
# ### Random Forest Classification
# Random forest: 100 trees, 5 candidate features per split, 10-fold CV score.
kfold_rf = model_selection.KFold(n_splits=10, shuffle=True)
model_rf = RandomForestClassifier(n_estimators=100, max_features=5)
results_rf = model_selection.cross_val_score(model_rf, x, y, cv=kfold_rf)
print(results_rf.mean())
# ### Adaptive Boosting or AdaBoost
# AdaBoost with 30 boosting rounds over the default base estimator.
kfold_ada = model_selection.KFold(n_splits=10, shuffle=True)
model_ada = AdaBoostClassifier(n_estimators=30, random_state=10)
results_ada = model_selection.cross_val_score(model_ada, x, y, cv=kfold_ada)
print(results_ada.mean())
# ### Stochastic Gradient Boosting
# Gradient boosting with 100 stages; scored the same way for comparability.
kfold_sgb = model_selection.KFold(n_splits=10, shuffle=True)
model_sgb = GradientBoostingClassifier(n_estimators=100, random_state=10)
results_sgb = model_selection.cross_val_score(model_sgb, x, y, cv=kfold_sgb)
print(results_sgb.mean())
# ### Stacking/Voting Ensemble
# +
# Voting ensemble: combine three heterogeneous base learners by majority vote
# and score the combination with 10-fold cross-validation.
kfold_vc = model_selection.KFold(n_splits=10, shuffle=True)
estimators = [
    ('logistic', LogisticRegression(max_iter=1000)),
    ('cart', DecisionTreeClassifier()),
    ('svm', SVC()),
]
ensemble = VotingClassifier(estimators)
results_vc = model_selection.cross_val_score(ensemble, x, y, cv=kfold_vc)
print(results_vc.mean())
| cvicenia/notebooks/tyzden-08/IAU_ensemble.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BellaZeng2019/pianoclassifier/blob/master/pianoclassifier_prod.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="5Kea6QQmnw39"
#hide
# !pip install -Uqq fastbook
# !pip install torchtext==0.8.1
import fastbook
fastbook.setup_book()
from fastbook import *
# + id="GssvrYaHMd95"
from fastai.vision.all import *
from fastai.vision.widgets import *
# + id="xfemxfUbnn_i"
# + [markdown] id="Wv4U_aHfNORT"
# # The Piano Classifier
# Upright pianos: Upright pianos are generally 48” and over. These
# pianos are the choice of players looking for grand piano type sound without taking up as much space.
#
# ---
#
#
# Grand pianos: Grand pianos are characterized by the way they are constructed. The strings and soundboard lay horizontally and the hammer that strikes the string swings up and then falls back to rest. Grand pianos generally offer more sound projection and faster key repetition than vertical pianos.
#
#
# ---
# Digital pianos: Digital pianos come in many styles. From slim upright designs to a range of grand piano sizes, there is a digital for most price points and feature needs.
# High quality digital pianos offer a tremendous value with progressive hammer actions which require no regulating and concert grand quality sound without tuning.
# Digital pianos also offer other features for education, variety of voices, customizable piano sounds, and the ability to control the volume or even wear headphones.
#
#
# + id="FOD6wDFnNDsl" colab={"base_uri": "https://localhost:8080/"} outputId="e6b007bd-622f-4ae9-9747-222cfa295ba0"
#hide
# %cd gdrive/MyDrive/Colab\ Notebooks/fastai_exercises
# + id="3kwjaMqPOVAJ"
path=Path()
# Load the exported fastai learner; export.pkl must sit next to this notebook.
learn_inf = load_learner(path/'export.pkl', cpu=True)
# UI widgets: file-upload button, image preview area, prediction label.
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()
# + id="bGITVaW0PC5i"
def on_click(change):
    # Fired by the FileUpload widget: classify the newest upload and show it.
    uploaded = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl:
        display(uploaded.to_thumb(128,128))
    prediction, idx, probabilities = learn_inf.predict(uploaded)
    lbl_pred.value = f'Prediction: {prediction}; Probability: {probabilities[idx]: 0.04f}'
# + id="58Jpl7MdP_9C"
btn_upload.observe(on_click, names=['data'])
# + colab={"base_uri": "https://localhost:8080/", "height": 228, "referenced_widgets": ["a6f56f65d8b24dbf83e44c3b33023c01", "46993136e5f34d80932eb79aa6d2e840", "2d2f2ccf265946998b279601dec24918", "cf41c8bc3f0d44ce91f8be4d12a8a363", "<KEY>", "c65fd3f2dda94306b54ce933dd7af3ca", "be62de80837245d0996c6039743a2ab6", "<KEY>", "5fd4e0bf901a4d34a445a9953ff811e4", "79f7931d4d8e4d0d814a35d9cee64c8b", "<KEY>", "737a4babe380438495e3eecc35f9a0e7", "eca1d8c570f54b48ac0cd09500e0a891"]} id="a3gPQdrUQPFa" outputId="5b6c0bdd-e6c8-4769-abb7-47731a70a845"
display(VBox([widgets.Label('Select your piano'), btn_upload, out_pl, lbl_pred]))
# + id="d8RGsauNQf4_"
# + id="wFaDWY3-SkML"
| _notebooks/2020-04-06-pianoclassifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scikit-learn from CSV
#
# This notebook reads the CSV data written out by the Dataflow program of [1_explore.ipynb](./1_explore.ipynb) and trains a scikit-learn model on Cloud ML Engine.
#
# GCP settings consumed by the %%bash cells below via ${BUCKET} etc.
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
# Mirror the settings into the process environment for the shell cells.
os.environ.update(BUCKET=BUCKET, PROJECT=PROJECT, REGION=REGION)
# + language="bash"
# gcloud config set project $PROJECT
# gcloud config set compute/region $REGION
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# + language="bash"
# pip install cloudml-hypertune
# -
# ## Train sklearn model locally
# + language="bash"
# OUTDIR=skl_trained
# DATADIR=${PWD}/preproc/csv
# rm -rf $OUTDIR
# gcloud ml-engine local train \
# --module-name=trainer.train_skl --package-path=${PWD}/ltgpred/trainer \
# -- \
# --job-dir=$OUTDIR --train_data=${DATADIR}/train* --eval_data=${DATADIR}/eval*
# -
# ## Training sklearn model on CMLE
# %writefile largemachine.yaml
trainingInput:
scaleTier: CUSTOM
masterType: complex_model_l
# + language="bash"
# OUTDIR=gs://${BUCKET}/lightning/skl_trained
# DATADIR=gs://$BUCKET/lightning/preproc_0.02_32_2/csv
# JOBNAME=ltgpred_skl_$(date -u +%y%m%d_%H%M%S)
# gsutil -m rm -rf $OUTDIR
# gcloud alpha ml-engine jobs submit training $JOBNAME \
# --module-name=ltgpred.trainer.train_skl --package-path=${PWD}/ltgpred --job-dir=$OUTDIR \
# --region=${REGION} --scale-tier=custom --config=largemachine.yaml \
# --python-version=3.5 --runtime-version=1.8 \
# -- \
# --train_data=${DATADIR}/train-001* --eval_data=${DATADIR}/eval-0000*
# -
# When I ran it, training finished with a RMSE=0.34.
# This can serve as a benchmark.
#
# Note, however, that I trained and evaluated on a subset of the data, since even the "largemachine" doesn't have the memory needed to hold entire dataset
# Copyright 2018 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| blogs/lightning/2_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 1: Introduction
# [Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2020)
#
# [<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2020/master?urlpath=lab/tree/01/Introduction.ipynb)
# 1. [Solve the consumer problem](#Solve-the-consumer-problem)
# 2. [Simulate the AS-AD model](#Simulate-the-AS-AD-model)
# 3. [Using modules](#Using-modules)
#
# **Summary:** The Jupyter notebook is a document with text, code and results.
# This is a text cell, or more precisely a *markdown* cell.
#
# * Press <kbd>Enter</kbd> to *edit* the cell.
# * Press <kbd>Ctrl+Enter</kbd> to *run* the cell.
# * Press <kbd>Shift+Enter</kbd> to *run* the cell + advance.
# We can make lists:
#
# 1. **First** item
# 2. *Second* item
# 3. ~~Third~~ item
# We can also do LaTeX math, e.g. $\alpha^2$ or
# $$
# X = \int_0^{\infty} \frac{x}{x+1} dx
# $$
# +
# this is a code cell
# let us do some calculations
a = 2
b = 3
c = a+b
# lets print the results (shown below the cell)
print(c)
# -
# We can now write some more text, and continue with our calculations.
d = c*2
print(d)
# **Note:** Although JupyterLab runs in a browser, it is running locally, offline (the path is something like *localhost:8888/lab*).<br>
# **Binder:** The exception is if you use *binder*; then JupyterLab will run in the cloud, and the path will begin with *hub.mybinder.org*:
#
# [<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2020/master?urlpath=lab/tree/01/Introduction.ipynb)
# **Note:** *You cannot save your result when using binder*.
# <a id="Solve-the-consumer-problem"></a>
#
# # 1. Solve the consumer problem
# Consider the following consumer problem:
# $$
# \begin{aligned}
# V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}} x_{1}^{\alpha}x_{2}^{1-\alpha}\\
# & \text{s.t.}\\
# p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\
# x_{1},x_{2} & \geq 0
# \end{aligned}
# $$
# We can solve this problem _numerically_ in a few lines of code.
# 1. Choose some **parameters**:
alpha = 0.5
I = 10
p1 = 1
p2 = 2
# 2. The **consumer objective** is:
def value_of_choice(x1,alpha,I,p1,p2):
    """Cobb-Douglas utility from choosing x1 and spending the rest on good 2.

    All income not used on good 1 (price p1) buys good 2 at price p2.
    """
    leftover = I - p1*x1
    x2 = leftover/p2
    return x1**alpha * x2**(1-alpha)
# 3. We can now use a function from the *scipy* module to **solve the consumer problem**.
# +
# a. load external module from scipy
from scipy import optimize
# b. make value-of-choice a function of x1 only
obj = lambda x1: -value_of_choice(x1,alpha,I,p1,p2)
# c. call minimizer
solution = optimize.minimize_scalar(obj,bounds=(0,I/p1))
# d. print result
x1 = solution.x
x2 = (I-x1*p1)/p2
print(x1,x2)
# -
# **Task**: Solve the consumer problem with the CES utility function.
#
# $$
# u(x_1,x_2) = (\alpha x_1^{-\beta} + (1-\alpha) x_2^{-\beta})^{-1/\beta}
# $$
# +
# a. choose parameters
alpha = 0.5
beta = 0.000001
I = 10
p1 = 1
p2 = 2
# b. value-of-choice
def value_of_choice_ces(x1,alpha,beta,I,p1,p2):
    """CES utility of x1 with the remaining budget spent on good 2.

    Returns 0 outside the interior (non-positive consumption of either good),
    where the CES expression is undefined.
    """
    x2 = (I-p1*x1)/p2
    # guard clause: CES requires strictly positive quantities
    if x1 <= 0 or x2 <= 0:
        return 0
    return (alpha*x1**(-beta) + (1-alpha)*x2**(-beta))**(-1/beta)
# c. objective
obj = lambda x1: -value_of_choice_ces(x1,alpha,beta,I,p1,p2)
# d. solve
solution = optimize.minimize_scalar(obj,bounds=(0,I/p1))
# e. result
x1 = solution.x
x2 = (I-x1*p1)/p2
print(x1,x2)
# -
# <a id="Simulate-the-AS-AD-model"></a>
#
# # 2. Simulate the AS-AD model
# Consider the following AS-AD model:
#
# $$
# \begin{aligned}
# \hat{y}_{t} &= b\hat{y}_{t-1}+\beta(z_{t}-z_{t-1})-a\beta s_{t}+a\beta\phi s_{t-1} \\
# \hat{\pi}_{t} &= b\hat{\pi}_{t-1}+\beta\gamma z_{t}-\beta\phi\gamma z_{t}+\beta s_{t}-\beta\phi s_{t-1} \\
# z_{t} &= \delta z_{t-1}+x_{t}, x_{t} \sim N(0,\sigma_x^2) \\
# s_{t} &= \omega s_{t-1}+c_{t}, c_{t} \sim N(0,\sigma_c^2) \\
# b &= \frac{1+a\phi\gamma}{1+a\gamma} \\
# \beta &= \frac{1}{1+a\gamma}
# \end{aligned}
# $$
#
# where $\hat{y}_{t}$ is the output gap, $\hat{\pi}_{t}$ is the inflation gap, $z_{t}$ is a AR(1) demand shock, and $\hat{s}_{t}$ is a AR(1) supply shock.
# 1. Choose **parameters**:
a = 0.4
gamma = 0.1
phi = 0.9
delta = 0.8
omega = 0.15
sigma_x = 1
sigma_c = 0.2
T = 100
# 2. Calculate **combined parameters**:
b = (1+a*phi*gamma)/(1+a*gamma)
beta = 1/(1+a*gamma)
# 3. Define **model functions**:
# AS-AD model equations; b, beta and the structural parameters (a, gamma,
# phi, delta, omega) are module-level values defined in the cells above.
y_hat_func = lambda y_hat_lag,z,z_lag,s,s_lag: b*y_hat_lag + beta*(z-z_lag) - a*beta*s + a*beta*phi*s_lag  # output gap
pi_hat_func = lambda pi_lag,z,z_lag,s,s_lag: b*pi_lag + beta*gamma*z - beta*phi*gamma*z_lag + beta*s - beta*phi*s_lag  # inflation gap
z_func = lambda z_lag,x: delta*z_lag + x  # AR(1) demand shock
s_func = lambda s_lag,c: omega*s_lag + c  # AR(1) supply shock
# 4. Run the **simulation**:
# +
import numpy as np
# a. setup: fix the seed so the simulated paths are reproducible
np.random.seed(2015)
# b. allocate simulation data (period 0 stays at the zero steady state)
x = np.random.normal(loc=0,scale=sigma_x,size=T)
c = np.random.normal(loc=0,scale=sigma_c,size=T)
z = np.zeros(T)
s = np.zeros(T)
y_hat = np.zeros(T)
pi_hat = np.zeros(T)
# c. run simulation forward from t = 1
for t in range(1,T):
    # i. update the AR(1) demand (z) and supply (s) shocks
    z[t] = z_func(z[t-1],x[t])
    s[t] = s_func(s[t-1],c[t])
    # ii. compute the output gap (y) and the inflation gap (pi)
    y_hat[t] = y_hat_func(y_hat[t-1],z[t],z[t-1],s[t],s[t-1])
    pi_hat[t] = pi_hat_func(pi_hat[t-1],z[t],z[t-1],s[t],s[t-1])
# -
# 5. **Plot** the simulation:
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot the simulated output and inflation gaps.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(y_hat,label='$\\hat{y}$')
# bug fix: '\\hat{pi}' put the hat over the letter p; '\\hat{\\pi}' renders
# the Greek letter pi with a hat, matching the model notation.
ax.plot(pi_hat,label='$\\hat{\\pi}$')
ax.set_xlabel('time')
ax.set_ylabel('percent')
ax.set_ylim([-8,8])
ax.legend(loc='upper left');
# -
# I like the **seaborn style**:
# +
# NOTE(review): the 'seaborn' style alias is removed in matplotlib >= 3.8
# (renamed 'seaborn-v0_8'); confirm against the installed version.
plt.style.use('seaborn')
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(y_hat,label='$\\hat{y}$')
# bug fix: use '\\hat{\\pi}' so the legend shows the Greek letter pi.
ax.plot(pi_hat,label='$\\hat{\\pi}$')
ax.set_xlabel('time')
ax.set_ylabel('percent')
ax.set_ylim([-8,8])
ax.legend(loc='upper left',facecolor='white',frameon='True');
# -
# <a id="Using-modules"></a>
#
# # 3. Using modules
# A **module** is a **.py**-file with functions you import and can then call in the notebook.
#
# Try to open **mymodule.py** and have a look.
import mymodule
x = 5
y = mymodule.myfunction(5)
print(y)
| web/01/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Atrial Fibrillation Dashboard Creation <a id="top"></a>
# #### Table of Contents
# * [Set-up](#setup)
# * [App Features](#features)
# + [Upload Button](#upload)
# + [Calculation Form](#form)
# + [Score Display Cards](#cards)
# + [Risk Score Comparison Graph](#compare)
# + [Miniature Display Cards](#minicards)
# + [AFRI Results Tab](#afri)
# + [CHADS Results Tab](#chads)
# + [POAF Results Tab](#poaf)
# + [NPOAF Results Tab](#npoaf)
# + [Simplified POAF Results Tab](#simplified)
# + [COM-AF Results Tab](#comaf)
# * [App Layout](#layout)
# * [App Callbacks and Configuration](#callbacks)
# #### Set-up <a id="setup"></a>
# ##### Import necessary packages
from jupyter_dash import JupyterDash
import dash
from waitress import serve
from dash import Dash, html, dcc
import dash_bootstrap_components as dbc
from dash_bootstrap_templates import load_figure_template
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import statsmodels.api as sm
# ##### Define the app and set the style guide
# +
external_stylesheets = [dbc.themes.LUX]
app = JupyterDash(__name__, external_stylesheets=external_stylesheets, assets_external_path='assets')
# -
# [return to top](#top)
# #### App Features <a id="features"></a>
# ##### Add the text for the hover Tooltips
# +
# Hover-tooltip bodies for the upload control and each risk-score card.
#Dataset specification requirements explanation
upload_tip = html.Div([
    html.P("Upload your own reference dataset to compare your patient to a well-matched population"),
    html.P("File types accepted: .csv, .txt, .xlsx"),
    html.P("Data should have columns for each calculated score and the atrial fibrillation outcome"),
    html.P("To fit the parameters of the dashboard, column names should be as follows"
           " (if using other column names, you must adjust code accordingly): "
           "'afri', 'chads', 'poaf', 'npoaf', 'simplified', 'comaf', 'AF'")
])
#AFRI calculation explanation
# bug fix: height thresholds are centimetres, not kilograms.
afri_tip = html.Div([
    html.P("Score Range: 0-4"),
    html.P("Males:"),
    html.P("Age 60+ years: +1"),
    html.P("Weight 76+ kg: +1"),
    html.P("Height 176+ cm: +1"),
    html.P("Peripheral Vascular Disease: +1"),
    html.P("Females:"),
    html.P("Age 66+ years: +1"),
    html.P("Weight 64+ kg: +1"),
    html.P("Height 168+ cm: +1"),
    html.P("Peripheral Vascular Disease: +1"),
])
#CHA2DS2-VASc calculation explanation
# bug fix: the calculation (chads_calc) awards +2 for prior stroke, so the
# maximum attainable score is 9, not 8 as the old text claimed.
chads_tip = html.Div([
    html.P("Score Range: 0-9"),
    html.P("Age 65-74 years: +1"),
    html.P("Age 75+ years: +2"),
    html.P("Congestive Heart Failure: +1"),
    html.P("High Blood Pressure: +1"),
    html.P("Diabetes: +1"),
    html.P("History of Stroke: +2"),
    html.P("Peripheral Vascular Disease: +1"),
    html.P("Female: +1"),
])
#POAF calculation explanation
poaf_tip = html.Div([
    html.P("Score Range: 0-9"),
    html.P("Age 60-69 years: +1"),
    html.P("Age 70-79 years: +2"),
    html.P("Age 80+ years: +3"),
    html.P("COPD: +1"),
    html.P("eGFR<15 or on dialysis: +1"),
    html.P("Emergent surgery: +1"),
    html.P("Pre-operative intra-aortic balloon pump: +1"),
    html.P("Left ventricular ejection fraction < 30%: +1"),
    html.P("Valve surgery: +1"),
])
#NPOAF calculation explanation
npoaf_tip = html.Div([
    html.P("Score Range: 0-7"),
    html.P("Age 65-74 years: +2"),
    html.P("Age 75+ years: +3"),
    html.P("Mild mitral valve disease: +1"),
    html.P("Moderate to severe mitral valve disease: +3"),
    html.P("Left atrial dilation: +1"),
])
#Simplified POAF calculation explanation
simplified_tip = html.Div([
    html.P("Score Range: 0-7"),
    html.P("Age 65+ years: +2"),
    html.P("High Blood Pressure: +2"),
    html.P("Myocardial Infarction: +1"),
    html.P("Congestive Heart Failure: +2"),
])
#COM-AF calculation explanation
comaf_tip = html.Div([
    html.P("Score Range: 0-6"),
    html.P("Age 65-74 years: +1"),
    html.P("Age 75+ years: +2"),
    html.P("Female: +1"),
    html.P("High Blood Pressure: +1"),
    html.P("Diabetes: +1"),
    html.P("History of Stroke: +1"),
])
# -
# ##### Create a data upload button <a id="upload"></a>
upload = html.Div([
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'textAlign': 'center',
'margin' : '10px'
},
# Prevent multiple files from being uploaded
multiple=False
),
dbc.Tooltip(
upload_tip,
target='upload-data',
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
# ##### Store the data locally
store_data = dcc.Store(id='input-dataset', storage_type='local')
# ##### Create a form to calculate new risk scores <a id="form"></a>
# +
age_input = html.Div([
dbc.Input(id='age-state', type='number')
])
gender_input = html.Div([
dbc.RadioItems(
options=[
{'label':'Male', 'value':'M'},
{'label':'Female', 'value':'F'}
],
value='M',
id='gender-state',
)
])
vitals_input = html.Div([
dbc.Input(placeholder='Input weight (kg)', id='weight-state', type='number'),
dbc.Input(placeholder='Input height (cm)', id='height-state', type='number'),
dbc.Input(placeholder='Input ejection fraction (percent)',id='ef-state', type='number'),
dbc.Input(placeholder='Input eGFR',id='eGFR-state', type='number'),
dbc.Checklist(
options=[{'label':'Emergency', 'value':1}],
id='emergency-state',
switch=True
)
])
conditions_input = html.Div([
dbc.Label("Select all that apply", html_for="vitals-list"),
dbc.Checklist(
options=[
{"label": "COPD", "value": 'copd'},
{"label": "Hypertension", "value": 'hbp'},
{"label": "Diabetes Mellitus", "value": 'dm'},
{"label": "Congestive Heart Failure", "value": 'chf'},
{"label": "Left Ventricular Dysfunction", "value": 'lvd'},
{"label": "History of Stroke", "value": 'stroke'},
{"label": "Peripheral Vascular Disease", "value": 'pvd'},
{"label": "Vascular Disease", "value": 'vd'},
{"label": "Left Atrial Dilation", "value": 'lad'},
{"label": "Mild Mitral Valve Disease", "value": 'mmvd'},
{"label": "Mod-to-Severe Mitral Valve Disease", "value": 'smvd'},
{"label": "Myocardial Infarction", "value":'mi'}
],
id="conditions-state",
label_checked_style={"color": "success"}
),
])
procedures_input = html.Div([
dbc.Label("Select all that apply", html_for="conditions-list"),
dbc.Checklist(
options=[
{"label": "Intra-aortic Balloon Pump", "value": 'iabp'},
{"label": "Combined Valve/Artery Surgery", "value": 'cvas'},
{"label": "Dialysis", "value": 'dialysis'},
],
id="procedures-state",
label_checked_style={"color": "success"}
),
])
accordion = html.Div(
dbc.Accordion(
[
dbc.AccordionItem(
[age_input],
title="Age",
item_id="item-1",
),
dbc.AccordionItem(
[gender_input],
title="Gender",
item_id="item-2",
),
dbc.AccordionItem(
[vitals_input],
title="Vitals",
item_id="item-3",
),
dbc.AccordionItem(
[conditions_input],
title="Underlying Conditions",
item_id="item-4",
),
dbc.AccordionItem(
[procedures_input],
title="Procedures",
item_id="item-5",
),
],
active_item="item-4",
),
style={'margin-left' : '10px ', 'margin-top': '10px'}
)
calculate_button = html.Div(
dbc.Button(
"Calculate",
id="submit-button",
className="button",
n_clicks=0,
outline=True,
color="secondary"
),
style={'margin-left' : '10px ', 'margin-top': '10px', 'margin-bottom': '10px'}
)
# -
# ##### Store the calculated values locally
afri_state = dcc.Store(id='afri-state', storage_type='local')
chads_state = dcc.Store(id='chads-state', storage_type='local')
poaf_state = dcc.Store(id='poaf-state', storage_type='local')
npoaf_state = dcc.Store(id='npoaf-state', storage_type='local')
simplified_state = dcc.Store(id='simplified-state', storage_type='local')
comaf_state = dcc.Store(id='comaf-state', storage_type='local')
# ##### Create display cards for the calculated risk scores <a id="cards"></a>
# +
### --> AFRI Card
card1 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='afri-card', className="card-val1"),
html.P(
["Atrial Fibrillation Risk Index"],
className="card-text1",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
}, id='afri-tip'
),
dbc.Tooltip(
afri_tip,
target="afri-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
### --> CHADS Card
card2 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='chads-card', className="card-val2"),
html.P(
["CHA2DS2-VASc Score"],
className="card-text2",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
}, id='chads-tip'
),
dbc.Tooltip(
chads_tip,
target="chads-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
### --> POAF Card
card3 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='poaf-card', className="card-val3"),
html.P(
["Postoperative Atrial Fibrillation Score"],
className="card-text3",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
}, id='poaf-tip'
),
dbc.Tooltip(
poaf_tip,
target="poaf-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
### --> NPOAF Card
card4 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='npoaf-card', className="card-val4"),
html.P(
["New-onset Postoperative Atrial Fibrillation Score"],
className="card-text4",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
}, id='npoaf-tip'
),
dbc.Tooltip(
npoaf_tip,
target="npoaf-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
### --> Simplified POAF Card
card5 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='simplified-card', className="card-val5"),
html.P(
["Simplified Postoperative Atrial Fibrillation Score"],
className="card-text5",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
}, id='simplified-tip'
),
dbc.Tooltip(
simplified_tip,
target="simplified-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
### --> COM-AF Card
card6 = html.Div([
dbc.Card(
dbc.CardBody(
[
html.H4(id='comaf-card', className="card-val6"),
html.P(
["Combined Risk Score to Predict Atrial Fibrillation "],
className="card-text6",
style={'textAlign': 'center'}
)
]),
style={
'margin-right' : '10px',
'margin-top': '10px',
'margin-bottom': '10px'
}, id='comaf-tip'
),
dbc.Tooltip(
comaf_tip,
target="comaf-tip",
placement='left',
style={'color':'white', 'textAlign':'left'}
)
])
# -
# ##### Create a dropdown menu for the risk score comparison graph <a id="compare"></a>
# Axis selectors for the risk-score comparison strip chart.
dropdowns = html.Div([
    html.P("x-axis: ", className="crossfilter-xaxis-label", style={'margin-left': '10px'}),
    dcc.Dropdown(
        id='crossfilter-xaxis-column',
        options=[
            {'label': 'AFRI', 'value': 'afri'},
            {'label': 'CHA2DS2-VASc', 'value': 'chads'},
            {'label': 'POAF', 'value': 'poaf'},
            # bug fix: NPOAF previously mapped to 'poaf', so selecting it
            # plotted the wrong column on the x-axis.
            {'label': 'NPOAF', 'value': 'npoaf'},
            {'label': 'Simplified', 'value': 'simplified'},
            {'label': 'COM-AF', 'value': 'comaf'}
        ],
        value='afri',
        style={'margin-left': '5px'}
    ),
    html.P("y-axis: ", className="crossfilter-yaxis-label", style={'margin-left': '10px'}),
    dcc.Dropdown(
        id='crossfilter-yaxis-column',
        options=[
            {'label': 'AFRI', 'value': 'afri'},
            {'label': 'CHA2DS2-VASc', 'value': 'chads'},
            {'label': 'POAF', 'value': 'poaf'},
            {'label': 'NPOAF', 'value': 'npoaf'},
            {'label': 'Simplified', 'value': 'simplified'},
            {'label': 'COM-AF', 'value': 'comaf'}
        ],
        value='npoaf',
        style={'margin-left': '5px'}
    )
])
# ##### Create miniature cards to display calculated scores on page 2 <a id="minicards"></a>
# +
### --> AFRI Minicard
minicard1 = dbc.Card(
dbc.CardBody(
[
html.H4(id='afri-mini', className="card-val1"),
html.P(
["AFRI"],
className="card-text1",
style={'textAlign': 'center'}
)
])
)
### --> CHADS Minicard
minicard2 = dbc.Card(
dbc.CardBody(
[
html.H4(id='chads-mini', className="card-val2"),
html.P(
["CHA2DS2-VASc"],
className="card-text2",
style={'textAlign': 'center'}
)
])
)
### --> POAF Minicard
minicard3 = dbc.Card(
dbc.CardBody(
[
html.H4(id='poaf-mini', className="card-val3"),
html.P(
["POAF"],
className="card-text3",
style={'textAlign': 'center'}
)
])
)
### --> NPOAF Minicard
minicard4 = dbc.Card(
dbc.CardBody(
[
html.H4(id='npoaf-mini', className="card-val4"),
html.P(
["NPOAF"],
className="card-text4",
style={'textAlign': 'center'}
)
])
)
### --> Simplified Minicard
minicard5 = dbc.Card(
dbc.CardBody(
[
html.H4(id='simplified-mini', className="card-val5"),
html.P(
["Simplified POAF"],
className="card-text5",
style={'textAlign': 'center'}
)
])
)
### --> COM-AF Minicard
minicard6 = dbc.Card(
dbc.CardBody(
[
html.H4(id='comaf-mini', className="card-val6"),
html.P(
["COM-AF"],
className="card-text6",
style={'textAlign': 'center'}
)
])
)
minicards = html.Div([
dbc.Row([
dbc.Col(minicard1, width=2),
dbc.Col(minicard2, width=2),
dbc.Col(minicard3, width=2),
dbc.Col(minicard4, width=2),
dbc.Col(minicard5, width=2),
dbc.Col(minicard6, width=2)
])
], style={'margin-top': '10px','margin-left': '10px'})
# -
# ##### Create a tab for AFRI results <a id="afri"></a>
# AFRI results card and tab format
### --> output the results on a card
card_afri = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="afri-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the AFRI tab
afri_tab = html.Div([
html.Div(id="afri-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_afri
])
# ##### Create a tab for CHADS results <a id="chads"></a>
# CHADS results card and tab format
### --> output the results on a card
card_chads = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="chads-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the CHADS tab
chads_tab = html.Div([
html.Div(id="chads-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_chads
])
# ##### Create a tab for POAF results <a id="poaf"></a>
# POAF results card and tab format
### --> output the results on a card
card_poaf = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="poaf-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the POAF tab
poaf_tab = html.Div([
html.Div(id="poaf-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_poaf
])
# ##### Create a tab for NPOAF results <a id="npoaf"></a>
# POAF results card and tab format
### --> output the results on a card
card_npoaf = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="npoaf-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the NPOAF tab
npoaf_tab = html.Div([
html.Div(id="npoaf-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_npoaf
])
# ##### Create a tab for Simplified POAF results <a id="simplified"></a>
# Simplified POAF results card and tab format
### --> output the results on a card
card_simplified = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="simplified-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the AFRI tab
simplified_tab = html.Div([
html.Div(id="simplified-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_simplified
])
# ##### Create a tab for COM-AF results <a id="comaf"></a>
# COM-AF results card and tab format
### --> output the results on a card
card_comaf = html.Div([
dbc.Row([
dbc.Col([
dbc.Card(id="comaf-val", style={'margin-right': '10px', 'margin-bottom': '10px'})
],
width=10)
],
justify='center')
])
### --> establish the format for the COM-AF tab
comaf_tab = html.Div([
html.Div(id="comaf-hist", style={'margin-right': '10px', 'margin-bottom': '10px'}),
card_comaf
])
# [return to top](#top)
# #### App Layout <a id="layout"></a>
# ##### Define the tabs for the risk scores
score_tab = dbc.Tabs(
[
dbc.Tab(afri_tab, label="AFRI", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="afri-tab"),
dbc.Tab(chads_tab, label="CHADS", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="chads-tab"),
dbc.Tab(poaf_tab, label="POAF", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="poaf-tab"),
dbc.Tab(npoaf_tab, label="NPOAF", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="npoaf-tab"),
dbc.Tab(simplified_tab, label="Simplified POAF", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="simplified-tab"),
dbc.Tab(comaf_tab, label="COM-AF", activeTabClassName="fw-bold", tabClassName="flex-grow-1 text-center", tab_id="comaf-tab"),
], id="score-tab"
)
# ##### Define the layout of the two pages
# +
tab1 = dbc.Row(
[
dbc.Col([accordion, calculate_button], width=8),
dbc.Col([card1, card2, card3, card4, card5, card6], width=4),
]
)
tab2 = dbc.Row(
[
dbc.Col([
dcc.Graph(id="stripchart", style={'margin-left': '10px'}),
dropdowns,
minicards
], width=6),
dbc.Col(score_tab, width=6),
]
)
# -
# ##### Define the app layout
app.layout = html.Div(
[
html.H1(children='Atrial Fibrillation Risk Prediction',
style={
'textAlign': 'center',
'margin': '10px'
}),
dbc.Row(
[
dbc.Col(upload, width=4),
],
justify="center",
),
dbc.Tabs(
[
dbc.Tab(tab1, label="Calculate Patient Scores", active_tab_style={"textTransform": "uppercase"}),
dbc.Tab(tab2, label="Compare Scores", active_tab_style={"textTransform": "uppercase"}),
]
),
store_data,
afri_state,
chads_state,
poaf_state,
npoaf_state,
simplified_state,
comaf_state
],
style={'background-color': '#EEF3F8'}
)
# [return to top](#top)
# #### App Callbacks and Configuration <a id="callbacks"></a>
# ##### Establish a Callback for storing the input dataset
# +
default_data = pd.read_csv('../../Data/risk.csv')
def parse_contents(contents, filename, date):
    """Decode a dcc.Upload payload into a pandas DataFrame.

    contents: the 'data:<mime>;base64,<payload>' string supplied by dcc.Upload.
    filename: the original file name; its extension selects the parser.
    date:     upload timestamp (unused; kept so existing callers still work).

    Returns a DataFrame, or None when the file cannot be parsed.
    """
    # Local imports: the notebook header never imports these, so the
    # original raised NameError on the first upload (bug fix).
    import base64
    import io
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV (or .txt CSV export)
            return pd.read_csv(
                io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            return pd.read_excel(io.BytesIO(decoded))
        # Unsupported extension: report it instead of silently failing.
        print('Unsupported file type: {}'.format(filename))
    except Exception as e:
        print(e)
    return None
# -
# ##### Establish a callback for AFRI Calculation
@app.callback(
    [
        dash.dependencies.Output('afri-card', 'children'),
        dash.dependencies.Output('afri-card', 'style'),
        dash.dependencies.Output('afri-mini', 'children'),
        dash.dependencies.Output('afri-mini', 'style'),
        dash.dependencies.Output('afri-state', 'data')
    ],
    [
        dash.dependencies.Input('submit-button', 'n_clicks')
    ],
    [
        dash.dependencies.State('age-state', 'value'),
        dash.dependencies.State('gender-state', 'value'),
        dash.dependencies.State('weight-state', 'value'),
        dash.dependencies.State('height-state', 'value'),
        dash.dependencies.State('ef-state', 'value'),
        dash.dependencies.State('eGFR-state', 'value'),
        dash.dependencies.State('emergency-state', 'value'),
        dash.dependencies.State('conditions-state', 'value'),
        dash.dependencies.State('procedures-state', 'value')
    ],
)
def afri_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the Atrial Fibrillation Risk Index (AFRI) and update its cards.

    Returns (card value, card style, mini-card value, mini-card style,
    stored value); the style turns red for scores of 2 or more.
    """
    ctx = dash.callback_context
    changed_id = ctx.triggered[0]['prop_id'].split('.')[0]
    # Bug fix: default to None so the callback does not raise NameError
    # when it fires on page load, before any click of the submit button.
    afri = None
    if ('submit-button' in changed_id):
        # dbc.Checklist yields None (not []) when nothing is ticked.
        conditions = conditions_state or []
        afri = 0
        # Cutoffs follow the AFRI tooltip ("60+", "76+", ... mean >=),
        # so inclusive comparisons are used (the original used strict >);
        # the female height cutoff is 168 cm per the tooltip (was 169).
        if (gender_state == 'M'):
            if (age_state >= 60):
                afri += 1
            if (weight_state >= 76):
                afri += 1
            if (height_state >= 176):
                afri += 1
            if ('pvd' in conditions):
                afri += 1
        elif (gender_state == 'F'):
            if (age_state >= 66):
                afri += 1
            if (weight_state >= 64):
                afri += 1
            if (height_state >= 168):
                afri += 1
            if ('pvd' in conditions):
                afri += 1
        else:
            afri = None
    # Scores of 2+ are highlighted in red as elevated risk.
    if afri is not None and afri >= 2:
        style = {'textAlign': 'center', 'color': 'crimson'}
    else:
        style = {'textAlign': 'center', 'color': 'slateblue'}
    return afri, style, afri, style, afri
# ##### Establish a callback for CHADS Calculation
@app.callback(
    [
        dash.dependencies.Output('chads-card', 'children'),
        dash.dependencies.Output('chads-card', 'style'),
        dash.dependencies.Output('chads-mini', 'children'),
        dash.dependencies.Output('chads-mini', 'style'),
        dash.dependencies.Output('chads-state', 'data')
    ],
    [
        dash.dependencies.Input('submit-button', 'n_clicks')
    ],
    [
        dash.dependencies.State('age-state', 'value'),
        dash.dependencies.State('gender-state', 'value'),
        dash.dependencies.State('weight-state', 'value'),
        dash.dependencies.State('height-state', 'value'),
        dash.dependencies.State('ef-state', 'value'),
        dash.dependencies.State('eGFR-state', 'value'),
        dash.dependencies.State('emergency-state', 'value'),
        dash.dependencies.State('conditions-state', 'value'),
        dash.dependencies.State('procedures-state', 'value')
    ],
)
def chads_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the CHA2DS2-VASc score and update its display cards.

    Returns (card value, card style, mini-card value, mini-card style,
    stored value); the style turns red for scores of 4 or more.
    """
    ctx = dash.callback_context
    changed_id = ctx.triggered[0]['prop_id'].split('.')[0]
    # Bug fix: default to None so the callback does not raise NameError
    # when it fires on page load, before any click of the submit button.
    chads = None
    if ('submit-button' in changed_id):
        # dbc.Checklist yields None (not []) when nothing is ticked.
        conditions = conditions_state or []
        chads = 0
        if ('chf' in conditions):      # congestive heart failure
            chads += 1
        if ('hbp' in conditions):      # hypertension
            chads += 1
        if (age_state >= 75):          # age >= 75 scores 2
            chads += 2
        if ('dm' in conditions):       # diabetes mellitus
            chads += 1
        if ('stroke' in conditions):   # prior stroke scores 2
            chads += 2
        if ('pvd' in conditions):      # vascular disease
            chads += 1
        if (65 <= age_state <= 74):    # age 65-74 scores 1
            chads += 1
        if (gender_state == 'F'):      # female sex category
            chads += 1
    # Scores of 4+ are highlighted in red as elevated risk.
    if chads is not None and chads >= 4:
        style = {'textAlign': 'center', 'color': 'crimson'}
    else:
        style = {'textAlign': 'center', 'color': 'slateblue'}
    return chads, style, chads, style, chads
# ##### Establish a callback for POAF Calculation
@app.callback(
[
dash.dependencies.Output('poaf-card', 'children'),
dash.dependencies.Output('poaf-card', 'style'),
dash.dependencies.Output('poaf-mini', 'children'),
dash.dependencies.Output('poaf-mini', 'style'),
dash.dependencies.Output('poaf-state', 'data')
],
[
dash.dependencies.Input('submit-button', 'n_clicks')
],
[
dash.dependencies.State('age-state', 'value'),
dash.dependencies.State('gender-state', 'value'),
dash.dependencies.State('weight-state', 'value'),
dash.dependencies.State('height-state', 'value'),
dash.dependencies.State('ef-state', 'value'),
dash.dependencies.State('eGFR-state', 'value'),
dash.dependencies.State('emergency-state', 'value'),
dash.dependencies.State('conditions-state', 'value'),
dash.dependencies.State('procedures-state', 'value')
],
)
def poaf_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the POAF score when the submit button triggered this callback.

    Points: age 60-69 +1, age 70-79 +2, age >= 80 +3, 'copd' +1, eGFR < 15
    (or dialysis) +1, emergency surgery +1, 'iabp' +1, 'cvas' +1 (codes come
    from the conditions/procedures checklists).  Returns the score for the
    poaf card, mini card and stored state, plus a centred text style that
    turns crimson for scores >= 3.  Any other trigger yields None.
    """
    ctx = dash.callback_context
    changed_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if ('submit-button' in changed_id):
        poaf=0
        if (60 <= age_state <= 69):
            poaf=poaf+1
        # BUG FIX: this band previously read `760 <= age_state <= 79`, which
        # can never be true, so ages 70-79 were never awarded their 2 points.
        if (70 <= age_state <= 79):
            poaf=poaf+2
        if (age_state >= 80):
            poaf=poaf+3
        if ('copd' in conditions_state):
            poaf=poaf+1
        # eGFR < 15 and dialysis are alternative routes to the same point.
        if (eGFR_state < 15):
            poaf=poaf+1
        elif ('dialysis' in procedures_state):
            poaf=poaf+1
        if (emergency_state == 1):
            poaf=poaf+1
        if ('iabp' in procedures_state):
            poaf=poaf+1
        if ('cvas' in procedures_state):
            poaf=poaf+1
    else:
        poaf=None
    poaf2 = poaf
    poaf3 = poaf
    if poaf==None:
        style={'textAlign': 'center', 'color':'slateblue'}
    elif poaf>=3:
        style={'textAlign': 'center', 'color':'crimson'}
    else:
        style={'textAlign': 'center', 'color':'slateblue'}
    style2 = style
    return poaf, style, poaf2, style2, poaf3
# ##### Establish a callback for NPOAF Calculation
@app.callback(
[
dash.dependencies.Output('npoaf-card', 'children'),
dash.dependencies.Output('npoaf-card', 'style'),
dash.dependencies.Output('npoaf-mini', 'children'),
dash.dependencies.Output('npoaf-mini', 'style'),
dash.dependencies.Output('npoaf-state', 'data')
],
[
dash.dependencies.Input('submit-button', 'n_clicks')
],
[
dash.dependencies.State('age-state', 'value'),
dash.dependencies.State('gender-state', 'value'),
dash.dependencies.State('weight-state', 'value'),
dash.dependencies.State('height-state', 'value'),
dash.dependencies.State('ef-state', 'value'),
dash.dependencies.State('eGFR-state', 'value'),
dash.dependencies.State('emergency-state', 'value'),
dash.dependencies.State('conditions-state', 'value'),
dash.dependencies.State('procedures-state', 'value')
],
)
def npoaf_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the NPOAF score when the submit button fired this callback.

    Points: age 65-74 +2, age >= 75 +3, 'mmvd' +1, 'smvd' +3, 'lad' +1
    (codes come from the conditions checklist).  Returns the score for the
    npoaf card, mini card and stored state, plus a centred text style that
    turns crimson for scores >= 2.  Any other trigger yields None.
    """
    trigger = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
    if 'submit-button' not in trigger:
        npoaf = None
    else:
        npoaf = 0
        if 65 <= age_state <= 74:
            npoaf += 2
        if age_state >= 75:
            npoaf += 3
        if 'mmvd' in conditions_state:
            npoaf += 1
        if 'smvd' in conditions_state:
            npoaf += 3
        if 'lad' in conditions_state:
            npoaf += 1
    # Crimson flags a high-risk score; slateblue is the neutral colour.
    color = 'crimson' if npoaf is not None and npoaf >= 2 else 'slateblue'
    style = {'textAlign': 'center', 'color': color}
    return npoaf, style, npoaf, style, npoaf
# ##### Establish a callback for Simplified POAF Calculation
@app.callback(
[
dash.dependencies.Output('simplified-card', 'children'),
dash.dependencies.Output('simplified-card', 'style'),
dash.dependencies.Output('simplified-mini', 'children'),
dash.dependencies.Output('simplified-mini', 'style'),
dash.dependencies.Output('simplified-state', 'data')
],
[
dash.dependencies.Input('submit-button', 'n_clicks')
],
[
dash.dependencies.State('age-state', 'value'),
dash.dependencies.State('gender-state', 'value'),
dash.dependencies.State('weight-state', 'value'),
dash.dependencies.State('height-state', 'value'),
dash.dependencies.State('ef-state', 'value'),
dash.dependencies.State('eGFR-state', 'value'),
dash.dependencies.State('emergency-state', 'value'),
dash.dependencies.State('conditions-state', 'value'),
dash.dependencies.State('procedures-state', 'value')
],
)
def simplified_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the Simplified POAF score when the submit button fired.

    Points: age >= 65 +2, 'hbp' +2, 'MI' +1, 'chf' +2 (codes come from the
    conditions checklist).  Returns the score for the simplified card, mini
    card and stored state, plus a centred text style that turns crimson for
    scores >= 3.  Any other trigger yields None.
    """
    trigger = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
    if 'submit-button' not in trigger:
        simplified = None
    else:
        simplified = 0
        if age_state >= 65:
            simplified += 2
        if 'hbp' in conditions_state:
            simplified += 2
        if 'MI' in conditions_state:
            simplified += 1
        if 'chf' in conditions_state:
            simplified += 2
    # Crimson flags a high-risk score; slateblue is the neutral colour.
    color = 'crimson' if simplified is not None and simplified >= 3 else 'slateblue'
    style = {'textAlign': 'center', 'color': color}
    return simplified, style, simplified, style, simplified
# ##### Establish a callback for COM-AF Calculation
@app.callback(
[
dash.dependencies.Output('comaf-card', 'children'),
dash.dependencies.Output('comaf-card', 'style'),
dash.dependencies.Output('comaf-mini', 'children'),
dash.dependencies.Output('comaf-mini', 'style'),
dash.dependencies.Output('comaf-state', 'data')
],
[
dash.dependencies.Input('submit-button', 'n_clicks')
],
[
dash.dependencies.State('age-state', 'value'),
dash.dependencies.State('gender-state', 'value'),
dash.dependencies.State('weight-state', 'value'),
dash.dependencies.State('height-state', 'value'),
dash.dependencies.State('ef-state', 'value'),
dash.dependencies.State('eGFR-state', 'value'),
dash.dependencies.State('emergency-state', 'value'),
dash.dependencies.State('conditions-state', 'value'),
dash.dependencies.State('procedures-state', 'value')
],
)
def comaf_calc(button_click, age_state, gender_state, weight_state, height_state, ef_state, eGFR_state, emergency_state, conditions_state, procedures_state):
    """Compute the COM-AF score when the submit button fired this callback.

    Points: age 65-74 +1, age >= 75 +2, female sex +1, 'hbp' +1, 'dm' +1,
    'stroke' +2 (codes come from the conditions checklist).  Returns the
    score for the comaf card, mini card and stored state, plus a centred
    text style that turns crimson for scores >= 3.  Any other trigger
    yields None.
    """
    trigger = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
    if 'submit-button' not in trigger:
        comaf = None
    else:
        comaf = 0
        if 65 <= age_state <= 74:
            comaf += 1
        if age_state >= 75:
            comaf += 2
        if gender_state == 'F':
            comaf += 1
        if 'hbp' in conditions_state:
            comaf += 1
        if 'dm' in conditions_state:
            comaf += 1
        if 'stroke' in conditions_state:
            comaf += 2
    # Crimson flags a high-risk score; slateblue is the neutral colour.
    color = 'crimson' if comaf is not None and comaf >= 3 else 'slateblue'
    style = {'textAlign': 'center', 'color': color}
    return comaf, style, comaf, style, comaf
# ##### Establish a Callback for the comparison graph
@app.callback(
dash.dependencies.Output('stripchart', 'figure'),
[
dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('afri-state', 'data'),
dash.dependencies.Input('chads-state', 'data'),
dash.dependencies.Input('poaf-state', 'data'),
dash.dependencies.Input('npoaf-state', 'data'),
dash.dependencies.Input('simplified-state', 'data'),
dash.dependencies.Input('comaf-state', 'data'),
dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
dash.dependencies.Input('crossfilter-yaxis-column', 'value')
]
)
def compare_graph(contents, filename, afri_val, chads_val, poaf_val, npoaf_val, simplified_val, comaf_val, xaxis, yaxis):
    """Build the strip chart comparing two risk scores across the cohort.

    Uses the uploaded dataset when present, otherwise the bundled default
    data.  When the current patient's scores have been computed (signalled
    by afri_val not being None), their (x, y) point is overlaid in crimson.
    """
    #### Create a graph to compare risk scores two at a time
    if contents is not None:
        df = parse_contents(contents, filename)
    else:
        df = default_data
    fig = px.strip(x=df[xaxis], y=df[yaxis], color=df['AF'],
              color_discrete_map = {0:'midnightblue',1:'lightsteelblue'},
              labels={'AF':'Atrial Fibrillation', 'npoaf':'NPOAF Score', 'afri': 'AFRI Score'})
    newnames={'0': 'no', '1': 'yes'}
    fig.for_each_trace(lambda t: t.update(name = newnames[t.name]))
    fig.update_layout(title_text='Comparison of Two Scores', title_x=0.5)
    fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)','paper_bgcolor': 'rgba(0, 0, 0, 0)'})
    fig.update_layout(
        xaxis=dict(
            title=xaxis,
            linecolor="#BCCCDC",  # Sets color of X-axis line
            showgrid=False,       # Removes X-axis grid lines
            fixedrange=True
        ),
        yaxis=dict(
            title=yaxis,
            linecolor="#BCCCDC",  # Sets color of Y-axis line
            showgrid=False,       # Removes Y-axis grid lines
            fixedrange=True
        ))
    if afri_val is not None:
        # Resolve the current patient's value for whichever score sits on each axis.
        # BUG FIX: the original if/elif chain tested `yaxis=='comaf'` while
        # resolving the X value, so choosing COM-AF on the x-axis left `xval`
        # undefined and raised UnboundLocalError.
        score_lookup = {'afri': afri_val, 'chads': chads_val, 'poaf': poaf_val,
                        'npoaf': npoaf_val, 'simplified': simplified_val, 'comaf': comaf_val}
        xval = score_lookup[xaxis]
        yval = score_lookup[yaxis]
        # Overlay the current patient as a single crimson marker.
        fig.add_trace(
            go.Scatter(
                x=[xval],
                y=[yval],
                mode="markers",
                marker=dict(color="crimson"),
                showlegend=False)
        )
        return fig
    else:
        return fig
# ##### Establish a callback for calculating validation metrics
@app.callback(
[
dash.dependencies.Output('afri-val', 'children'),
dash.dependencies.Output('chads-val', 'children'),
dash.dependencies.Output('poaf-val', 'children'),
dash.dependencies.Output('npoaf-val', 'children'),
dash.dependencies.Output('simplified-val', 'children'),
dash.dependencies.Output('comaf-val', 'children')
],
[
dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('afri-state', 'data'),
dash.dependencies.Input('chads-state', 'data'),
dash.dependencies.Input('poaf-state', 'data'),
dash.dependencies.Input('npoaf-state', 'data'),
dash.dependencies.Input('simplified-state', 'data'),
dash.dependencies.Input('comaf-state', 'data'),
dash.dependencies.Input('score-tab', 'active_tab')
]
)
def score_val(contents, filename, afri_val, chads_val, poaf_val, npoaf_val, simplified_val, comaf_val, score_tab):
    """Compute validation metrics for the score selected by the active tab.

    Metrics: the current patient's percentile in the cohort, the logistic
    regression odds ratio (with 95% CI) of AF on the score, and
    sensitivity/specificity/PPV/NPV at the score's published cut point.
    Returns one dbc.CardBody per tab output; all six show identical content,
    so a single card is built and returned for each output.
    """
    if contents is not None:
        df = parse_contents(contents, filename)
    else:
        df = default_data
    # Map each tab to (score column, current patient's score, cut point).
    tab_settings = {
        'afri-tab': ('afri', afri_val, 2),
        'chads-tab': ('chads', chads_val, 4),
        'poaf-tab': ('poaf', poaf_val, 3),
        'npoaf-tab': ('npoaf', npoaf_val, 2),
        'simplified-tab': ('simplified', simplified_val, 3),
        'comaf-tab': ('comaf', comaf_val, 3),
    }
    column, val, cut = tab_settings[score_tab]
    df['score'] = df[column]
    ### --> calculate the patient's percentile within the cohort
    if val is not None:
        n_total = len(df)
        n_less = len(df[df['score'] < val])
        percentile = round((n_less / n_total) * 100)
    else:
        percentile = None
    ### --> classify predicted AF outcome based on cut point
    df['AF_cut'] = np.where((df['score'] >= cut), 1, 0)
    ### --> tabulate confusion-matrix totals (TP, FP, FN, TN)
    TP = len(df[(df['AF']==1) & (df['AF_cut']==1)])
    FP = len(df[(df['AF']==0) & (df['AF_cut']==1)])
    FN = len(df[(df['AF']==1) & (df['AF_cut']==0)])
    TN = len(df[(df['AF']==0) & (df['AF_cut']==0)])
    ### --> logistic regression of AF outcome on the raw score
    independent1 = df['score']
    response1 = df['AF']
    log1 = sm.Logit(response1, sm.add_constant(independent1)).fit()  # add_constant adds the intercept
    ### --> exponentiate to get the odds ratio and its 95% CI
    ci1 = np.exp(log1.conf_int(alpha=0.05)).drop(index="const", axis=0)
    ci1.columns = ["2.5%", "97.5%"]
    or1 = np.exp(log1.params['score'].item())
    ci1_lower = ci1['2.5%'].item()
    ci1_higher = ci1['97.5%'].item()
    ### --> format the results for the card
    OR = round(or1, 2)
    lower = round(ci1_lower, 2)
    higher = round(ci1_higher, 2)
    sensitivity = round((TP/(TP+FN))*100)
    specificity = round((TN/(TN+FP))*100)
    PPV = round((TP/(TP+FP))*100)
    NPV = round((TN/(TN+FN))*100)
    # All six tab cards carry the same content; build it once.
    card = dbc.CardBody([
        html.P(["Percentile: ", percentile, "%"]),
        html.P(["Odds Ratio: ", OR, " (95% CI: ", lower, "-", higher, ")"]),
        html.P(["Cut Point: ", cut]),
        html.P(["Sensitivity: ", sensitivity, "%"]),
        html.P(["Specificity: ", specificity, "%"]),
        html.P(["Positive Predictive Value: ", PPV, "%"]),
        html.P(["Negative Predictive Value: ", NPV, "%"])
    ])
    return card, card, card, card, card, card
# ##### Establish a callback for producing score histograms
@app.callback(
[
dash.dependencies.Output('afri-hist', 'children'),
dash.dependencies.Output('chads-hist', 'children'),
dash.dependencies.Output('poaf-hist', 'children'),
dash.dependencies.Output('npoaf-hist', 'children'),
dash.dependencies.Output('simplified-hist', 'children'),
dash.dependencies.Output('comaf-hist', 'children')
],
[
dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename'),
dash.dependencies.Input('score-tab', 'active_tab')
]
)
def afri_val(contents, filename, score_tab):
    """Build the score-distribution histogram for the active tab.

    Plots the selected score column, normalised to probabilities and split
    by AF outcome.  The six tab branches in the original were identical
    apart from the column/label, so they are driven by a lookup table.
    The same dcc.Graph is returned for every tab output (only the active
    tab's container is visible).
    """
    if contents is not None:
        df = parse_contents(contents, filename)
    else:
        df = default_data
    # Map each tab to its score column and display label.
    tab_settings = {
        'afri-tab': ('afri', 'AFRI Score'),
        'chads-tab': ('chads', 'CHA2DS2-VASc Score'),
        'poaf-tab': ('poaf', 'POAF Score'),
        'npoaf-tab': ('npoaf', 'NPOAF Score'),
        'simplified-tab': ('simplified', 'Simplified POAF Score'),
        'comaf-tab': ('comaf', 'COM-AF Score'),
    }
    column, label = tab_settings[score_tab]
    ### --> establish histogram coloured by AF outcome
    fig1 = px.histogram(df, x=column, histnorm="probability", color="AF",
                   color_discrete_map = {0:'midnightblue',1:'lightsteelblue'}, barmode='overlay',
                   labels={'AF':'Atrial Fibrillation', column: label})
    ### --> change figure title (e.g. "AFRI Scores by Atrial Fibrillation Outcome")
    fig1.update_layout(title_text=label + 's by Atrial Fibrillation Outcome', title_x=0.5)
    ### --> update formatting of the figure
    newnames={'0': 'no', '1': 'yes'}
    fig1.for_each_trace(lambda t: t.update(name = newnames[t.name]))
    fig1.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)','paper_bgcolor': 'rgba(0, 0, 0, 0)'})
    fig1.update_layout(xaxis=dict(
        linecolor="#BCCCDC",  # Sets color of X-axis line
        showgrid=False,       # Removes X-axis grid lines
        fixedrange=True
        ),
        yaxis=dict(
        title="Probability",
        linecolor="#BCCCDC",  # Sets color of Y-axis line
        showgrid=False,       # Removes Y-axis grid lines
        fixedrange=True
        ))
    fig1.update_layout(legend=dict(
        yanchor="top",
        y=0.99,
        xanchor="left",
        x=0.85
    ))
    graph = dcc.Graph(figure=fig1)
    return graph, graph, graph, graph, graph, graph
# ##### Define a function for running the server with an option for specifying the port
def run_server(self, host,
               port=8050):
    """Serve the Dash app `self` on the given host and port (default 8050).

    NOTE(review): `serve` is defined elsewhere in this file (presumably
    waitress.serve) -- confirm before relying on its semantics.
    """
    serve(self, host=host, port=port)
# ##### Configure the settings to avoid an attribute error when using JupyterDash
# + tags=[]
# Drop the read-only flag on requests_pathname_prefix so it can be reset;
# works around an AttributeError seen with some dash/jupyter-dash versions.
del app.config._read_only["requests_pathname_prefix"]
# -
# ##### Run the app
# Serve on localhost only; port 13333 must be free on the machine.
app.run_server(host='127.0.0.1', port=13333)
# [return to top](#top)
| Code/Notebooks/AF_dashboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset (outer merge keeps mice that appear
# in only one of the two files)
combined_data_df = pd.merge(mouse_metadata, study_results, how='outer', on='Mouse ID')
# Display the data table for preview
combined_data_df
# -
# Checking the number of mice (count of unique Mouse IDs).
mice = combined_data_df['Mouse ID'].value_counts()
mice_num = len(mice)
mice_num
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
mice_duplicate = combined_data_df.loc[combined_data_df.duplicated(subset = ['Mouse ID', 'Timepoint']), 'Mouse ID'].unique()
# Optional: Get all the data for the duplicate mouse ID.
mice_duplicate_id = pd.DataFrame({'Duplicate ID(s)': mice_duplicate})
mice_duplicate_id
# Create a clean DataFrame by dropping ALL rows belonging to any mouse with a
# duplicated (Mouse ID, Timepoint) measurement.
clean_combined_df = combined_data_df[combined_data_df['Mouse ID'].isin(mice_duplicate)==False]
# Checking the number of mice in the clean DataFrame.
mice_clean = clean_combined_df['Mouse ID'].value_counts()
mice_num_clean = len(mice_clean)
mice_num_clean
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
regimen_mean = clean_combined_df.groupby('Drug Regimen').mean()["Tumor Volume (mm3)"]
regimen_median = clean_combined_df.groupby('Drug Regimen').median()["Tumor Volume (mm3)"]
regimen_variance = clean_combined_df.groupby('Drug Regimen').var()["Tumor Volume (mm3)"]
regimen_stdev = clean_combined_df.groupby('Drug Regimen').std()["Tumor Volume (mm3)"]
regimen_sem = clean_combined_df.groupby('Drug Regimen').sem()["Tumor Volume (mm3)"]
# +
# Assemble the resulting series into a single summary dataframe.
regimen_stats_df = pd.DataFrame({'Mean': regimen_mean,
                                'Median': regimen_median,
                                'Variance': regimen_variance,
                                'Standard Deviation': regimen_stdev,
                                'SEM': regimen_sem
                                })
# Format dataframe
# NOTE(review): this sets a global pandas display option, so it changes how
# every DataFrame after this cell is rendered, not just this one.
pd.options.display.float_format = '{:,.2f}'.format
regimen_stats_df
# -
# Using the aggregation method, produce the same summary statistics in a single line
regimen_agstats_df = clean_combined_df.groupby('Drug Regimen')[['Tumor Volume (mm3)']].agg(['mean', 'median', 'var', 'std', 'sem'])
regimen_agstats_df
# ## Bar and Pie Charts
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
# NOTE(review): value_counts on the raw (uncleaned) frame counts measurement
# rows, not unique mice, and still includes the duplicated mouse -- consider
# clean_combined_df.groupby('Drug Regimen')['Mouse ID'].nunique() instead.
mice_drug_count = combined_data_df['Drug Regimen'].value_counts()
bar_pandas = mice_drug_count.plot.bar(color='g')
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.
x_axis = mice_drug_count.index.values
y_axis = mice_drug_count
# Create a Pyplot bar plot based off of the group series from before and label the title
plt.bar(x_axis, y_axis, color='g', alpha=0.6, align='center')
# Set the xlabel and ylabel, title using class methods
plt.title("Number of Mice Tested per Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Mice")
plt.xticks(rotation="vertical")
plt.show()
# -
# Generate a pie plot showing the distribution of female versus male mice using pandas
gender_data = clean_combined_df["Sex"].value_counts()
gender_data.plot.pie(autopct= "%1.1f%%")
plt.title("Female Vs Male Mice")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# NOTE(review): `labels` and `sizes` below are never used -- the pie is drawn
# directly from gender_data.
labels = ['Female', 'Male']
sizes = [49.7999197, 50.200803]
plot = gender_data.plot.pie(y='Total Count', autopct="%1.1f%%")
plt.title('Male vs Female Mouse Population')
plt.ylabel('Sex')
plt.show()
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
capomulin_df = clean_combined_df.loc[clean_combined_df["Drug Regimen"] == "Capomulin",:]
ramicane_df = clean_combined_df.loc[clean_combined_df["Drug Regimen"] == "Ramicane", :]
infubinol_df = clean_combined_df.loc[clean_combined_df["Drug Regimen"] == "Infubinol", :]
ceftamin_df = clean_combined_df.loc[clean_combined_df["Drug Regimen"] == "Ceftamin", :]
# Start by getting the last (greatest) timepoint for each mouse
capomulin_last = capomulin_df.groupby('Mouse ID').max()['Timepoint']
capomulin_vol = pd.DataFrame(capomulin_last)
# BUG FIX: `ramicane_last` was assigned the whole regimen DataFrame instead of
# the per-mouse final timepoint; mirror the Capomulin computation above.
ramicane_last = ramicane_df.groupby('Mouse ID').max()['Timepoint']
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
capomulin_merge = pd.merge(capomulin_vol, clean_combined_df, on=("Mouse ID","Timepoint"),how="left")
capomulin_merge.head()
# +
# Put treatments into a list for for loop (and later for plot labels)
# Create empty list to fill with tumor vol data (for plotting)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
# add subset
# Determine outliers using upper and lower bounds
# -
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# ## Line and Scatter Plots
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple Character Level LSTM Text Generator
# Trains character by character on text, and then generates new text character by character. The training is done with the public domain text of Leo Tolstoi's novel Anna Karenina.
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
# ## Training Data
# Load in the training book text and encode it as numbers
# + tags=[]
# Read file
with open("anna_karenina_text.txt") as bk:
    txt = bk.read()
# Map each character to integers and provide 2-way mapping dictionaries.
# NOTE(review): tuple(set(txt)) has no stable ordering across interpreter
# runs, so the encoding differs run-to-run; saved checkpoints store
# net.chars, which keeps reloads self-consistent.
chars = tuple(set(txt))
int2char = dict(enumerate(chars))
char2int = {char: i for i, char in int2char.items() }
encoded = np.array([char2int[char] for char in txt])
# Sanity Check:
print("Beginning of text: ", txt[:50], " ...")
print("--------------------")
print("Beginning of encoded: ", encoded[:50], " ...")
# -
# ## Preprocess inputs to be one-hot encoded
def one_hot_encode(array, nr_labels):
    '''
    One-hot encode an integer array.

    Each entry of `array` becomes a length-`nr_labels` float32 vector with a
    single 1 at the entry's index; the vectors are appended as a trailing
    axis, so the result has shape (*array.shape, nr_labels).
    '''
    total = np.multiply(*array.shape)
    encoded = np.zeros((total, nr_labels), dtype=np.float32)
    # Set one entry per row: row k gets a 1 in the column named by array's k-th value.
    encoded[np.arange(total), array.flatten()] = 1
    return encoded.reshape((*array.shape, nr_labels))
# ## Split the training data into mini-batches and sequences
def create_batches(array, nr_sequences, nr_steps):
    '''
    Yield (input, target) mini-batches of shape (nr_sequences, nr_steps).

    The data is trimmed to a whole number of batches and laid out as
    `nr_sequences` long rows; targets are the inputs shifted left by one
    character, with the final target column taken from the next window
    (wrapping to column 0 at the end of the data).
    '''
    window = nr_sequences * nr_steps
    full_windows = len(array) // window
    trimmed = array[:full_windows * window].reshape((nr_sequences, -1))
    for start in range(0, trimmed.shape[1], nr_steps):
        x = trimmed[:, start:start + nr_steps]
        y = np.zeros_like(x)
        y[:, :-1] = x[:, 1:]
        try:
            y[:, -1] = trimmed[:, start + nr_steps]
        except IndexError:
            # Last window: wrap the final target around to the first column.
            y[:, -1] = trimmed[:, 0]
        yield x, y
# ### Sanity Check on batches and sequences
# + tags=[]
# Peek at the first batch: y should be x shifted left by one character.
batches = create_batches(encoded, 10, 50)
x, y = next(batches)
print("x:")
print(x[:10, :10])
print("y:")
print(y[:10, :10])
# -
# ## Define the LSTM Network
class characterLSTM(nn.Module):
    '''
    Character-level text-generator LSTM.

    One-hot encoded characters flow through a multi-layer LSTM, dropout, and
    a fully connected layer that scores every character in the vocabulary.
    `nr_steps` is accepted for interface compatibility but not used by the
    module itself.
    '''
    def __init__(self, chars, nr_steps = 100, nr_hidden = 256, nr_layers = 2, dropout_prob = 0.5, lr = 0.001):
        super().__init__()
        self.dropout_prob = dropout_prob
        self.nr_layers = nr_layers
        self.nr_hidden = nr_hidden
        self.lr = lr
        # Two-way character <-> integer mapping dictionaries
        self.chars = chars
        self.int2char = dict(enumerate(chars))
        self.char2int = {char: i for i, char in self.int2char.items() }
        # LSTM over one-hot inputs; batch_first means inputs are (batch, steps, chars)
        self.lstm = nn.LSTM(len(self.chars), self.nr_hidden, self.nr_layers, dropout = self.dropout_prob, batch_first = True)
        # Add dropout layer to reduce likelihood of overfitting
        self.dropout = nn.Dropout(self.dropout_prob)
        # Final linear fully connected layer for output (one score per character)
        self.fc = nn.Linear(self.nr_hidden, len(self.chars))
        # Initialize weights
        self.initialize_weights()
    def forward(self, x, hc):
        '''
        Forward pass with input batch `x` of shape (batch, steps, len(chars))
        and hidden/cell state tuple `hc`.

        Returns character scores of shape (batch * steps, len(chars)) and
        the updated hidden state.
        '''
        # BUG FIX: removed leftover debug print statements that spammed every
        # training/prediction step, and the dead commented-out x.view line.
        x, (h, c) = self.lstm(x, hc)
        x = self.dropout(x)
        # Stack up LSTM outputs so each time step becomes one row
        x = x.reshape(x.size()[0] * x.size()[1], self.nr_hidden)
        x = self.fc(x)
        return x, (h, c)
    def predict(self, char, h=None, top_k=None):
        '''
        Sample the next character given the current character and hidden
        state.  When `top_k` is set, sample only among the k most likely
        characters.  Returns (next_char, new_hidden_state).
        '''
        if h is None:
            h = self.initialize_hidden_layer(1)
        x = np.array([[self.char2int[char]]])
        x = one_hot_encode(x, len(self.chars))
        # Convert to input Tensor
        inputs = torch.from_numpy(x).type(torch.FloatTensor)
        # Detach the hidden state so no autograd graph is carried between calls
        h = tuple([each.data for each in h])
        out, h = self.forward(inputs, h)
        # Softmax turns raw scores into a sampling distribution
        p = F.softmax(out, dim=1).data
        if top_k is None:
            top_char = np.arange(len(self.chars))
        else:
            p, top_char = p.topk(top_k)
            top_char = top_char.numpy().squeeze()
        p = p.numpy().squeeze()
        char = np.random.choice(top_char, p = p/p.sum())
        return self.int2char[char], h
    def initialize_weights(self):
        '''
        Initialize the fully connected layer: zero bias, uniform weights.
        '''
        # Set bias tensor = 0
        self.fc.bias.data.fill_(0)
        # Random fully connected weights
        self.fc.weight.data.uniform_(-1, 1)
    def initialize_hidden_layer(self, nr_sequences):
        '''
        Return zeroed hidden and cell state tensors, each of shape
        (nr_layers, nr_sequences, nr_hidden), matching the model parameters'
        device and dtype.
        '''
        weight = next(self.parameters()).data
        return (weight.new(self.nr_layers, nr_sequences, self.nr_hidden).zero_(),
                weight.new(self.nr_layers, nr_sequences, self.nr_hidden).zero_())
# # Train the network
#
# Define training function
def train(net, train_data, nr_epochs=10, nr_sequences=10, nr_steps=50, lr=0.001, gradient_clip=5, val_data_fraction=0.1, print_every=10):
    '''
    Train the character LSTM on integer-encoded text.

    The tail `val_data_fraction` of the data is held out; every
    `print_every` batches the validation loss is computed and printed.
    Uses Adam with gradient clipping to avoid exploding gradients.

    BUG FIXES vs. the original:
      * inputs were cast to LongTensor, but nn.LSTM requires float input;
        targets (class indices for CrossEntropyLoss) must be long.
      * validation called `net.init_hidden`, which does not exist -- the
        method is `initialize_hidden_layer` (AttributeError at runtime).
      * validation cast the targets to FloatTensor, which CrossEntropyLoss
        rejects.
      * leftover debug prints removed.
    '''
    net.train()
    # Use Adam and Cross Entropy Loss
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    # Separate training data and validation data
    val_idx = int(len(train_data) * (1-val_data_fraction) )
    train_data, val_data = train_data[:val_idx], train_data[val_idx:]
    counter = 0
    nr_chars = len(net.chars)
    for e in range(nr_epochs):
        h = net.initialize_hidden_layer(nr_sequences)
        for x, y in create_batches(train_data, nr_sequences, nr_steps):
            counter += 1
            # One-hot encode train_data
            x = one_hot_encode(x, nr_chars)
            # LSTM inputs must be float; CrossEntropyLoss targets must be long.
            inputs = torch.from_numpy(x).type(torch.FloatTensor)
            targets = torch.from_numpy(y).type(torch.LongTensor)
            # Detach hidden state so gradients don't flow across batches
            h = tuple([each.data for each in h])
            net.zero_grad()
            # Forward Pass
            output, h = net.forward(inputs, h)
            # Calculate loss
            loss = criterion(output, targets.view(nr_sequences * nr_steps))
            # Back propagate
            loss.backward()
            # Use clip_grad_norm to prevent exploding gradient problem
            nn.utils.clip_grad_norm_(net.parameters(), gradient_clip)
            opt.step()
            if counter % print_every == 0:
                # Get validation loss
                val_h = net.initialize_hidden_layer(nr_sequences)
                val_losses = []
                for x, y in create_batches(val_data, nr_sequences, nr_steps):
                    # One-hot encode data
                    x = one_hot_encode(x, nr_chars)
                    # Convert to Tensors (float inputs, long targets)
                    inputs = torch.from_numpy(x).type(torch.FloatTensor)
                    targets = torch.from_numpy(y).type(torch.LongTensor)
                    # Detach hidden state between validation batches too
                    val_h = tuple([each.data for each in val_h])
                    output, val_h = net.forward(inputs, val_h)
                    val_loss = criterion(output, targets.view(nr_sequences * nr_steps))
                    val_losses.append(val_loss.item())
                print("Epoch #: {}/{}...".format(e+1, nr_epochs),
                      "Step #: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Validation Loss: {:.4f}".format(np.mean(val_losses)))
# Do actual training
#
# + tags=[]
# Clean up old stuff so re-running the cell starts from fresh weights
if 'net' in locals():
    del net
# Instantiate new network
net = characterLSTM(chars, nr_hidden=512, nr_layers=2)
# Print the network
print(net)
# + tags=[]
# Batch geometry: 128 parallel sequences of 100 characters each
nr_sequences = 128
nr_steps = 100
# Train (a single epoch here; raise nr_epochs for a usable model)
train(net, encoded, nr_epochs=1, nr_sequences = nr_sequences, nr_steps = nr_steps, lr=0.001, print_every=10)
# -
# ## Save the Model
# +
model_name = "lstm_try_1.net"
# Persist the hyperparameters alongside the weights so the network can be
# reconstructed before loading the state dict.
checkpoint = {'nr_hidden': net.nr_hidden,
              'nr_layers': net.nr_layers,
              'state_dict': net.state_dict(),
              'chars': net.chars}
with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)
# -
# ## Load the model back
# +
model_name = "lstm_try_1.net"
with open(model_name, 'rb') as f:
    # NOTE(review): torch.load without map_location assumes the checkpoint's
    # original device is available -- consider map_location='cpu' when
    # loading on a different machine.
    checkpoint = torch.load(f)
loaded_net = characterLSTM(checkpoint['chars'], nr_hidden = checkpoint['nr_hidden'], nr_layers = checkpoint['nr_layers'])
loaded_net.load_state_dict(checkpoint['state_dict'])
# -
# ## Generate
def generate(net, size, prime='In the beginning', top_k=None):
    """Generate `size` new characters from a trained character-level LSTM.

    The network is first "primed" by feeding it the characters of `prime`
    so its hidden state reflects that context; it is then sampled one
    character at a time, each prediction fed back as the next input.

    Args:
        net: trained network exposing init_hidden(batch_size) and
            predict(char, hidden, top_k) -> (next_char, hidden).
        size: number of characters to generate after the prime text.
        prime: seed text used to warm up the hidden state.
        top_k: if given, sample only from the k most likely characters.

    Returns:
        The prime text followed by the generated characters, as one string.
    """
    net.eval()  # inference mode
    # Start with prime characters
    chars = [c for c in prime]
    # Batch size 1: we generate a single sequence.
    # Fix: the training loop uses net.init_hidden(...) (see train above);
    # the original call to initialize_hidden_layer does not match it and
    # would raise AttributeError.
    h = net.init_hidden(1)
    # Warm up the hidden state on the prime text; keep only the last prediction.
    for c in prime:
        char, h = net.predict(c, h, top_k=top_k)
    chars.append(char)
    # Pass in previous character and generate the next
    for i in range(size):
        char, h = net.predict(chars[-1], h, top_k=top_k)
        chars.append(char)
    return ''.join(chars)
# + tags=["outputPrepend"]
# Sample 2000 characters from the trained network, restricting each draw
# to the 5 most likely characters for more coherent output.
print(generate(net, 2000, top_k=5))
# -
| ML_PyTorch_LSTM_Character_Level_Text_Generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Example DECAPS - LSST processing of DECam data
# All image data is from NOAO : http://archive.noao.edu
#
# Catalogs are from https://decaps.rc.fas.harvard.edu/catalogs.html
#
#
#
# +
# %matplotlib inline
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
from astropy.table import Column
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
from astropy.table import hstack
from astropy.table import vstack
import os
from matplotlib import rcParams
rcParams['ytick.labelsize'] = 15
rcParams['xtick.labelsize'] = 15
rcParams['axes.labelsize'] = 20
rcParams['axes.linewidth'] = 2
rcParams['font.size'] = 15
rcParams['axes.titlesize'] = 18
# -
lsstDir = 'LSST_Stack/'
decapsDir = 'DECAPS/'
# Read the image database...
image_database = 'imdb.fits'
# download if not yet present
if image_database not in os.listdir() :
    # Fix: urllib is not imported in this notebook's import cell, so the
    # urlretrieve call below would raise NameError on a fresh download.
    import urllib.request
    print('Downloading the catalog...')
    url = 'http://faun.rc.fas.harvard.edu/decaps/release/imdb.fits'
    urllib.request.urlretrieve(url, image_database)
# read into AstroPy table (first extension holds the image metadata table)
imdb_hdu = fits.open(image_database)
imdb = Table(imdb_hdu[1].data)
# +
# Map each DECam exposure (visit) number to its photometric band, then
# report the single-epoch catalog filename recorded in the image database.
visits = {611980: 'g', 611529: 'r'}
for visit in visits:
    matching_row = imdb[imdb['expnum'] == visit]
    catalog_name = matching_row['catfname'].data[0]
    print('The single-band catalog name corresponding to visit %d is %s' % (visit, catalog_name))
# -
# Read in the single-epoch DECAPS catalogs...
#
# Builds two dictionaries keyed by band ('g'/'r'): decaps_data holds one
# stacked AstroPy table of detections per visit, decaps_hdu_dic the open
# FITS HDU lists.
decaps_data = {}
decaps_hdu_dic = {}
for visit in visits.keys():
    cat_name = imdb[imdb['expnum'] == visit]['catfname'].data[0]
    print('\nThe single-band catalog name corresponding \
to visit %d is %s' %(visit, cat_name))
    file_name = decapsDir + cat_name
    #open the decaps catalog ...
    decaps_hdu = fits.open(file_name)
    band = visits[visit]
    decaps_hdu_dic[band] = decaps_hdu
    # decaps_hdu.info() would display all available tables -
    # there is a single catalog per CCD,
    # called 'S21_CAT', etc, based on CCD name.
    print(visit)
    print(decaps_hdu[0].header['DATE-OBS'])
    print('ra=',decaps_hdu[0].header['RA'],'dec=',decaps_hdu[0].header['DEC'])
    # save the zero point for this catalog
    decaps_zeropoint = decaps_hdu[0].header['MAGZERO']
    print('For this catalog, zeropoint is ',decaps_zeropoint)
    # only XTENSION 'IMAGE' are the catalogs...
    # since I don't care here to compare exactly ccd-by-ccd to LSST,
    # I just read in the decaps catalog for all the sources in the
    # full image mosaic.
    # Empty typed table used as the accumulator for all CCDs of this visit.
    arr = {'decaps_mag':[],'decaps_magerr':[],'ra':[],'dec':[], 'flags':[]}
    decaps_stack = Table(arr, names = ('decaps_mag', 'decaps_magerr',
                                       'ra', 'dec', 'flags'),
                         dtype = ('f8', 'f8','f8', 'f8', 'i8'))
    for i in range(1,len(decaps_hdu[:])) :
        if 'IMAGE' in decaps_hdu[i].header['XTENSION'] :
            # if the i-th element is image ,
            # the catalog is i+2 element
            ccd_decaps_cat = Table(decaps_hdu[i+2].data)
            # convert the fluxes to magnitudes
            ccd_decaps_cat['decaps_mag'] = -2.5 * np.log10(ccd_decaps_cat['flux'].data) +\
                decaps_zeropoint
            # dflux/flux error propagation: sigma_mag = (2.5/ln10) * dflux/flux,
            # written here as 5/(2*ln10).
            ccd_decaps_cat['decaps_magerr'] = (5.0 / (2*np.log(10))) * \
                ccd_decaps_cat['dflux'].data / ccd_decaps_cat['flux'].data
            # keep only the relevant info
            ccd_decaps = ccd_decaps_cat[['decaps_mag','decaps_magerr',
                                         'ra','dec', 'flags']]
            decaps_stack = vstack([decaps_stack, ccd_decaps])
    decaps_stack.rename_column('ra', 'ra_decaps')
    decaps_stack.rename_column('dec', 'dec_decaps')
    # store the catalog for a given filter in a dictionary
    band = visits[visit]
    decaps_data[band] = decaps_stack
# Now each decaps catalog is an element of a table ...
decaps_data['g'][:10]
decaps_data['r'][:10]
# Keep only good rows...
decaps_clean = {}
# remove decaps detections with bad flags ...
for band in decaps_data.keys() :
    # NOTE(review): mask_good_pixels ends up selecting rows with flags == 1.
    # Confirm that flags == 1 is indeed the "clean" value in the DECAPS flag
    # scheme; if 0 means clean, this keeps the wrong rows.
    mask_bad_pixels = decaps_data[band]['flags'] != 1
    mask_nan = np.bitwise_not(np.isnan(decaps_data[band]['decaps_mag']))
    mask_good_pixels = np.bitwise_not(mask_bad_pixels)
    # Element-wise AND of the boolean masks (multiplication of bool arrays).
    mask = mask_nan * mask_good_pixels
    decaps_clean[band] = decaps_data[band][mask]
# Read in the LSST CCD catalogs, and put them into one table...
lsst_zeropoints = Table.read('LSST_Stack/lsst_zeropoints.txt', format='ascii')
lsst_zeropoints
# +
# Build lsst_data: one stacked table of magnitudes/positions per band,
# read from the per-CCD src-*.fits catalogs produced by the LSST stack.
lsst_data = {}
for visit in visits.keys() :
    print('For visit=',visit)
    select_rows = lsst_zeropoints['visit'].data == visit
    lsst_zeropoint = lsst_zeropoints['zeropoint'][select_rows]
    print('The LSST- measured zeropoint for decam is %f'%lsst_zeropoint)
    # Initialize storage AstroPy tables :
    arr = {'lsst_mag':[], 'lsst_magerr':[],'coord_ra':[],'coord_dec':[]}
    ccd_lsst_stack = Table(arr, names=('lsst_mag', 'lsst_magerr','coord_ra', 'coord_dec'),
                           dtype=('f8', 'f8','f8', 'f8'))
    outDir = lsstDir+str(visit)+'/'
    # loop over all ccds adding to stacks...
    src_files = os.listdir(outDir)
    # Filenames look like 'src-0<visit>_<ccd>.fits'; slice out the ccd part.
    start = len('src-0'+str(visit)+'_')
    stop = len('.fits')
    print('Reading information from all LSST src catalogs...')
    for i in range(len(src_files)):
        ccdnum = src_files[i][start:-stop] # string
        ccd_number = float(ccdnum)  # NOTE(review): unused below; kept for inspection?
        fname = 'src-0'+str(visit)+'_'+ccdnum+'.fits'
        hdu = fits.open(outDir + fname)
        # convert to an AstroPy table
        ccd_data = Table(hdu[1].data)
        # only consider positive fluxes...
        mask_neg_fluxes = ccd_data['base_PsfFlux_flux'].data > 0
        SN = ccd_data['base_PsfFlux_flux'].data / \
            ccd_data['base_PsfFlux_fluxSigma'].data
        mask_low_sn = SN > 5
        # Element-wise AND: positive flux AND signal-to-noise above 5.
        mask_total = mask_neg_fluxes * mask_low_sn
        # just select rows that don't have negative fluxes...
        ccd_data_good = ccd_data[mask_total]
        ccd_data_good['lsst_mag'] = -2.5* np.log10(ccd_data_good['base_PsfFlux_flux']) +\
            lsst_zeropoint
        # Same 2.5/ln10 error propagation as used for the DECAPS catalogs above.
        ccd_data_good['lsst_magerr'] = (5.0 / (2*np.log(10))) *\
            ccd_data_good['base_PsfFlux_fluxSigma'].data / \
            ccd_data_good['base_PsfFlux_flux'].data
        # keep only most relevant info...
        ccd_lsst = ccd_data_good[['lsst_mag', 'lsst_magerr',
                                  'coord_ra', 'coord_dec']]
        # add to the stack
        ccd_lsst_stack = vstack([ccd_lsst_stack ,ccd_lsst] )
    # rename the products
    ccd_lsst_stack.rename_column('coord_ra', 'ra_lsst')
    ccd_lsst_stack.rename_column('coord_dec', 'dec_lsst')
    # store as elements of a dictionary
    band = visits[visit]
    lsst_data[band] = ccd_lsst_stack
print('Done')
# -
# For DECAPS cross match the two single-band catalogs :
# +
coord_1 = SkyCoord(ra = decaps_clean['g']['ra_decaps']*u.degree,
dec = decaps_clean['g']['dec_decaps']*u.degree)
coord_2 = SkyCoord(ra = decaps_clean['r']['ra_decaps']*u.degree,
dec = decaps_clean['r']['dec_decaps']*u.degree)
idx, d2d, d3d = coord_1.match_to_catalog_sky(coord_2)
# stack the two catalogs
decaps_gr = hstack([decaps_clean['g'] ,decaps_clean['r'][idx]],
table_names=['g','r'] )
# -
# Do the same for LSST :
# +
# NOTE: all LSST detection positions are in radians!
coord_1 = SkyCoord(ra = lsst_data['g']['ra_lsst']*u.rad,
dec = lsst_data['g']['dec_lsst']*u.rad)
coord_2 = SkyCoord(ra = lsst_data['r']['ra_lsst']*u.rad,
dec = lsst_data['r']['dec_lsst']*u.rad)
idx, d2d, d3d = coord_1.match_to_catalog_sky(coord_2)
# stack the two catalogs
lsst_gr = hstack([lsst_data['g'] ,lsst_data['r'][idx]],
table_names=['g','r'] )
# -
# At this point we may want to eg. plot each catalog, and show that it makes sense...
# Display all the column names ...
np.ravel(lsst_gr.colnames)
# plot the histogram
plt.hist(lsst_gr['lsst_mag_g'], histtype='step')
plt.hist(lsst_gr['lsst_mag_r'], histtype='step')
import matplotlib.colors as colors
# %matplotlib inline
g = lsst_gr['lsst_mag_g']
r = lsst_gr['lsst_mag_r']
fig,ax = plt.subplots(1,1, figsize=(8,6))
ax.hist2d(g-r,r, range=[[-1,1],[14,22]], cmin=10,
norm = colors.LogNorm(), bins=50)
plt.gca().invert_yaxis()
ax.set_xlabel('g-r')
ax.set_ylabel('r')
ax.set_title('LSST 20% DECam field')
# Makes sense (cf. Sesar+2010 Fig.23 http://faculty.washington.edu/ivezic/Publications/apj_708_1_717.pdf)
# Color by a third variable, eg. mean photometric error:
#
lsst_gr.colnames
# +
# Colour-magnitude diagram again, but each 2D bin coloured by the mean
# g magnitude of the sources falling in it.
from scipy.stats import binned_statistic_2d
fig,ax = plt.subplots(figsize=(8,6))
x = g-r
y = r
z = g
xmin,xmax = -2,2
ymin,ymax = 14,21
# Restrict to the plotted colour/magnitude window before binning.
mx = (xmin<x)*(x<xmax)
my = (ymin<y)*(y<ymax)
m = mx*my
stats = binned_statistic_2d(x[m], y[m], values = z[m], statistic='mean', bins=50)
z_sigma, x_edges, y_edges = stats[0], stats[1], stats[2]
# replace all nan's by 0 ...
z_sigma[np.isnan(z_sigma)] =0
z_reduce = z_sigma # [:-1, :-1] no need to reduce here because x_edges are already given with the right size
z_min, z_max = z_reduce.min(), np.abs(z_reduce).max()
# binned_statistic_2d returns statistic[x_bin, y_bin]; rotate+flip so
# pcolormesh displays it with x along the horizontal axis.
z_rot = np.rot90(z_reduce) # rotate and flip to properly display...
z_rot_flip = np.flipud(z_rot)
z_masked = np.ma.masked_where(z_rot_flip == 0 , z_rot_flip) # mask out zeros...
# Plot 2D histogram using pcolor
image = ax.pcolormesh(x_edges,y_edges,z_masked, cmap='jet') # np.log10(z_masked) gives log counts
# add a colorbar : define cax : colorbar axis
# NOTE(review): the axes rectangle (0.4 wide, 0.01 tall) is horizontal, yet
# orientation='vertical' is requested below — confirm this renders as intended.
colorbar_ax = fig.add_axes([0.2, 0.89, 0.4, 0.01]) # (x0 ,y0 , dx, dy )
colorbar = fig.colorbar(image, cax = colorbar_ax, orientation='vertical')
# -
| data_products/example_LSST_DECAPS/example_LSST_DECAPS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1 align="center">Non-Rigid Registration: Demons</h1>
#
# This notebook illustrates the use of the Demons based non-rigid registration set of algorithms in SimpleITK. These include both the DemonsMetric which is part of the registration framework and Demons registration filters which are not.
#
# The data we work with is a 4D (3D+time) thoracic-abdominal CT, the Point-validated Pixel-based Breathing Thorax Model (POPI) model. This data consists of a set of temporal CT volumes, a set of masks segmenting each of the CTs to air/body/lung, and a set of corresponding points across the CT volumes.
#
# The POPI model is provided by the Léon Bérard Cancer Center & CREATIS Laboratory, Lyon, France. The relevant publication is:
#
# <NAME>, <NAME>, <NAME>, "The POPI-model, a point-validated pixel-based breathing thorax model",
# Proc. XVth International Conference on the Use of Computers in Radiation Therapy (ICCR), Toronto, Canada, 2007.
#
# The POPI data, and additional 4D CT data sets with reference points are available from the CREATIS Laboratory <a href="http://www.creatis.insa-lyon.fr/rio/popi-model?action=show&redirect=popi">here</a>.
# +
import SimpleITK as sitk
import numpy as np
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
# %run setup_for_testing
import registration_utilities as ru
import registration_callbacks as rc
# %matplotlib inline
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
#utility method that either downloads data from the Girder repository or
#if already downloaded returns the file name for reading from disk (cached data)
# %run update_path_to_download_script
from downloaddata import fetch_data as fdata
# -
# ## Utilities
#
# Load utilities that are specific to the POPI data, functions for loading ground truth data, display and the labels for masks.
# %run popi_utilities_setup.py
# ## Loading Data
#
# Load all of the images, masks and point data into corresponding lists. If the data is not available locally it will be downloaded from the original remote repository.
#
# Take a look at the images. According to the documentation on the POPI site, volume number one corresponds to end inspiration (maximal air volume).
# +
# Load every temporal frame of the POPI 4D CT: the CT volume, its
# air/body/lung mask, and the validated landmark points. fdata downloads
# each file on first use and returns a cached local path afterwards.
images = []
masks = []
points = []
for i in range(0,10):
    image_file_name = f'POPI/meta/{i}0-P.mhd'
    mask_file_name = f'POPI/masks/{i}0-air-body-lungs.mhd'
    points_file_name = f'POPI/landmarks/{i}0-Landmarks.pts'
    images.append(sitk.ReadImage(fdata(image_file_name), sitk.sitkFloat32)) #read and cast to format required for registration
    masks.append(sitk.ReadImage(fdata(mask_file_name)))
    points.append(read_POPI_points(fdata(points_file_name)))

# Interactive browser over the temporal and coronal slices with the lung
# label overlaid (display_coronal_with_overlay comes from popi_utilities_setup).
interact(display_coronal_with_overlay, temporal_slice=(0,len(images)-1),
         coronal_slice = (0, images[0].GetSize()[1]-1),
         images = fixed(images), masks = fixed(masks),
         label=fixed(lung_label), window_min = fixed(-1024), window_max=fixed(976));
# -
# ## Demons Registration
#
# This function will align the fixed and moving images using the Demons registration method. If given a mask, the similarity metric will be evaluated using points sampled inside the mask. If given fixed and moving points the similarity metric value and the target registration errors will be displayed during registration.
#
# As this notebook performs intra-modal registration, we can readily use the Demons family of algorithms.
#
# We start by using the registration framework with SetMetricAsDemons. We use a multiscale approach which is readily available in the framework. We then illustrate how to use the Demons registration filters that are not part of the registration framework.
def demons_registration(fixed_image, moving_image, fixed_points = None, moving_points = None):
    """Deformable registration of moving_image onto fixed_image using the
    Demons metric inside SimpleITK's ImageRegistrationMethod.

    A dense displacement-field transform is optimized with gradient descent
    over a three-level multi-resolution pyramid.

    Args:
        fixed_image: Image defining the reference spatial domain; the
            returned transform maps its points into the moving image.
        moving_image: Image being registered.
        fixed_points, moving_points: Optional corresponding landmark lists;
            when both are given, the metric value and target registration
            error are plotted during the optimization.

    Returns:
        The optimized SimpleITK transform.
    """
    registration_method = sitk.ImageRegistrationMethod()

    # Create initial identity transformation.
    transform_to_displacment_field_filter = sitk.TransformToDisplacementFieldFilter()
    transform_to_displacment_field_filter.SetReferenceImage(fixed_image)
    # The image returned from the initial_transform_filter is transferred to the transform and cleared out.
    initial_transform = sitk.DisplacementFieldTransform(transform_to_displacment_field_filter.Execute(sitk.Transform()))

    # Regularization (update field - viscous, total field - elastic).
    initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0.0, varianceForTotalField=2.0)

    registration_method.SetInitialTransform(initial_transform)

    registration_method.SetMetricAsDemons(10) #intensities are equal if the difference is less than 10HU

    # Multi-resolution framework.
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[8,4,0])

    registration_method.SetInterpolator(sitk.sitkLinear)
    # If you have time, run this code as is, otherwise switch to the gradient descent optimizer
    #registration_method.SetOptimizerAsConjugateGradientLineSearch(learningRate=1.0, numberOfIterations=20, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
    registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=20, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
    registration_method.SetOptimizerScalesFromPhysicalShift()

    # If corresponding points in the fixed and moving image are given then we display the similarity metric
    # and the TRE during the registration.
    if fixed_points and moving_points:
        registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_and_reference_start_plot)
        registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_and_reference_end_plot)
        registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_and_reference_plot_values(registration_method, fixed_points, moving_points))

    return registration_method.Execute(fixed_image, moving_image)
# Running the Demons registration with the conjugate gradient optimizer on this data <font color="red">takes a long time</font> which is why the code above uses gradient descent. If you are more interested in accuracy and have the time then switch to the conjugate gradient optimizer.
# +
# #%%timeit -r1 -n1
# Uncomment the line above if you want to time the running of this cell.
# Select the fixed and moving images, valid entries are in [0,9]
fixed_image_index = 0
moving_image_index = 7
tx = demons_registration(fixed_image = images[fixed_image_index],
moving_image = images[moving_image_index],
fixed_points = points[fixed_image_index],
moving_points = points[moving_image_index]
)
initial_errors_mean, initial_errors_std, _, initial_errors_max, initial_errors = ru.registration_errors(sitk.Euler3DTransform(), points[fixed_image_index], points[moving_image_index])
final_errors_mean, final_errors_std, _, final_errors_max, final_errors = ru.registration_errors(tx, points[fixed_image_index], points[moving_image_index])
plt.hist(initial_errors, bins=20, alpha=0.5, label='before registration', color='blue')
plt.hist(final_errors, bins=20, alpha=0.5, label='after registration', color='green')
plt.legend()
plt.title('TRE histogram');
print(f'Initial alignment errors in millimeters, mean(std): {initial_errors_mean:.2f}({initial_errors_std:.2f}), max: {initial_errors_max:.2f}')
print(f'Final alignment errors in millimeters, mean(std): {final_errors_mean:.2f}({final_errors_std:.2f}), max: {final_errors_max:.2f}')
# -
# SimpleITK also includes a set of Demons filters which are independent of the ImageRegistrationMethod. These include:
# 1. DemonsRegistrationFilter
# 2. DiffeomorphicDemonsRegistrationFilter
# 3. FastSymmetricForcesDemonsRegistrationFilter
# 4. SymmetricForcesDemonsRegistrationFilter
#
# As these filters are independent of the ImageRegistrationMethod we do not have access to the multiscale framework. Luckily it is easy to implement our own multiscale framework in SimpleITK, which is what we do in the next cell.
# +
def smooth_and_resample(image, shrink_factors, smoothing_sigmas):
    """
    Gaussian-smooth an image, then resample it onto a coarser grid.

    Args:
        image: The image we want to resample.
        shrink_factors: Number(s) greater than one, such that the new image's size is original_size/shrink_factor.
        smoothing_sigmas: Sigma(s) for Gaussian smoothing, in physical units, not pixels.
    Return:
        Image which is a result of smoothing the input and then resampling
        it using the given sigma(s) and shrink factor(s).
    """
    dim = image.GetDimension()
    # Broadcast scalar arguments to one value per image axis.
    if np.isscalar(shrink_factors):
        shrink_factors = [shrink_factors] * dim
    if np.isscalar(smoothing_sigmas):
        smoothing_sigmas = [smoothing_sigmas] * dim

    # Smooth first so the subsequent downsampling does not alias.
    blurred = sitk.SmoothingRecursiveGaussian(image, smoothing_sigmas)

    size_in = image.GetSize()
    spacing_in = image.GetSpacing()
    # Round the shrunken size to the nearest integer number of pixels.
    size_out = [int(sz / float(f) + 0.5) for f, sz in zip(shrink_factors, size_in)]
    # Choose output spacing so the physical extent spanned by the pixel
    # centers is preserved.
    spacing_out = [((sz_in - 1) * spc_in) / (sz_out - 1)
                   for sz_in, spc_in, sz_out in zip(size_in, spacing_in, size_out)]
    return sitk.Resample(blurred, size_out, sitk.Transform(),
                         sitk.sitkLinear, image.GetOrigin(),
                         spacing_out, image.GetDirection(), 0.0,
                         image.GetPixelID())
def multiscale_demons(registration_algorithm,
                      fixed_image, moving_image, initial_transform = None,
                      shrink_factors=None, smoothing_sigmas=None):
    """
    Run the given registration algorithm in a multiscale fashion. The original scale should not be given as input as the
    original images are implicitly incorporated as the base of the pyramid.
    Args:
        registration_algorithm: Any registration algorithm that has an Execute(fixed_image, moving_image, displacement_field_image)
                                method.
        fixed_image: Resulting transformation maps points from this image's spatial domain to the moving image spatial domain.
        moving_image: Resulting transformation maps points from the fixed_image's spatial domain to this image's spatial domain.
        initial_transform: Any SimpleITK transform, used to initialize the displacement field.
        shrink_factors (list of lists or scalars): Shrink factors relative to the original image's size. When the list entry,
                                                   shrink_factors[i], is a scalar the same factor is applied to all axes.
                                                   When the list entry is a list, shrink_factors[i][j] is applied to axis j.
                                                   This allows us to specify different shrink factors per axis. This is useful
                                                   in the context of microscopy images where it is not uncommon to have
                                                   unbalanced sampling such as a 512x512x8 image. In this case we would only want to
                                                   sample in the x,y axes and leave the z axis as is: [[8,8,1],[4,4,1],[2,2,1]].
        smoothing_sigmas (list of lists or scalars): Amount of smoothing which is done prior to resampling the image using the given shrink factor. These
                                                     are in physical (image spacing) units.
    Returns:
        SimpleITK.DisplacementFieldTransform
    """
    # Create image pyramid in a memory efficient manner using a generator function.
    # The whole pyramid never exists in memory, each level is created when iterating over
    # the generator.
    def image_pair_generator(fixed_image, moving_image, shrink_factors, smoothing_sigmas):
        end_level = 0
        start_level = 0
        if shrink_factors is not None:
            end_level = len(shrink_factors)
        # Coarsest levels first; the unmodified images are yielded last so
        # the final Execute always runs at full resolution.
        for level in range(start_level, end_level):
            f_image = smooth_and_resample(fixed_image, shrink_factors[level], smoothing_sigmas[level])
            m_image = smooth_and_resample(moving_image, shrink_factors[level], smoothing_sigmas[level])
            yield(f_image, m_image)
        yield(fixed_image, moving_image)

    # Create initial displacement field at lowest resolution.
    # Currently, the pixel type is required to be sitkVectorFloat64 because
    # of a constraint imposed by the Demons filters.
    if shrink_factors is not None:
        original_size = fixed_image.GetSize()
        original_spacing = fixed_image.GetSpacing()
        # Size/spacing for the coarsest level, computed the same way as in
        # smooth_and_resample above.
        s_factors = [shrink_factors[0]]*len(original_size) if np.isscalar(shrink_factors[0]) else shrink_factors[0]
        df_size = [int(sz/float(sf) + 0.5) for sf,sz in zip(s_factors,original_size)]
        df_spacing = [((original_sz-1)*original_spc)/(new_sz-1)
                      for original_sz, original_spc, new_sz in zip(original_size, original_spacing, df_size)]
    else:
        df_size = fixed_image.GetSize()
        df_spacing = fixed_image.GetSpacing()

    if initial_transform:
        initial_displacement_field = sitk.TransformToDisplacementField(initial_transform,
                                                                       sitk.sitkVectorFloat64,
                                                                       df_size,
                                                                       fixed_image.GetOrigin(),
                                                                       df_spacing,
                                                                       fixed_image.GetDirection())
    else:
        # Zero (identity) displacement field when no initial transform is given.
        initial_displacement_field = sitk.Image(df_size, sitk.sitkVectorFloat64, fixed_image.GetDimension())
        initial_displacement_field.SetSpacing(df_spacing)
        initial_displacement_field.SetOrigin(fixed_image.GetOrigin())

    # Run the registration.
    # Start at the top of the pyramid and work our way down.
    # The field is resampled onto each level's grid before refining it.
    for f_image, m_image in image_pair_generator(fixed_image, moving_image, shrink_factors, smoothing_sigmas):
        initial_displacement_field = sitk.Resample (initial_displacement_field, f_image)
        initial_displacement_field = registration_algorithm.Execute(f_image, m_image, initial_displacement_field)
    return sitk.DisplacementFieldTransform(initial_displacement_field)
# -
# Now we will use our newly minted multiscale framework to perform registration with the Demons filters. Some things you can easily try out by editing the code below:
# 1. Is there really a need for multiscale - just call the multiscale_demons method without the shrink_factors and smoothing_sigmas parameters.
# 2. Which Demons filter should you use - configure the other filters and see if our selection is the best choice (accuracy/time).
# +
# Define a simple callback which allows us to monitor the Demons filter's progress.
def iteration_callback(filter):
    # '\r' rewrites the same console line: "<iteration>: <metric value>".
    print(f'\r{filter.GetElapsedIterations()}: {filter.GetMetric():.2f}', end='')

fixed_image_index = 0
moving_image_index = 7

# Select a Demons filter and configure it.
demons_filter = sitk.FastSymmetricForcesDemonsRegistrationFilter()
demons_filter.SetNumberOfIterations(20)
# Regularization (update field - viscous, total field - elastic).
demons_filter.SetSmoothDisplacementField(True)
demons_filter.SetStandardDeviations(2.0)

# Add our simple callback to the registration filter.
demons_filter.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(demons_filter))

# Run the registration.
# Two coarse levels (shrink 4 then 2) plus the implicit full-resolution level.
tx = multiscale_demons(registration_algorithm=demons_filter,
                       fixed_image = images[fixed_image_index],
                       moving_image = images[moving_image_index],
                       shrink_factors = [4,2],
                       smoothing_sigmas = [8,4])

# Compare the initial and final TREs.
# Euler3DTransform() (identity) gives the pre-registration baseline errors.
initial_errors_mean, initial_errors_std, _, initial_errors_max, initial_errors = ru.registration_errors(sitk.Euler3DTransform(), points[fixed_image_index], points[moving_image_index])
final_errors_mean, final_errors_std, _, final_errors_max, final_errors = ru.registration_errors(tx, points[fixed_image_index], points[moving_image_index])

plt.hist(initial_errors, bins=20, alpha=0.5, label='before registration', color='blue')
plt.hist(final_errors, bins=20, alpha=0.5, label='after registration', color='green')
plt.legend()
plt.title('TRE histogram');
print(f'\nInitial alignment errors in millimeters, mean(std): {initial_errors_mean:.2f}({initial_errors_std:.2f}), max: {initial_errors_max:.2f}')
print(f'Final alignment errors in millimeters, mean(std): {final_errors_mean:.2f}({final_errors_std:.2f}), max: {final_errors_max:.2f}')
# -
# -
# ## Transferring Segmentation
#
# The following example illustrates the use of the Demons registration method to transfer a segmentation from one dataset to another. Note that we use the same multi scale demons framework we used above for registering 3D images to register 2D images.
#
# Data provided courtesy of <NAME>, Signal Analysis and Interpretation Laboratory University of Southern California. Data acquisition protocol described in:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> "State of the art MRI protocol for comprehensive assessment of vocal tract structure and function" Proceedings of Interspeech, San Francisco, 2016.
#
#
# **NOTE**: Segmented contour points are given in pixel coordinates. This is useful for plotting. To transfer the points to the second image we need to (1) use their physical coordinates, (2) transform them, and then (3) obtain the pixel coordinates for plotting.
#
# ### Load Data
# +
import glob
import pandas as pd
from gui import multi_image_display2D
# Fetch all of the data associated with this example.
data_directory = os.path.dirname(fdata("mr_slice_atlas/readme.txt"))
segmented_img = sitk.ReadImage(os.path.join(data_directory,'segmented_image.mha'))
new_img = sitk.ReadImage(os.path.join(data_directory,'new_image.mha'))
contours_list = []
for file_name in glob.glob(os.path.join(data_directory,'*.csv')):
df = pd.read_csv(file_name)
contours_list.append((list(df['X']), list(df['Y'])))
# Display the images and overlay the contours onto the segmented image.
fig,axes = multi_image_display2D([segmented_img, new_img])
for contour in contours_list:
axes[0].plot(contour[0], contour[1], linewidth=5)
# -
# ### Register and transfer the segmentation.
# +
# Select a Demons filter and configure it.
demons_filter = sitk.DiffeomorphicDemonsRegistrationFilter()
demons_filter.SetNumberOfIterations(20)
# Regularization (update field - viscous, total field - elastic).
demons_filter.SetSmoothDisplacementField(True)
demons_filter.SetStandardDeviations(0.8)

# create initial transform
# NOTE(review): the chained assignment also binds the global name
# 'initial_transform', which is never used afterwards — likely a leftover.
initial_tfm = initial_transform = sitk.CenteredTransformInitializer(segmented_img,
                                                                    new_img,
                                                                    sitk.Euler2DTransform(),
                                                                    sitk.CenteredTransformInitializerFilter.GEOMETRY)

# Run the registration (same multiscale framework, here on 2D images).
final_tfm = multiscale_demons(registration_algorithm=demons_filter,
                              fixed_image = segmented_img,
                              moving_image = new_img,
                              initial_transform = initial_tfm,
                              shrink_factors = [6,4,2],
                              smoothing_sigmas = [6,4,2])

# Display the transformed segmentation.
fig,axes = multi_image_display2D([segmented_img, new_img])
for contour in contours_list:
    # Plot on segmented image.
    axes[0].plot(contour[0], contour[1], linewidth=5)
    # Transform the contour points from segmented image to new image (requires the use of points in physical space)
    transformed_contour = [new_img.TransformPhysicalPointToContinuousIndex(final_tfm.TransformPoint(segmented_img.TransformContinuousIndexToPhysicalPoint(p))) for p in zip(contour[0],contour[1])]
    x_coords, y_coords = zip(*transformed_contour)
    axes[1].plot(x_coords, y_coords, linewidth=5)
# -
# -
| Python/66_Registration_Demons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/msiddalingaiah/MachineLearning/blob/master/Recommendations/Movie_Recommendations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="NT4wCSFZmJnl"
# # Movie Recommendations
#
# Recommendations are a common machine learning task widely used by many leading companies, such as Netflix, Amazon, and YouTube. If you have used any of these online services, you are familiar with recommendations that are often prefixed with "You might also like.." or "Recommended items other customers buy...".
#
# There are many ways to generate recommendations. It could be done based on simple criteria, such as movie genre, e.g. comedies or action adventure. More sophisticated recommendations might consider many more factors, such as the director, when the movie was produced and so on.
#
# In this example, we will use a common, straightforward method known as [collaborative filtering](https://en.wikipedia.org/wiki/Collaborative_filtering). This method is based on the idea that many customers have similar likes and dislikes. It also considers similarities between products. It's a simple, yet effective technique that depends only on user preferences, such as product ratings. If you have a sufficiently large dataset of ratings from your customers, then this approach is a good place to start.
# + colab_type="code" id="WStenIzSmBFf" colab={}
# %tensorflow_version 2.x
# + colab_type="code" id="s_kskAKSmP_8" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Input, Embedding, Flatten, Dot, Dense, Add, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import LabelEncoder
# + [markdown] colab_type="text" id="shIqoQ8CmZbb"
# # Load data
#
# In this example, we will make movie recommendations given about 100,000 samples from roughly 10,000 customers or users.
#
# The data set is freely available on the [MovieLens website](https://grouplens.org/datasets/movielens/).
# + colab_type="code" id="8iSN-FLpmfqc" outputId="42009c1c-bc00-43aa-cb69-e22256bd3ec0" colab={"base_uri": "https://localhost:8080/", "height": 211}
# !wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
# + colab_type="code" id="r9VHQzMBm7EN" outputId="d0882272-6bac-43a3-fc8c-2551d881a7ef" colab={"base_uri": "https://localhost:8080/", "height": 140}
# !unzip ml-latest-small.zip
# + colab_type="code" id="m8gfiZ0QnROz" outputId="c030dab8-7b8e-4066-edc2-6ab7aafb8385" colab={"base_uri": "https://localhost:8080/", "height": 203}
movies = pd.read_csv('ml-latest-small/movies.csv')
movies.head()
# + colab_type="code" id="LwtxE_wWAS_P" outputId="c53d9908-f2e5-45f8-8df8-e4696d849f32" colab={"base_uri": "https://localhost:8080/", "height": 203}
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
# + [markdown] colab_type="text" id="K7uejLK0AFi0"
# ## Join Ratings with Movies
#
# The ratings don't contain movie titles, so let's join or merge these two sets for convenience.
# + colab_type="code" id="trWhyJF9neTr" outputId="99727059-b3c8-4f0c-fc75-200c589494cc" colab={"base_uri": "https://localhost:8080/", "height": 203}
ratings = ratings.merge(movies, on='movieId').drop(['genres','timestamp'],axis=1)
ratings.head()
# + [markdown] colab_type="text" id="cnWx4rr2KEht"
# ## Generate Sequential Identifiers
#
# `userId` and `movieId` are not sequential, which causes problems for our model. To compensate, we can use the [LabelEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class from [scikit-learn](https://scikit-learn.org/) to generate sequential identifiers for users and movies. The original identifiers are still available, so we can always join back to the original data set if needed.
# + colab_type="code" id="JUOvcGER93u4" outputId="e08a0726-82f5-4da5-bcbf-672b1b9c54c5" colab={"base_uri": "https://localhost:8080/", "height": 203}
user_enc = LabelEncoder()
ratings['userSeq'] = user_enc.fit_transform(ratings['userId'].values)
item_enc = LabelEncoder()
ratings['movieSeq'] = item_enc.fit_transform(ratings['movieId'].values)
ratings.head()
# + [markdown] colab_type="text" id="QKug9IoGBlvo"
# ## Train/Test Split
#
# This case is a bit unusual because we need ratings for every movie from every user to train an accurate model. If we used a traditional split, some movies might be left out, which will cause problems during prediction.
#
# For this reason, we will use all of the data for training and a subset for model validation only.
# + colab_type="code" id="cWzCvTtHno9b" outputId="bf64cc10-7a58-49f1-9446-af9abb0ae47d" colab={"base_uri": "https://localhost:8080/", "height": 52}
train_unused, test = train_test_split(ratings, test_size=0.20, random_state=0)
# All data is used for training
train = ratings
numUsers = len(train.userSeq.unique())
numMovies = len(train.movieSeq.unique())
print((numUsers, numMovies))
print((len(train), len(test)))
# + [markdown] colab_type="text" id="wEOJRfE2tBmO"
# ## Recommendation Model
#
# Collaborative filtering tries to minimize the error between a predicted value and ground truth. This is similar to many supervised machine learning problems. The model learns a set of features that similar movies share. The number of features could be as simple as the genre or more complex. The `numFeatures` variable below is a hyperparameter that can be tuned to optimize performance.
#
# This model uses the [Keras functional API](https://keras.io/getting-started/functional-api-guide/) rather than adding layers to a Sequential model. This is necessary because we have two sets of inputs, userSeq and movieSeq.
# + colab_type="code" id="s6WzjAvComok" outputId="5ccadb8a-77cc-435d-e56c-6b75e741d7cc" colab={"base_uri": "https://localhost:8080/", "height": 703}
numFeatures = 50   # size of the learned embedding per user/movie (tunable hyperparameter)
dropout = 0.0      # dropout rate applied to the embeddings and the output

# User branch: embed the user id and flatten it to a feature vector.
user_input = Input(shape=(1,))
user_emb = Embedding(numUsers, numFeatures)(user_input)
flat_user = Flatten()(user_emb)
user_dropout = Dropout(dropout)(flat_user)

# Movie branch: same structure as the user branch.
movie_input = Input(shape=(1,))
movie_emb = Embedding(numMovies, numFeatures)(movie_input)
flat_movie = Flatten()(movie_emb)
movie_dropout = Dropout(dropout)(flat_movie)

# Predicted rating = user . movie (dot product) + per-user bias + per-movie bias.
dotProduct = Dot(axes=1)([user_dropout, movie_dropout])
user_bias = Embedding(numUsers, 1)(user_input)
movie_bias = Embedding(numMovies, 1)(movie_input)
# Renamed from `sum`, which shadowed the Python built-in sum().
rating_sum = Add()([dotProduct, user_bias, movie_bias])
flat_sum = Flatten()(rating_sum)
output = Dropout(dropout)(flat_sum)

model = Model([user_input, movie_input], output)
model.summary()
# + [markdown] colab_type="text" id="rwBRWEY8uOOv"
# ## Model Training
# + colab_type="code" id="5ZXh0fuiuNfx" outputId="6fa9741a-6b50-4615-a29c-3692d09f77bf" colab={"base_uri": "https://localhost:8080/", "height": 647}
# Train with mean-squared-error loss; the 20% held-out split is used for validation.
model.compile(loss='mean_squared_error', optimizer=Adam())
history = model.fit([train.userSeq, train.movieSeq], train.rating,
                    batch_size=32, epochs=10, verbose=1,
                    validation_data=([test.userSeq, test.movieSeq], test.rating))
# Plot the learning curves. The plotted quantity is the loss (MSE), so the
# chart is labelled accordingly (the original labels incorrectly said "accuracy").
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# + [markdown] colab_type="text" id="TQGSmqmeN6Et"
# Notice the validation loss is slightly lower than the training loss. If the model was overfitting, then the opposite would be true, so this is a peculiar case.
#
# There are a few reasons this can happen:
#
#
# 1. Keras artifact explained the [Keras FAQ](https://keras.io/getting-started/faq/#why-is-the-training-loss-much-higher-than-the-testing-loss). Keras computes training loss as the average during training time, which can change quite a bit during one epoch. Validation is computed at the end of an epoch when the model loss is probably lower.
# 2. The test set is not representative of the training set. In some cases, the test set might be easier to predict than the training set. This could happen if a very small test set is used.
#
#
#
#
#
# + [markdown] colab_type="text" id="EjmI0CuZFLIU"
# ## Make Predictions
#
# We can make predictions for a given user by creating a numpy array of all movies and a numpy array of the same dimension filled with just the one user we are interested in. The model will predict ratings for the specified user given all movies in the full data set.
#
# We can then sort the data set by predicted rating descending to get the best recommendations first.
# + colab_type="code" id="WwzJSCvhj8U8" outputId="924030be-686e-4b12-8f1c-59d79317280b" colab={"base_uri": "https://localhost:8080/", "height": 203}
# The user for whom we want to make recommendations
userNumber = 0
# One row per distinct movie, so each movie is scored exactly once.
uniqueMovies = ratings.drop_duplicates(subset=['movieSeq'])
movie_vector = uniqueMovies.movieSeq.values
# Repeat the chosen user id once per movie to pair with movie_vector.
user_vector = np.ones((len(uniqueMovies),)) * userNumber
predictions = model.predict([user_vector, movie_vector])
predictedDF = uniqueMovies.copy()
predictedDF['Predictions'] = predictions
# Highest predicted ratings first - the head(5) are the top recommendations.
predictedDF.sort_values(by='Predictions', ascending=False).head(5)
# + [markdown] colab_type="text" id="Ga2dHwYa7pP2"
# ## Error Analysis
#
# Let's look at some movies where the ground truth did not compare well with predictions.
# + colab_type="code" id="xC6srYBe6CU3" outputId="788718e2-1f6c-41d9-9556-9f3c58312e3c" colab={"base_uri": "https://localhost:8080/", "height": 203}
oneUser = predictedDF[predictedDF.userSeq == userNumber].copy()
oneUser['Error'] = (oneUser.rating - oneUser.Predictions)**2
oneUser.sort_values(by='Error', ascending=False).head(5)
# + id="tdBMbHATJtz8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="80060c73-b689-4cca-9e19-9dc0ec0254f2"
ratings[ratings.movieSeq == 919].sort_values(by='rating', ascending=True)
| Recommendations/Movie_Recommendations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def normalize_string(text: str, char_to_cut: str = r"!?&-,: '", lower: bool = True) -> str:
    """Remove unwanted characters from ``text`` and optionally lowercase it.

    Parameters
    ----------
    text : the string to normalize.
    char_to_cut : every individual character of this string is stripped
        from ``text``; the default removes common punctuation and spaces.
    lower : when True (the default) the result is lowercased.

    Returns the normalized string.
    """
    # str.translate removes every occurrence of each listed character in a
    # single pass, replacing the original per-character replace() loop.
    text = text.translate(str.maketrans("", "", char_to_cut))
    return text.lower() if lower else text


if __name__ == "__main__":
    print(normalize_string("Hello World"))
| code/normalize_string.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.018943, "end_time": "2020-04-28T12:08:06.188782", "exception": false, "start_time": "2020-04-28T12:08:06.169839", "status": "completed"} tags=[]
# # Achate a curva
# > Número de casos ativos e estimativa de leitos de UTI total e para pacientes covid-19
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [covid-19, brasil, comparativo, altair, jupyter]
# - image: images/brazil-flatten-the-curve.png
# + papermill={"duration": 0.441167, "end_time": "2020-04-28T12:08:06.644848", "exception": false, "start_time": "2020-04-28T12:08:06.203681", "status": "completed"} tags=[]
#hide
import pandas as pd
import altair as alt
import math
from altair_saver import save
from IPython.display import HTML
CHART_WIDTH = 600
CHART_HEIGHT = 400
# -
#hide
url_icus = ('https://covid-insumos.saude.gov.br/paineis/insumos/lista_csv_painel.php?output=csv')
src_icus = pd.read_csv(url_icus, sep=';')
#hide
STATE_COLUMN = "Estado"
ICUS_COLUMN = 'UTIs'
AVAILABLE_PERCENTAGE_ICU = 0.35
src_icus = src_icus.rename(columns={'Leitos UTI adulto':ICUS_COLUMN, 'uf':STATE_COLUMN})
#hide
TOTAL_ICU = src_icus[ICUS_COLUMN].sum()
TOTAL_AVAILABLE_ICU = TOTAL_ICU * AVAILABLE_PERCENTAGE_ICU
#hide
DATE_COLUMN = "Data"
CONFIRMED_CASES = 'Casos confirmados'
CASES_NEED_ICU = 0.05
ICU_PER_100k = 20
CASES_NEED_ICU_COLUMN = "{:.0f}".format(CASES_NEED_ICU*100)+"% dos casos confirmados"
PATIENTS = "Pacientes"
AVAILABLE_ICUS_COLUMN = "{:.0f}".format(AVAILABLE_PERCENTAGE_ICU*100)+"% das UTIs"
UPPER_BOUND_ICU_NEED_STR = "Limite superior de necessidade de UTI"
LOWER_BOUND_ICU_NEED_STR = "Limite inferior de necessidade de UTI"
UPPER_BOUND_ICU_NEED = 0.12
LOWER_BOUND_ICU_NEED = 0.01
#hide
url = ('https://data.brasil.io/dataset/covid19/caso.csv.gz')
src = pd.read_csv(url)
#hide
df = src[(src['place_type'] == 'state')]
df = df.rename(columns={
"state": STATE_COLUMN,
"date":DATE_COLUMN,
"confirmed":CONFIRMED_CASES
})
# hold a backup
backup = df.copy()
#hide
cols = df.columns[~df.columns.isin([DATE_COLUMN, STATE_COLUMN, CONFIRMED_CASES])]
df = df.drop(cols, axis=1)
# +
#hide
# # copy latest value on empty last items
df = df.set_index(STATE_COLUMN)
df = df.reset_index()
df = df.sort_values(by=[DATE_COLUMN], ascending=True)
df[DATE_COLUMN] = pd.to_datetime(df[DATE_COLUMN])
LAST_DATE = max(df[DATE_COLUMN])
while True:
copied_registers = []
for row in df.groupby(STATE_COLUMN).last().iterrows():
if row[1][DATE_COLUMN] < LAST_DATE:
copied_registers.append([row[0], row[1][DATE_COLUMN] + pd.DateOffset(days=1),
row[1][CONFIRMED_CASES]])
for row in copied_registers:
print(row)
df.loc[row[0]] = [row[0],row[1],row[2]]
df = df.set_index(STATE_COLUMN)
df = df.reset_index()
if ((df.groupby(STATE_COLUMN).last()[DATE_COLUMN] == LAST_DATE).all()):
break
df = df.sort_values(by=[DATE_COLUMN], ascending=True)
df[DATE_COLUMN] = df[DATE_COLUMN].dt.strftime('%m/%d/%y')
# -
#hide
# get total per day and per state
df = df[[STATE_COLUMN,DATE_COLUMN,CONFIRMED_CASES]].groupby([STATE_COLUMN,DATE_COLUMN],as_index = False).sum().pivot(STATE_COLUMN,DATE_COLUMN).fillna(0)
total = df.sum(axis=0)
df.loc['Total',CONFIRMED_CASES]=total
df = df.reset_index()
#hide
# unpivot data
df = df.melt(id_vars=[STATE_COLUMN], value_vars=[CONFIRMED_CASES])
df = df.drop([None], axis=1)
df = df.rename(columns={'value':CONFIRMED_CASES})
#hide
# add lower and upper bounds
df = df[(df[STATE_COLUMN] == 'Total')]
df[UPPER_BOUND_ICU_NEED_STR] = round(df[CONFIRMED_CASES] * (UPPER_BOUND_ICU_NEED),0)
df[LOWER_BOUND_ICU_NEED_STR] = round(df[CONFIRMED_CASES] * (LOWER_BOUND_ICU_NEED),0)
df[CONFIRMED_CASES] = round(df[CONFIRMED_CASES] * (CASES_NEED_ICU),0)
# +
#hide_input
selection = alt.selection_single(fields=[STATE_COLUMN], name=' ')
color = alt.condition(selection,
alt.Color(STATE_COLUMN+':N',
scale=alt.Scale(scheme='tableau20', reverse=False), legend=None),
alt.value('#ffbf79')
)
chart = alt.Chart(df).mark_line().encode(
x=alt.X(DATE_COLUMN+':O', axis=alt.Axis(title=DATE_COLUMN)),
y=alt.Y(CONFIRMED_CASES+':Q', axis=alt.Axis(
title=["Estimativa de casos que precisam de UTI: {:.0f}".format(CASES_NEED_ICU*100)+"% (variando entre {:.0f}".format(LOWER_BOUND_ICU_NEED*100)+"% e {:.0f}".format(UPPER_BOUND_ICU_NEED*100)+"%)"])),
color=color,
tooltip=[
DATE_COLUMN,
STATE_COLUMN,
CONFIRMED_CASES
],
order=alt.Order(
STATE_COLUMN,
sort='ascending'
)
).properties(
title=[
"Achate a curva - Brasil",
"Casos confirmados que estima-se que precisem de UTI (ver premissas)"
]
).add_selection(
selection
)
shades = alt.Chart(df).mark_area().encode(
x=DATE_COLUMN+':O',
y=LOWER_BOUND_ICU_NEED_STR+":Q",
y2=UPPER_BOUND_ICU_NEED_STR+':Q',
opacity = alt.condition(selection, alt.value(0.2), alt.value(0.5))
)
x0line = alt.Chart(pd.DataFrame({'y': [TOTAL_ICU]})).mark_rule(color='#e42726', strokeWidth=2).encode(
y='y:Q'
)
text0 = x0line.mark_text(align='left', x=5, dy=10, color='#e42726', strokeWidth=1).encode(
text=alt.value("Total de "+"{:.0f}".format(TOTAL_ICU)+" UTIs do país")
)
x1line = alt.Chart(pd.DataFrame({'y': [TOTAL_AVAILABLE_ICU]})).mark_rule(color='darkorange', strokeWidth=2).encode(
y='y:Q'
)
text1 = x1line.mark_text(align='left', x=5, dy=-10, color='darkorange', strokeWidth=1).encode(
text=alt.value("Estimativa para COVID19: "+"{:.0f}".format(TOTAL_AVAILABLE_ICU)+" UTIs")
)
text2 = x1line.mark_text(align='left', x=5, dy=10, color='darkorange', strokeWidth=1).encode(
text=alt.value("{:.0f}".format(AVAILABLE_PERCENTAGE_ICU*100)+"% do total de UTIs do país")
)
legend = alt.Chart(df).mark_point().encode(
y=alt.Y(STATE_COLUMN+':N', axis=alt.Axis(orient='right')),
color=color
).add_selection(
selection
)
plot = chart.properties(width=CHART_WIDTH, height=CHART_HEIGHT) + x0line + text0 + x1line + text1 + text2 + shades
plot
# -
#hide
# get only states
df = backup[(backup[STATE_COLUMN] != 'Total')]
cols = df.columns[~df.columns.isin([DATE_COLUMN, STATE_COLUMN, CONFIRMED_CASES])]
df = df.drop(cols, axis=1)
#df.tail()
#hide
# add lower and upper bounds
df[CASES_NEED_ICU_COLUMN] = round(df[CONFIRMED_CASES] * (CASES_NEED_ICU),0)
df[LOWER_BOUND_ICU_NEED_STR] = round(df[CONFIRMED_CASES] * (LOWER_BOUND_ICU_NEED),0)
df[UPPER_BOUND_ICU_NEED_STR] = round(df[CONFIRMED_CASES] * (UPPER_BOUND_ICU_NEED),0)
src_icus = src_icus.set_index(STATE_COLUMN)
df = df.set_index(STATE_COLUMN)
df[ICUS_COLUMN] = src_icus[ICUS_COLUMN].astype(float)
df[AVAILABLE_ICUS_COLUMN] = df[ICUS_COLUMN] * AVAILABLE_PERCENTAGE_ICU
src_icus = src_icus.reset_index()
df = df.reset_index()
#hide
# # copy latest value on empty last items
df[DATE_COLUMN] = pd.to_datetime(df[DATE_COLUMN])
df = df.sort_values(by=[DATE_COLUMN])
LAST_DATE = max(df[DATE_COLUMN])
while ((df.groupby(STATE_COLUMN).last()[DATE_COLUMN] < LAST_DATE).any()):
copied_registers = []
for row in df.groupby(STATE_COLUMN).last().iterrows():
if row[1][DATE_COLUMN] < LAST_DATE:
copied_registers.append([row[0], row[1][DATE_COLUMN] + pd.DateOffset(days=1),
row[1][CONFIRMED_CASES],
row[1][CASES_NEED_ICU_COLUMN],
row[1][LOWER_BOUND_ICU_NEED_STR],
row[1][UPPER_BOUND_ICU_NEED_STR],
row[1][ICUS_COLUMN],
row[1][AVAILABLE_ICUS_COLUMN]])
for row in copied_registers:
df.loc[row[0]] = [row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]]
df = df.set_index(STATE_COLUMN)
df = df.reset_index()
df = df.sort_values(by=[DATE_COLUMN])
df[DATE_COLUMN] = df[DATE_COLUMN].dt.strftime('%m/%d/%y')
# +
#hide_input
opt = df[STATE_COLUMN].unique()
opt.sort()
input_dropdown = alt.binding_select(options=opt)
selection = alt.selection_single(
fields=[STATE_COLUMN],
init={STATE_COLUMN:'AC'},
bind=input_dropdown,
name=" ")
color = alt.condition(selection,
alt.Color(STATE_COLUMN+':N',
scale=alt.Scale(scheme='Paired', reverse=True), legend=None),
alt.value('#ffbf79')
)
chart = alt.Chart(df).mark_line().encode(
x=alt.X(DATE_COLUMN+':O', axis=alt.Axis(title=DATE_COLUMN)),
y=alt.Y(CASES_NEED_ICU_COLUMN+':Q', axis=alt.Axis(
title=["Estimativa de casos que precisam de UTI: {:.0f}".format(CASES_NEED_ICU*100)+"% (variando entre {:.0f}".format(LOWER_BOUND_ICU_NEED*100)+"% e {:.0f}".format(UPPER_BOUND_ICU_NEED*100)+"%)"])),
color=color,
tooltip=[
DATE_COLUMN,
STATE_COLUMN,
CASES_NEED_ICU_COLUMN
],
order=alt.Order(
STATE_COLUMN,
sort='ascending'
)
).properties(
title=[
"Achate a curva por estado",
"Casos confirmados que estima-se que precisem de UTI (ver premissas)"
]
).add_selection(
selection
).transform_filter(
selection
)
shades = alt.Chart(df).mark_area().encode(
x=DATE_COLUMN+':O',
y=LOWER_BOUND_ICU_NEED_STR+":Q",
y2=UPPER_BOUND_ICU_NEED_STR+':Q',
color=color,
opacity = alt.condition(selection, alt.value(0.2), alt.value(0.5))
).transform_filter(
selection
)
x1line = alt.Chart(df.groupby(DATE_COLUMN).last()).mark_rule(color='#e42726', strokeWidth=2).encode(
y=ICUS_COLUMN+':Q'
).transform_filter(
selection
)
text1 = alt.Chart(df.groupby(DATE_COLUMN).last()).mark_text(align='left', x=5, dy=10, color='#e42726', strokeWidth=1).encode(
y=ICUS_COLUMN+':Q',
text=alt.value("Total de leitos de UTI")
).transform_filter(
selection
)
x2line = alt.Chart(df.groupby(DATE_COLUMN).last()).mark_rule(color='darkorange', strokeWidth=2).encode(
y=AVAILABLE_ICUS_COLUMN+':Q'
).transform_filter(
selection
)
text2 = alt.Chart(df.groupby(DATE_COLUMN).last()).mark_text(align='left', x=5, dy=10, color='darkorange', strokeWidth=1).encode(
y=AVAILABLE_ICUS_COLUMN+':Q',
text=alt.value("{:.0f}".format(AVAILABLE_PERCENTAGE_ICU*100)+"% dos leitos de UTI")
).transform_filter(
selection
)
plot = chart.properties(width=CHART_WIDTH, height=CHART_HEIGHT) + x1line + text1 + x2line + text2 + shades
plot
# -
#hide_input
df.groupby(STATE_COLUMN).last()
#hide_input
print("Premissas :")
print("- O Brasil tem "+"{:.0f}".format(TOTAL_ICU)+" leitos de UTI adulto SUS e não SUS (fonte: painel de insumos e leitos)")
print("- {:.0f}".format(AVAILABLE_PERCENTAGE_ICU*100)+"% de UTIs disponíveis para pacientes com covid19 (fonte: artigo oglobo)")
print("- Estima-se que cerca de "+"{:.0f}".format(CASES_NEED_ICU*100)+"% dos pacientes com covid-19 venham a precisar de tratamento intensivo. No pior caso registrado, na Itália, cerca de 12% precisaram de UTI e no melhor caso, China, cerca de 1% dos infectados precisaram de UTI (fonte: artigo the lancet)")
#hide_input
HTML(f'<small class="float-right">Última atualização em {pd.to_datetime(LAST_DATE).strftime("%d/%m/%Y")}</small>')
# + [markdown] papermill={"duration": 0.07254, "end_time": "2020-04-28T12:08:09.077685", "exception": false, "start_time": "2020-04-28T12:08:09.005145", "status": "completed"} tags=[]
# Based on the work of [<NAME>](https://covid19dashboards.com/jupyter/2020/04/27/Covid-19-Overview-Chile.html) and [<NAME>](https://github.com/github/covid19-dashboard/blob/master/_notebooks/2020-03-19-estimating_infected.ipynb), adapted by [<NAME>](http://cleberjamaral.github.io/).
#
# Dados:
# - covid19: [CSSEGISandData](https://github.com/CSSEGISandData/COVID-19)
# - quantidade de leitos: [painel de insumos e leitos](https://covid-insumos.saude.gov.br/paineis/insumos/painel_leitos.php), [elpais](https://brasil.elpais.com/brasil/2020-04-28/sem-transparencia-sobre-fila-para-utis-justica-opera-para-garantir-atendimento-a-pacientes-de-covid-19.html), [elpais2](https://brasil.elpais.com/sociedade/2020-04-15/sem-leitos-de-uti-municipios-pequenos-temem-por-estrutura-limitada-para-transferir-pacientes-graves-com-a-covid-19.html), [bcc](https://www.bbc.com/portuguese/brasil-52137553) e [ministério da saúde](https://www.saude.gov.br/noticias/agencia-saude/46772-brasil-ganha-reforco-de-1-134-leitos-de-uti-no-combate-ao-coronavirus)
# - leitos disponíveis: [artigo oglobo](https://oglobo.globo.com/sociedade/coronavirus/coronavirus-ministerio-estima-que-sus-tem-de-12-13-mil-leitos-de-uti-disponiveis-para-atender-pacientes-1-24328523)
# - pacientes que precisam de UTI: [artigo the lancet](https://linkinghub.elsevier.com/retrieve/pii/S2213260020301612)
# -
#hide
save(plot,"../images/brazil-flatten-the-curve.png")
| _drafts/2020-05-11-brazil-flatten-the-curve.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Given a set of distinct integers, nums, return all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
# Input: nums = [1,2,3]
# Output:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
# +
class Solution(object):
    """Enumerate the power set of a list of distinct integers."""

    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        collected = []
        self.dfs(nums, 0, [], collected)
        return collected

    def dfs(self, nums, index, path, res):
        # Every partial path is itself a valid subset, so record it
        # before extending it with any of the remaining elements.
        res.append(path)
        for offset, value in enumerate(nums[index:], start=index):
            self.dfs(nums, offset + 1, path + [value], res)
# test
nums = [1,2,3]
print(Solution().subsets(nums))
| DSA/backtracking/subsets.ipynb |
# # 📝 Exercise M6.03
#
# This exercise aims at verifying if AdaBoost can over-fit.
# We will make a grid-search and check the scores by varying the
# number of estimators.
#
# We will first load the California housing dataset and split it into a
# training and a testing set.
# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
data, target = fetch_california_housing(return_X_y=True, as_frame=True)
target *= 100 # rescale the target in k$
data_train, data_test, target_train, target_test = train_test_split(
data, target, random_state=0, test_size=0.5)
# -
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# Then, create an `AdaBoostRegressor`. Use the function
# `sklearn.model_selection.validation_curve` to get training and test scores
# by varying the number of estimators. Use the mean absolute error as a metric
# by passing `scoring="neg_mean_absolute_error"`.
# *Hint: vary the number of estimators between 1 and 60.*
# +
# Write your code here.
# -
# Plot both the mean training and test errors. You can also plot the
# standard deviation of the errors.
# *Hint: you can use `plt.errorbar`.*
# +
# Write your code here.
# -
# Plotting the validation curve, we can see that AdaBoost is not immune against
# overfitting. Indeed, there is an optimal number of estimators to be found.
# Adding too many estimators is detrimental for the statistical performance of
# the model.
# Repeat the experiment using a random forest instead of an AdaBoost regressor.
# +
# Write your code here.
| notebooks/ensemble_ex_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: myvenv
# language: python
# name: myvenv
# ---
import gym
env = gym.make('CartPole-v0')
env.reset()
for t in range(1000):
env.step(env.action_space.sample())
env.render()
env.close()
# # Play Game Randomly
# +
# Baseline: play 20 episodes with a uniformly random policy (no learning).
for e in range(20):
    observation = env.reset()
    # CartPole-v0 caps an episode at 200 steps.
    for t in range(200):
        env.render()
        action = env.action_space.sample()  # random action from the action space
        observation,reward,done,other_info = env.step(action)
        if done:
            # t is the number of steps survived - the episode's "score".
            print('Game Episode :{}/{} High Score :{}'.format(e,20,t))
            break
print("ALL episodes over!")
env.close()
# -
# # Q-Learning
import numpy as np
import matplotlib.pyplot as plt
import os
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import random
class Agent:
    """Deep Q-Network (DQN) agent for a discrete-action Gym environment.

    Approximates Q(state, action) with a small fully connected network and
    explores with an epsilon-greedy policy whose epsilon decays after each
    training call, down to a fixed floor.
    """
    def __init__(self,state_size,action_size):
        self.state_size = state_size        # dimensionality of the observation vector
        self.action_size = action_size      # number of discrete actions
        self.memory = deque(maxlen = 2000)  # replay buffer of (s, a, r, s', done)
        self.gamma = 0.95                   # discount factor for future rewards
        self.epsilon = 1.0                  # current exploration rate
        self.epsilon_decay = 0.995          # multiplicative decay per train() call
        self.epsilon_min = 0.01             # exploration floor
        self.learning_rate = 0.001          # optimizer step size
        self.model = self._create_model()

    def _create_model(self):
        """Build the Q-network mapping a state to one Q-value per action."""
        model = Sequential()
        model.add(Dense(24,input_dim=self.state_size,activation='relu'))
        model.add(Dense(24,activation='relu'))
        # Linear output layer: raw Q-values, one per action.
        model.add(Dense(self.action_size,activation='linear'))
        # Use the configured rate; the original hard-coded lr=0.001 and
        # silently ignored self.learning_rate (same value, now consistent).
        model.compile(loss='mse',optimizer = Adam(lr=self.learning_rate))
        return model

    def remember(self,state,action,reward,next_state,done):
        """Store one transition in the replay buffer."""
        self.memory.append((state,action,reward,next_state,done))

    def act(self,state):
        """Choose an action epsilon-greedily for a (1, state_size) state."""
        if np.random.rand() <= self.epsilon:
            # Explore: take a uniformly random action.
            return random.randrange(self.action_size)
        # Exploit: pick the action with the highest predicted Q-value.
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

    def train(self,batch_size=32):
        """Fit the Q-network on a random minibatch of replayed transitions."""
        minibatch = random.sample(self.memory,batch_size)
        for experience in minibatch:
            state,action,reward,next_state,done = experience
            if not done:
                # Bellman target: r + gamma * max_a' Q(s', a').
                target = reward + self.gamma*np.amax(self.model.predict(next_state)[0])
            else:
                target = reward
            # Only the taken action's Q-value is moved toward the target.
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state,target_f,epochs=1,verbose=0)
        # Decay exploration once per training call, down to epsilon_min.
        if self.epsilon>self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self,name):
        """Load network weights from an HDF5 file."""
        self.model.load_weights(name)

    def save(self,name):
        """Save network weights to an HDF5 file."""
        self.model.save_weights(name)
n_episodes = 1000
output_dir = 'Datasets/carpole_model/'
agent = Agent(state_size=4,action_size=2)
done = False
state_size = 4
action_size =2
batch_size = 32
# +
agent = Agent(state_size, action_size) # initialise agent
done = False
# Main DQN training loop: act epsilon-greedily, store each transition in
# the replay buffer, and fit the Q-network on random minibatches.
for e in range(n_episodes):
    state = env.reset()
    # The network expects a batch dimension: (1, state_size).
    state = np.reshape(state,[1,state_size])
    for time in range(5000):
        env.render()
        action = agent.act(state) #action is 0 or 1
        next_state,reward,done,other_info = env.step(action)
        # Penalise terminal transitions so the agent learns to avoid failure.
        reward = reward if not done else -10
        next_state = np.reshape(next_state,[1,state_size])
        agent.remember(state,action,reward,next_state,done)
        state = next_state
        if done:
            print("Game Episode :{}/{}, High Score:{},Exploration Rate:{:.2}".format(e,n_episodes,time,agent.epsilon))
            break
        # Replay training starts once the buffer holds a full minibatch.
        if len(agent.memory)>batch_size:
            agent.train(batch_size)
# if e%50==0:
#     agent.save(output_dir+"weights_"+'{:04d}'.format(e)+".hdf5")
env.close()
# -
env.close()
| DeepQLearning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="wIBjnXDZ84ux" outputId="b2434362-2024-41ac-fb58-d27f6aa2fdc7"
# !pip uninstall --yes nvidia-ml-py3
# !pip uninstall --yes pandas-profiling
# !pip uninstall --yes scipy
# !pip install scipy==1.7.2
# !pip install pandas-profiling
# !pip install auto-sklearn==0.14.0
# + id="K-YYyxhK8md9"
import numpy as np
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="UifIey7S72oZ" outputId="618568e0-fbf0-4e11-ed50-f250d0652a78"
# https://drive.google.com/file/d/1GYV0XAVBgO95Lf-lTuMBf6YD2KaIjj8E/view?usp=sharing
# https://drive.google.com/file/d/1eJRZdF1yf7a-CjAJUwTno6IaYzlKQChM/view?usp=sharing
# !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GYV0XAVBgO95Lf-lTuMBf6YD2KaIjj8E' -O test.csv
# !wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1eJRZdF1yf7a-CjAJUwTno6IaYzlKQChM' -O train.csv
# + id="ycOydO-I73v8"
train_numData = pd.read_csv('train.csv')
# + id="ttKqdENn7jpY"
def convertCategoricalsToNumerics(df):
    """Encode every categorical (object-dtype) column of ``df`` as integers.

    Each category is mapped to 1, 2, 3, ... in descending order of its
    frequency (the most frequent category becomes 1). The 'class' column is
    excluded so the target stays categorical. The frame is modified in
    place and also returned for convenience.
    """
    categorical_fields = df.select_dtypes(include=['object'])
    # Keep the target column out of the encoding.
    categorical_fields = categorical_fields.drop('class', axis=1)

    for col in categorical_fields.columns:
        # value_counts() is sorted by frequency (descending), so the most
        # frequent category receives the smallest code.
        ordered_cats = list(categorical_fields[col].value_counts().index)
        replacements = {cat: code for code, cat in enumerate(ordered_cats, start=1)}
        # Assign the result back instead of calling replace(inplace=True) on
        # the temporary Series returned by df[col]; in-place modification of
        # that temporary may or may not propagate depending on the pandas
        # version (chained-assignment hazard). Also drops the unused `cd`.
        df[col] = df[col].replace(replacements)
    return df
# The CSV was loaded into train_numData above; the original passed the
# undefined name `train_data` here, which raises NameError at runtime.
train_numData = convertCategoricalsToNumerics(train_numData)
train_numData
# + id="4UUzJ4Dn7aiT"
X = train_numData.loc[:, train_numData.columns != 'class']
Y = train_numData.loc[:, train_numData.columns == 'class']
# + id="tZPy2zbD7XDU"
import autosklearn.classification
import autosklearn.metrics
from sklearn.model_selection import train_test_split
test_size = 0.2
shuffle = True
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=test_size, shuffle=shuffle)
# + id="BrSTD2lyfBfU"
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import RandomizedSearchCV
import ConfigSpace.hyperparameters as CSH
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
numeric_transformer = make_pipeline(StandardScaler())
Preprocessor = ColumnTransformer(
transformers=[
('numeric_transformer', numeric_transformer, train_X.select_dtypes(exclude=['object']).columns),
])
hgb_pipe = make_pipeline(Preprocessor, HistGradientBoostingClassifier())
search_space = {
'histgradientboostingclassifier__learning_rate': [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3],
'histgradientboostingclassifier__max_leaf_nodes': [None],
'histgradientboostingclassifier__min_samples_leaf': [10, 20, 50, 100],
'histgradientboostingclassifier__scoring': ['accuracy'],
'histgradientboostingclassifier__loss': ['categorical_crossentropy'],
'histgradientboostingclassifier__random_state': [53]
}
# + colab={"base_uri": "https://localhost:8080/"} id="DLgJ1P6_7TEU" outputId="d8fad7f0-39c8-4ef6-a420-3bcfeee098b9"
#instantiate the Random CV Search
hgb_grid = RandomizedSearchCV(hgb_pipe, search_space,
n_jobs=-1,
cv=5,
scoring='accuracy',
verbose=10,
refit=True)
hgb_grid.fit(train_X, train_Y)
| notebooks/Ensembles_sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing binary decision trees
import pandas as pd
import numpy as np
# ## Load the lending club dataset
loans = pd.read_csv('lending-club-data.csv')
loans['safe_loans'] = loans['bad_loans'].apply(lambda x: +1 if x==0 else -1)
loans.drop('bad_loans', axis=1, inplace=True)
features = ['grade', # grade of the loan
'term', # the term of the loan
'home_ownership', # home_ownership status: own, mortgage or rent
'emp_length', # number of years of employment
]
target = 'safe_loans'
loans = loans[features + [target]]
loans.head()
# +
safe_loans_raw = loans[loans[target] == 1]
risky_loans_raw = loans[loans[target] == -1]
# Since there are less risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
safe_loans = safe_loans_raw.sample(frac = percentage, random_state = 1)
risky_loans = risky_loans_raw
loans_data = risky_loans.append(safe_loans)
print("Percentage of safe loans :", len(safe_loans) / float(len(loans_data)))
print("Percentage of risky loans :", len(risky_loans) / float(len(loans_data)))
print("Total number of loans in our new dataset :", len(loans_data))
# -
loans_data = risky_loans.append(safe_loans)
def onehot_transform(X, names=None, prefix_sep='.'):
    """One-hot encode the categorical columns of X.

    If `names` is None, return the encoded frame together with its column
    labels (so a later call can re-use them); otherwise reindex the encoded
    frame to exactly `names`, filling columns absent from X with 0.
    """
    encoded = pd.get_dummies(X, prefix_sep=prefix_sep)
    if names is not None:
        return pd.DataFrame(encoded, columns=names).fillna(0)
    return encoded, encoded.columns.values
loans_data,names = onehot_transform(loans_data,)
loans_data.head()
features = loans_data.columns.tolist()
features.remove('safe_loans') # Remove the response variable
features
print("Number of features (after binarizing categorical variables) = %s" % len(features))
print("Total number of grade.A loans : %s" % loans_data['grade.A'].sum())
print("Expexted answer : 6422")
# Load the course-provided row indices for the train/test split and slice
# the raw loans frame with them.
train_idx = pd.read_json('module-5-assignment-2-train-idx.json',typ='series').values
test_idx = pd.read_json('module-5-assignment-2-test-idx.json',typ='series').values
train_data, test_data = loans.iloc[train_idx],loans.iloc[test_idx]
# One-hot encode both splits against the shared column set `names` so they
# end up with identical feature columns (missing categories filled with 0).
# NOTE(review): this slices `loans`, not the undersampled/encoded
# `loans_data` built above — confirm that is intentional.
train_data = onehot_transform(train_data,names,)
test_data = onehot_transform(test_data,names,)
# ## Decision tree implementation
def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes made by majority-class prediction at a node.

    The majority classifier mislabels every example of the minority class,
    so the mistake count is the smaller of the two class counts.
    Returns 0 for an empty node.
    """
    if len(labels_in_node) == 0:
        return 0
    positives = sum(labels_in_node == +1)
    negatives = sum(labels_in_node == -1)
    return positives if positives < negatives else negatives
# +
# Test case 1
example_labels = np.array([-1, -1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
print('Test passed!')
else:
print('Test 1 failed... try again!')
# Test case 2
example_labels = np.array([-1, -1, 1, 1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
print('Test passed!')
else:
print('Test 2 failed... try again!')
# Test case 3
example_labels = np.array([-1, -1, -1, -1, -1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
print('Test passed!')
else:
print('Test 3 failed... try again!')
# -
# ## Function to pick best feature to split on
def best_splitting_feature(data, features, target):
    """Return the binary feature whose 0/1 split yields the fewest mistakes.

    Parameters
    ----------
    data : DataFrame with one-hot (0/1) feature columns and a +1/-1 target.
    features : iterable of candidate column names.
    target : name of the label column.

    Each candidate split sends feature==0 rows left and feature==1 rows
    right; its error is the total majority-class mistakes on both sides
    divided by the row count.  Ties keep the earliest feature (strict <).
    """
    best_feature = None
    # Any real split error (always <= 1) beats this sentinel; the original
    # used the magic constant 10, which worked only by accident.
    best_error = float('inf')
    num_data_points = float(len(data))
    for feature in features:
        left_split = data[data[feature] == 0]
        right_split = data[data[feature] == 1]
        left_mistakes = intermediate_node_num_mistakes(left_split[target])
        right_mistakes = intermediate_node_num_mistakes(right_split[target])
        error = (left_mistakes + right_mistakes) / num_data_points
        if error < best_error:
            best_error = error
            best_feature = feature
    return best_feature
if best_splitting_feature(train_data, features, 'safe_loans') == 'term. 36 months':
print('Test passed!')
else:
print('Test failed... try again!')
# ## Building the tree
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of target_values.

    The node carries the same keys as internal nodes so tree-walking code
    can treat both uniformly; ties go to the -1 (risky) class.
    """
    leaf = {
        # Key renamed from the original misspelling 'spliting_feature' so it
        # matches the 'splitting_feature' key used by internal nodes.
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True
    }
    num_ones = len(target_values[target_values == +1])
    num_minus_ones = len(target_values[target_values == -1])
    # Majority vote: predict +1 only on a strict majority of +1 labels.
    if num_ones > num_minus_ones:
        leaf['prediction'] = 1
    else:
        leaf['prediction'] = -1
    return leaf
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10):
    """Recursively grow a binary decision tree over one-hot 0/1 features.

    Returns a nested dict with keys 'is_leaf', 'prediction',
    'splitting_feature', 'left' and 'right'.  Recursion stops when the node
    is pure, no features remain, or `max_depth` is reached.
    """
    remaining_features = features[:]  # copy so the caller's list is untouched
    target_values = data[target]
    print("--------------------------------------------------------------------")
    print("Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)))
    # Stopping condition 1: the node is already pure (zero mistakes).
    if intermediate_node_num_mistakes(target_values) == 0:
        print("Stopping condition 1 reached.")
        return create_leaf(target_values)
    # Stopping condition 2: no features left to split on.
    # BUG FIX: the original tested `remaining_features == None`, which is
    # never true for a list, so this condition could never fire.
    if not remaining_features:
        print("Stopping condition 2 reached.")
        return create_leaf(target_values)
    # Additional stopping condition: depth limit.
    if current_depth >= max_depth:
        print("Reached maximum depth. Stopping for now.")
        return create_leaf(target_values)
    # Pick the feature whose split minimizes classification error.
    splitting_feature = best_splitting_feature(data, remaining_features, target)
    # Split on the best feature that we found.
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]
    remaining_features.remove(splitting_feature)
    print("Split on feature %s. (%s, %s)" % (\
        splitting_feature, len(left_split), len(right_split)))
    # A "perfect" split sent every row to one side: stop with a leaf.
    if len(left_split) == len(data):
        print("Creating leaf node.")
        return create_leaf(left_split[target])
    if len(right_split) == len(data):
        print("Creating leaf node.")
        return create_leaf(right_split[target])
    # Recurse on both subtrees, one level deeper.
    left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth)
    right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth)
    return {'is_leaf' : False,
            'prediction' : None,
            'splitting_feature': splitting_feature,
            'left' : left_tree,
            'right' : right_tree}
def count_nodes(tree):
    """Total number of nodes (internal + leaf) in a tree dictionary."""
    if not tree['is_leaf']:
        # Internal node: count itself plus both subtrees.
        return 1 + count_nodes(tree['left']) + count_nodes(tree['right'])
    return 1
small_data_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 3)
if count_nodes(small_data_decision_tree) == 13:
print('Test passed!')
else:
print('Test failed... try again!')
print('Number of nodes found :', count_nodes(small_data_decision_tree))
print('Number of nodes that should be there : 13')
# ## Build the tree
# Make sure to cap the depth at 6 by using max_depth = 6
my_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6)
# ## Making predictions with a decision tree
def classify(tree, x, annotate = False):
    """Predict the label for one example by walking the tree from the root.

    Descends left when the example's value for the node's splitting feature
    is 0, right otherwise; returns the prediction stored at the reached leaf.
    Set `annotate=True` to print each decision along the way.
    """
    if tree['is_leaf']:
        # Reached a leaf: its stored majority class is the prediction.
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    # Internal node: look up this example's value for the split feature.
    split_feature_value = x[tree['splitting_feature']]
    if annotate:
        print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
    branch = 'left' if split_feature_value == 0 else 'right'
    return classify(tree[branch], x, annotate)
test_data.iloc[0]
print('Predicted class: %s ' % classify(my_decision_tree, test_data.iloc[0]))
classify(my_decision_tree, test_data.iloc[0], annotate=True)
def evaluate_classification_error(tree, data):
    """Fraction of rows in `data` that the tree misclassifies.

    Classifies each row with `classify(tree, row)` and compares the
    predictions against the 'safe_loans' column.  Returns 0.0 for an empty
    frame instead of raising ZeroDivisionError.
    """
    if len(data) == 0:
        return 0.0  # nothing to evaluate; the original divided by zero here
    # Apply classify(tree, x) to each row in the data.
    prediction = [classify(tree, data.iloc[i]) for i in range(0, data.shape[0])]
    # Elementwise comparison against the label column, then the error rate.
    return float(sum(prediction != data['safe_loans'])) / len(data)
evaluate_classification_error(my_decision_tree, test_data)
| Machine_Learning_WashingTon/Classification/Week3 Decision Tress/Implementing binary decision trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="YhDFkT5Nb2-t"
# ____
# __Universidad Tecnológica Nacional, Buenos Aires__\
# __Ingeniería Industrial__\
# __Autor: <NAME>__ \
# __Cátedra de Investigación Operativa - Curso I4051 - <NAME> Noche__
# ____
# + [markdown] id="YvgxdE3ccChE"
# # **Zafari por distribuciones de probabilidad y primeras simulaciones**
# + [markdown] id="x2dBwzfNa0Af"
# ### **Import Libraries**
# + id="L7lh-NsbS8Qv"
# Importamos las librerías esenciales
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import uniform
import seaborn as sns
import scipy.stats as ss
# + id="oOMsaxHrdEug"
# + [markdown] id="5n_VwZnjcQ25"
# ### **Histograma de frecuencias**
# Generamos tres vectores cada uno con 500 posiciones. Cada posicion corresponde al sampleo/muestreo de una variable aleatoria que sigue una distribucion de probabilidad f(x)
# + id="X-8xQILAcP6d"
# distribucion 1
d1 = np.random.laplace(loc=15, scale=3, size=500)
# distribucion 2
d2 = np.random.laplace(loc=25, scale=5, size=500)
# distribucion 3
d3 = d1 + d2
# + colab={"base_uri": "https://localhost:8080/"} id="dTWEjuiIe3Av" outputId="4e2c3b55-bc3c-4a2c-a24b-c664ca7f39a5"
type(d1)
# + colab={"base_uri": "https://localhost:8080/"} id="EEJR5qs_fTU-" outputId="fc789056-9bc1-473f-b070-1657712d5dd7"
np.shape(d1)
# + colab={"base_uri": "https://localhost:8080/"} id="7FHVLOsTfLTT" outputId="bc2bfb48-1aed-4bfe-8206-65c2ff61349d"
# visualizamos los primeros 10 elementos (del 0 al 9) del vector d1
d1[:10]
# + colab={"base_uri": "https://localhost:8080/"} id="RIhY0xYFfADG" outputId="692fe31c-81c2-4032-fb79-19d97b44df7a"
# visualizamos los primeros 10 elementos (del 0 al 9) del vector d2
d2[:10]
# + colab={"base_uri": "https://localhost:8080/"} id="TH7U3CaSfAHY" outputId="50611271-a8bb-4e39-dcb4-9c0d532c321e"
# visualizamos los primeros 10 elementos (del 0 al 9) del vector d3
d3[:10]
# + [markdown] id="Zx8jb5tRf5Bh"
# Visualizar los vectores muestreados aleatoriamente de distribuciones de probabilidad (que a priori no conocemos) puede ser engorroso. Para eso usaremos el histograma de frecuencias y entender que
#
# $$
# n_k = \sum \delta (x_{(kj)})
# $$
#
# siendo $k$ la cantidad de cajas (bins) y $\delta$ la funcion identidad con cada muestra.
#
# $$
# \delta (x_{(kj)}) = 1
# $$
#
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="04lmmek5o4-4" outputId="279be9d8-0c6f-4415-ca16-b6aff07c088e"
# realizamos un histograma con matplotlib
plt.figure(figsize= (12,5))
plt.hist(d1)
plt.title('Histograma dist. d1')
plt.ylabel('Frecuencia')
plt.xlabel('Variable Aleatoria X')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="pSNFL8C5pvlW" outputId="686a3e5d-fb85-452b-9676-81ff7e9d07c9"
# realizamos un histograma con seaborn
plt.figure(figsize= (12,5))
sns.histplot(d1)
plt.title('Histograma dist. d1')
plt.xlabel('Variable aleatoria X')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="HJfKyIxep0mB" outputId="e0ac970b-3f48-4765-bbd9-34d6ff7b6bff"
# realizamos un displot con seaborn
plt.figure(figsize= (12,5))
sns.distplot(d1)
plt.title('Histograma dist. d1')
plt.xlabel('Variable aleatoria X')
plt.show()
# + [markdown] id="YwZdmO0SyGon"
# Visualizamos la siguiente combinacion de distribuciones:
# - solo d1
# - solo d2
# - d1 y d2 en el mismo grafico
# - d1 + d2
# + colab={"base_uri": "https://localhost:8080/", "height": 366} id="GVspIWwBcQD5" outputId="7cd6150f-6ce6-44a6-d48a-2dbba53eb09d"
# configuramos la figura
sns.set_context("poster")
fig, ax = plt.subplots(1,4, sharex=True, sharey = True, figsize= (20,5))
# definimos en que posicion va cada histograma
sns.histplot(d1, ax = ax[0])
sns.histplot(d2, ax = ax[1])
sns.histplot(d1, ax = ax[2], color = 'r')
sns.histplot(d2, ax = ax[2])
sns.histplot(d1+d2, ax = ax[3])
# ponemos los titulos
ax[0].set_ylabel('Frecuencia')
ax[0].set_title('Hist. d1')
ax[1].set_title('Hist. d2')
ax[2].set_title('Hist. d1 & d2')
ax[3].set_title('Hist. d1 + d2')
plt.show()
# + [markdown] id="QSXEASsvyZSX"
# Tambien podemos visualizar en 2D una densidad que este caracterizada por dos variables aleatorias en simultaneo. En este caso obtenemos una densidad nueva
#
# $$D(d1, d2)$$
# + colab={"base_uri": "https://localhost:8080/", "height": 354} id="I8UlRzqDcQHH" outputId="a0207e8d-6584-4e46-b9bb-bcbc00ec1739"
# visualizamos con distplot la densidad en dos dimensiones
sns.displot(x=d1, y=d2, kind="kde", rug=True)
plt.title('Histograma en dos dimensiones')
plt.show()
# + id="Rdn4-IkscQKK"
# + id="5NJpFiuQXWuZ"
# cantidad de veces a tirar el dado
tiradas = 1000
# umbral para ganar
umbral = 7
# definir funcion de dos dados, que luego se suman
def dado(n):
total1 = 0
total2 = 0
for i in range(n):
total1 += random.randint(1, 6)
total2 += random.randint(1, 6)
total = total1 + total2
return total
# crear vector resultados para guardar la suma del dado 1 y dado 2 en cada tirada
resultados = np.zeros(tiradas)
for g in range(0,tiradas):
resultados[g] = np.round(dado(1))
# ir acumulando el dinero ganado en cada tirada
total = np.zeros(tiradas)
tot = 0
for t in range(0,np.shape(resultados)[0]):
if resultados[t]<umbral:
total[t] = tot - 1
tot = tot - 1
if resultados[t]>umbral:
total[t] = tot + 1
tot = tot + 1
# + colab={"base_uri": "https://localhost:8080/", "height": 523} id="loR_x0xVXeb1" outputId="5dad0ee9-13b8-4634-c4a2-457ec960bc3d"
fig, ax = plt.subplots(1,3, sharex=False, figsize= (28,8), sharey = False)
ax[0].hist(resultados,color = 'b')
ax[1].plot(range(0, np.shape(resultados)[0]),resultados, color = 'b', alpha = 0.8)
ax[2].plot(range(0,np.shape(resultados)[0]),total, color = 'b', alpha = 0.8)
ax[0].set_title('Histograma luego de ' + str(tiradas) +' iteraciones', size=15)
ax[1].set_title('Evolución de la V.A. luego de ' + str(tiradas) +' iteraciones', size=15)
ax[2].set_title('Evolución de la ganancia luego de ' + str(tiradas) +' iteraciones, se gana 1 si resultado > 7', size=15)
plt.show()
# + [markdown] id="fjh_c8sj48qI"
# ## **Distribucion uniforme**
#
# Funcion de densidad
# $$
# f(x) = \left\{\begin{matrix}
# \frac{1}{b-a} & \text{if} & a \leq x \leq b \\
# 0 & \text{else} &
# \end{matrix}\right.
# $$
#
# Funcion acumulada
# $$
# F(x) = \left\{\begin{matrix}
# 0 & \text{if} & x < a \\
# \frac{x-a}{b-a} & \text{if} & a \leq x \leq b \\
# 1 & \text{if} & x > b
# \end{matrix}\right.
# $$
# + id="ptn5GWy1Xeeh"
# random numbers from uniform distribution
n = 50000
start = 10
width = 20
n_bins = 50
data_uniform_0 = uniform.rvs(size=10, loc = start, scale=width)
data_uniform_1 = uniform.rvs(size=100, loc = start, scale=width)
data_uniform_2 = uniform.rvs(size=1000, loc = start, scale=width)
data_uniform_3 = uniform.rvs(size=10000, loc = start, scale=width)
# + colab={"base_uri": "https://localhost:8080/", "height": 772} id="Hn3kMrI7Xehm" outputId="c217263d-c408-4a5b-d96d-52edec6b7c30"
fig, ax = plt.subplots(2,4, sharex=True, figsize= (26,12), sharey = False)
sns.histplot(data_uniform_0, bins=100, kde=True, color='green',ax= ax[0,0] )
sns.histplot(data_uniform_1, bins=100, kde=True, color='green',ax= ax[0,1] )
sns.histplot(data_uniform_2, bins=100, kde=True, color='green',ax= ax[0,2] )
sns.histplot(data_uniform_3, bins=100, kde=True, color='green',ax= ax[0,3] )
ax[1,0].hist(data_uniform_0, n_bins, density=True, histtype='step', cumulative=True, label='Empirical')
ax[1,1].hist(data_uniform_1, n_bins, density=True, histtype='step', cumulative=True, label='Empirical')
ax[1,2].hist(data_uniform_2, n_bins, density=True, histtype='step', cumulative=True, label='Empirical')
ax[1,3].hist(data_uniform_3, n_bins, density=True, histtype='step', cumulative=True, label='Empirical')
ax[0,0].set_title('10 muestras')
ax[0,1].set_title('100 muestras')
ax[0,2].set_title('1000 muestras')
ax[0,3].set_title('10000 muestras')
ax[1,0].set_xlabel('Espacio muestral')
ax[1,1].set_xlabel('Espacio muestral')
ax[1,2].set_xlabel('Espacio muestral')
ax[1,3].set_xlabel('Espacio muestral')
plt.show()
# + [markdown] id="jApYLXYY8M5r"
# ## **Dist Gaussiana**
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="o61aXBP_TXCT" outputId="518aafba-<PASSWORD>-<PASSWORD>-ff64-<PASSWORD>"
####### Funcion de probabilidad de densidad gaussiana #######
def gaussiana(mu, sigma, valores=None):
    """Gaussian probability density evaluated pointwise.

    Parameters
    ----------
    mu : float — mean of the distribution.
    sigma : float — standard deviation (> 0).
    valores : array-like, optional — points at which to evaluate the pdf.
        Defaults to the module-level grid `x` for backward compatibility:
        the original implementation read `x` as a global, which made the
        function silently depend on whatever `x` was last assigned.
    """
    if valores is None:
        valores = x  # original behavior: fall back to the global grid
    y = (1 / (np.sqrt(2 * np.pi * np.power(sigma, 2)))) * \
        (np.power(np.e, -(np.power((valores - mu), 2) / (2 * np.power(sigma, 2)))))
    return y
plt.style.use('seaborn') # pretty matplotlib plots
plt.rcParams['figure.figsize'] = (12, 8)
# ploteamos la funcion de densidad
x = np.linspace(-5, 15, 5000)
mu1 = 0
mu2 = 2
mu3 = 4
sigma1 = 1
sigma2 = 2
sigma3 = 3
y1 = gaussiana(mu1, sigma1)
y2 = gaussiana(mu2, sigma2)
y3 = gaussiana(mu3, sigma3)
plt.plot(x, y1);
plt.plot(x, y2);
plt.plot(x, y3);
plt.title('Jugando con parametros de gaussiana')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="kWTDhLUP_TdP" outputId="5799b944-f0e7-4c52-bd88-50dc8f3af286"
# ploteamos densidad y la acumulada
x = np.linspace(-5, 5, 5000)
mu = 0
sigma = 1
y_pdf = ss.norm.pdf(x, mu, sigma) # normal pdf
y_cdf = ss.norm.cdf(x, mu, sigma) # normal cdf
plt.plot(x, y_pdf, label='pdf')
plt.plot(x, y_cdf, label='cdf')
plt.legend();
# + id="rOIPj9FlASxT"
data_normal0 = np.random.normal(mu1, sigma1, size = 10)
data_normal1 = np.random.normal(mu1, sigma1, size = 100)
data_normal2 = np.random.normal(mu1, sigma1, size = 1000)
data_normal3 = np.random.normal(mu1, sigma1, size = 10000)
# + id="54F8BUASAS9c" colab={"base_uri": "https://localhost:8080/", "height": 723} outputId="9ea1614c-8661-45c8-a777-5dd6d4f3f247"
sns.set_context("poster")
fig, ax = plt.subplots(2,4, sharex=True, figsize= (26,12), sharey = False)
sns.histplot(data_normal0, bins=100, kde=True, color='green',ax= ax[0,0] )
sns.histplot(data_normal1, bins=100, kde=True, color='green',ax= ax[0,1] )
sns.histplot(data_normal2, bins=100, kde=True, color='green',ax= ax[0,2] )
sns.histplot(data_normal3, bins=100, kde=True, color='green',ax= ax[0,3] )
ax[1,0].hist(data_normal0, n_bins, density=True, histtype='step', cumulative=True, label='Empirical', alpha = 1, color = 'b')
ax[1,1].hist(data_normal1, n_bins, density=True, histtype='step', cumulative=True, label='Empirical', alpha = 1, color = 'b')
ax[1,2].hist(data_normal2, n_bins, density=True, histtype='step', cumulative=True, label='Empirical', alpha = 1, color = 'b')
ax[1,3].hist(data_normal3, n_bins, density=True, histtype='step', cumulative=True, label='Empirical', alpha = 1, color = 'b')
plt.grid(False)
plt.show()
# + id="6rocwbNxTxgo"
| 03_simulacion/casos_codigo/1_ioperativ_clase01_zafari_distribuciones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimally Creating and Assigning Work Orders Based on Routes
#
# Suppose our organization needs to perform restaurant/brewery inspections in the Greater Portland, Maine area. Let's assume that there are around 25 breweries that need to be inspected and that there are 5 workers that are available to do the inspections. As the supervisor of these workers I'm going to develop a Python Script (well, Jupyter Notebook in this case) that will optimally create distinct routes for my workers, create assignments at the brewery locations, and then assign the assignment to the correct worker.
# ### Import ArcGIS API for Python
# Let's import some libraries and connect to our organization
# +
import pandas as pd
import arcgis
from arcgis.gis import GIS
from arcgis.apps import workforce
pd.options.display.max_columns = None
gis = GIS("https://arcgis.com", "workforce_scripts")
project = workforce.Project(gis.content.search("type:'Workforce Project' Maine Brewery Inspections")[0])
project.assignments_item.layers[0].delete_features(where="1=1")
# -
# ### View the breweries that need to be inspected
breweries = gis.content.search("type:'Feature Service' owner:workforce_scripts Maine Breweries")[0].layers[0]
breweries.filter = "location in ('Portland','South Portland','Gorham','Biddeford','Scarborough', 'Topsham','Freeport')"
webmap = gis.map("Portland, ME", zoomlevel=10)
webmap.add_layer(breweries)
webmap
breweries_df = breweries.query(where=breweries.filter, out_fields="objectid,name,location,url", as_df=True)
breweries_df
# ### Create a route for each worker
#
# Now that we know what we're working with, let's use the Plan Routes tool to generate the most optimal routes for each of the workers. First we need to define where the workers will start their routes. Each worker will begin from the main office located at 100 Commercial Street, Portland Maine. We'll use the geocoding module to get an exact location for this address.
from arcgis.geocoding import geocode
start_location = geocode("100 Commercial Street, Portland, ME", out_sr={"wkid": 102100})[0]["location"]
start_location["spatialReference"] = {"wkid": 102100}
start_location
# Next we need to create a feature at this location
feature = arcgis.features.Feature(
attributes={
"ObjectID": 1,
"Name": "Office"
},
geometry=start_location
)
# Next, we'll create a Feature Set from the feature. Then we'll create a Feature Collection from the Feature Set. Finally, we'll format the layer so that it conforms to the expected input format defined [here](https://doc.arcgis.com/en/arcgis-online/analyze/plan-routes.htm).
feature_set = arcgis.features.FeatureSet([feature])
feature_collection = arcgis.features.FeatureCollection.from_featureset(feature_set)
start_layer = {"layerDefinition": feature_collection.properties["layers"][0]["layerDefinition"], "featureSet": feature_set.value}
# Then we'll run the Plan Routes tool using the breweries layer as list of stops to route to. We'll set the number of routes equal to the number of workers. We'll also set the start time and start location as well as few other parameters.
from datetime import datetime
workers = project.workers.search()
results = arcgis.features.analysis.plan_routes(breweries, # Feature Layer of Stops
len(workers), # Number of routes to generate
5, # Maximum stops per route
datetime.now(), # Start time of route
start_layer, # The dictionary we created to represent the start location
stop_service_time=60, # How much time in minutes to spend at each stop
max_route_time=480, # The maximum time for the worker to complete the route
)
results
# Let's see what the routes look like
webmap = gis.map("Portland, ME", zoomlevel=10)
webmap.add_layer(results["routes_layer"])
webmap.add_layer(results["assigned_stops_layer"])
webmap
# Let's look at what data is in route
routes = results['routes_layer'].query().sdf
routes
# You can see that each route has a name, total time, and total distance among other things. Let's see what information is provided in an assigned stop.
stops = results['assigned_stops_layer'].query().sdf
stops
# You can see each row in the above table contains the attributes of each Brewery along with information about which route it is on. You'll also notice that there are several additional stops not related to a brewery. These are the starting and ending locations of each route.
# ### Create Assignment and Assign To Worker
#
# For each route that was generated we will select a random worker to complete that route. Then we'll find the breweries that were assigned to that route and create an Inspection Assignment for each one. Notice that when the assignment is created we are also assigning it to a worker.
#
# An important thing to note is that we are setting the due date of the assignment to the departure date of the stop. This means that a mobile worker will be able to sort their "To Do" list by due date and see the assignments in the correct order (according to the route).
# +
import random
assignments_to_add = []
for _, row in routes.iterrows():
worker = random.choice(workers)
workers.remove(worker)
route_stops = stops.loc[(stops['RouteName'] == row["RouteName"]) & stops['globalid'].notnull()]
for _, stop in route_stops.iterrows():
assignments_to_add.append(workforce.Assignment(
project,
assignment_type="Inspection",
location=stop["name"],
status="assigned",
worker=worker,
assigned_date=datetime.now(),
due_date=stop["DepartTime"],
geometry=stop["SHAPE"]
))
assignments = project.assignments.batch_add(assignments_to_add)
# -
# Let's check to verify the assignments were created and are assigned
webmap.add_layer(project.assignments_layer)
webmap
| notebooks/dev_summit_2020/Step 4 - Optimally Creating and Assigning Work Orders Based on Routes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NLP (NATURAL LANGUAGE PROCESSING)
# NLP is a branch of data science that consists of systematic processes for analyzing, understanding, and deriving information from the text data in a smart and efficient manner
# ## It has three steps -
#
# #### 1)Noise Removal
# Any piece of text which is not relevant to the context of the data and the end-output can be
# specified as the noise. Eg. language stopwords (commonly used words of a language – is, am, the, of, in etc),
# URLs or links, social media entities (mentions, hashtags), punctuations and industry specific words. Solution use
# stopwords and regular expressions.
#
# #### 2)Lexicon Normalization
# “play”, “player”, “played”, “plays” and “playing” are the different variations of the
# word – “play”. Though they differ in form, contextually they are all similar. This step converts all the disparities
# of a word into their normalized form (also known as lemma).
# Normalization is a pivotal step for feature engineering with text as it converts the high dimensional features
# (N different features) to the low dimensional space (1 feature), which is an ideal ask for any ML model.
# Eg- Stemming and Lemmatization
#
# #### 3)Object Standardization
# Text data often contains words or phrases which are not present in any standard lexical
# dictionaries. These pieces are not recognized by search engines and models.acronyms, hashtags with attached words, and
# colloquial slangs. With the help of regular expressions and manually prepared data dictionaries,this type of noise can be fixed.
import nltk
# Cleaning the texts
import re
#NTLK libraries
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others.That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. <NAME> of the Dept. of
space, Professor <NAME>, who succeeded him and Dr. <NAME>, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# ## Tokenization
sentences = nltk.sent_tokenize(paragraph)
sentences
# Tokenizing words
words = nltk.word_tokenize(paragraph)
words
# ##### Part of speech tagging
# Every word in a sentence is also associated with a part of speech (pos) tag (nouns, verbs, adjectives, adverbs etc).
# The pos tags defines the usage and function of a word in the sentence.
from nltk import word_tokenize, pos_tag
text = "I am learning Natural Language Processing from an online coure"
tokens = word_tokenize(text)
pos_tag(tokens)
# #### N-Grams as Features
# A combination of N words together are called N-Grams. N grams (N > 1) are generally more informative as compared to
# words (Unigrams) as features. Also, bigrams (N = 2) are considered as the most important features of all the others.
#
def generate_ngrams(text, n):
    """Return every contiguous run of n words in `text`.

    The text is split on whitespace and each n-gram is returned as a slice
    of the word list (a list of words, not a joined string).  Returns an
    empty list when the text has fewer than n words.
    """
    words = text.split()
    return [words[start:start + n] for start in range(len(words) - n + 1)]
generate_ngrams('this is a sample text', 3)
# ## Stemming
# +
sentencesS = nltk.sent_tokenize(paragraph)
stemmer = PorterStemmer()
# Stemming
for i in range(len(sentencesS)):
words = nltk.word_tokenize(sentencesS[i])
words = [stemmer.stem(word) for word in words if word not in set(stopwords.words('english'))]
sentencesS[i] = ' '.join(words)
sentencesS
# -
# ## Lemmatization
# +
sentencesL = nltk.sent_tokenize(paragraph)
lemmatizer = WordNetLemmatizer()
# Lemmatization
for i in range(len(sentencesL)):
words = nltk.word_tokenize(sentencesL[i])
words = [lemmatizer.lemmatize(word) for word in words if word not in set(stopwords.words('english'))]
sentencesL[i] = ' '.join(words)
sentencesL
# -
# ## Bag of Words
# A bag-of-words model, or BoW for short, is a way of extracting features from text for use in modeling, such as with
# machine learning algorithms.The approach is very simple and flexible, and can be used in a myriad of ways for extracting
# features from documents.
#
# A bag-of-words is a representation of text that describes the occurrence of words within a document. It involves two things:
#
# A vocabulary of known words.
# A measure of the presence of known words.
#
# It is called a “bag” of words, because any information about the order or structure of words in the document is discarded.
# The model is only concerned with whether known words occur in the document, not where in the document.
#
#
ps = PorterStemmer()
wordnet=WordNetLemmatizer()
sentences = nltk.sent_tokenize(paragraph)
corpus = []
for i in range(len(sentences)):
review = re.sub('[^a-zA-Z]', ' ', sentences[i])
review = review.lower()
review = review.split()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
corpus
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
X
X[0].size
# #### Drawbacks of using a Bag-of-Words (BoW) Model
#
# If the new sentences contain new words, then our vocabulary size would increase and thereby, the length of the vectors
# would increase too.
#
# Additionally, the vectors would also contain many 0s, thereby resulting in a sparse matrix
# (which is what we would like to avoid)
#
# We are retaining no information on the grammar of the sentences nor on the ordering of the words in the text.
#
# ## TF-IDF
# Term frequency–inverse document frequency, is a numerical statistic that is intended to reflect
# how important a word is to a document in a collection or corpus
# #### Term Frequency (TF)
# TF for a term “t” is defined as the count of a term “t” in a document “D”
#
# #### Inverse Document Frequency (IDF)
# IDF for a term is defined as logarithm of ratio of total documents available in the corpus and number of documents containing the term T.
#
# #### TF . IDF
# TF IDF formula gives the relative importance of a term in a corpus (list of documents).
from sklearn.feature_extraction.text import TfidfVectorizer
obj = TfidfVectorizer()
# NOTE(review): this rebinds `corpus` to a toy 3-document list, discarding the
# bag-of-words corpus built above.
corpus = ['This is sample document.', 'another random document.', 'third sample document text']
# Sparse TF-IDF matrix; printing shows (doc, term-index) -> tf-idf weight tuples.
X = obj.fit_transform(corpus)
print(X)
# The model creates a vocabulary dictionary and assigns an index to each word.
# Each row in the output contains a tuple (i,j) and a tf-idf value of word at index j in document i.
# +
# Build a lemmatised, stopword-free corpus for the TF-IDF model below.
ps = PorterStemmer()
wordnet = WordNetLemmatizer()
sentencesTFIDF = nltk.sent_tokenize(paragraph)
# Hoisted out of the loop: the original rebuilt this set for every sentence.
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(len(sentencesTFIDF)):
    # Keep letters only, lowercase, then split on whitespace.
    review = re.sub('[^a-zA-Z]', ' ', sentencesTFIDF[i])
    review = review.lower()
    review = review.split()
    # Lemmatise each surviving (non-stopword) token.
    review = [wordnet.lemmatize(word) for word in review if word not in stop_words]
    review = ' '.join(review)
    corpus.append(review)
# Creating the TF-IDF model
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer()
# Dense TF-IDF matrix over the lemmatised corpus built above.
X = cv.fit_transform(corpus).toarray()
X
# -
# #### Conclusion
#
# Bag of Words just creates a set of vectors containing the count of word occurrences in the document (reviews),
# while the TF-IDF model contains information on the more important words and the less important ones as well.
#
# Bag of Words vectors are easy to interpret. However, TF-IDF usually performs better in machine learning models.
# ## Word2Vec
import gensim
from gensim.models import Word2Vec
# Sample speech text used as the raw input for the Word2Vec demo below.
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others.That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. <NAME> of the Dept. of
space, Professor <NAME>, who succeeded him and Dr. <NAME>, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# Preprocessing the data: strip citation markers, collapse whitespace,
# lowercase, and blank out digits, leaving the cleaned result in `text`.
cleaned = re.sub(r'\[[0-9]*\]', ' ', paragraph)  # drop [n]-style citation markers
cleaned = re.sub(r'\s+', ' ', cleaned)           # collapse whitespace runs to one space
cleaned = cleaned.lower()
cleaned = re.sub(r'\d', ' ', cleaned)            # replace every digit with a space
text = re.sub(r'\s+', ' ', cleaned)              # collapse again after digit removal
text
# Preparing the dataset: a list of sentences, each a list of non-stopword tokens.
sentences = nltk.sent_tokenize(text)
sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
# Build the stopword set once; the original recomputed the stopword list for
# every sentence and paid an O(n) list-membership check per word.
stop_words = set(stopwords.words('english'))
for i in range(len(sentences)):
    sentences[i] = [word for word in sentences[i] if word not in stop_words]
# Training the Word2Vec model
# min_count=1 keeps every token, even ones seen once (tiny corpus).
model = Word2Vec(sentences, min_count=1)
# `wv.index_to_key` is the gensim 4.x vocabulary accessor — assumes gensim >= 4.
words = list(model.wv.index_to_key) ## finding vocabs
# Finding Word Vectors
vector = model.wv['war']
vector
# Most similar words
# Cosine-similarity neighbours of 'war' in the embedding space.
similar = model.wv.most_similar('war')
similar
| session-3/3)NLP Fundamentals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Title
# Text blah blah
# $y = \sin(x)$
x=1
y=2
x*y
using PyPlot
# `linspace` was removed in Julia 1.0; `range(...; length=...)` is the
# replacement (this notebook's kernel is Julia 1.5).
x = collect(range(0, 10, length=100))
y = sin.(x)
z = cos.(x)
plot(x, y, "--r", x, z, ":g")
xlabel("x axis")
ylabel("y axis")
title("sin and cos plots")
grid()
text(6.0, 1.0, "annotation")
savefig("sincos.eps")
# +
# Relative-permeability parameter container (Corey-type model, see
# visualize_relperm below which uses krw0*((sw-swc)/(1-swc-sor))^nw).
struct Relperm
krw0  # water end-point relative permeability
kro0  # oil end-point relative permeability
swc   # connate (irreducible) water saturation
sor   # residual oil saturation
nw    # water Corey exponent
no    # oil Corey exponent
end
# Example parameter set used below.
relperm = Relperm(0.9, 0.3, 0.15, 0.2, 2.0, 2.0)
# -
# Plot the Corey-type water relative-permeability curve for parameter set `k`.
function visualize_relperm(k::Relperm)
    # `linspace` was removed in Julia 1.0 — use `range` instead.
    sw = range(k.swc, 1 - k.sor, length=100)
    # Dot (broadcast) operators are required in Julia 1.x to combine the
    # array `sw` with scalar struct fields; the original scalar operators
    # (`sw-k.swc`, `/`, `^`) throw a MethodError on arrays.
    krw = k.krw0 .* ((sw .- k.swc) ./ (1 - k.swc - k.sor)) .^ k.nw
    plot(sw, krw)
    xlabel("Sw")
    ylabel("krw")
end
visualize_relperm(relperm)
"""
function f(;x=1, y=3)
"""
function f(;x=1, y=3)
return sin(x)*cos(y)
end
?f
import JLD
# Build a demo matrix: 15 columns of sin(t^(i/10)) sampled on t in [1, 10].
t = collect(1:0.1:10)
M = zeros(length(t), 15)
for i in 1:15
v = sin.(t.^(i/10))
M[:,i]=v[:]
end
s = "this is only a test"
# Round-trip through a JLD file: save named variables, reload as a Dict.
JLD.save("test_jld.jld", "t", t, "M", M, "note", s)
res = JLD.load("test_jld.jld")
# Plotting a matrix draws one line per column (requires PyPlot from earlier cells).
plot(res["t"], res["M"])
res["note"]
| analytical/notebooks/julia_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# I began tackling this project by focusing on the first requirement as outlined by Dr <NAME>;
#
# #### Describe (20%)
#
# #### <font color=red>Describe (20%) : </font>
#
# *Good summary of the dataset, repository well laid-out and organised.*
#
# *Reasonable commits to the repository.*
#
# *Create a git repository and make it available online for the lecturer to clone.*
#
# *The repository should contain all your work for this assessment. *
#
# *Within the repository, create a jupyter notebook that uses descriptive statistics and plots to describe the Boston House Prices dataset. *
#
# *This part is worth 20% of your overall mark.*
#
# My first goal was to research online for a good summary of the dataset, i.e. figure out how I can upload the dataset to my Jupyter Notebook, and then summarise it.
#
# I reviewed the Boston Standard Metropolitan Statistical Area - Boston house prices dataset on https://www.kaggle.com/c/boston-housing.
# According to the description on www.kaggle.com; *this dataset details the Housing Values in Suburbs of Boston.
# Data description; The Boston data frame has 506 rows and 14 columns.*
#
#
# We are provided with a ‘key’ which describes what the information in each column represents;
#
# **crim**
# per capita crime rate by town.
#
# **zn**
# proportion of residential land zoned for lots over 25,000 sq.ft.
#
# **indus**
# proportion of non-retail business acres per town.
#
# **chas**
# Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
#
# **nox**
# nitrogen oxides concentration (parts per 10 million).
#
# **rm**
# average number of rooms per dwelling.
#
# **age**
# proportion of owner-occupied units built prior to 1940.
#
# **dis**
# weighted mean of distances to five Boston employment centres.
#
# **rad**
# index of accessibility to radial highways.
#
# **tax**
# full-value property-tax rate per $10,000.
#
# **ptratio**
# pupil-teacher ratio by town.
#
# **black**
# 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town.
#
# **lstat**
# lower status of the population (percent).
#
# **medv**
# median value of owner-occupied homes in $1000s.
#
# Source;
#
# * <NAME>. and <NAME>. (1978) Hedonic prices and the demand for clean air. J. Environ. Economics and Management 5, 81–102.
#
# * <NAME>., <NAME>. and <NAME>. (1980) Regression Diagnostics. Identifying Influential Data and Sources of Collinearity. New York: Wiley.
#
#
# I was immediately struck by one piece of information mentioned;
#
# **black**
#
# 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town.
#
# This seemed very strange to me, and slightly offensive. I wondered if this would be explained a bit more when I became more familiar with the dataset.
# Over the course of this module a fellow student raised this same concern on our Outlook group which is used for news, announcements and discussion. Dr. Ian addressed this on our Outlook group, so I felt less confused after this.
#
# When I clicked on ‘Data’ on www.kaggle.com I expected to be presented with the dataset, but nothing appeared. I revisited Moodle to see if Ian had mentioned that we need to create an account on on www.kaggle.com to access the dataset.
# I noticed one comment from Ian in our Outlook group which is used for news, announcements and discussion. Ian’s comment was made in reply to a question posted by one of my fellow students;
# *You'll find the common datasets in many different configurations - part of the assignment is to source the dataset in a form conducive to what you need to do with it.*
# This is important knowledge for me while completing this assignment. It also explains why I couldn’t immediately view the dataset in https://www.kaggle.com/c/boston-housing.
#
# I discovered a website 'packtpub' which seemed to explain how to load the dataset in Jupyter Notebook;
# https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset.
# This website describes the dataset as follows;
# *The dataset we'll look at in this section is the so-called Boston housing dataset. It contains US census data concerning houses in various areas around the city of Boston. Each sample corresponds to a unique area and has about a dozen measures. We should think of samples as rows and measures as columns. The data was first published in 1978 and is quite small, containing only about 500 samples.*
#
# I read through the information supplied on this website which provides examples of code to load the dataset into a Jupyter Notebook, along with code to Explore the Boston housing dataset and an introduction to Predictive Analytics with Jupyter Notebooks.
# I thought that I could slightly adapt the code provided by this website for my purposes, in order to meet the requirements laid out by Dr. Ian for the second portion of the assessment;
# * (20%) Infer: To the jupyter notebook, add a section where you use inferential statistics to analyse whether there is a significant difference in median house prices between houses that are along the Charles river and those that aren’t. You should explain and discuss your findings within the notebook. This part is also worth 20%.
#
# So, I decided to follow the steps outlined on the https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset website.
#
# The website describes how to complete the following; “Load the data into Jupyter using a Pandas DataFrame”.
#
# *Oftentimes, data is stored in tables, which means it can be saved as a comma-separated variable (CSV) file. This format, and many others, can be read into Python as a DataFrame object, using the Pandas library. Other common formats include tab-separated variable (TSV), SQL tables, and JSON data structures. Indeed, Pandas has support for all of these. In this example, however, we are not going to load the data this way because the dataset is available directly through scikit-learn.*
# The https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset website also mentions; *An important part after loading data for analysis is ensuring that it's clean. For example, we would generally need to deal with missing data and ensure that all columns have the correct datatypes. The dataset we use in this section has already been cleaned, so we will not need to worry about this.*
# (Source: https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset).
# I followed the website’s recommended steps and the code provided as follows;
#
# NOTE(review): `load_boston` was deprecated in scikit-learn 1.0 and removed
# in 1.2 — this cell only runs on older scikit-learn versions.
from sklearn import datasets
boston = datasets.load_boston()
type (boston)
# +
from sklearn.utils import Bunch
# Bunch?
# -
# Display-only: the keys of the returned Bunch object.
['DESCR', 'target', 'data', 'feature_names']
boston['DESCR']
# As we can see from the error above 'NameError: name 'boston' is not defined' I ran into some issues while running the code supplied on https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset. I played around with the code and managed to get the desired result;
# NOTE(review): the bare name `load_boston` is only defined if
# `from sklearn.datasets import load_boston` ran earlier in the session —
# presumably it did in the author's kernel; TODO confirm.
boston = load_boston()
boston['DESCR']
['DESCR', 'target', 'data', 'feature_names']
boston = load_boston()
boston['DESCR']
# I managed to load the dataset description into my Jupyter notebook as we can see in the above cell. This resembles the outcome of Step 5 as described on https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset; *Run the cell to print the dataset description contained in boston['DESCR'] .*
#
# I continued on to Step 6 as outlined on the website;
# +
import pandas as pd
# pd.DataFrame?
# -
# The website informs me that the *The docstring reveals the DataFrame input parameters. We want to feed in boston['data'] for the data and use boston['feature_names'] for the headers,* (https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset).
#
# Progressing on to Step 7;
# NOTE(review): these cells deliberately reproduce the author's trial and
# error; the NameErrors described in the surrounding text come from `boston`
# and `load_boston` not being defined in a fresh kernel, not from the spacing.
#What does the data look like?
boston[ 'data' ]
# I tried to copy the code supplied on the website, and initially made a mistake as I encountered the above error. I realised I had inserted 2 spaces before & after the ‘data’ which I should not have done – I amended this and tried again;
#What does the data look like?
boston ['data']
# This did not resolve the issue, as I still encounter an error message. I tried playing around with the code some more as shown below to see if I could get it working;
#What does the data look like?
boston ['data']
#What does the data look like?
boston = load_boston()
boston['data']
boston = load_boston()
boston['data']
# I kept encountering the same error again ‘NameError: name 'boston' is not defined’.
#
# I established that there must be something wrong with a previous step in my Jupyter Notebook, and I suspect it is that I have done something wrong at step 4.
#
# I searched online for guidance and found some information pertaining to this error message;
#
# **NameError: global name '---' is not defined**
#
# *Python knows the purposes of certain names (such as names of built-in functions like print). Other names are defined within the program (such as variables). If Python encounters a name that it doesn't recognize, you'll probably get this error.
# Some common causes of this error include:*
# * Forgetting to give a variable a value before using it in another statement
# * Misspelling the name of a built-in function (e.g., typing "inpit" instead of "input")
#
# (Source: https://www2.cs.arizona.edu/people/mccann/errors-python#Four)
#
# I played around with my Jupyter Notebook, changing the code in the cells and eventually realised that if I ran all steps again from 1 to 7 it worked correctly for me;
#
# Re-run of steps 1-7 from the top so every name is defined in this session.
from sklearn import datasets
boston = datasets.load_boston()
type (boston)
# +
from sklearn.utils import Bunch
# Bunch?
# -
['DESCR', 'target', 'data', 'feature_names']
boston['data']
# Shape of the feature array: (number of samples, number of features).
boston['data'].shape
# As per the https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset website;
#
# *Looking at the output, we see that our data is in a 2D NumPy array. Running the command boston['data'].shape returns the length (number of samples) and the number of features as the first and second outputs, respectively.*
#
# At this point I compared the results in Out[16] above with the Boston Data Set according to https://www.kaggle.com/c/boston-housing and noticed 2 discrepancies;
#
# 1. medv – This is included in the Boston Standard Metropolitan Statistical Area - Boston house prices dataset as per https://www.kaggle.com/c/boston-housing
# According to this description, this dataset details the Housing Values in Suburbs of Boston. *Data description; The Boston data frame has 506 rows and 14 columns.
# We are provided with a ‘key’ which describes what the information in each column represents, and **medv** (median value of owner-occupied homes in $1000s)is one of the 14 keys.*
#
# However, **medv** is not included in the output in my Jupyter Notebook or on the https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset website. According to the results in Jupyter Notebook after running the code from the website, there are only 13 keys.
#
# The 13 predictor names; per the text above, "black" appears here as "B".
boston['feature_names']
# 2. **black**
# This is included in the Boston Standard Metropolitan Statistical Area - Boston house prices dataset as per https://www.kaggle.com/c/boston-housing.
#
# According to this description, this dataset details the Housing Values in Suburbs of Boston. *Data description; The Boston data frame has 506 rows and 14 columns.
# We are provided with a ‘key’ which describes what the information in each column represents, and **black** (1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town) is one of the 14 keys.*
#
# However, this is represented by ‘B’ in my Jupyter Notebook as shown in the above output cell, and on the https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset website.
#
# I continued on to Step 8 as per the https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset website;
#
# 8. Load the data into a Pandas DataFrame df by running the following:
# df = pd.DataFrame(data=boston['data'],
# columns=boston['feature_names'])
#
# NOTE(review): fails with NameError if the `import pandas as pd` cell has
# not been run in this session — exactly the error the text below describes.
df = pd.DataFrame(data=boston['data'],
columns=boston['feature_names'])
# As we can see from the above I encountered a similar situation to the one I encountered previously; NameError: name 'pd' is not defined.
# I decided to address this in the same manner as I had previously and run steps 1 to 8 again;
# Re-run of steps 1-8; note `import pandas as pd` is still missing here,
# which is why the DataFrame construction fails again below.
from sklearn import datasets
boston = datasets.load_boston()
type (boston)
# +
from sklearn.utils import Bunch
# Bunch?
# -
['DESCR', 'target', 'data', 'feature_names']
boston['data']
boston['data'].shape
boston['feature_names']
df = pd.DataFrame(data=boston['data'],
columns=boston['feature_names'])
df = pd.DataFrame(data=boston['data'],
columns=boston['feature_names'])
# Still the error message appeared.
# According to the website 'In machine learning, the variable that is being modeled is called the target variable; it's what you are trying to predict given the features. For this dataset, the suggested target is MEDV, the median house value in 1,000s of dollars.'
# I decided to try step 9 of the website to see if this helps resolve the issue.
# The instructions for step 9 were as follows;
# Run the next cell to see the shape of the target:
# Still need to add the target variable
# Target vector length should match the number of samples in the data.
boston['target'].shape
# The result is the same for me as it is described on the website. As per the website 'We see that it has the same length as the features, which is what we expect. It can therefore be added as a new column to the DataFrame.'
#
# Step 10; Add the target variable to df by running the cell with the following:
# df['MEDV'] = boston['target']
df['MEDV'] = boston['target']
# Step 11 according to the website; Move the target variable to the front of df by running the cell with the following code:
# Copy MEDV out, delete it, then concatenate it back as the first column.
y = df['MEDV'].copy()
del df['MEDV']
df = pd.concat((y, df), axis=1)
# This is done to distinguish the target from our features by storing it to the front of our DataFrame.
#
# Here, we introduce a dummy variable y to hold a copy of the target column before removing it from the DataFrame. We then use the Pandas concatenation function to combine it with the remaining DataFrame along the 1st axis (as opposed to the 0th axis, which combines rows).
#
# Step 12;
# Implement df.head() or df.tail() to glimpse the data and len(df) to verify that number of samples is what we expect. Run the next few cells to see the head, tail, and length of df:
# First five rows; MEDV should now appear as the left-most column.
df.head()
# This is not the correct result according to the website. I think this may be because I've forgotten to include & run one step;
# boston = load_boston()
# boston['DESCR'].
# I will start from Step 1 again;
from sklearn import datasets
boston = datasets.load_boston()
type (boston)
# +
from sklearn.utils import Bunch
# Bunch?
# -
['DESCR', 'target', 'data', 'feature_names']
# NOTE(review): the bare name `load_boston` was never imported in this
# sequence (only `datasets.load_boston` above), which is why these three
# attempts fail while `boston['DESCR']` alone works below.
boston = load_boston()
boston['DESCR']
boston = load_boston()
boston['DESCR']
boston = load_boston()
boston['DESCR']
# Stuck here.
# I tried the below and it worked; (`boston` was already set at the top of this cell)
boston['DESCR']
# +
import pandas as pd
# pd.DataFrame?
# -
#What does the data look like?
boston[ 'data' ]
boston['data'].shape
boston['feature_names']
# Build the DataFrame from the feature matrix with named columns.
df = pd.DataFrame(data=boston['data'],
columns=boston['feature_names'])
# Still need to add the target variable
boston['target'].shape
df['MEDV'] = boston['target']
# Move MEDV to the front: copy it out, drop it, re-concatenate column-wise.
y = df['MEDV'].copy()
del df['MEDV']
df = pd.concat((y, df), axis=1)
df.head()
df.tail()
# Finally, I got the desired result for steps 12 & 13 as per to the website (as seen above). I implemented df.head() and df.tail() *to glimpse the data and len(df) to verify that number of samples is what we expect. I ran these cells to see the head, tail, and length of df.* As per the website; *each row is labeled with an index value, as seen on the left side of the table. By default, these are a set of integers starting at 0 and incrementing by one for each row.*
#
# I notice that for all 10 records highlighted above the **CHAS** category shows 0. As per https://www.kaggle.com/c/boston-housing; **chas** - Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
#
# This means for the 10 records above the tract does not bound the river for these cases.
#
# I assessed the **medv** (median value of owner-occupied homes in $1000s) for these 10 examples where the river is not bound, and noticed that the lowest figure shown is 11.9 and highest is 36.2.
#
# The general range is between 11.9 and 36.2.
#
# Based on this I calculated that the general median value of owner-occupied homes range for these 10 records which do not bound the river is between 11,900 dollars & 36,200 dollars.
# Beginning again due to an error: NameError: name 'df' is not defined. I think this occurs each time I step away from my notebook and then attempt to run code again when I return to it.
# ##### Step 1
# (Taken from https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781789958171/1/ch01lvl1sec04/our-first-analysis-the-boston-housing-dataset);
# Complete end-to-end re-run (load, build DataFrame, move MEDV to front),
# needed because the earlier cells' state is lost when the kernel restarts.
from sklearn import datasets
boston = datasets.load_boston()
type (boston)
# +
from sklearn.utils import Bunch
# Bunch?
# -
['DESCR', 'target', 'data', 'feature_names']
boston['DESCR']
# +
import pandas as pd
# pd.DataFrame?
# -
#What does the data look like?
boston[ 'data' ]
boston['data'].shape
boston['feature_names']
df = pd.DataFrame(data=boston['data'],
columns=boston['feature_names'])
# Still need to add the target variable
boston['target'].shape
df['MEDV'] = boston['target']
# Move MEDV to the front of the DataFrame.
y = df['MEDV'].copy()
del df['MEDV']
df = pd.concat((y, df), axis=1)
df.head()
df.tail()
# Number of samples in the DataFrame.
len(df)
# Column datatypes — all float columns, so this is a regression problem.
df.dtypes
# According to https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset *printing df.dtypes will show the datatype contained within each column.
# For this dataset, we see that every field is a float and therefore most likely a continuous variable, including the target. This means that predicting the target variable is a regression problem.*
# As per the website *The next thing we need to do is clean the data by dealing with any missing data, which Pandas automatically sets as NaN values.
# These can be identified by running df.isnull() , which returns a Boolean DataFrame of the same shape as df.
# To get the number of NaN's per column, we can do df.isnull().sum() .
# Run the next cell to calculate the number of NaNvalues in each column:*
# Boolean mask of missing values, same shape as df.
df.isnull()
# Identify any NaNs: count of missing values per column.
df.isnull().sum()
# The website tells us that *For this dataset, we see there are no NaN's, which means we have no immediate work to do in cleaning the data and can move on.*
# Summary statistics per column; .T transposes so columns become rows.
df.describe().T
# The website instructs me to run the above code as *This computes various properties including the mean, standard deviation, minimum, and maximum for each column. This table gives a high-level idea of how everything is distributed. Note that we have taken the transform of the result by adding a .T to the output; this swaps the rows and columns. Going forward with the analysis, we will specify a set of columns to focus on.*
# Source: https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset
#
# Next step: *Run the cell where these "focus columns" are defined:*
# Tutorial's original focus columns (replaced with CHAS further below).
cols = ['RM', 'AGE', 'TAX', 'LSTAT', 'MEDV']
df[cols].head()
# I altered the code provided on the website slightly for my own purposes. The code as per the website is;
#
# cols = ['RM', 'AGE', 'TAX', 'LSTAT', 'MEDV']
#
# I altered this to cols = ['CHAS', 'AGE', 'TAX', 'LSTAT', 'MEDV']
#
# The reason behind this is that I'd like to focus on the Charles River and Pricing aspects to prepare to address the second requirement for this assignment as set by Dr. <NAME>.
#
#
# #### Infer (20%)
#
# #### <font color=red>Infer (20%):</font>
#
# *Add a section where you use inferential statistics to analyse whether there is a significant difference in median house prices between houses that are along the Charles river and those that aren’t.
# You should explain and discuss your findings within the notebook.
# This part is also worth 20%.*
#
# So, I replaced **'RM'** with **'CHAS'** (Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).), and kept all financial related topics in the code, i.e. **'MEDV'** (median value of owner-occupied homes in USD 1,000s), and **'TAX'** (full-value property-tax rate per USD 10,000.).
# Swapped RM for CHAS to focus on the Charles-River vs price question.
cols = ['CHAS', 'AGE', 'TAX', 'LSTAT', 'MEDV']
# *This subset of columns can be selected from df using square brackets. Display this subset of the DataFrame by running df[cols].head() :*
df[cols].head()
# As per the website; *To look for patterns in this data, we can start by calculating the pairwise correlations using pd.DataFrame.corr. Calculate the pairwise correlations for our selected columns by running the cell containing the following code:*
# Pairwise correlation matrix (Pearson by default) for the focus columns.
df[cols].corr()
# As per the website;*This resulting table shows the correlation score between each set of values. Large positive scores indicate a strong positive (that is, in the same direction) correlation. As expected, we see maximum values of 1 on the diagonal.*
# I follow the guidance and code provided on the website to create a 'heatmap' which will have the **CHAS** and **medev** factors included so I can begin to easily compare the correlation between median house prices between houses that are along the Charles river and those that aren’t;
# *Instead of straining our eyes to look at the preceding table, it's nicer to visualize it with a heatmap. This can be done easily with Seaborn.
# Run the next cell to initialize the plotting environment, as discussed earlier in the chapter. Then, to create the heatmap, run the cell containing the following code:*
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Heatmap of the pairwise correlations with a custom cubehelix palette.
ax = sns.heatmap(df[cols].corr(),
cmap=sns.cubehelix_palette(20, light=0.95,
dark=0.15))
ax.xaxis.tick_top() # move labels to the top
# NOTE(review): assumes a ../figures directory exists relative to the
# notebook; savefig raises otherwise — TODO confirm.
plt.savefig('../figures/lesson-1-boston-housing-corr.png', bbox_inches='tight', dpi=300)
# *We call sns.heatmap and pass the pairwise correlation matrix as input. We use a custom color palette here to override the Seaborn default. The function returns a matplotlib.axes object which is referenced by the variable ax. The final figure is then saved as a high resolution PNG to the figures folder.*
#
# *For the final step in our dataset exploration exercise, we'll visualize our data using Seaborn's pairplot function.*
#
# Source: https://subscription.packtpub.com/book/programming/9781789804744/1/ch01lvl1sec11/our-first-analysis-the-boston-housing-dataset
#
# Grid of pairwise scatter plots with 30-bin histograms on the diagonal.
sns.pairplot(df[cols],
plot_kws={'alpha': 0.6},
diag_kws={'bins': 30})
# *Having previously used a heatmap to visualize a simple overview of the correlations, this plot allows us to see the relationships in far more detail.*
# I notice that for all results which pertain to **CHAS** they are very focused on 0 or 1, there are none at 0.5.
# I presume the reason for this is because as per https://www.kaggle.com/c/boston-housing **chas** - *Charles River dummy variable (= **1** if tract bounds river; **0** otherwise).*
#
# I notice that for the result on the bottom left corner of the above, **MEDV**, **CHAS**;
# * **CHAS** 0.0 (houses which do not run alongside the river) ranges from **MEDV** (median value of owner-occupied homes in USD 1000s) 0 - 50,
# and
# * **CHAS** 1.0 (houses which are on the river) start at **MEDV** 15, and range from around 15 to 50 (median value of owner-occupied homes in USD 1000s).
#
# I.e. Houses which do not border the river range from less than USD 15,000 to USD 50,000, and houses which do border the river are valued between a minimum of USD 15,000 and USD 50,000.
# Houses which border the river are valued higher than those which do not border the river.
#
# To me, this indicates that houses on the Charles River (1) have a higher value as the starting price is 1.5 to 50, whereas starting prices for houses not on the river start as low as 0 on the chart.
#
# For the result in the top right corner, we see houses with 1 (located on Charles River) start at a **MEDV** of approx 15, whereas houses not located on the river (0) start around 5.
# #### Predict (60%)
#
# #### <font color=red>Predict (60%) :</font>
# *Use keras to create a neural network that can predict the median house price based on the other variables in the dataset.*
#
# **Etain's notes on Keras based on review of Dr. <NAME>'s lecture videos on this topic 2019;**
#
# Etain’s notes from video ‘Individual neurons in keras’ (2);
#
# Keras is a package for Python which can be used to make predictions.
#
# *Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano.*
#
# *It was developed with a focus on enabling fast experimentation. Being able to go from idea to result with the least possible delay is key to doing good research.* (https://keras.io/)
#
# Based on Ian’s video ‘Individual neurons in keras‘, it sounds like I will be able to use keras in this assignment to complete the below requirements;
#
# Within the repository, create a jupyter notebook that uses descriptive statistics and plots to describe the Boston House Prices dataset. Again using the same notebook, use keras to create a neural network that can predict the median house price based on the other variables in the dataset.
#
# Neurons are mentioned.
#
# Ian demonstrates how to insert content to keras in order to have the exact same content expelled/output by keras. (1.45min)
#
# Ian mentioned that the following 3 commands are the main commands I will use to create my neuron network;
# +
# NOTE(review): `kr` requires `import keras as kr`, which only appears in a
# later cell — presumably it ran first in the author's session; TODO confirm.
# Create a new neural network.
m = kr.models.Sequential()
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, input_dim=1, activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# -
# (Source: https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/keras-neurons.ipynb)
#
# Input – dataset.
# Output – you sometimes have an idea what this will look like already.
# E.g. the dataset could have several values, but you want the output to have only 2 potential outcomes such as ‘yes’ and ‘no’.
# * Sequential – explained at 39mins
# * Dense – explained at 41mins
#
# *TensorFlow is an end-to-end open source platform for machine learning.
# It has a comprehensive, flexible ecosystem of tools, libraries and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML powered applications.*
# (https://www.tensorflow.org/)
#
# I reviewed Ian's instructions on how to install the Keras package. I tried following his instructions but for some reason I continuously ran into issues on my system.
# I also attempted to follow the instruction on the Keras website (https://keras.io/) but I found it too confusing.
# I researched online and came accros a YouTube video which described how to complete the install of Keras & Tensorflow via the Anaconda Navigator https://www.youtube.com/watch?v=V9cDjjRXS08.
# However, this did not work for me as neither Tensorflow or Keras were available under ' Not Installed' when I searched for them.
# I decided to review the comments in our Outlook group which is used for news, announcements and discussion to see if any other students encountered issues.
# One student advised that they followed the instructions from YouTube video https://www.youtube.com/watch?v=59duINoc8GM. I decided to attempt the same. I noticed while watching this video that I had been running Ian's instruction in the incorrect command prompt area - I should have been opening the Anaconda Prompt (ForAnaconda) and then attempting "-conda install -c conda-forge keras tensorflow". I tried this. Something seemed to be happening so I let the system run for a few minutes.
# I was hopeful that Keras and Tensorflow had been successfully installed. I can now see 'Keras' and 'Tensorflow' in my Anaconda Navigator under 'Installed'.
import keras as kr
# It seems from the above as though I have installed everything correctly, so I moved on to Ian's next video 'Individual neurons in keras'.
# Imports for the single-neuron Keras example.
# Fix: the plotting package is spelled "matplotlib", not "mathplotlib" —
# the misspelling is what raised ModuleNotFoundError below. The duplicated
# import lines are also collapsed into one group.
import keras as kr
import numpy as np
import matplotlib.pyplot as plt
# I copy Ian's code;
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, input_dim=1, activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 10.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
x
y
# Plot the values.
plt.plot(x, y, 'k.')
# Plot the values.
plt.plot(x, y, 'k.')
# I encounter the above error so I decide to run Ian's code again from the beginning to see if this helps, although I am concerned that this error is appearing due to the ModuleNotFoundError: No module named 'mathplotlib' error.
# Re-run of the imports; "mathplotlib" corrected to "matplotlib" so that
# plt is actually defined for the plotting cells that follow.
import keras as kr
import numpy as np
import matplotlib.pyplot as plt
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, input_dim=1, activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 10.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
x
y
# Plot the values.
plt.plot(x, y, 'k.')
# Plot the values.
plt.plot(x, y, 'k.')
# Unfortunately, it appears as though I am having issues generating the plot as I encounter the above error. I decide to Google 'NameError: name 'plt' is not defined'.
# I found a page which seems relevant; https://github.com/konstantint/matplotlib-venn/issues/33
# The notes on this page suggest I try 'from matplotlib import pyplot as plt'.
# The GitHub issue's suggestion is correct: import pyplot from matplotlib.
# The broken "import mathplotlib.pyplot" line is removed — it was a typo
# for "matplotlib" and was the reason plt was never defined.
from matplotlib import pyplot as plt
import keras as kr
import numpy as np
# Still I encounter the same error.
#
# I try not to get bogged down at this point and remind myself again what outcome I'm looking for using Keras;
# *Use keras to create a neural network that can predict the median house price based on the other variables in the dataset.*
#
# I consider using the 3 main commands with some figures from the Boston House Prices dataset as input instead of the figures in Dr. Ian's example. I decide to give this a go substituting some of Ian's figures in his example with the **MEDEV** figures from the dataset - x = np.arange(0.175260, 0.376955, 1.000000);
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, input_dim=1, activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.175260, 0.376955, 1.000000)
# Run each x value through the neural network.
y = m.predict(x)
# -
x
y
# I'd like to create a network with more than one Neuron so I copy Ian's code again but instead of 1 I insert 10 figures;
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add ten neurons in a single layer, initialised with weights 1 and biases 0.
# Dense takes the NUMBER of units as one positional argument: writing
# "Dense(1, 2, ..., 10 input_dim=1" (or "Dense(10 input_dim=1" without the
# comma) is a SyntaxError — Dense(10, input_dim=1, ...) creates ten neurons.
m.add(kr.layers.Dense(10, input_dim=1, activation="linear", kernel_initializer='ones', bias_initializer='zeros'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# -
# I try to substitute the single neuron figure 1 with 10 figures to create a network with more than one neuron, but I encounter SyntaxError: invalid syntax.
#
# I review a resource posted by Ian on Moodle - 'Playing with neurons' Jupyter Notebook (https://nbviewer.jupyter.org/github/ianmcloughlin/jupyter-teaching-notebooks/blob/master/playing-with-neurons.ipynb). I notice here Ian has included the import matplotlib.pyplot as plt as the first line of code, so I decide to attempt this too;
import matplotlib.pyplot as plt
import numpy as np
import keras as kr
plt.rcParams['figure.figsize'] = (10, 10)
# +
x = np.linspace(-10.0, 10.0, 2000)
y_l = (3.0 * x) + 5.0
y_p = (x * x) - 41.0
y_s = 10.0 * np.sin(2.0 * x)
# +
plt.plot(x, y_l, label='$3x + 5$')
plt.plot(x, y_p, label='$x^2 - 41$')
# Raw string: '\s' is not a valid escape sequence in a normal string literal.
plt.plot(x, y_s, label=r'$10 \sin(2x)$')
plt.legend()
# -
# I followed the above code as supplied by Ian in https://nbviewer.jupyter.org/github/ianmcloughlin/jupyter-teaching-notebooks/blob/master/playing-with-neurons.ipynb and get the same result on the graph as in Ian's Notebook. I decide to substitute some of the figures with the **CHAS** and **MEDEV** figures from the Boston House Prices dataset.
import matplotlib.pyplot as plt
import numpy as np
import keras as kr
plt.rcParams['figure.figsize'] = (10, 10)
# +
x = np.linspace(0, 1, 2000)
y_l = (24.0 * x) + 5.0
y_p = (x * x) - 0.376955
y_s = 10.0 * np.sin(2.0 * x)
# +
plt.plot(x, y_l, label='$24x + 5$')
plt.plot(x, y_p, label='$x^2 - 0.376955$')
# Raw string avoids the invalid '\s' escape-sequence warning.
plt.plot(x, y_s, label=r'$10 \sin(2x)$')
plt.legend()
# -
# Etain's explanation of where I came up with the figures;
#
# x = np.linspace(0, 1, 2000)
#
# y_l = (3.0 * x) + 5.0
# y_p = (x * x) - 41.0
# y_s = 10.0 * np.sin(2.0 * x)
# __________________________________
#
# 0 = not on river
# 1 = house on river
# 2000 = left as is in Ian's code
# _________________________
# y_l = (24.0 * x) + 5.0
#
# 24 from MEDEV, everything else left as is
#
# _________________
# y_p = (x * x) - 0.376955
#
# - 0.376955 taken from MEDEV
#
# ___________
#
# y_s = 10.0 * np.sin(2.0 * x)
#
# Left as is
# I think I should attempt a plot with keyword strings as shown on the Matplotlib Pyplot Tutorial page to display the results in their best form https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html.
# I know that my code thus far has not 'trained' my neural network, so this is something I need to work on in order to predict the median house price based on the other variables in the dataset.
# I wonder if I can use the 'Two neurons' approach demonstrated by Ian. I would consider using the two factors **chas** (Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)), and **tax** (full-value property-tax rate per USD 10,000) to determine the **medv** (median value of owner-occupied homes in USD 1000s). I know for sure I have some figures above for **chas** and **medev**, and **tax** so it would be easy for me to focus on these three factors.
# I would still be fulfilling the requirements laid out for this assessment, as Dr. Ian has stated "You are free to interpret this as you wish — for example, you may use all the other variables, or select a subset", so I could use these 3 factors as my subset.
# I copy Ian's code below, and insert some figures in place of the sample figures he used.
# The figures I have chosen to use are;
# 1. **CHAS** 0.0 , 0.0
# 2. **TAX** 296.0 , 242.0
# 3. **MEDEV** 24.0 , 21.6
#
# I have taken these from the figures generated earlier in this Jupyter Notebook when I ran 'df.head()' and received 'Out[28]'.
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([0.0, 296.0]), np.array([0.0, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([0.0, 296.0]), np.array([0.0, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 10.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
# Plot the values.
plt.plot(x, y, 'k.')
# We can see above I encounter error messages.
# +
# Create a new neural network.
# Keras was imported "as kr", so it must be referenced as kr here —
# using the bare name "keras" raised NameError.
m = kr.models.Sequential()
# Add two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([0.0, 296.0]), np.array([0.0, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# -
# I tried changing 'kr' to 'keras' to see if this helps but I still encounter the error message.
import matplotlib.pyplot as plt
import numpy as np
import keras as kr
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([0.0, 296.0]), np.array([0.0, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 10.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
# Plot the values.
plt.plot(x, y, 'k.')
# I decide to re-run the below code and then try the code again to see if this eliminates the errors, and as we can see it worked;
#
# import matplotlib.pyplot as plt
#
# import numpy as np
#
# import keras as kr
# I'm uncertain what the resulting graph indicates in terms of predicting the median house price based on the other variables in the dataset. I decide to attempt the graph again with a different factor besides **CHAS**, so I instead run the same code using **lstat** (lower status of the population (percent)).
# 1. ~~CHAS 0.0 , 0.0~~
# 2. TAX 296.0 , 242.0
# 3. MEDEV 24.0 , 21.6
# 4. LSTAT 4.98, 9.14
#
import matplotlib.pyplot as plt
import numpy as np
import keras as kr
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([4.98, 296.0]), np.array([0.0, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
# Plot the values.
plt.plot(x, y, 'k.')
import matplotlib.pyplot as plt
import numpy as np
import keras as kr
# +
# Create a new neural network.
m = kr.models.Sequential()
# Add a two neurons in a single layer.
m.add(kr.layers.Dense(2, input_dim=1, activation="linear"))
# Add a single neuron in a single layer, initialised with weight 1 and bias 0.
m.add(kr.layers.Dense(1, activation="linear", kernel_initializer=kr.initializers.Constant(value=1), bias_initializer=kr.initializers.Constant(value=0)))
# Set the weight/bias of the two neurons.
m.layers[0].set_weights([np.matrix([4.98, 296.0]), np.array([9.14, 242.0])])
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="sgd")
# +
# Create some input values.
x = np.arange(0.0, 10.0, 1)
# Run each x value through the neural network.
y = m.predict(x)
# -
# Plot the values.
plt.plot(x, y, 'k.')
#
| Etain's Jupyter Notebook containing the main body of my work for this assignment - final subission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from gspan_mining.config import parser
from gspan_mining.main import main
# %pylab inline
# Run gSpan frequent-subgraph mining on the sample graph database:
# min support 2 (-s), directed graphs (-d), lower bound of 5 vertices (-l),
# plot the mined patterns (-p) and show where they occur (-w).
args_str = '-s 2 -d True -l 5 -p True -w True graphdata/graph.data.simple.5'
FLAGS, _ = parser.parse_known_args(args=args_str.split())
gs = main(FLAGS)
# ## plot graphs in database
for graph in gs.graphs.values():
    graph.plot()
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#lets start by importing a bunch of stuff
import tensorflow as tf
import pandas as pd
import numpy as np
import math
# +
# Downloading and separating data
#explictily setting the types and names
names_data = ['entry','entry_name','protein_name','gene_name','organism','length','sequence',
'gene_ontology','status','organism_id','keywords']
dtypes_data = {'entry':'str','entry_name':'str','protein_name':'str','gene_name':'str',
'organism':'str','length':'int','sequence':'str','gene_ontology':'str','status':'str',
'organism_id':'int','keywords':'str'}
protein_data = pd.read_csv('uniprot-filtered-reviewed_yes.tab',sep='\t',names = names_data,dtype=dtypes_data,skiprows=1)
#Seeing what the data looks like
protein_data.shape
#randomly shuffling dataset
protein_data = protein_data.sample(frac=1)
# +
# Ignore proteins whose sequence contains any ambiguous amino-acid code
# (B, O, J, U, X or Z). A single regex character class does in one pass
# what the original six separate str.contains() filters did over the frame.
protein_data = protein_data[~protein_data.sequence.str.contains('[BOJUXZ]', regex=True)]
protein_data.shape
# -
index_data_nan= protein_data.loc[protein_data.gene_ontology.isnull()].index
protein_data = protein_data.drop(index_data_nan)
# +
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
protein_data = protein_data.drop(columns=['gene_name', 'organism_id', 'status', 'protein_name', 'entry_name', 'organism', 'keywords'])
index_data = protein_data.loc[protein_data.length > 500].index
protein_data = protein_data.drop(index_data)
#encoding the sequence to a one-hot encoding scheme
labels = np.array(['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y'])
#label encoding - integer encoding
label_encoder = LabelEncoder()
label_encoder.fit(labels)
# onehot encoder
onehot_encoder = OneHotEncoder(sparse=False, categories = [range(20)], dtype=int)
def one_hot_encoder(my_string):
    """One-hot encode an amino-acid sequence string.

    Each character is integer-encoded with the module-level
    ``label_encoder`` (fitted on the 20 standard residues above) and then
    expanded to a 20-wide one-hot row by the module-level
    ``onehot_encoder``. Returns an array of shape (len(my_string), 20).
    """
    chars = np.array(list(my_string))  # split string into single characters
    ints = label_encoder.transform(chars).reshape(-1, 1)
    return onehot_encoder.fit_transform(ints)
#testing custom onehot_encoder
# test_sequence = protein_data.sequence[0]
# string_to_array(test_sequence)
# one_hot_encoder(test_sequence)
#applying onehot_encoder to entire dataframe(takes 6 minutes)
protein_data['sequence_encoded'] = protein_data['sequence'].apply(one_hot_encoder)
# +
#filtering the length of the sequences, dropping sequences greater than 6000
max_length = int(protein_data.length.max())# 2000
def make_all_same_length(my_array, target_len=500):
    """Zero-pad a (length, 20) one-hot sequence array to ``target_len`` rows.

    Arrays with ``target_len`` rows or more are returned unchanged.
    Fixes: the 500 is now a parameter (backward-compatible default), and
    the >= guard prevents the original crash in np.concatenate when the
    padding list came out empty (or negative-sized) for long sequences.
    """
    if len(my_array) >= target_len:
        return my_array
    # Rows of 20 zeros to bring the array up to the target length.
    padding = [[0] * 20] * (target_len - len(my_array))
    return np.concatenate((my_array, padding), axis=0)
protein_data['sequence_encoded_same_len'] = protein_data['sequence_encoded'].apply(make_all_same_length)
# +
#Dividing into 80 percent training and 20 percent test
train, test = np.split(protein_data, [int(.8*len(protein_data))])
#removing GO labels and keywords from the test dataset
test = test.drop(columns=['gene_ontology'])
# -
# ### Don't output the cells below
# example = str(train.gene_ontology.iloc[3])
# print(example)
# example_array = np.array(example.split(";"))
# list_example = [x.strip() for x in example_array]
# list_example
# built a dictionary of unique GO MF terms , just in training dataset we have 8272 terms GO MF
max_len_GO = 0
dictionary_uniq_GO = {}
for index, row in train.iterrows():
for x in row.gene_ontology.split(";"):
x=x[:-12]
x=x.strip()
if x in dictionary_uniq_GO:
dictionary_uniq_GO[x] += 1
else:
dictionary_uniq_GO[x] = 1
else:
pass
#print(dictionary_uniq_GO)
#getting top 50 terms for GO
# ### Begining one hot encoding the GO Terms
train.head(n=20)
# +
def clean_GO_terms(term):
    """Split a semicolon-separated gene-ontology string into term names.

    Each piece has its trailing 12-character ' [GO:XXXXXXX]' accession
    removed and surrounding whitespace stripped.

    Bug fix: the original computed ``stripped_array`` but returned the
    unstripped ``cleaned_array``, leaving a leading space on every term
    after the first.
    """
    term_array = term.split(';')
    cleaned_array = [x[:-12] for x in term_array]
    stripped_array = [x.strip() for x in cleaned_array]
    return stripped_array
#string_array = train.gene_ontology.head(n=100).iloc[3].split(';')
train['gene_ontology'] = train['gene_ontology'].apply(clean_GO_terms)
train.head(n=20)
# -
# #just strip before checkiong top 50
# cool= np.array(['4 iron, 4 sulfur cluster binding',
# "GTP 3',8'-cyclase activity",
# 'GTP binding',
# 'metal ion binding',
# 'S-adenosyl-L-methionine binding'])
# for x in train.gene_ontology.iloc[1]:
# if x.strip() in cool:
# print("cool")
# else:
# print('the spaces matter!')
# Quick scratch check: list.remove deletes the first matching VALUE in
# place (not an index), so this prints [2, 3, 4, 5, 6].
array = [1,2,3,4,5,6]
array.remove(1)
print(array)
train.head()
# +
#have only applied this to train, not test!!!!!!
#dropping sequence encoded column
#train = train.drop(columns=['sequence_encoded'])
# The 50 most-annotated GO molecular-function terms in the training set.
listofpopularGO_terms = np.array(['ATP binding','structural constituent of ribosome','metal ion binding','rRNA binding',
                                  'DNA binding','magnesium ion binding','zinc ion binding','RNA binding','tRNA binding',
                                  'GTP binding','4 iron, 4 sulfur cluster binding','iron ion binding',
                                  'DNA-binding transcription factor activity','toxin activity', 'rotational mechanism',
                                  'GTPase activity','NAD binding','pyridoxal phosphate binding','heme binding','NADP binding',
                                  'quinone binding','nucleic acid binding','sequence-specific DNA binding','electron transfer activity',
                                  'protein homodimerization activity','protein dimerization activity',
                                  'manganese ion binding','unfolded protein binding','translation elongation factor activity',
                                  'identical protein binding', 'nucleotide binding','NADH dehydrogenase (ubiquinone) activity',
                                  'transferase activity','calcium ion binding','serine-type endopeptidase activity',
                                  'metalloendopeptidase activity','structural molecule activity','oxidoreductase activity',
                                  'translation initiation factor activity',
                                  'ATPase activity','protein heterodimerization activity','sulfurtransferase activity',
                                  'ribosome binding','2 iron, 2 sulfur cluster binding','FMN binding','nickel cation binding',
                                  'single-stranded DNA binding','large ribosomal subunit rRNA binding',
                                  'protein serine/threonine kinase activity'])
def removeGO_term_not_50(array):
    """Return the whitespace-stripped terms from ``array`` that appear in
    the top-50 most-annotated GO term list above (order preserved)."""
    stripped = (term.strip() for term in array)
    return [term for term in stripped if term in listofpopularGO_terms]
#array = ['ATP binding ','NANA ',' NANA', 'NANA']
#print(removeGO_term_not_50(array))
train['new_gene_ontology'] = train['gene_ontology'].apply(removeGO_term_not_50)
train.head(n=25)
# -
# ### Removing empty GO rows in TRAIN!
train = train.drop(columns=['sequence_encoded'])
train = train.drop(columns=['gene_ontology'])
new_train = train[train.astype(str)['new_gene_ontology'] != '[]']
print(new_train.shape)
print(train.shape)
new_train.head(n=20)
# +
#actually encoding now-- only new train_new gene_ontology
listofpopularGO_terms_labels = np.array(['ATP binding','structural constituent of ribosome','metal ion binding','rRNA binding',
'DNA binding','magnesium ion binding','zinc ion binding','RNA binding','tRNA binding',
'GTP binding','4 iron, 4 sulfur cluster binding','iron ion binding',
'DNA-binding transcription factor activity','toxin activity', 'rotational mechanism',
'GTPase activity','NAD binding','pyridoxal phosphate binding','heme binding','NADP binding',
'quinone binding','nucleic acid binding','sequence-specific DNA binding','electron transfer activity',
'protein homodimerization activity','protein dimerization activity',
'manganese ion binding','unfolded protein binding','translation elongation factor activity',
'identical protein binding', 'nucleotide binding','NADH dehydrogenase (ubiquinone) activity',
'transferase activity','calcium ion binding','serine-type endopeptidase activity',
'metalloendopeptidase activity','structural molecule activity','oxidoreductase activity',
'translation initiation factor activity',
'ATPase activity','protein heterodimerization activity','sulfurtransferase activity',
'ribosome binding','2 iron, 2 sulfur cluster binding','FMN binding','nickel cation binding',
'single-stranded DNA binding','large ribosomal subunit rRNA binding',
'protein serine/threonine kinase activity'])
#label encoding, just changes it to numbers
label_encoder = LabelEncoder()
label_encoder.fit(listofpopularGO_terms_labels)
# onehot encoder
onehot_encoder = OneHotEncoder(sparse=False, categories = [range(49)], dtype=int)
def one_hot_encoder(my_array):
    """One-hot encode a list of GO term names.

    Uses the module-level ``label_encoder`` (fitted on the top-50 term
    labels above) and ``onehot_encoder`` (categories=range(49) per its
    construction above); returns a (len(my_array), 49) integer matrix
    with one row per term.

    NOTE(review): this redefines (shadows) the sequence one_hot_encoder
    declared earlier in the notebook — consider renaming one of them.
    """
    integer_encoded = label_encoder.transform(my_array)
    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
    return onehot_encoded
new_train['GO_encoded'] = new_train['new_gene_ontology'].apply(one_hot_encoder)
new_train.head()
# +
#finding the max number of functions
#don't need to output
def find_len(df):
    """Return the largest number of encoded GO terms in any row of
    ``df['GO_encoded']`` (0 for an empty frame, matching the original).

    Vectorised replacement for the original per-row iterrows() loop,
    which also carried a no-op ``else`` branch on the for statement.
    """
    if df.empty:
        return 0
    return int(df['GO_encoded'].map(len).max())
#print(find_len(new_train))
array= [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# -
# ### making GO encoded the same length (making it a 8 X 49 matrix)
def make_all_GO_same_length(my_array):
    """Zero-pad a (n_terms, 49) one-hot GO matrix to exactly 8 rows
    (8 is the maximum term count reported by find_len above).

    Fix: arrays that already have 8 rows or more are returned unchanged —
    the original only special-cased == 8 and crashed in np.concatenate
    for anything longer, because the padding list came out empty.
    """
    if len(my_array) >= 8:
        return my_array
    # Rows of 49 zeros to bring the matrix up to 8 rows.
    padding = [[0] * 49] * (8 - len(my_array))
    return np.concatenate((my_array, padding), axis=0)
new_train['GO_encoded'] = new_train['GO_encoded'].apply(make_all_GO_same_length)
new_train.head(n=25)
# ## Starting the actual CNN now!!
# +
# define parameters
# -
| .ipynb_checkpoints/Deepprotein stuff-checkpoint.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// + [markdown] toc=true
// <h1>Table of Contents<span class="tocSkip"></span></h1>
// <div class="toc"><ul class="toc-item"><li><span><a href="#Understanding-Asynchronous-Operations-in-Aerospike" data-toc-modified-id="Understanding-Asynchronous-Operations-in-Aerospike-1"><span class="toc-item-num">1 </span>Understanding Asynchronous Operations in Aerospike</a></span><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1.1"><span class="toc-item-num">1.1 </span>Introduction</a></span></li><li><span><a href="#Prerequisites" data-toc-modified-id="Prerequisites-1.2"><span class="toc-item-num">1.2 </span>Prerequisites</a></span></li><li><span><a href="#Initialization" data-toc-modified-id="Initialization-1.3"><span class="toc-item-num">1.3 </span>Initialization</a></span><ul class="toc-item"><li><span><a href="#Ensure-database-is-running" data-toc-modified-id="Ensure-database-is-running-1.3.1"><span class="toc-item-num">1.3.1 </span>Ensure database is running</a></span></li><li><span><a href="#Download-and-install-additional-components." 
data-toc-modified-id="Download-and-install-additional-components.-1.3.2"><span class="toc-item-num">1.3.2 </span>Download and install additional components.</a></span></li><li><span><a href="#Constants-and-Convenience-Functions" data-toc-modified-id="Constants-and-Convenience-Functions-1.3.3"><span class="toc-item-num">1.3.3 </span>Constants and Convenience Functions</a></span></li></ul></li><li><span><a href="#Open-a-Terminal-Tab" data-toc-modified-id="Open-a-Terminal-Tab-1.4"><span class="toc-item-num">1.4 </span>Open a Terminal Tab</a></span></li></ul></li><li><span><a href="#Synchronous,-Asynchronous,-and-Background-Operations" data-toc-modified-id="Synchronous,-Asynchronous,-and-Background-Operations-2"><span class="toc-item-num">2 </span>Synchronous, Asynchronous, and Background Operations</a></span></li><li><span><a href="#Asynchronous-Operations-For-Better-Resource-Efficiency" data-toc-modified-id="Asynchronous-Operations-For-Better-Resource-Efficiency-3"><span class="toc-item-num">3 </span>Asynchronous Operations For Better Resource Efficiency</a></span></li><li><span><a href="#Supported-Asynchronous-Operations" data-toc-modified-id="Supported-Asynchronous-Operations-4"><span class="toc-item-num">4 </span>Supported Asynchronous Operations</a></span></li><li><span><a href="#Execution-Model" data-toc-modified-id="Execution-Model-5"><span class="toc-item-num">5 </span>Execution Model</a></span><ul class="toc-item"><li><span><a href="#Application-Call-Sequence" data-toc-modified-id="Application-Call-Sequence-5.1"><span class="toc-item-num">5.1 </span>Application Call Sequence</a></span></li></ul></li><li><span><a href="#Understanding-Event-Loops" data-toc-modified-id="Understanding-Event-Loops-6"><span class="toc-item-num">6 </span>Understanding Event Loops</a></span><ul class="toc-item"><li><span><a href="#Event-Loop-Variants:-Netty,-NIO,-EPOLL" data-toc-modified-id="Event-Loop-Variants:-Netty,-NIO,-EPOLL-6.1"><span class="toc-item-num">6.1 </span>Event Loop 
Variants: Netty, NIO, EPOLL</a></span></li></ul></li><li><span><a href="#Async-Framework" data-toc-modified-id="Async-Framework-7"><span class="toc-item-num">7 </span>Async Framework</a></span><ul class="toc-item"><li><span><a href="#Initialize-event-loops" data-toc-modified-id="Initialize-event-loops-7.1"><span class="toc-item-num">7.1 </span>Initialize event loops</a></span></li><li><span><a href="#Initialize-Client" data-toc-modified-id="Initialize-Client-7.2"><span class="toc-item-num">7.2 </span>Initialize Client</a></span></li><li><span><a href="#Initialize-event-loop-throttles-and-atomic-operation-count." data-toc-modified-id="Initialize-event-loop-throttles-and-atomic-operation-count.-7.3"><span class="toc-item-num">7.3 </span>Initialize event loop throttles and atomic operation count.</a></span></li><li><span><a href="#Define-Listener-and-Handlers" data-toc-modified-id="Define-Listener-and-Handlers-7.4"><span class="toc-item-num">7.4 </span>Define Listener and Handlers</a></span></li><li><span><a href="#Submit-Async-Requests-Using-Throttling" data-toc-modified-id="Submit-Async-Requests-Using-Throttling-7.5"><span class="toc-item-num">7.5 </span>Submit Async Requests Using Throttling</a></span></li><li><span><a href="#Closing" data-toc-modified-id="Closing-7.6"><span class="toc-item-num">7.6 </span>Closing</a></span></li></ul></li><li><span><a href="#Nested-and-Inline-Async-Operations" data-toc-modified-id="Nested-and-Inline-Async-Operations-8"><span class="toc-item-num">8 </span>Nested and Inline Async Operations</a></span></li><li><span><a href="#Misc-Examples" data-toc-modified-id="Misc-Examples-9"><span class="toc-item-num">9 </span>Misc Examples</a></span><ul class="toc-item"><li><span><a href="#Delay-Queue-Full-Error" data-toc-modified-id="Delay-Queue-Full-Error-9.1"><span class="toc-item-num">9.1 </span>Delay Queue Full Error</a></span></li></ul></li><li><span><a href="#Comparing-Different-Settings" 
data-toc-modified-id="Comparing-Different-Settings-10"><span class="toc-item-num">10 </span>Comparing Different Settings</a></span></li><li><span><a href="#Takeaways-and-Conclusion" data-toc-modified-id="Takeaways-and-Conclusion-11"><span class="toc-item-num">11 </span>Takeaways and Conclusion</a></span></li><li><span><a href="#Clean-up" data-toc-modified-id="Clean-up-12"><span class="toc-item-num">12 </span>Clean up</a></span></li><li><span><a href="#Further-Exploration-and-Resources" data-toc-modified-id="Further-Exploration-and-Resources-13"><span class="toc-item-num">13 </span>Further Exploration and Resources</a></span><ul class="toc-item"><li><span><a href="#Next-steps" data-toc-modified-id="Next-steps-13.1"><span class="toc-item-num">13.1 </span>Next steps</a></span></li></ul></li></ul></div>
// -
// # Understanding Asynchronous Operations in Aerospike
// This tutorial describes asynchronous operations in Aerospike: why they are used, the architecture, and how to program with async operations.
//
// This notebook requires the Aerospike Database running locally with Java kernel and Aerospike Java Client. To create a Docker container that satisfies the requirements and holds a copy of Aerospike notebooks, visit the [Aerospike Notebooks Repo](https://github.com/aerospike-examples/interactive-notebooks).
// ## Introduction
// In this notebook, we will see the benefits, design, and specifics of programming with asynchronous operations in Aerospike.
//
// Aerospike provides asynchronous APIs for many operations. We will describe the benefits of using async operations and key abstractions in the client related to async requests. After covering the theoretical ground, we will show how it all comes together with specific code examples.
//
// The notebook tutorial has two parts:
// - architecture and concepts, and
// - coding examples.
//
// The main topics include:
// - Execution models in Aerospike
// - Benefits of async
// - Key concepts
// - Framework for async programming
// - Coding examples
// ## Prerequisites
// This tutorial assumes familiarity with the following topics:
// - [Aerospike Notebooks - Readme and Tips](../readme_tips.ipynb)
// - [Hello World](hello_world.ipynb)
// ## Initialization
// ### Ensure database is running
// This notebook requires that Aerospike Database is running.
import io.github.spencerpark.ijava.IJava;
import io.github.spencerpark.jupyter.kernel.magic.common.Shell;
IJava.getKernelInstance().getMagics().registerMagics(Shell.class);
// %sh asd
// ### Download and install additional components.
// Install the Aerospike Java client and the Java Netty package, which is described later in the notebook.
// %%loadFromPOM
<dependencies>
<dependency>
<groupId>com.aerospike</groupId>
<artifactId>aerospike-client</artifactId>
<version>5.0.0</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
<version>4.1.53.Final</version>
<scope>compile</scope>
</dependency>
</dependencies>
// ### Constants and Convenience Functions
// We will use some constants and convenience functions throughout this tutorial, including the namespace "test" and set "async-ops".
// +
// Namespace and set used by every example in this notebook.
final String Namespace = "test";
final String Set = "async-ops";
// truncate data, close client and event loops - called multiple times to initialize with different options
// described in greater detail later
void Cleanup() {
    try {
        // Remove all records in the tutorial set (namespace "test", set "async-ops").
        client.truncate(null, Namespace, Set, null);
    }
    catch (AerospikeException e) {
        // ignore - best-effort cleanup; truncate may fail if the set does not exist yet
    }
    // Release client connections, then the event loops the client was using.
    client.close();
    eventLoops.close();
};
// -
// ## Open a Terminal Tab
// You may execute shell commands including Aerospike tools like [aql](https://docs.aerospike.com/docs/tools/aql/index.html) and [asadm](https://docs.aerospike.com/docs/tools/asadm/index.html) in the terminal tab. Open a terminal tab by selecting File->Open from the notebook menu, and then New->Terminal.
// # Synchronous, Asynchronous, and Background Operations
// An application uses the Aerospike client library (aka the client) to interact with Aerospike Database. The client sets up a connection to the appropriate server node and sends in a request for execution in one of the following modes:
//
// Synchronous: The request thread makes the request, waits for the response, and processes the response upon arrival.
//
// Asynchronous: The request thread submits one or more requests, and results are processed in one or more callback thread(s) as they arrive.
//
// Background: The request thread submits the request and operation (or task) is completed in the background. The submission returns immediately, while the actual operation executes separately. The application can check the completion status of the task, and after it is completed may examine the results in the database with one or more separate requests.
//
// Note, a background operation may be considered a special type of asynchronous operation, and it is applicable only for updates in Aerospike. By asynchronous operations we refer to only those that return results in a callback.
// # Asynchronous Operations For Better Resource Efficiency
// During the time a request is sent to the server and the result arrives (“request latency”), the client and application need not wait idly if a high throughput is the goal. A higher throughput can be achieved through concurrent requests.
//
// - Synchronous: The application can spawn multiple threads and process multiple requests in parallel, one per thread at a time.
// - Asynchronous: The application can process requests asynchronously by submitting them in parallel without waiting for the results. The results are processed as they arrive in a different “callback” thread. An async request uses a dedicated connection to the server.
// - Pipeline: Multiple requests could be sent over the same connection to the same server node, and their results received over the same connection. Thus there is greater sharing of threads and connections across multiple requests. Aerospike currently does not support pipeline processing.
//
// In many cases, asynchronous processing can be more resource efficient and can deliver better throughput than multi-threaded synchronous processing because threads have memory and CPU (context-switch) overhead and their number may be limited by the OS.
//
// On the other hand, the asynchronous model is more complex to program and debug. The application should make judicious use of synchronous, asynchronous, and background requests. The client can perform different type of commands in a single instance.
//
// It should be noted that background operations when appropriate would typically deliver superior throughput especially in a non-UDF invocation.
// # Supported Asynchronous Operations
// Most CRUD operations have the async variant.
//
// - Single record operations
// - add, append, delete, apply(udf), get, getHeader, operate, prepend, put, touch
// - Batch operations:
// - exists (array listener and sequence listener), get (batch list and batch sequence listener), get (record array and record sequence listener), getHeader
// - Query/scan: Callback handles a series of records, a single record at a time.
// - query, queryPartitions
// - scanAll, scanPartitions
// - Metadata: createIndex, dropIndex
// - Operational: info
//
// Please refer to the [API documentation](https://docs.aerospike.com/apidocs/java/com/aerospike/client/AerospikeClient.html) for details.
//
// # Execution Model
// The async methods take two additional arguments than their sync variants: the “event loops” and “listener (callback)”. See the code in [Async Framework](#Async-Framework) section below.
// - Event loops: An event-loop represents the loop of "submit a request" and "asynchronously process the result" for concurrent processing of events or requests. Multiple event loops are used to leverage multiple CPU cores.
// - Listener: The listener encapsulates the processing of results.
// - Listener types: Depending on the expected number of records in the result and whether they arrive at once or individually, different listener types are to be used such as a single record listener, a record array listener, or a record sequence listener.
// - Completion handlers: A single record or record array is processed with the success or failure handler. In a record sequence listener, each record is processed with a "record" handler, whereas the success handler is called to mark the end of the sequence.
// ## Application Call Sequence
// The application is responsible for spreading requests evenly across event loops as well as throttling the rate of requests if the request rate can exceed the client or server capacity. The call sequence involves these steps (see the code in [Async Framework](#Async-Framework) section below):
// - Initialize event loops.
// - Implement the listener with success and failure handlers.
// - Submit requests across event loops, throttling to stay below maximum outstanding requests limit.
// - Wait for all outstanding requests to finish.
// # Understanding Event Loops
// Let's look at the key concepts relating to event loops. As described above, an event loop represents concurrent submit-callback processing of requests. See the code in [Async Framework](#Async-Framework) section below.
//
// **Number of event loops**: In order to maximize parallelism of the client hardware, as many event loops are created as the number of cores dedicated for the Aerospike application. An event pool is aligned with a CPU core, not to a server node or a request type.
//
// **Concurrency level**: The maximum concurrency level in each event loop depends on the effective server throughput seen by the client, and in aggregate may not exceed it. A larger value would result in request timeouts and other failures.
//
// **Connection pools and event loops**: Connection pools are allocated on a per node basis, and are independent of event pools. When an async request needs to connect to a node, it uses a connection from the node’s connection pool only for the duration of the request and then releases it.
//
// **Connection pool size**: Concurrency across all loops must be supported by the number of connections in the connection pool. The connection pool per node should be set equal to or greater than the total number of outstanding requests across all event loops (because all requests may go to the same node in the extreme case).
//
// **Delay queue buffer**: To buffer a temporary mismatch in processing and submission rates, there is a delay queue buffer in front of an event loop where requests are held until an async request slot becomes available in the event loop. The queued request is automatically assigned to a slot and processed without involvement of the application.
//
// **Throttling**: The delay queue cannot buffer a long running mismatch in submission and processing speeds, however, and if the wait queue fills up, a request will not be accepted and the client will return “delay queue full” error. The application should throttle by keeping track of outstanding requests and issue a new request when an outstanding one finishes. If delay queue size is set to zero, throttling must also be handled in the application code.
// ## Event Loop Variants: Netty, NIO, EPOLL
// Both Netty and Direct NIO event loops are supported in Aerospike.
//
// [Netty](https://netty.io/) is an asynchronous event-driven network application framework for high-performance servers based on the Java Non-blocking IO ([NIO](https://en.wikipedia.org/wiki/Non-blocking_I/O_(Java))) package. [Epoll](https://en.wikipedia.org/wiki/Epoll) (event poll) is a Linux-specific construct that allows a process to monitor multiple file descriptors and get notifications when I/O is possible on them.
//
// Netty allows users to share their existing event loops with AerospikeClient which can improve performance. Netty event loops are also required when using TLS connections. However Netty is an optional external library dependency.
//
// Direct NIO event loops are lighter weight and slightly faster than Netty defaults when not sharing event loops. Direct NIO does not have an external library dependency.
//
// You should consider trade-offs in using the types of event loops - refer to the links provided for further details.
// # Async Framework
// Below we walk through the steps in setting up a typical async operation framework.
// ## Initialize event loops
// Initialize event loops. Allocate an event loop for each CPU core.
//
// Examine the code snippets below.
//
// - Initialize event policy. Select level of parallelism desired; cannot exceed server throughput.
// <pre>
// EventPolicy eventPolicy = new EventPolicy();
// final int commandsPerEventLoop = 50;
// eventPolicy.maxCommandsInProcess = commandsPerEventLoop;
// </pre>
// - Select delay queue buffer size in front of the event loop.
// <pre>
// maxCommandsInQueue = 50;
// eventPolicy.maxCommandsInQueue = maxCommandsInQueue;
// </pre>
// - Create event loops object.
// <pre>
// // here we use direct nio and 2 events loops
// numLoops = 2;
// EventLoops eventLoops = new NioEventLoops(eventPolicy, numLoops);
// </pre>
//
// In the following cell, the function InitializeEventLoops allows initialization of different types of event loops. The function will be called multiple times later in the notebook to experiment with different settings.
// +
import com.aerospike.client.async.EventPolicy;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.epoll.EpollEventLoopGroup;
import com.aerospike.client.async.NettyEventLoops;
import com.aerospike.client.async.EventLoops;
import com.aerospike.client.async.NioEventLoops;
enum EventLoopType{DIRECT_NIO, NETTY_NIO, NETTY_EPOLL};
// Builds an EventLoops instance of the requested flavor.
// Parameters: the loop implementation to use, the number of loops, the maximum
// concurrent commands per loop, and the delay-queue size buffering overflow
// submissions. Returns null (after printing an error) for an unknown type.
EventLoops InitializeEventLoops(EventLoopType eventLoopType, int numLoops, int commandsPerEventLoop,
    int maxCommandsInQueue) {
    // Shared policy: per-loop concurrency cap plus delay-queue buffer size.
    EventPolicy eventPolicy = new EventPolicy();
    eventPolicy.maxCommandsInProcess = commandsPerEventLoop;
    eventPolicy.maxCommandsInQueue = maxCommandsInQueue;
    if (eventLoopType == EventLoopType.DIRECT_NIO) {
        // Direct NIO: lightest weight, no external library dependency.
        return new NioEventLoops(eventPolicy, numLoops);
    }
    if (eventLoopType == EventLoopType.NETTY_NIO) {
        // Netty on top of Java NIO selectors.
        return new NettyEventLoops(eventPolicy, new NioEventLoopGroup(numLoops));
    }
    if (eventLoopType == EventLoopType.NETTY_EPOLL) {
        // Netty on the Linux-specific epoll transport.
        return new NettyEventLoops(eventPolicy, new EpollEventLoopGroup(numLoops));
    }
    System.out.println("Error: Invalid event loop type");
    return null;
}
// initialize event loops
// Two direct-NIO loops, each allowing 50 in-flight commands with a 50-entry delay queue.
final int NumLoops = 2;
final int CommandsPerEventLoop = 50;
final int DelayQueueSize = 50;
EventLoops eventLoops = InitializeEventLoops(EventLoopType.DIRECT_NIO, NumLoops, CommandsPerEventLoop, DelayQueueSize);
System.out.format("Event loops initialized with num-loops: %s, commands-per-event-loop: %s, delay-queue-size: %s.\n",
NumLoops, CommandsPerEventLoop, DelayQueueSize);;
// -
// ## Initialize Client
// Examine the code snippets below.
//
// - Initialize client policy with event loops.
// <pre>
// ClientPolicy clientPolicy = new ClientPolicy();
// clientPolicy.eventLoops = eventLoops;
// </pre>
// - Set total concurrent connections per node by multiplying concurrency level at event loop (maxCommandsInProcess) by the number of event loops.
// <pre>
// concurrentMax = commandsPerEventLoop * numLoops;
// </pre>
// - This is the max number of commands or requests per node if all requests go to one node. Adjust the default connection pool size of 300 if concurrentMax is larger.
// <pre>
// if (clientPolicy.maxConnsPerNode < concurrentMax) {
// clientPolicy.maxConnsPerNode = concurrentMax;
// }
// </pre>
// - Initialize the client with the client policy and seed hosts in cluster.
// <pre>
// Host[] hosts = Host.parseHosts("localhost", 3000);
// AerospikeClient client = new AerospikeClient(clientPolicy, hosts);
// </pre>
//
// In the following cell, the function InitializeClient allows initialization of the client with specified parameters.
// +
import com.aerospike.client.policy.ClientPolicy;
import com.aerospike.client.Host;
import com.aerospike.client.AerospikeClient;
// Builds an AerospikeClient wired to the given event loops, sizing the per-node
// connection pool for the worst case in which every outstanding async command
// across all loops targets a single node.
AerospikeClient InitializeClient(EventLoops eventLoops, int numLoops, int commandsPerEventLoop, Host[] hosts) {
    ClientPolicy clientPolicy = new ClientPolicy();
    clientPolicy.eventLoops = eventLoops;
    // Maximum concurrent commands the loops can have in flight at once.
    int concurrentMax = commandsPerEventLoop * numLoops;
    // Grow the per-node pool beyond its default only when the concurrency demands it.
    clientPolicy.maxConnsPerNode = Math.max(clientPolicy.maxConnsPerNode, concurrentMax);
    return new AerospikeClient(clientPolicy, hosts);
}
// initialize the client
// Seed host: the local server on the default port 3000.
Host[] hosts = Host.parseHosts("localhost", 3000);
AerospikeClient client = InitializeClient(eventLoops, NumLoops, CommandsPerEventLoop, hosts);
System.out.print("Client initialized.\n");
// -
// ## Initialize event loop throttles and atomic operation count.
// The event loop throttles object is initialized with the number of event loops and commands per event loop. It provides two methods "waitForSlot" and "addSlot" to manage concurrency for an event loop, both take an index parameter that identifies the event loop.
// <pre>
// throttles = new Throttles(numLoops, commandsPerEventLoop);
// </pre>
//
// The operation count is used to track the number of finished operations. Because multiple callback threads access and increment it concurrently, it is defined as an AtomicInteger, which has support for atomic operation get/increment operations.
// <pre>
// AtomicInteger asyncOpCount = new AtomicInteger();
// </pre>
// In the following cell, the function InitializeThrottles creates throttles for event loops with specified parameters.
// +
import com.aerospike.client.async.Throttles;
// Builds the per-event-loop throttles: numLoops throttles, each admitting at
// most commandsPerEventLoop concurrent slots.
Throttles InitializeThrottles(int numLoops, int commandsPerEventLoop) {
    return new Throttles(numLoops, commandsPerEventLoop);
}
// initialize event loop throttles
// One throttle per event loop, each sized to the loop's concurrency limit.
Throttles throttles = InitializeThrottles(NumLoops, CommandsPerEventLoop);
System.out.format("Throttles initialized for %s loops with %s concurrent operations per loop.\n",
NumLoops, CommandsPerEventLoop);
// initialize the atomic integer to keep track of async operations count
// AtomicInteger because multiple callback threads increment it concurrently.
import java.util.concurrent.atomic.AtomicInteger;
AtomicInteger asyncOpCount = new AtomicInteger();
System.out.format("Atomic operation count initialized.");;
// -
// ## Define Listener and Handlers
// Define the listener with success and failure handlers to process results. Below, we have MyWriteListener derived from WriteListener to process insertion of records that:
// - implements success and failure handlers
// - releases a slot in the event loop on success or failure for another insert to proceed
// throttles.addSlot(eventLoopIndex, 1);
// - signals completion through monitor on failure or when the write count reaches the expected final count
// monitor.notifyComplete();
// - prints progress every "progressFreq" records
// +
import com.aerospike.client.Key;
import com.aerospike.client.listener.WriteListener;
import com.aerospike.client.async.Monitor;
import com.aerospike.client.AerospikeException;
// write listener
// - implements success and failure handlers
// - releases a slot on success OR failure so another insert can proceed
// - signals completion through monitor on failure or when the write count reaches the expected final count
// - prints progress every "progressFreq" records
class MyWriteListener implements WriteListener {
    private final Key key;              // key of the record being written (for error reporting)
    private final int eventLoopIndex;   // which loop's throttle slot to release on completion
    private final int finalCount;       // total writes expected before signaling completion
    private Monitor monitor;            // signaled when all writes (or a failure) finish
    private final int progressFreq;     // print progress every N records; <= 0 disables
    public MyWriteListener(Key key, int eventLoopIndex, int finalCount, Monitor monitor, int progressFreq) {
        this.key = key;
        this.eventLoopIndex = eventLoopIndex;
        this.finalCount = finalCount;
        this.monitor = monitor;
        this.progressFreq = progressFreq;
    }
    // Write success callback.
    public void onSuccess(Key key) {
        // Write succeeded: return the throttle slot so the submitting thread can issue another put.
        throttles.addSlot(eventLoopIndex, 1);
        int currentCount = asyncOpCount.incrementAndGet();
        if ( progressFreq > 0 && currentCount % progressFreq == 0) {
            System.out.format("Inserted %s records.\n", currentCount);
        }
        if (currentCount == finalCount) {
            monitor.notifyComplete();
        }
    }
    // Error callback.
    public void onFailure(AerospikeException e) {
        System.out.format("Put failed: namespace=%s set=%s key=%s exception=%s\n",
            key.namespace, key.setName, key.userKey, e.getMessage());
        // Fix: release the slot on failure too (matching the inline listener used
        // later in this notebook); otherwise enough failed puts leak slots and
        // the submitting thread blocks forever in waitForSlot.
        throttles.addSlot(eventLoopIndex, 1);
        // Abort the wait on first failure - the tutorial keeps error handling simple.
        monitor.notifyComplete();
    }
}
System.out.print("Write listener defined.");
// -
// ## Submit Async Requests Using Throttling
// While submitting async requests it is important to keep below the planned concurrent capacity using throttling.
//
// The function InsertRecords below inserts the specified number of records asynchronously with id-\<index\> as the user-key and two integer fields bin1 and bin2. It keeps track of and returns the elapsed time.
//
// Throttling is achieved by waiting for an available slot in the event loop.
// <pre>
// if (throttles.waitForSlot(eventLoopIndex, 1)) {
// // submit async request
// }
// </pre>
//
// After submitting all requests, the main thread must wait for outstanding requests to complete before closing.
// <pre>
// monitor.waitTillComplete();
// </pre>
// +
import java.util.concurrent.TimeUnit;
import com.aerospike.client.Bin;
import com.aerospike.client.policy.WritePolicy;
import com.aerospike.client.async.EventLoop;
// Asynchronously inserts numRecords records keyed "id-<i>" with integer bins
// bin1 = i and bin2 = numRecords*10 + i, throttling submissions to each event
// loop's concurrency limit. Blocks until all writes complete (or one fails) and
// returns the elapsed wall-clock time in nanoseconds.
long InsertRecords(int numRecords, EventLoops eventLoops, Throttles throttles, int progressFreq) {
    long startTime = System.nanoTime();
    Monitor monitor = new Monitor();
    asyncOpCount.set(0);                   // listeners count completions from zero
    WritePolicy policy = new WritePolicy();
    for (int i = 0; i < numRecords; i++) {
        Key key = new Key(Namespace, Set, "id-"+i);
        Bin bin1 = new Bin("bin1", i);     // string literal suffices; new String(...) was redundant
        Bin bin2 = new Bin("bin2", numRecords*10+i);
        // Round-robin across event loops, then throttle: block until this loop
        // has a free async slot before submitting.
        EventLoop eventLoop = eventLoops.next();
        int eventLoopIndex = eventLoop.getIndex();
        if (throttles.waitForSlot(eventLoopIndex, 1)) {
            try {
                client.put(eventLoop, new MyWriteListener(key, eventLoopIndex, numRecords, monitor, progressFreq),
                    policy, key, bin1, bin2);
            }
            catch (Exception e) {
                // Submission failed before reaching the listener: report it (was
                // silently swallowed) and return the slot so capacity is not leaked.
                System.out.format("Put submission failed: key=%s exception=%s\n", key.userKey, e.getMessage());
                throttles.addSlot(eventLoopIndex, 1);
            }
        }
    }
    // Wait for all outstanding writes (or a failure) to signal completion.
    monitor.waitTillComplete();
    long endTime = System.nanoTime();
    return (endTime - startTime);
}
// Insert 100k records, printing progress every quarter of the way.
final int NumRecords = 100000;
long elapsedTime = InsertRecords(NumRecords, eventLoops, throttles, NumRecords/4);
// elapsedTime is in nanoseconds; dividing by 1e6 reports milliseconds.
System.out.format("Inserted %s records with %s event-loops and %s commands-per-loop in %s milliseconds.\n",
NumRecords, NumLoops, CommandsPerEventLoop, elapsedTime/1000000);;
// -
// ## Closing
// Both AerospikeClient and EventLoops should be closed before program shutdown. The latest client waits for pending async commands to finish before performing the actual close, so there is no need to externally track pending async commands. Earlier versions provide a waitTillComplete() call on the Monitor object to ensure async operations are completed. The Cleanup function implemented above truncates the database and closes the client and event-loops.
// truncates database and closes client and event-loops
Cleanup();
System.out.println("Removed data and closed client and event loops.");
// # Nested and Inline Async Operations
// It is possible to nest a series of async calls, one in the processing logic of another. Some simple examples of such cascaded calls are:
// - Retry the same operation in the failure handler
// - Issue an async read to validate an async write operation
// - Issue an async write to update a record retrieved from an async read operation.
//
// The following code illustrates a simplistic example of how each record retrieved from an async filtered scan is updated asynchronously by incrementing the value of bin2. Note the inline implementation of WriteListener. The scan filter selects records between bin1 values of 1 and 1000. Throttling and progress report are also present as described above.
// +
import com.aerospike.client.policy.ScanPolicy;
import com.aerospike.client.listener.RecordSequenceListener;
import com.aerospike.client.Record;
import com.aerospike.client.exp.Exp;
// Scan callback: for each record returned by the async scan, submits a nested
// async add that increments bin2 by 1. Demonstrates nesting one async call
// inside another listener, with throttling and periodic progress output.
class ScanRecordSequenceListener implements RecordSequenceListener {
    private EventLoops eventLoops;
    private Throttles throttles;
    private Monitor scanMonitor;                               // signaled when scan and all nested writes are done
    private AtomicInteger writeCount = new AtomicInteger();    // incremented from write-callback threads
    private int scanCount = 0;                                 // touched only by the scan callback thread
    private final int progressFreq;                            // print progress every N records; <= 0 disables
    private final WritePolicy writePolicy = new WritePolicy(); // default policy, reused for every add (was allocated per record)
    public ScanRecordSequenceListener(EventLoops eventLoops, Throttles throttles, Monitor scanMonitor,
            int progressFreq) {
        this.eventLoops = eventLoops;
        this.throttles = throttles;
        this.scanMonitor = scanMonitor;
        this.progressFreq = progressFreq;
    }
    // Called once per record in the scan result stream.
    public void onRecord(Key key, Record record) throws AerospikeException {
        ++scanCount;
        if ( progressFreq > 0 && scanCount % progressFreq == 0) {
            System.out.format("Scan returned %s records.\n", scanCount);
        }
        // submit async update operation with throttle
        EventLoop eventLoop = eventLoops.next();
        int eventLoopIndex = eventLoop.getIndex();
        if (throttles.waitForSlot(eventLoopIndex, 1)) { // throttle by waiting for an available slot
            try {
                Bin bin2 = new Bin("bin2", 1); // add() increments bin2 by 1 (string literal; new String(...) was redundant)
                client.add(eventLoop, new WriteListener() { // inline write listener
                    public void onSuccess(final Key key) {
                        // Write succeeded: free the slot and count the completion.
                        throttles.addSlot(eventLoopIndex, 1);
                        int currentCount = writeCount.incrementAndGet();
                        if ( progressFreq > 0 && currentCount % progressFreq == 0) {
                            System.out.format("Processed %s records.\n", currentCount);
                        }
                    }
                    public void onFailure(AerospikeException e) {
                        System.out.format("Put failed: namespace=%s set=%s key=%s exception=%s\n",
                            key.namespace, key.setName, key.userKey, e.getMessage());
                        // Failures also free the slot and count toward completion so
                        // onSuccess() below can tell when every write has finished.
                        throttles.addSlot(eventLoopIndex, 1);
                        int currentCount = writeCount.incrementAndGet();
                        if ( progressFreq > 0 && currentCount % progressFreq == 0) {
                            System.out.format("Processed %s records.\n", currentCount);
                        }
                    }
                },
                writePolicy, key, bin2);
            }
            catch (Exception e) {
                System.out.format("Error: exception in write listener - %s", e.getMessage());
            }
        }
    }
    // Called when the scan stream ends successfully.
    public void onSuccess() {
        if (scanCount != writeCount.get()) { // give the last write some time to finish
            try {
                // NOTE(review): crude fixed grace period; a count-based wait would be more robust.
                Thread.sleep(100);
            }
            catch(InterruptedException e) {
                System.out.format("Error: exception - %s", e);
            }
        }
        scanMonitor.notifyComplete();
    }
    // Called if the scan itself fails.
    public void onFailure(AerospikeException e) {
        System.out.format("Error: scan failed with exception - %s", e);
        scanMonitor.notifyComplete();
    }
}
// +
// cleanup prior state
Cleanup();
// initialize data, event loops and client
// Smaller per-loop concurrency (25) and no delay queue for this example.
int numRecords = 100000;
int numLoops = 2;
int commandsPerLoop = 25;
int delayQueueSize = 0;
eventLoops = InitializeEventLoops(EventLoopType.DIRECT_NIO, numLoops, commandsPerLoop, delayQueueSize);
client = InitializeClient(eventLoops, numLoops, commandsPerLoop, hosts);
throttles = InitializeThrottles(numLoops, commandsPerLoop);
InsertRecords(numRecords, eventLoops, throttles, 0);
System.out.format("Inserted %s records.\n", numRecords);
// The scan itself runs on a single event loop; its listener fans out updates.
EventLoop eventLoop = eventLoops.next();
Monitor scanMonitor = new Monitor();
int progressFreq = 100;
// issue async scan that in turn issues async update on each returned record
// Filter expression restricts the scan to records with 1 <= bin1 <= 1000.
ScanPolicy policy = new ScanPolicy();
policy.filterExp = Exp.build(
    Exp.and(
        Exp.le(Exp.intBin("bin1"), Exp.val(1000)),
        Exp.ge(Exp.intBin("bin1"), Exp.val(1))));
client.scanAll(eventLoop, new ScanRecordSequenceListener(eventLoops, throttles, scanMonitor, progressFreq),
    policy, Namespace, Set);
// Block until the listener signals that the scan and all nested updates finished.
scanMonitor.waitTillComplete();
System.out.format("Done: nested async scan and update");;
// -
// # Misc Examples
// ## Delay Queue Full Error
// If the delay queue fills up, a request will not be accepted and the client will return “delay queue full” error. Below we simulate this condition by having 25 slots and a delay queue of 20 in 2 event loops each (can handle total 90 outstanding requests) and issuing a hundred concurrent requests. The throttle is effectively turned off by a large setting for the number of requests to go through.
// +
// clean up the current state
Cleanup();
// initialize data, event loops and client
// 2 loops x (25 slots + 20 delay-queue entries) = 90 total capacity; we submit 100.
int numRecords = 100;
int numLoops = 2;
int commandsPerLoop = 25;
int delayQueueSize = 20;
int noThrottle = 10000; //effectively no throttle
eventLoops = InitializeEventLoops(EventLoopType.DIRECT_NIO, numLoops, commandsPerLoop, delayQueueSize);
client = InitializeClient(eventLoops, numLoops, commandsPerLoop, hosts);
throttles = InitializeThrottles(numLoops, noThrottle);
// attempt to insert records above the available slots and delay queue capacity
long elapsedTime = InsertRecords(numRecords, eventLoops, throttles, 0);
// Fix: use floating-point milliseconds. With only 100 records the run can finish
// in under 1 ms, making the integer division elapsedTime/1000000 zero and the
// original numRecords/(elapsedTime/1000000) throw ArithmeticException.
double elapsedMs = elapsedTime / 1e6;
System.out.format("%s ops/ms with event-loops: %s and commands-per-loop: %s.\n",
    numRecords/elapsedMs, numLoops, commandsPerLoop);;
// -
// # Comparing Different Settings
// The code below allows comparison of insert throughput with different parameters: event loops type, number of event loops, and concurrency level in each loop. It doesn't produce meaningful results in the default notebook container setting where the client and server are running in the same container. A meaningful comparison can be drawn by pointing to the desired server cluster and also adjusting the client environment.
// +
// Throughput with parameterized async insertion: sweep event-loop type, number
// of loops, and per-loop concurrency, re-initializing client state per combination.
int numRecords = 100000;
EventLoopType[] eventLoopOptions = {EventLoopType.DIRECT_NIO, EventLoopType.NETTY_NIO, EventLoopType.NETTY_EPOLL};
int[] numLoopsOptions = {2, 4, 8};
int[] commandsPerLoopOptions = {50, 100, 200};
for (EventLoopType eventLoopType: eventLoopOptions) {
    for (int numLoops: numLoopsOptions) {
        for (int commandsPerLoop: commandsPerLoopOptions) {
            // Fresh state per combination; delay queue disabled (throttling suffices).
            Cleanup();
            eventLoops = InitializeEventLoops(eventLoopType, numLoops, commandsPerLoop, 0);
            client = InitializeClient(eventLoops, numLoops, commandsPerLoop, hosts);
            throttles = InitializeThrottles(numLoops, commandsPerLoop);
            long elapsedTime = InsertRecords(numRecords, eventLoops, throttles, 0);
            // Fix: floating-point milliseconds avoid ArithmeticException (divide by
            // zero) when a run completes in under one millisecond.
            double elapsedMs = elapsedTime / 1e6;
            System.out.format("%s ops/ms with %s %s event-loops and %s commands-per-loop.\n",
                numRecords/elapsedMs, numLoops, eventLoopType, commandsPerLoop);
        }
    }
}
System.out.println("Done.");;
// -
// # Takeaways and Conclusion
// The tutorial described the architecture of and key concepts in asynchronous operations in Aerospike client. It presented the programming framework in which async requests can be submitted and handled. It illustrated with code how event loops, throttling, inline async calls are implemented. The trade-offs that a developer needs to make for which execution modes to employ - synchronous, asynchronous, or background - involve multiple factors including the nature of operations, client and server setup, throughput needs, and programming complexity.
// # Clean up
// Remove tutorial data and close connection.
// Truncate the tutorial set and release client/event-loop resources.
Cleanup();
System.out.println("Removed tutorial data and closed server connection.");
// # Further Exploration and Resources
// Here are some links for further exploration
//
// Resources
// - Related notebooks
// - [Implementing SQL Operations: SELECT](sql_select.ipynb),
// - [Implementing SQL Operations: Aggregates - Part 1](sql_aggregates_1.ipynb) and [Part 2](sql_aggregates_2.ipynb).
// - [Implementing SQL Operations: CREATE, UPDATE, DELETE](sql_updates.ipynb)
// - [Working with Lists](java-working_with_lists.ipynb)
// - [Working with Maps](java-working_with_maps.ipynb)
// - Aerospike Developer Hub
// - [Java Developers Resources](https://developer.aerospike.com/java-developers)
// - Github repos
// - [Java code examples](https://github.com/aerospike/aerospike-client-java/tree/master/examples/src/com/aerospike/examples)
// - [Reactive programming examples for the Java client](https://github.com/aerospike/aerospike-client-java-reactive)
// - Documentation
// - [Java Client](https://www.aerospike.com/docs/client/java/index.html)
// - [Java API Reference](https://www.aerospike.com/apidocs/java/)
// - [Aerospike Documentation](https://docs.aerospike.com/docs/)
// - Blog
// - [Simple Web Application Using Java, Spring Boot, Aerospike and Docker](https://medium.com/aerospike-developer-blog/simple-web-application-using-java-spring-boot-aerospike-database-and-docker-ad13795e0089)
// ## Next steps
//
// Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to run additional Aerospike notebooks. To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open in the notebook menu, and select Upload.
| notebooks/java/async_ops.ipynb |