text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
# Import libraries
import os
import ee
import geemap
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyleaflet import WidgetControl
# Create an interactive map centered on southeastern Brazil
Map = geemap.Map(center=[-23.36, -46.36], zoom=5, add_google_map=True)
Map
# Shared ipywidgets style: render full-length descriptions
style = {'description_width': 'initial'}
# Normalized-difference index options (labels in Portuguese)
nd_options =["Índice de Vegetação por Diferença Normalizada (NDVI)",
    "Índice da Água por Diferença Normalizada (NDWI)",
    "Índice da Água por Diferença Normalizada Melhorado (MNDWI)",
    "Índice do Solo por Diferença Normalizada (NDSI)"]
# First band of the normalized difference
first_band = widgets.Dropdown(
    description='1ª banda:',
    options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],
    value='Green',
    style=style
)
# Second band of the normalized difference
second_band = widgets.Dropdown(
    description='2ª banda:',
    options=['Blue', 'Green','Red','NIR', 'SWIR1', 'SWIR2'],
    value='SWIR1',
    style=style
)
# Output panel anchored to the bottom-right corner of the map
output_widget = widgets.Output(layout={'border': '4px solid black'})
output_control = WidgetControl(widget=output_widget, position='bottomright')
Map.add_control(output_control)
# Toggle: use a user-drawn area of interest
aoi_widget = widgets.Checkbox(
    value=True,
    description='Área de interesse',
    style=style
)
# Toggle: export the chart data as CSV after the analysis
download_widget = widgets.Checkbox(
    value=False,
    description='Download dos dados do gráfico',
    style=style
)
def aoi_change(change):
    # Reset the map when the AOI checkbox toggles: keep only the first
    # 4 (base) layers and clear any drawn geometry and the output panel.
    Map.layers = Map.layers[:4]
    Map.user_roi = None
    Map.user_rois = None
    Map.draw_count = 0
    output_widget.clear_output()
aoi_widget.observe(aoi_change, names='value')
# RGB band combination used to display the selected Landsat composite
band_combo = widgets.Dropdown(
    description='Band combo:',
    options=['Red/Green/Blue', 'NIR/Red/Green', 'SWIR2/SWIR1/NIR', 'NIR/SWIR1/Red','SWIR2/NIR/Red',
        'SWIR2/SWIR1/Red', 'SWIR1/NIR/Blue', 'NIR/SWIR1/Blue', 'SWIR2/NIR/Green', 'SWIR1/NIR/Red'],
    value='NIR/Red/Green',
    style=style
)
# Year of the annual Landsat composite to display (1984-2020)
year_widget = widgets.IntSlider(min=1984, max=2020, value=2010, description='Selecionar ano:', width=400, style=style)
# Apply fmask? (removes cloud, shadow and snow pixels)
fmask_widget = widgets.Checkbox(
    value=True,
    description='Aplicar fmask?(remove nuvem, sombra e neve)',
    style=style,
    layout = {'width':'2px'}
)
# Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.
nd_indices = widgets.Dropdown(options=nd_options, value=nd_options[0], description='Índices:', style=style)
# Threshold applied to the normalized-difference image (keep pixels > threshold)
nd_threshold = widgets.FloatSlider(
    value=0,
    min=-1,
    max=1,
    step=0.01,
    description='Threshold:',
    orientation='horizontal',
    style=style
)
# Color used to paint pixels above the threshold
nd_color = widgets.ColorPicker(
    concise=False,
    description='Color:',
    value='blue',
    style=style
)
# Band presets for each normalized-difference index; the 'Customized'
# entry clears both dropdowns. NOTE(review): 'Customized' is not in
# nd_options, so that branch appears unreachable — confirm intent.
ND_BAND_PRESETS = {
    'Índice de Vegetação por Diferença Normalizada (NDVI)': ('NIR', 'Red'),
    'Índice da Água por Diferença Normalizada (NDWI)': ('NIR', 'SWIR1'),
    'Índice da Água por Diferença Normalizada Melhorado (MNDWI)': ('Green', 'SWIR1'),
    'Índice do Solo por Diferença Normalizada (NDSI)': ('SWIR1', 'NIR'),
    'Customized': (None, None),
}

def nd_index_change(change):
    """Sync the two band dropdowns with the selected ND index preset."""
    preset = ND_BAND_PRESETS.get(nd_indices.value)
    if preset is not None:
        first_band.value, second_band.value = preset

nd_indices.observe(nd_index_change, names='value')
# Button that triggers the analysis
submit = widgets.Button(
    description='Analisar',
    button_style='primary',
    tooltip='Clique aqui',
    style=style
)
# Assemble the full control panel (three rows of controls + the button)
full_widget = widgets.VBox([
    widgets.HBox([nd_indices, first_band, second_band]),
    widgets.HBox([band_combo, year_widget, fmask_widget]),
    widgets.HBox([aoi_widget, nd_threshold, nd_color, download_widget]),
    submit
])
full_widget
# Click event handler
def submit_clicked(b):
    """Run the analysis when 'Analisar' is clicked.

    Builds annual Landsat composites (1984-2020) over the drawn AOI,
    thresholds the chosen normalized-difference index, maps the result
    for the selected year, plots the yearly area trend with bqplot and
    optionally exports the chart data as CSV.
    """
    with output_widget:
        output_widget.clear_output()
        print('Computing...')
        Map.default_style = {'cursor': 'wait'}
        try:
            # Snapshot the current widget state
            band1 = first_band.value
            band2 = second_band.value
            selected_year = year_widget.value
            threshold = nd_threshold.value
            bands = band_combo.value.split('/')
            apply_fmask = fmask_widget.value
            palette = nd_color.value
            use_aoi = aoi_widget.value
            download = download_widget.value
            if use_aoi:
                if Map.user_roi is not None:
                    roi = Map.user_roi
                    layer_name = 'User drawn AOI'
                    geom = roi
                else:
                    output_widget.clear_output()
                    print('No user AOI could be found.')
                    return
            # NOTE(review): if 'Área de interesse' is unchecked, roi/geom are
            # never assigned and the code below raises NameError — confirm
            # whether a default AOI was intended.
            # Keep only the base layers, then outline the AOI in red
            Map.layers = Map.layers[:4]
            Map.addLayer(ee.Image().paint(geom, 0, 2), {'palette': 'red'}, layer_name)
            # One composite per year over the AOI
            images = geemap.landsat_timeseries(roi=roi, start_year=1984, end_year=2020, start_date='01-01', end_date='12-31', apply_fmask=apply_fmask)
            # Normalized difference per year, then binary mask above threshold
            nd_images = images.map(lambda img: img.normalizedDifference([band1, band2]))
            result_images = nd_images.map(lambda img: img.gt(threshold))
            # Composite and mask for the selected year (collection ordered by year)
            selected_image = ee.Image(images.toList(images.size()).get(selected_year - 1984))
            selected_result_image = ee.Image(result_images.toList(result_images.size()).get(selected_year - 1984)).selfMask()
            vis_params = {
                'bands': bands,
                'min': 0,
                'max': 3000
            }
            Map.addLayer(selected_image, vis_params, 'Landsat ' + str(selected_year))
            Map.addLayer(selected_result_image, {'palette': palette}, 'Result ' + str(selected_year))
            def cal_area(img):
                # Area (ha) of pixels above the threshold: pixel area m^2 / 1e4
                pixel_area = img.multiply(ee.Image.pixelArea()).divide(1e4)
                img_area = pixel_area.reduceRegion(**{
                    'geometry': geom,
                    'reducer': ee.Reducer.sum(),
                    'scale': 1000,
                    'maxPixels': 1e12,
                    'bestEffort': True
                })
                return img.set({'area': img_area})
            areas = result_images.map(cal_area)
            stats = areas.aggregate_array('area').getInfo()
            # Yearly area series; 'nd' is the band name normalizedDifference emits
            x = list(range(1984, 2021))
            y = [item.get('nd') for item in stats]
            # Plot the temporal trend (bqplot pyplot, not matplotlib)
            fig = plt.figure(1)
            fig.layout.height = '270px'
            plt.clear()
            plt.plot(x, y)
            plt.title('Temporal trend (1984-2020)')
            plt.xlabel('Year')
            plt.ylabel('Area (ha)')
            output_widget.clear_output()
            plt.show()
            if download:
                # Write the chart data to ~/Downloads and show a download link
                out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
                out_name = 'chart_' + geemap.random_string() + '.csv'
                out_csv = os.path.join(out_dir, out_name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                with open(out_csv, 'w') as f:
                    f.write('year, area (ha)\n')
                    for index, item in enumerate(x):
                        line = '{},{:.2f}\n'.format(item, y[index])
                        f.write(line)
                link = geemap.create_download_link(
                    out_csv, title="Click here to download the chart data: ")
                display(link)
        except Exception as e:
            print(e)
            print('An error occurred during computation.')
        Map.default_style = {'cursor': 'default'}
submit.on_click(submit_clicked)
```
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import os
import bidi.algorithm
import arabic_reshaper
import matplotlib.pyplot as plt
# Folder with the fiqh/Quran citation-network exports
fpath = '/media/sf_VBox_Shared/Arabic/Analyses/Fiqh_final2/quotes'
# Edge list (book -> verse) and node tables
links_df = pd.read_csv(os.path.join(fpath, 'fiqh_quran_links_v2.csv'))
nodes_aya_df = pd.read_csv(os.path.join(fpath, 'fiqh_quran_aya_nodes_v2.csv'))
nodes_books_df = pd.read_csv(os.path.join(fpath, 'fiqh_quran_book_nodes.csv'))
quotes_df = pd.read_csv(os.path.join(fpath, 'quran_quotes.csv'))
# Join links with verse metadata, then with book metadata
merged_df = links_df.merge(nodes_aya_df, left_on='Target', right_on='id')[['Source', 'Weight', 'sura_id', 'aya_id', 'sura_name', 'sura_arabic_name', 'Label']]
merged_df = merged_df.rename({'Label': 'aya_label'}, axis=1)
merged_df = merged_df.merge(nodes_books_df, left_on='Source', right_on='id')
# Drop join keys and columns duplicated by the merges
merged_df = merged_df.drop(['Source', 'id', 'Type', 'Group', 'Label'], axis=1)
merged_df.to_csv(os.path.join(fpath, 'quotes_merged_v2.csv'), index=False)
def reshape_arabic(text):
    """Return *text* reshaped and reordered for right-to-left Arabic display."""
    reshaped = arabic_reshaper.reshape(text)
    return bidi.algorithm.get_display(reshaped)
# Nr of quotes per book, sorted per school
count_per_book = merged_df.groupby(['BookURI', 'BookSUBJ'])['Weight'].sum().unstack()
barplot = count_per_book.sort_values(list(count_per_book.columns), ascending=False).plot(kind='bar', stacked=True, figsize=(15,8))
# Reshape the Arabic legend labels so they render right-to-left
leg = barplot.axes.get_legend()
for t in leg.get_texts():
    t.set_text(reshape_arabic(t.get_text()))
    t.set_fontsize(15)
plt.title('Nr of quotes per book, sorted by school')
plt.show()
# Relative nr of quotes per book (divided by length of book), sorted per school
nr_tokens_per_book = merged_df.groupby(['BookURI', 'BookSUBJ'])['Number_of_tokens'].min()
rel_count_per_book = merged_df.groupby(['BookURI', 'BookSUBJ'])['Weight'].sum() / nr_tokens_per_book
rel_count_per_book = rel_count_per_book.unstack()
barplot = rel_count_per_book.sort_values(list(rel_count_per_book.columns), ascending=False).plot(kind='bar', stacked=True, figsize=(15,8))
leg = barplot.axes.get_legend()
for t in leg.get_texts():
    t.set_text(reshape_arabic(t.get_text()))
    t.set_fontsize(15)
plt.title('Relative nr of quotes per book')
plt.show()
# Same relative counts, rows ordered by school then by book URI
rel_count_per_book = merged_df.groupby(['BookURI', 'BookSUBJ'])['Weight'].sum() / nr_tokens_per_book
rel_count_per_book = rel_count_per_book.reset_index().sort_values(['BookSUBJ', 'BookURI'])
rel_count_per_book = rel_count_per_book.pivot('BookURI', 'BookSUBJ', 0).reindex(rel_count_per_book.BookURI)
barplot = rel_count_per_book.plot(kind='bar', stacked=True, figsize=(15,8))
leg = barplot.axes.get_legend()
for t in leg.get_texts():
    t.set_text(reshape_arabic(t.get_text()))
    t.set_fontsize(15)
plt.title('Relative number of quotes per book (sorted by school/year)')
plt.show()
import matplotlib
# Quotes per school normalized by the school's total token count
rel_count_per_school = merged_df.groupby('BookSUBJ')['Weight'].sum() / nr_tokens_per_book.groupby('BookSUBJ').sum()
#barplot = rel_count_per_school.plot(kind='bar', figsize=(20,10), colormap=)
plt.subplots(figsize=(20,10))
barplot = plt.bar(range(len(rel_count_per_school)), rel_count_per_school.values, color='grey', width=0.3)
#plt.x
plt.xticks(range(len(rel_count_per_school)), labels=[reshape_arabic(t) for t in rel_count_per_school.index], fontsize=15) #barplot.xaxis.get_ticklabels()])
#plt.xticks()
plt.title('Relative nr of quotes per school')
plt.show()
rel_count_per_school.to_csv(os.path.join(fpath, 'counts_pers_school.csv'))
import re
# Extract the leading digits of the Century string (e.g. '3 AH' -> 3)
merged_df['Century_num'] = merged_df.Century.apply(lambda s: int(re.match('^[0-9]*', s).group(0)))
count_per_century_subj = merged_df.groupby(['Century_num', 'BookSUBJ'])['Weight'].sum()
# Total number of quotes per century, per school
barplot = count_per_century_subj.unstack().plot(kind='bar', stacked=True)
leg = barplot.axes.get_legend()
for t in leg.get_texts():
    t.set_text(reshape_arabic(t.get_text()))
    t.set_fontsize(15)
plt.title('Number of quotes per century (by school)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.show()
# Token count per book keyed by century (min() collapses duplicate rows)
nrtokens_per_century_book = merged_df.groupby(['BookURI', 'Century_num'])['Number_of_tokens'].min()
# Relative number of quotes per century
count_per_century = count_per_century_subj.groupby('Century_num').sum()
rel_count_per_century = count_per_century / nrtokens_per_century_book.groupby(['Century_num']).sum()
barplot = rel_count_per_century.plot(kind='bar', color='grey')
plt.title('Relative number of quotes per century')
plt.show()
```
## Which verses are cited most often?
Which verses have the most citations in total? And by how many books are they cited?
```
# Per-verse stats: number of citing books and total citation weight
counts_per_verse = pd.DataFrame({'nr_books': merged_df.aya_label.value_counts(),
                                 'nr_citations': merged_df.groupby('aya_label')['Weight'].sum()})
counts_per_verse.sort_values('nr_citations', ascending=False).head(20)
# What are the verses cited by most books?
print('What are the verses cited by most books?')
counts_per_verse.sort_values('nr_books', ascending=False).head(10)
```
## What are the most cited verses per school?
And by how many books are they cited?
```
from IPython.display import display
# What are the verses cited by most books, per school?
for school in merged_df.BookSUBJ.unique():
    print(school)
    df_sub = merged_df[merged_df.BookSUBJ==school]
    # Per-verse stats within this school, including the set of citing books
    counts_per_verse_sub = pd.DataFrame({'nr_books': df_sub.aya_label.value_counts(),
                                         'nr_citations': df_sub.groupby('aya_label')['Weight'].sum(),
                                         'books': df_sub.groupby('aya_label')['BookURI'].aggregate(set)})
    print('Total nr of books in this school: ', df_sub.BookURI.nunique())
    display(counts_per_verse_sub.sort_values('nr_citations', ascending=False).head(20))
    print('\n')
```
## NLP approaches
```
# Book x verse adjacency matrix of citation weights
adj_df = merged_df.pivot('BookURI', 'aya_label', 'Weight').fillna(0)
from sklearn.feature_extraction.text import TfidfTransformer
# Treat verses as "terms" and books as "documents" for TF-IDF weighting
tfidf_transformer = TfidfTransformer()
tfidf = tfidf_transformer.fit_transform(adj_df.values)
print(tfidf.shape)
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise book-to-book similarity in TF-IDF space
similarities = cosine_similarity(tfidf, tfidf)
similarities.shape
plt.hist(similarities.flatten());
import numpy as np
similarities_df = pd.DataFrame(similarities, columns=adj_df.index, index=adj_df.index)
# Zero the diagonal (self-similarity) before inspecting the distribution
np.fill_diagonal(similarities_df.values, 0)
plt.hist(similarities_df.values.flatten());
from sklearn.metrics.pairwise import cosine_distances
from sklearn.manifold import TSNE
# 2-D t-SNE embedding of books from precomputed cosine distances
dist = cosine_distances(tfidf, tfidf)
X_embedded = TSNE(n_components=2, metric='precomputed').fit_transform(dist)
print(X_embedded.shape)
df_books_embedded = pd.DataFrame(X_embedded, index=adj_df.index, columns=['x', 'y'])
df_books_embedded.head()
df_books_embedded = df_books_embedded.merge(nodes_books_df, right_on='BookURI', left_on='BookURI')
# Scatter the embedding, one color per school, labeled with the book URI
fig, ax = plt.subplots(figsize=(15,15))
for subj, group in df_books_embedded.groupby('BookSUBJ'):
    ax.plot(group.x, group.y, label=reshape_arabic(subj), marker='o', linestyle='', markersize=10)
    for x,y,s in zip(group.x, group.y, group.BookURI):
        ax.text(x-10, y, s)
ax.legend()
plt.show()
```
## Network analysis
```
import networkx as nx
# nw_similarities = nx.from_pandas_adjacency(similarities_df)
# list(nw_similarities.edges(data=True))[:10]
# nx.to_pandas_edgelist(nw_similarities).to_csv(os.path.join(fpath, 'links_tfidf_books.csv'), index=False)
# Edge "distance" = inverse weight, so shortest-path centralities treat
# heavily-citing pairs as close.
links_df['Distance'] = 1.0/links_df.Weight
network = nx.from_pandas_edgelist(links_df, source='Source', target='Target', edge_attr=['Weight', 'Distance'])
# The book -> verse network should be bipartite
nx.algorithms.is_bipartite(network)
book_ids = links_df.Source.unique()
verse_ids = links_df.Target.unique()
# Project onto books (edges between books citing a common verse) and verses
network_books = nx.bipartite.weighted_projected_graph(network, book_ids)
network_verses = nx.bipartite.weighted_projected_graph(network, verse_ids)
print(network.number_of_edges(), network.number_of_nodes())
print(network_books.number_of_edges(), network_books.number_of_nodes())
print(network_verses.number_of_edges(), network_verses.number_of_nodes())
# Add a 'distance' (= 1/weight) attribute to the projected book graph
weights = nx.get_edge_attributes(network_books, 'weight')
nx.set_edge_attributes(network_books,
                       {k: 1/weights[k] for k in weights},
                       'distance')
closeness_centrality_books = nx.closeness_centrality(network_books, distance='distance')
# Bug fix: verse closeness was commented out, but nodes_aya_df's
# 'closeness_centrality' column is used below (verse_closeness), which
# raised a KeyError. Compute it here.
closeness_centrality_verses = nx.closeness_centrality(network_verses)
betweenness_centrality_books = nx.betweenness_centrality(network_books, weight='distance')
#betweenness_centrality_verses = nx.betweenness_centrality(network_verses)
# Attach the book centralities to the node table
nodes_books_df = nodes_books_df.set_index('id')
nodes_books_df['closeness_centrality'] = pd.Series(closeness_centrality_books)
nodes_books_df['betweenness_centrality'] = pd.Series(betweenness_centrality_books)
nodes_books_df = nodes_books_df.reset_index()
# Attach the verse centralities to the node table
nodes_aya_df = nodes_aya_df.set_index('id')
nodes_aya_df['closeness_centrality'] = pd.Series(closeness_centrality_verses)
# Bug fix: betweenness_centrality_verses is never computed (its line is
# commented out above as too expensive), so this assignment raised a
# NameError. Keep it disabled until the verse betweenness is computed.
#nodes_aya_df['betweenness_centrality'] = pd.Series(betweenness_centrality_verses)
nodes_aya_df = nodes_aya_df.reset_index()
nodes_aya_df.head()
# Rank books by closeness and betweenness
book_closeness = nodes_books_df.set_index('BookURI')['closeness_centrality']
book_closeness.sort_values(ascending=False).plot(kind='bar', figsize=(15,5))
book_closeness.sort_values(ascending=False)
book_betweenness = nodes_books_df.set_index('BookURI')['betweenness_centrality']
book_betweenness.sort_values(ascending=False).plot(kind='bar', figsize=(15,5))
book_betweenness.sort_values(ascending=False)
# Rank verses by closeness
verse_closeness = nodes_aya_df.set_index('Label')['closeness_centrality']
verse_closeness.sort_values(ascending=False).head(30).plot(kind='bar', figsize=(15,5))
# Get projected graph with Jaccard/overlap edge weights
book_ids = links_df.Source.unique()
nw_books_jaccard = nx.algorithms.bipartite.overlap_weighted_projected_graph(network, book_ids)
# Bug fix: the following lines referenced undefined names
# (nw_books_overlap / nw_books); use the graph created just above.
nw_books_jaccard
list(nw_books_jaccard.edges(data=True))[:10]
nx.to_pandas_edgelist(nw_books_jaccard).to_csv(os.path.join(fpath, 'links_projected_books.csv'), index=False)
```
| github_jupyter |
# Multi-Fidelity
<div class="btn btn-notebook" role="button">
<img src="../_static/images/colab_logo_32px.png"> [Run in Google Colab](https://colab.research.google.com/drive/1Cc9TVY_Tl_boVzZDNisQnqe6Qx78svqe?usp=sharing)
</div>
<div class="btn btn-notebook" role="button">
<img src="../_static/images/github_logo_32px.png"> [View on GitHub](https://github.com/adapt-python/notebooks/blob/d0364973c642ea4880756cef4e9f2ee8bb5e8495/Multi_fidelity.ipynb)
</div>
The following example is a 1D regression multi-fidelity issue. Blue points are low fidelity observations and orange points are high fidelity observations. The goal is to use both datasets to learn the task on the [0, 1] interval.
To tackle this challenge, we use here the parameter-based method: [RegularTransferNN](#RegularTransferNN)
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from sklearn.metrics import mean_absolute_error, mean_squared_error
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD, RMSprop, Adagrad
from tensorflow.keras.layers import Dense, Input, Dropout, Conv2D, MaxPooling2D, Flatten, Reshape, GaussianNoise, BatchNormalization
from tensorflow.keras.constraints import MinMaxNorm
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import clone_model
from adapt.parameter_based import RegularTransferNN
```
## Setup
```
np.random.seed(0)
# Low-fidelity source data: 200 noisy samples over [0, 1]
Xs = np.linspace(0, 1, 200)
ys = (1 - Xs**2) * np.sin(2 * 2 * np.pi * Xs) - Xs + 0.1 * np.random.randn(len(Xs))
# High-fidelity target data: noise-free, shifted down by 1.5,
# available only on the first half of the interval
Xt = Xs[:100]
yt = (1 - Xt**2) * np.sin(2 * 2 * np.pi * Xt) - Xt - 1.5
# Ground truth of the high-fidelity function over the full interval
gt = (1 - Xs**2) * np.sin(2 * 2 * np.pi * Xs) - Xs - 1.5
plt.figure(figsize=(10,6))
plt.plot(Xs, ys, '.', label="low fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xt, yt, '.', label="high fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xs, gt, c="black", alpha=0.7, ls="--", label="Ground truth")
plt.legend(fontsize=14)
plt.xlabel("X", fontsize=16)
plt.ylabel("y = f(X)", fontsize=16)
plt.show()
```
## Network
```
np.random.seed(0)
tf.random.set_seed(0)
# Base network: two hidden layers of 100 ReLU units, scalar in/out
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(1,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(1))
model.compile(optimizer=Adam(0.001), loss='mean_squared_error')
```
## Low fidelity only
```
np.random.seed(0)
tf.random.set_seed(0)
# Train a fresh copy of the base network on the low-fidelity data only
model_low = clone_model(model)
model_low.compile(optimizer=Adam(0.001), loss='mean_squared_error')
model_low.fit(Xs, ys, epochs=800, batch_size=34, verbose=0);
# Score against the ground truth over the full interval
yp = model_low.predict(Xs.reshape(-1,1))
score = mean_absolute_error(gt.ravel(), yp.ravel())
plt.figure(figsize=(10,6))
plt.plot(Xs, ys, '.', label="low fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xt, yt, '.', label="high fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xs, gt, c="black", alpha=0.7, ls="--", label="Ground truth")
plt.plot(Xs, yp, c="red", alpha=0.9, lw=3, label="Predictions")
plt.legend(fontsize=14)
plt.xlabel("X", fontsize=16)
plt.ylabel("y = f(X)", fontsize=16)
plt.title("Low Fidelity Only -- MAE = %.3f"%score, fontsize=18)
plt.show()
```
## High fidelity only
```
np.random.seed(0)
tf.random.set_seed(0)
# Train a fresh copy of the base network on the high-fidelity data only
model_high = clone_model(model)
model_high.compile(optimizer=Adam(0.001), loss='mean_squared_error')
model_high.fit(Xt, yt, epochs=800, batch_size=34, verbose=0);
# Score against the ground truth over the full interval
yp = model_high.predict(Xs.reshape(-1,1))
score = mean_absolute_error(gt.ravel(), yp.ravel())
plt.figure(figsize=(10,6))
plt.plot(Xs, ys, '.', label="low fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xt, yt, '.', label="high fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xs, gt, c="black", alpha=0.7, ls="--", label="Ground truth")
plt.plot(Xs, yp, c="red", alpha=0.9, lw=3, label="Predictions")
plt.legend(fontsize=14)
plt.xlabel("X", fontsize=16)
plt.ylabel("y = f(X)", fontsize=16)
# Bug fix: this cell trains on high-fidelity data, but the title was
# copy-pasted from the low-fidelity cell above.
plt.title("High Fidelity Only -- MAE = %.3f"%score, fontsize=18)
plt.show()
```
## [RegularTransferNN](https://adapt-python.github.io/adapt/generated/adapt.parameter_based.RegularTransferNN.html)
```
# Fine-tune the low-fidelity network on the high-fidelity data, with an
# L2 penalty (lambdas) pulling the weights toward the pre-trained ones.
model_reg = RegularTransferNN(model_low, lambdas=1000., random_state=1, optimizer=Adam(0.0001))
model_reg.fit(Xt.reshape(-1,1), yt, epochs=1200, batch_size=34, verbose=0);
# Score against the ground truth over the full interval
yp = model_reg.predict(Xs.reshape(-1,1))
score = mean_absolute_error(gt.ravel(), yp.ravel())
plt.figure(figsize=(10,6))
plt.plot(Xs, ys, '.', label="low fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xt, yt, '.', label="high fidelity", ms=15, alpha=0.9, markeredgecolor="black")
plt.plot(Xs, gt, c="black", alpha=0.7, ls="--", label="Ground truth")
plt.plot(Xs, yp, c="red", alpha=0.9, lw=3, label="Predictions")
plt.legend(fontsize=14)
plt.xlabel("X", fontsize=16)
plt.ylabel("y = f(X)", fontsize=16)
# Bug fix: this cell shows the RegularTransferNN result, but the title
# was copy-pasted from the low-fidelity cell.
plt.title("RegularTransferNN -- MAE = %.3f"%score, fontsize=18)
plt.show()
```
| github_jupyter |
## **Semana de Data Science**
- Minerando Dados
## Aula 01
### Conhecendo a base de dados
Monta o drive
```
# Mount Google Drive in Colab (prompts for authorization)
from google.colab import drive
drive.mount('/content/drive')
```
Importando as bibliotecas básicas
```
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Carregando a Base de Dados
```
# load the Boston housing dataset (the original comment said "london")
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2 — pin an older sklearn or switch to
# fetch_openml(name='boston') to run this today.
from sklearn.datasets import load_boston
boston = load_boston()
# dataset description
print (boston.DESCR)
# build a pandas DataFrame from the feature matrix
data = pd.DataFrame(boston.data, columns=boston.feature_names)
# show the first 5 rows of the dataset
data.head()
```
Conhecendo as colunas da base de dados
**`CRIM`**: Taxa de criminalidade per capita por cidade.
**`ZN`**: Proporção de terrenos residenciais divididos por lotes com mais de 25.000 pés quadrados.
**`INDUS`**: Essa é a proporção de hectares de negócios não comerciais por cidade.
**`CHAS`**: variável fictícia Charles River (= 1 se o trecho limita o rio; 0 caso contrário)
**`NOX`**: concentração de óxido nítrico (partes por 10 milhões)
**`RM`**: Número médio de quartos entre as casas do bairro
**`IDADE`**: proporção de unidades ocupadas pelos proprietários construídas antes de 1940
**`DIS`**: distâncias ponderadas para cinco centros de emprego em Boston
**`RAD`**: Índice de acessibilidade às rodovias radiais
**`IMPOSTO`**: taxa do imposto sobre a propriedade de valor total por US $ 10.000
**`B`**: 1000 (Bk - 0,63) ², onde Bk é a proporção de pessoas de descendência afro-americana por cidade
**`PTRATIO`**: Bairros com maior proporção de alunos para professores (maior valor de 'PTRATIO')
**`LSTAT`**: porcentagem de status mais baixo da população
**`MEDV`**: valor médio de casas ocupadas pelos proprietários em US $ 1000
Adicionando a coluna que será nossa variável alvo
```
# add the target variable MEDV (median home value)
data['MEDV'] = boston.target
# show the first 5 rows of the dataframe
data.head()
# descriptive statistics of all columns
data.describe()
```
### Análise e Exploração dos Dados
Nesta etapa nosso objetivo é conhecer os dados que estamos trabalhando.
Podemos a ferramenta **Pandas Profiling** para essa etapa:
```
# Install pandas-profiling from GitHub master
# (notebook shell command — not valid plain Python)
pip install https://github.com/pandas-profiling/pandas-profiling/archive/master.zip
# import ProfileReport
from pandas_profiling import ProfileReport
# run the profiler over the dataframe
profile = ProfileReport(data, title='Relatório - Pandas Profiling', html={'style':{'full_width':True}})
profile
# save the report to disk
profile.to_file(output_file="Relatorio01.html")
```
**Observações**
* *O coeficiente de correlação varia de `-1` a `1`.
Se valor é próximo de 1, isto significa que existe uma forte correlação positiva entre as variáveis. Quando esse número é próximo de -1, as variáveis tem uma forte correlação negativa.*
* *O relatório que executamos acima nos mostra que a nossa variável alvo (**MEDV**) é fortemente correlacionada com as variáveis `LSTAT` e `RM`*
* *`RAD` e `TAX` são fortemente correlacionadas, podemos remove-las do nosso modelo para evitar a multi-colinearidade.*
* *O mesmo acontece com as colunas `DIS` and `AGE` a qual tem a correlação de -0.75*
* *A coluna `ZN` possui 73% de valores zero.*
## Aula 02
Obtendo informações da base de dados manualmente
```
# Check missing values
data.isnull().sum()
# some descriptive statistics
data.describe()
```
Analisando a Correlação das colunas da base de dados
```
# compute the pairwise correlation matrix
correlacoes = data.corr()
# draw it with seaborn's heatmap
%matplotlib inline
plt.figure(figsize=(16, 6))
sns.heatmap(data=correlacoes, annot=True)
```
Visualizando a relação entre algumas features e variável alvo
```
# Import Plotly Express
import plotly.express as px
# RM vs MEDV (number of rooms vs. median home value)
fig = px.scatter(data, x=data.RM, y=data.MEDV)
fig.show()
# LSTAT vs MEDV (lower-status population share vs. home value)
fig = px.scatter(data, x=data.LSTAT, y=data.MEDV)
fig.show()
# PTRATIO vs MEDV (pupil-teacher ratio vs. median home value)
fig = px.scatter(data, x=data.PTRATIO, y=data.MEDV)
fig.show()
```
#### Analisando Outliers
```
# descriptive statistics of RM
data.RM.describe()
# distribution of RM
import plotly.figure_factory as ff
labels = ['Distribuição da variável RM (número de quartos)']
fig = ff.create_distplot([data.RM], labels, bin_size=.2)
fig.show()
# box plot to inspect RM outliers
import plotly.express as px
fig = px.box(data, y='RM')
fig.update_layout(width=800,height=800)
fig.show()
```
Visualizando a distribuição da variável MEDV
```
# descriptive statistics of MEDV
data.MEDV.describe()
# distribution of MEDV
import plotly.figure_factory as ff
labels = ['Distribuição da variável MEDV (preço médio do imóvel)']
fig = ff.create_distplot([data.MEDV], labels, bin_size=.2)
fig.show()
```
Analisando a simetria do dado
```
# load scipy's stats module
from scipy import stats
# print the skewness coefficient of MEDV
stats.skew(data.MEDV)
```
Coeficiente de Pearson
* Valor entre -1 e 1 - distribuição simétrica.
* Valor maior que 1 - distribuição assimétrica positiva.
* Valor menor que -1 - distribuição assimétrica negativa.
```
# Histogram of MEDV (the target variable)
fig = px.histogram(data, x="MEDV", nbins=50, opacity=0.50)
fig.show()
# box plot to inspect MEDV outliers
import plotly.express as px
fig = px.box(data, y='MEDV')
fig.update_layout( width=800,height=800)
fig.show()
# show the 16 largest MEDV values
data[['RM','LSTAT','PTRATIO','MEDV']].nlargest(16, 'MEDV')
# indices of the top-16 MEDV rows
top16 = data.nlargest(16, 'MEDV').index
# drop those rows in place
data.drop(top16, inplace=True)
# re-plot the MEDV distribution after removing the outliers
import plotly.figure_factory as ff
labels = ['Distribuição da variável MEDV (número de quartos)']
fig = ff.create_distplot([data.MEDV], labels, bin_size=.2)
fig.show()
# histogram of MEDV after outlier removal
fig = px.histogram(data, x="MEDV", nbins=50, opacity=0.50)
fig.show()
# skewness coefficient after outlier removal
stats.skew(data.MEDV)
```
**Definindo um Baseline**
- `Uma baseline é importante para ter marcos no projeto`.
- `Permite uma explicação fácil para todos os envolvidos`.
- `É algo que sempre tentaremos ganhar na medida do possível`.
```
# truncate RM to integers so rooms can be bucketed
data.RM = data.RM.astype(int)
data.info()
# rule used to categorize homes by room count
categorias = []
# <= 4 rooms: small; < 7 rooms: medium; otherwise: large
# fill the categories list
for i in data.RM.iteritems():
    valor = (i[1])
    if valor <= 4:
        categorias.append('Pequeno')
    elif valor < 7:
        categorias.append('Medio')
    else:
        categorias.append('Grande')
# show the categories
categorias
# add the category column to the dataframe
data['categorias'] = categorias
# show 5 rows of the dataframe
data.head()
# count of homes per category
data.categorias.value_counts()
# group by category and compute the mean MEDV per category
medias_categorias = data.groupby(by='categorias')['MEDV'].mean()
# show the per-category means
medias_categorias
# Build the baseline lookup {category -> mean MEDV} directly from the
# grouped Series labels. The original indexed positionally
# (medias_categorias[0/1/2]), which silently relied on the groupby keys
# sorting alphabetically as 'Grande', 'Medio', 'Pequeno'; mapping by
# label is equivalent here and cannot mis-pair a key with a mean.
dic_baseline = medias_categorias.to_dict()
# show the dictionary
dic_baseline
# baseline lookup function
def retorna_baseline(num_quartos):
    """Return the baseline price (mean MEDV) for a home with the
    given number of rooms, using the category means in dic_baseline."""
    if num_quartos <= 4:
        categoria = 'Pequeno'
    elif num_quartos < 7:
        categoria = 'Medio'
    else:
        categoria = 'Grande'
    return dic_baseline.get(categoria)
# call the baseline function for a 10-room home
retorna_baseline(10)
# iterate over the homes and print the baseline price by number of rooms
for i in data.RM.iteritems():
    n_quartos = i[1]
    print('Número de quartos é: {} , Valor médio: {}'.format(n_quartos,retorna_baseline(n_quartos)))
# show the first 5 rows of the dataframe
data.head()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf as pdf
import matplotlib.patches as pch
import eleanor_constants as EL
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
%matplotlib inline
### PLOT STARVED TRAJECTORIES
# Figure S0 (starved animals): for each odor, an arena-location histogram on top
# of the raw walking trajectories. Reads one trajectory CSV per animal and
# writes the assembled figure to a PDF.
savename = "./figures/S0_starved.pdf"
df = pd.read_csv('./data/experiment_IDs/cleaned_static_data.csv')
# Keep only animals starved for 1 day and recorded as alive
df = df[df['starved'] == '1day']
df = df[df['dead'] == 'no']
color = EL.c_starve
# Odor codes and their human-readable labels; the two lists are index-aligned
odors = ["F", "FE", "Y",
         "W", "I", "O",
         "A", "G", "Q", "I2"]
odorkeys = ["Food", "Food extract", "Yeast RNA", "Water",
            "Indole 100uM", "O-cresol",
            "Amino acids", "Glucose",
            "Quinine", "Indole 10mM"]
fig = plt.figure(figsize=(14, 12*4.125/3))
for i, (odor, odorkey) in enumerate(zip(odors, odorkeys)):
    if odor != "":
        # 8-row x 4-column grid: histogram panel sits one grid row above the
        # matching trajectory panel for the same odor.
        # NOTE(review): np.floor(...) yields a float subplot index; recent
        # matplotlib versions require an int here — confirm against the pinned
        # matplotlib version, or wrap in int().
        col = 4
        ax = fig.add_subplot(8, col, np.floor(i/col)*col+(i+1)+col, aspect="equal")
        histax = fig.add_subplot(8, col, np.floor(i/col)*col+(i+1))
        # Arena is 80 x 30 mm; y is inverted so the origin is top-left
        ax.set_xlim(0, 80-1)
        ax.set_ylim(30-1, 0)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.spines['bottom'].set_color(EL.c_greyax)
        ax.spines['top'].set_color(EL.c_greyax)
        ax.spines['right'].set_color(EL.c_greyax)
        ax.spines['left'].set_color(EL.c_greyax)
        # Pooled x-positions across all animals for this odor (histogram input)
        xlist = []
        temp = df[df['treatment_odor'] == EL.treatments.get(odor)].copy()
        temp['fname'] = './data/trajectories/video_calculations/' + temp['animal_ID'] + '-experiment.csv'
        for n in temp["fname"].values:
            temp2 = pd.read_csv(n)
            x = temp2["pos_x_mm"].values
            y = temp2["pos_y_mm"].values
            xlist += x.tolist()
            # Faint grey path plus a dark dot at the final position
            ax.plot(x, y, lw=0.75, color=EL.c_greyax, alpha=0.2)
            ax.scatter([x[-1]], [y[-1]], color="k", alpha=0.5, lw=0, s=25, zorder=20)
        histax.set_xlim(0, 80)
        histax.set_ylim(0, 0.07)
        # Quinine and 10 mM indole need a taller y-axis for their histograms
        if odor in ['Q', "I2"]:
            histax.set_ylim(0, 0.18)
            histax.text(40, 0.18, odorkey+', starved, n='+str(len(temp)),
                        ha='center', va='bottom', clip_on=False)
        else:
            histax.text(40, 0.07, odorkey+', starved, n='+str(len(temp)),
                        ha='center', va='bottom', clip_on=False)
        histax.hist(xlist, bins=80, color=color, density=True, lw=0, clip_on=False)
        histax.set_xlabel("Arena location (mm, 0-80)")
        histax.set_ylabel("Probability Density (%)")
        histax.spines['top'].set_visible(False)
        histax.spines['right'].set_visible(False)
        histax.spines['left'].set_color(EL.c_greyax)
        histax.spines['bottom'].set_color(EL.c_greyax)
# SET BOUNDARIES AND SAVE FIGURE -----------------------------------------------
plt.tight_layout()
fig.subplots_adjust(wspace=0.4)
pp = pdf.PdfPages(savename, keep_empty=False)
pp.savefig(fig)
pp.close()
plt.show()
### PLOT FED TRAJECTORIES
# Same layout as the starved figure but for fed animals: per-odor arena-location
# histogram above the raw trajectories, saved as a PDF.
savename = "./figures/S0_fed.pdf"
df = pd.read_csv('./data/experiment_IDs/cleaned_static_data.csv')
# Keep only fed (not starved) animals recorded as alive
df = df[df['starved'] == 'no']
df = df[df['dead'] == 'no']
color = EL.c_fed
# Odor codes and labels, index-aligned (no 10 mM indole condition for fed animals)
odors = ["F", "FE", "Y",
         "W", "I", "O",
         "A", "G", "Q"]
odorkeys = ["Food", "Food extract", "Yeast RNA", "Water",
            "Indole", "O-cresol",
            "Amino acids", "Glucose", "Quinine"]
fig = plt.figure(figsize=(14, 12))
for i, (odor, odorkey) in enumerate(zip(odors, odorkeys)):
    # 6-row x 3-column grid; histogram panel sits one grid row above its
    # trajectory panel.
    # NOTE(review): np.floor(...) yields a float subplot index; recent
    # matplotlib versions require an int here — confirm against the pinned
    # matplotlib version, or wrap in int().
    ax = fig.add_subplot(6, 3, np.floor(i/3)*3+(i+1)+3, aspect="equal")
    histax = fig.add_subplot(6, 3, np.floor(i/3)*3+(i+1))
    # Arena is 80 x 30 mm; y is inverted so the origin is top-left
    ax.set_xlim(0, 80-1)
    ax.set_ylim(30-1, 0)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.spines['bottom'].set_color(EL.c_greyax)
    ax.spines['top'].set_color(EL.c_greyax)
    ax.spines['right'].set_color(EL.c_greyax)
    ax.spines['left'].set_color(EL.c_greyax)
    # Pooled x-positions across all animals for this odor (histogram input)
    xlist = []
    temp = df[df['treatment_odor'] == EL.treatments.get(odor)].copy()
    temp['fname'] = './data/trajectories/video_calculations/' + temp['animal_ID'] + '-experiment.csv'
    for n in temp["fname"].values:
        temp2 = pd.read_csv(n)
        x = temp2["pos_x_mm"].values
        y = temp2["pos_y_mm"].values
        xlist += x.tolist()
        # Faint grey path plus a dark dot at the final position
        ax.plot(x, y, lw=0.75, color=EL.c_greyax, alpha=0.2)
        ax.scatter([x[-1]], [y[-1]], color="k", alpha=0.5, lw=0, s=25, zorder=20)
    histax.set_xlim(0, 80)
    histax.set_ylim(0, 0.1)
    # Quinine needs a taller y-axis for its histogram
    if odor == "Q":
        histax.set_ylim(0, 0.14)
        histax.set_yticks([0, 0.07, 0.14])
        histax.text(40, 0.14, odorkey+', fed, n='+str(len(temp)),
                    ha='center', va='bottom', clip_on=False)
    else:
        histax.text(40, 0.1, odorkey+', fed, n='+str(len(temp)),
                    ha='center', va='bottom', clip_on=False)
    histax.hist(xlist, bins=80, color=color, density=True, lw=0)
    histax.set_xlabel("Arena location (mm, 0-80)")
    histax.set_ylabel("Probability Density (%)")
    histax.spines['top'].set_visible(False)
    histax.spines['right'].set_visible(False)
    histax.spines['left'].set_color(EL.c_greyax)
    histax.spines['bottom'].set_color(EL.c_greyax)
# SET BOUNDARIES AND SAVE FIGURE -----------------------------------------------
plt.tight_layout()
fig.subplots_adjust(wspace=0.2)
pp = pdf.PdfPages(savename, keep_empty=False)
pp.savefig(fig)
pp.close()
plt.show()
```
| github_jupyter |
# Intro to scikit-learn, SVMs and decision trees
<hr style="clear:both">
This notebook is part of a series of exercises for the CIVIL-226 Introduction to Machine Learning for Engineers course at EPFL. Copyright (c) 2021 [VITA](https://www.epfl.ch/labs/vita/) lab at EPFL
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file or at https://www.opensource.org/licenses/MIT
**Author(s):** [David Mizrahi](mailto:david.mizrahi@epfl.ch)
<hr style="clear:both">
This is the final exercise of this course. In this exercise, we'll introduce the scikit-learn package, and use it to train SVMs and decision trees. We'll end with a small note on how to use scikit-learn for unsupervised learning.
## 1. Intro to scikit-learn
[scikit-learn](https://scikit-learn.org/stable/index.html) is a very popular Python package, built on top of NumPy, which provides efficient implementations of many popular machine learning algorithms.
It can be used for:
- Generating and loading popular datasets
- Preprocessing (feature extraction and expansion, normalization)
- Supervised learning (classification and regression)
- Unsupervised learning (clustering and dimensionality reduction)
- Model selection (grid search, train/test split, cross-validation)
- Evaluation (with many metrics for all kinds of tasks)
### 1.1. Data representation in scikit-learn
In scikit-learn, data is represented in the same way it was in the previous exercises. That is:
- The features are represented as a 2D features matrix (usually named `X`), most often contained in a NumPy array or Pandas DataFrame.
- The label (or target) array is often called `y`, and is usually contained in a NumPy array or Pandas Series.
In mathematical notation, this is:
- features: $\boldsymbol{X} \in \mathbb{R}^{N \times D}$, $\forall \ \boldsymbol{x}^{(i)} \in \boldsymbol{X}: \boldsymbol{x}^{(i)} \in \mathbb{R}^{D}$
- label (or target): $\boldsymbol{y} \in \mathbb{R}^{N}$
where $N$ is the number of examples in our dataset, and $D$ is the number of features per example
scikit-learn offers many utilities for splitting and preprocessing data.
- For splitting data, there are functions such as [`model_selection.train_test_split()`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html#sklearn.model_selection.train_test_split) which splits arrays or matrices into random train and test subsets, or [`model_selection.KFold()`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html#sklearn.model_selection.KFold) and similar functions which provides train/test indices for cross-validation. These functions are extremely handy, and are often used to split NumPy or Pandas arrays even when the training and models come from a library other than scikit-learn.
- For preprocessing data, scikit-learn offers many utility functions which can standardize data (e.g. [`preprocessing.StandardScaler()`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler)), impute, discretize and perform feature expansion. For more information, refer to the [official preprocessing tutorial](https://scikit-learn.org/stable/modules/preprocessing.html#).
### 1.2. Estimator API
For **supervised learning**, scikit-learn implements many algorithms we've seen in this class such as:
- Nearest neighbors
- Linear regression
- Logistic regression
- Support vector machines
- Decision trees
- Ensembles (such as random forests)
In scikit-learn, these algorithms are called **estimators**, and they use a clean, uniform and streamlined API, which makes it very easy to switch to a new model or algorithm.
Here is an example of many of the estimators available with scikit-learn. [Source](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html)

Here are the steps to follow when using the scikit-learn estimator API:
1. Arrange data into a features matrix (`X`) and target vector (`y`).
2. Choose a class of model by importing the appropriate estimator class (e.g. `linear_model.LogisticRegression()`, `svm.SVC()`, etc...)
3. Choose model hyperparameters by instantiating this class with desired values.
4. Fit the model to your data by calling the `fit()` method of the model instance.
5. Apply the model to new data: for supervised learning, we predict labels for unknown data using the `predict()` method.
The steps to follow when using scikit-learn estimators for unsupervised learning are almost identical.
### 1.3. Example: Logistic regression on the Iris dataset
As an example, we'll walk through how to use scikit-learn to train a logistic regression model for multi-class classification on the Iris dataset.
```
# Core numerics and plotting for this exercise
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Global seaborn style applied to every figure in the notebook
sns.set_theme(style="white", context="notebook", palette="dark")
# !!! sklearn is how the scikit-learn package is called in Python
import sklearn
```
#### 1.3.1. Loading the dataset
```
from sklearn import datasets
# Iris is a toy dataset, directly available in sklearn.datasets
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features for simpler visualizations
y = iris.target
# Both X and y come back as NumPy arrays
print(f"Type of X: {type(X)} | Shape of X: {X.shape}")
print(f"Type of y: {type(y)} | Shape of y: {y.shape}")
```
#### 1.3.2. Splitting and scaling
```
from sklearn.model_selection import train_test_split
# Split data using train_test_split, use 30% of the data as a test set and set a random state for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
print(f"Shape of X_train: {X_train.shape} | Shape of y_train: {y_train.shape}")
print(f"Shape of X_test: {X_test.shape} | Shape of y_test: {y_test.shape}")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit with the mean / std of the training data ONLY, so no information from
# the test set leaks into the preprocessing
scaler.fit(X_train)
# Scale both the training / test data with the training statistics
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Train data is now ~zero mean / unit std; test data is close but not exact
print(f"Mean of X_train: {X_train.mean():.3f}| Std of X_train: {X_train.std():.3f}")
print(f"Mean of X_test: {X_test.mean():.3f}| Std of X_test: {X_test.std():.3f}")
```
#### 1.3.3. Training
```
from sklearn.linear_model import LogisticRegression
# Initialize a logistic regression model with L2 regularization
# and regularization strength 1e-4 (as C is inverse of regularization strength)
logreg = LogisticRegression(penalty="l2", C=1e4)
# Train the model
logreg.fit(X_train, y_train)
# Get train accuracy (score() returns mean accuracy on the given data)
train_acc = logreg.score(X_train, y_train)
print(f"Train accuracy: {train_acc * 100:.2f}%")
```
#### 1.3.4. Decision boundaries
We can use matplotlib to view the decision boundaries of our trained model.
```
# This code is beyond the scope of this class, no need to understand what it does.
# Source: https://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5
h = .02  # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the class of every point in the mesh
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, shading='auto', alpha=0.1, antialiased=True)
# Plot also the training points
scatter = plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.legend(handles=scatter.legend_elements()[0], labels=list(iris.target_names))
plt.show()
```
#### 1.3.5. Test accuracy
```
# Get test accuracy on the held-out 30% split
test_acc = logreg.score(X_test, y_test)
print(f"Test accuracy: {test_acc * 100:.2f}%")
```
#### 1.3.6. Other metrics
```
# We can easily use other metrics using sklearn.metrics
from sklearn.metrics import balanced_accuracy_score
# First we'll use the balanced accuracy (mean of per-class recalls, robust to
# class imbalance)
y_pred_train = logreg.predict(X_train)
train_balanced_acc = balanced_accuracy_score(y_train, y_pred_train)
y_pred_test = logreg.predict(X_test)
test_balanced_acc = balanced_accuracy_score(y_test, y_pred_test)
print(f"Train balanced acc: {train_balanced_acc*100:.2f}%")
print(f"Test balanced acc: {test_balanced_acc*100:.2f}%")
# plot_confusion_matrix was deprecated in scikit-learn 1.0 and removed in 1.2;
# ConfusionMatrixDisplay.from_estimator is the drop-in replacement.
from sklearn.metrics import ConfusionMatrixDisplay
# Now we'll plot the confusion matrix of the testing data
ConfusionMatrixDisplay.from_estimator(logreg, X_test, y_test, display_labels=iris.target_names, cmap=plt.cm.Blues)
plt.show()
```
### 1.4. Additional scikit-learn resources
This tutorial very briefly covers the scikit-learn package, and how it can be used to train a simple classifier. This package is capable of a lot more than what was shown here, as you will see in the rest of this exercise. If you want a more in-depth look at scikit-learn, take a look at these resources:
- scikit-learn Getting Started tutorial: https://scikit-learn.org/stable/getting_started.html
- scikit-learn User Guide: https://scikit-learn.org/stable/user_guide.html
- scikit-learn cheatsheet by Datacamp: https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Scikit_Learn_Cheat_Sheet_Python.pdf
- scikit-learn tutorial from the Python Data Science Handbook: https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html
## 2. Support Vector Machines
In class, we have covered the theory behind SVMs, and how they can be used to perform non-linear classification using the "kernel trick". In this exercise, you'll see how SVMs can easily be trained with scikit-learn, and how the choice of kernel can impact the performance on a non-linearly separable dataset.
### 2.1. Linear SVM
First we'll show how to train a simple SVM classifier.
In scikit-learn, the corresponding estimator is called `SVC` (Support Vector Classifier).
In this part, we'll use a toy dataset which is linearly separable, generated using the `datasets.make_blobs()` function.
```
from helpers import plot_svc_decision_function
from sklearn.datasets import make_blobs
# Generate a linearly separable dataset (two well-separated Gaussian blobs)
X, y = make_blobs(n_samples=150, centers=2, random_state=0, cluster_std=0.70)
# Split into train / test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Plot training and test data (color is for classes, shape is for train / test)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=50, marker='o', cmap="viridis", label="train")
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=50, marker='^', cmap="viridis", label="test")
plt.legend()
plt.show()
```
For this part, we'll train a SVM with a linear kernel. This corresponds to the basic SVM model that you've seen in class.
When initializing an instance of the SVC class, you can specify a regularization parameter C, and the strength of regularization is inversely proportional to C. That is, a high value of C leads to low regularization and a low C leads to high regularization.
Try changing the value of C. How does it affect the support vectors?
**Answer:**
YOUR ANSWER HERE
```
from sklearn.svm import SVC  # SVC = Support vector classifier
# C is the regularization parameter. The strength of regularization is inversely proportional to C.
# Try very large and very small values of C
model = SVC(kernel='linear', C=1)
model.fit(X_train, y_train)
# Print training accuracy
train_acc = model.score(X_train, y_train)
print(f"Train accuracy: {train_acc * 100:.2f}%")
# Show decision function and support vectors (plot_support highlights them)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=50, cmap="viridis")
plt.title(f"Kernel = {model.kernel} | C = {model.C}")
plot_svc_decision_function(model, plot_support=True)
# Print test accuracy
test_acc = model.score(X_test, y_test)
print(f"Test accuracy: {test_acc * 100:.2f}%")
```
### 2.2. Kernel SVM
Let's now use a non-linearly separable dataset, to observe the effect of the kernel function in SVMs.
```
from sklearn.datasets import make_circles
# Generate a circular dataset (inner cluster surrounded by a ring) — not
# linearly separable, so a linear kernel should struggle here
X, y = make_circles(n_samples=400, noise=0.25, factor=0, random_state=0)
# Split into train / test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
# Plot training and test data (color is for classes, shape is for train / test)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, marker='o', cmap="viridis", label="train")
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, marker='^', cmap="viridis", label="test")
plt.legend()
plt.show()
```
As you've seen in class, we can use kernel functions to allow SVMs to operate in high-dimensional, implicit feature spaces, without needing to compute the coordinates of the data in that space. We have seen a variety of kernel functions, such as the polynomial kernel and the RBF kernel.
In this exercise, experiment with the different kernels, such as:
- the linear kernel (`linear`): $\langle x, x'\rangle$
- the polynomial kernel (`poly`): $(\gamma \langle x, x'\rangle + r)^d$ (try out different degrees)
- the radial basis function kernel (`rbf`): $\exp(-\gamma \|x-x'\|^2)$
Your task is to experiment with these kernels to see which one does the best on this dataset.
How does the kernel affect the decision boundary? Which kernel and value of C would you pick to maximize your model's performance?
**Note:** Use the helper function `plot_svc_decision_function()` to view the decision boundaries for each model.
```
# Use as many code cells as needed to try out different kernels and values of C
### YOUR CODE HERE ###
```
**Answer:**
YOUR ANSWER HERE
**To go further**: To learn more about SVMs in scikit-learn, and how to use them for multi-class classification and regression, check out the documentation page: https://scikit-learn.org/stable/modules/svm.html
## 3. Trees
Decision trees are a very intuitive way to classify objects: they ask a series of questions to infer the target variable.
A decision tree is a set of nested decision rules. At each node $i$, the $d_i$-th feature of the input vector $ \boldsymbol{x}$ is compared to a threshold value $t$. The vector $\boldsymbol{x}$ is passed down to the left or right branch depending on whether $d_i$ is less than or greater than $t$. This process is repeated for each node encountered until a leaf node is reached, which specifies the predicted output.
<img src="images/simple_tree.png" width=400></img>
*Example of a simple decision tree on the Palmer Penguins dataset*
Decision trees are usually constructed from the top-down, by choosing a feature at each step that best splits the set of items. There are different metrics for measuring the "best" feature to pick, such as the Gini impurity and the entropy / information gain. We won't dive into them here, but we recommend reading Chapter 18 of ["Probabilistic Machine Learning: An Introduction"](https://probml.github.io/pml-book/) by K.P. Murphy if you want to learn more about them.
Decision trees are popular for several reasons:
- They are **easy to interpret**.
- They can handle mixed discrete and continuous inputs.
- They are insensitive to monotone transformations of the inputs, so there is no need to standardize the data.
- They perform automatic feature selection.
- They are fast to fit, and scale well to large data sets.
Unfortunately, trees usually do not predict as accurately as other models we have seen previously, such as neural networks and SVMs.
It is however possible to significantly improve their performance through an ensemble learning method called **random forests**, which consists of constructing a multitude of decision trees at training time and averaging their outputs at test time. While random forests usually perform better than a single decision tree, they are much less interpretable. We won't cover random forests in this exercise, but keep in mind that they can be easily implemented in scikit-learn using the [`ensemble` module](https://scikit-learn.org/stable/modules/ensemble.html).
### 3.1. Training decision trees
In this part, we will work on the Titanic dataset obtained at the end of the `05-pandas` tutorial. Our goal is to train a model to predict whether or not a passenger survived the shipwreck and to find out which features are the most useful for predicting this.
```
import pandas as pd
# Titanic passenger data; target column is 'survived'
titanic = pd.read_csv("data/titanic.csv")
titanic.head(5)
# Split into X and y
X = titanic.drop(columns="survived")
y = titanic["survived"]
# Convert to NumPy (needed for interpretability function later on)
X_numpy, y_numpy = X.to_numpy(), y.to_numpy()
# Use 80% of data for train/val, 20% for test
X_trainval, X_test, y_trainval, y_test = train_test_split(X_numpy, y_numpy, test_size=0.2, random_state=42)
# Use 80% of trainval for train, 20% for val
X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, test_size=0.2, random_state=42)
```
It is now your turn to train decision trees in scikit-learn. They follow the same estimator API as all other supervised learning models, so the implementation is very straightforward. For more information, check out the [`tree` module](https://scikit-learn.org/stable/modules/tree.html#tree).
**Your task:** Initialize a `DecisionTreeClassifier` and train it on `X_train` and `y_train`.
- Use "entropy" as the `criterion`
- Try out different values for the max tree depth. How does it affect the train and validation accuracy?
```
import sklearn.tree as tree
from sklearn.tree import DecisionTreeClassifier
# Use the entropy (information gain) as the criterion
# Try varying the max depth
# e.g. DecisionTreeClassifier(criterion="entropy", max_depth=...).fit(X_train, y_train)
### YOUR CODE HERE ###
model = ...
### END CODE HERE ###
# Compare train / validation / test accuracy to gauge over- or under-fitting
train_acc = model.score(X_train, y_train)
print(f"Train accuracy: {train_acc * 100:.2f}%")
val_acc = model.score(X_val, y_val)
print(f"Validation accuracy: {val_acc * 100:.2f}%")
test_acc = model.score(X_test, y_test)
print(f"Test accuracy: {test_acc * 100:.2f}%")
```
**Answer:** YOUR ANSWER HERE
### 3.2. Interpretability of trees
In this section, we'll show you how to visualize decision trees and interpret the decision made for some examples of our test set.
**Your task:** Run the next few cells to better understand the structure of the tree you just built. Can you identify which features are the most important for predicting whether or not a passenger survived?
```
# Plots the decision tree
# Try out a max plot depth of 2 or 3, tree will be hard to read otherwise
plt.figure(figsize=(30, 10))
tree.plot_tree(model, max_depth=2, filled=True, feature_names=X.columns, class_names=["Perished", "Survived"],
               impurity=False, proportion=True, rounded=True)
plt.show()
# Prints the decision tree as text
# Will be very long if max depth is high
# Class 0 = Perished, Class 1 = Survived
print(tree.export_text(model, feature_names=list(X.columns)))
# Explaining the decisions (complicated code, no need to understand what it does exactly)
def explain_decision(sample_id: int = 0):
    """Prints rules followed to obtain prediction for a sample of the test set

    Code adapted from:
    https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html
    """
    class_names = ["Perished", "Survived"]
    # Split feature index and threshold for every node of the fitted tree
    feature = model.tree_.feature
    threshold = model.tree_.threshold
    # decision_path marks, per test sample, which tree nodes it traverses;
    # apply() gives the leaf each sample ends up in
    node_indicator = model.decision_path(X_test)
    leaf_id = model.apply(X_test)
    # obtain ids of the nodes `sample_id` goes through, i.e., row `sample_id`
    node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                        node_indicator.indptr[sample_id + 1]]
    prediction = class_names[model.predict(X_test[sample_id:sample_id+1])[0]]
    print(f"Prediction for sample {sample_id}: {prediction}\n")
    print("Rules used:")
    for node_id in node_index:
        # continue to the next node if it is a leaf node
        if leaf_id[sample_id] == node_id:
            continue
        # check if value of the split feature for this sample is below threshold
        if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
            threshold_sign = "<="
        else:
            threshold_sign = ">"
        print("- node {node}: ({feature} = {value}) "
              "{inequality} {threshold}".format(
                  node=node_id,
                  feature=X.columns[feature[node_id]],
                  value=X_test[sample_id, feature[node_id]],
                  inequality=threshold_sign,
                  threshold=threshold[node_id]))
# For binary variables, 1 = True, 0 = False
# e.g. sex_male = 1 -> male, sex_male = 0 -> female
# Many of the features are redundant (e.g. sex_male and sex_female)
# so the tree doesn't always choose the same features
explain_decision(sample_id=0)
explain_decision(sample_id=1)
```
**Answer:**
YOUR ANSWER HERE
**To go further:** Decision trees and random forests can also be used for regression, check out the scikit-learn pages on [trees](https://scikit-learn.org/stable/modules/tree.html#tree) and [ensemble methods](https://scikit-learn.org/stable/modules/ensemble.html#ensemble) for more info.
## 4. A small note on unsupervised learning
While we won't cover them in this exercise, most of the unsupervised learning techniques seen in class can be easily implemented with scikit-learn.
As an example, here is how to use the k-means clustering algorithm on a toy dataset consisting of 7 unlabeled blobs of points.
When choosing $k=7$, k-means manages to almost perfectly recover the original blobs.
```
from sklearn.cluster import KMeans
# Generate unlabeled data (labels are discarded with `_`)
X_blobs, _ = make_blobs(n_samples=200, centers=7, random_state=0, cluster_std=0.60)
plt.scatter(X_blobs[:, 0], X_blobs[:, 1], c="grey", alpha=0.5)
plt.title("Unlabeled data")
plt.show()
# Run k-means on data to find the blobs
# Try changing the value of k
k = 7
kmeans = KMeans(n_clusters=k)
labels = kmeans.fit_predict(X_blobs)
# Display clusters and their centers (one star per cluster centroid)
plt.scatter(X_blobs[:,0], X_blobs[:,1], c=labels, cmap="viridis", alpha=0.5)
for c in kmeans.cluster_centers_:
    plt.scatter(c[0], c[1], marker="*", s=80, color="blue")
plt.title(f"K-Means with {k} clusters")
plt.show()
```
To learn more about how to practically implement these techniques, check out these resources:
**For dimensionality reduction:**
- [PCA from the Python Data Science handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html)
- [Manifold learning from the Python Data Science handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.10-manifold-learning.html)
- [Decomposition page on scikit-learn's website](https://scikit-learn.org/stable/modules/decomposition.html)
- [Manifold learning page on scikit-learn's website](https://scikit-learn.org/stable/modules/manifold.html)
**For clustering:**
- [k-means from the Python Data Science handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.11-k-means.html)
- [Gaussian mixtures from the Python Data Science handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html)
- [Clustering page on scikit-learn's website](https://scikit-learn.org/stable/modules/clustering.html)
## Congratulations!
Congratulations on completing this final exercise!
Throughout this series of exercises, you learned about the fundamental tools and libraries used in machine learning, and worked on practical implementations of many of the most commonly used techniques in this field.
As long as these exercises have been, they are still too short to cover several other interesting and important machine learning topics, but we believe you now have all the tools at your disposal to learn about them on your own, if you desire to do so.
Thank you for sticking with us through the end, we really hope you enjoyed the exercises in this course!
<img src="images/thats_all_folks.png" width=400></img>
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
%matplotlib inline
# Load the 13-genre movie dataframe and index it by primary genre.
thirteen_genre_df = pd.read_pickle('/content/13_genre.pkl')
thirteen_genre_df.head()
thirteen_genre_df = thirteen_genre_df.set_index('Genre_first')
thirteen_genre_df
# Unique genres in alphabetical order. sorted() already returns a list, so the
# original list(set([x for x in ...to_numpy()])) wrappers were redundant.
thirteen_genre_list = sorted(set(thirteen_genre_df.index))
thirteen_genre_list
```
## Collecting all the description of specific genre
```
# Group the descriptions by primary genre; each genre's descriptions are then
# concatenated into one '.'-joined document.
thr_g = thirteen_genre_df.groupby('Genre_first')['Description']
# One pass over the genre list replaces the thirteen copy-pasted
# get_group / str.cat blocks of the original.
_genre_docs = {genre: thr_g.get_group(genre).str.cat(sep='.') for genre in thirteen_genre_list}
# Keep the original per-genre variable names so later cells keep working.
Action_gf = _genre_docs['Action']
Adventure_gf = _genre_docs['Adventure']
Animation_gf = _genre_docs['Animation']
Biography_gf = _genre_docs['Biography']
Comedy_gf = _genre_docs['Comedy']
Crime_gf = _genre_docs['Crime']
Drama_gf = _genre_docs['Drama']
Fantasy_gf = _genre_docs['Fantasy']
Horror_gf = _genre_docs['Horror']
Mystery_gf = _genre_docs['Mystery']
Romance_gf = _genre_docs['Romance']
Sci_Fi_gf = _genre_docs['Sci-Fi']
Thriller_gf = _genre_docs['Thriller']
thirteen_genre_list
```
## Creating dictionary out of the series generated
```
# Map each genre name to its concatenated description document.
# NOTE(review): correctness relies on the value list below staying in the same
# alphabetical order as thirteen_genre_list — verify if genres change.
thirteen_genre_dict = dict(zip(thirteen_genre_list,[Action_gf,Adventure_gf,Animation_gf,Biography_gf,
                                                    Comedy_gf,Crime_gf,Drama_gf,Fantasy_gf,Horror_gf,
                                                    Mystery_gf,Romance_gf,Sci_Fi_gf,Thriller_gf]))
(thirteen_genre_dict)
```
## Generating dataframe out of the dictionary
```
# One row per genre: index = genre name, single 'Description' column holding
# that genre's concatenated description document.
only_thirteen_genre = pd.DataFrame.from_dict(thirteen_genre_dict,orient='index',columns=['Description'])
only_thirteen_genre.head()
```
## Finding top common words from all genres.
```
from collections import Counter
# Words appearing more than once within a genre's combined description count as
# that genre's "common words". The original loop reassigned common_words_list on
# every iteration, so only the LAST genre's words survived — contradicting the
# stated goal of collecting common words from ALL genres. Accumulate instead.
common_words_set = set()
for description in only_thirteen_genre['Description']:
    counts = Counter(description.split())
    common_words_set.update(word for word, count in counts.items() if count > 1)
# Sorted for a deterministic, readable listing
common_words_list = sorted(common_words_set)
common_words_list
from textblob import TextBlob
# Per-genre sentiment: polarity in [-1, 1] (negative..positive) and
# subjectivity in [0, 1] (facts..opinions)
only_thirteen_genre['polarity'] = only_thirteen_genre['Description'].apply(lambda x : TextBlob(x).sentiment.polarity)
only_thirteen_genre['subjectivity'] = only_thirteen_genre['Description'].apply(lambda x : TextBlob(x).sentiment.subjectivity)
only_thirteen_genre
plt.figure(figsize=(12,9))
# Scatter each genre at (polarity, subjectivity) and label the point.
# Label-based .loc lookup replaces the original positional Series[int]
# indexing, which is deprecated for label-indexed Series.
for genre in thirteen_genre_list:
    x = only_thirteen_genre.polarity.loc[genre]
    y = only_thirteen_genre.subjectivity.loc[genre]
    plt.scatter(x,y,color='blue')
    plt.text(x+0.001,y+0.001,genre)
plt.title("Sentiment Analysis")  # typo fix: was "Anlaysis"
plt.xlabel("<-- Negative -------- Positive -->")
plt.ylabel('<-- Facts -------- Opinions -->')
from wordcloud import WordCloud
from sklearn.feature_extraction import text
# Extend the built-in English stop words with the genre-common words computed above
stop_words = text.ENGLISH_STOP_WORDS.union(common_words_list)
wc = WordCloud(stopwords=stop_words, background_color='white',random_state=42,max_font_size=150,colormap='Dark2')
plt.figure(figsize=(12,9))
# One word cloud per genre on a 5x3 grid (13 genres -> 13 cells used).
# Note: `genre` here is the genre's concatenated description TEXT; the subplot
# title comes from the index-aligned thirteen_genre_list.
for indexes , genre in enumerate(only_thirteen_genre['Description']):
    wc.generate(genre)
    plt.subplot(5,3,indexes+1)
    plt.imshow(wc,interpolation='bilinear')
    plt.axis("off")
    plt.title(thirteen_genre_list[indexes])
```
| github_jupyter |
# NIRCam F444W Coronagraphic Observations of Vega
---
Here we create the basics for a NIRCam coronagraphic simulation of Vega using the MASK430R occulter with the F444W filter (the workflow below was adapted from a MIRI FQPM 1550 simulation of the Fomalhaut system, and some labels still reference that template). This includes simulating the stellar source behind the center of the occulting mask, some fake off-axis companions, and a debris disk model extending across the field.
From JWST PID 1193, the pointing file provides some of the relevant information. Here's a truncated version of the pointing file for the first roll position. In this example, we skip the target acquisition observations and only simulate the science exposure image.
```
* Fomalhaut-1550C-Rot1 (Obs 6)
** Visit 6:1
Aperture Name Target RA Dec V2 V3 IdlX IdlY Level Type
MIRIM_TABLOCK 1 VEGA +279.23474 +38.78369 -407.464 -387.100 +0.000 +0.000 TARGET T_ACQ
MIRIM_TA1550_UR 1 VEGA +279.23474 +38.78369 -395.471 -365.842 +0.000 +0.000 TARGET T_ACQ
MIRIM_TA1550_CUR 1 VEGA +279.23474 +38.78369 -391.430 -370.519 +0.122 +0.134 TARGET T_ACQ
MIRIM_MASK1550 1 VEGA +279.23474 +38.78369 -389.892 -372.181 +0.000 +0.000 TARGET SCIENCE
```
Final outputs will be detector-sampled slope images (counts/sec).
```
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#import matplotlib.patches as mpatches
# Enable inline plotting
%matplotlib inline
# Progress bar
from tqdm.auto import trange, tqdm
import webbpsf_ext, pysiaf
from astropy.io import fits
from webbpsf_ext import image_manip, setup_logging, spectra, coords
```
# NIRCam Observation with MASK430R+F444W
## 1. Create PSF structure
```
# Mask information: NIRCam round occulter MASK430R with the circular Lyot
# pupil and the F444W filter.
filt = 'F444W'
mask = 'MASK430R'
pupil = 'CIRCLYOT'
# Initiate instrument class with selected filters, pupil mask, and image mask
inst = webbpsf_ext.NIRCam_ext(filter=filt, pupil_mask=pupil, image_mask=mask)
# Set desired PSF size (detector pixels) and oversampling factor
inst.fov_pix = 256
inst.oversample = 2
# Calculate PSF coefficients
inst.npsf = 10  # number of monochromatic PSFs fit across the bandpass
inst.gen_psf_coeff()
# Calculate position-dependent PSFs due to the occulting mask
# Equivalent to generating a giant library to interpolate over
inst.gen_wfemask_coeff()
```
## 1.1 Observation setup
__Configuring observation settings__
Observations consist of nested visits, mosaic tiles, exposures, and dithers. In this section, we configure a pointing class that houses information for a single observation defined in the APT .pointing file. The primary information includes a pointing reference SIAF aperture name, RA and Dec of the ref aperture, Base X/Y offset relative to the ref aperture position, and Dith X/Y offsets. From this information, along with the V2/V3 position angle, we can determine the orientation and location of objects on the detector focal plane.
**Note**: The reference aperture is not necessarily the same as the observed aperture. For instance, you may observe simultaneously with four of NIRCam's SWA detectors, so the reference aperture would be the entire SWA channel, while the observed apertures are A1, A2, A3, and A4.
```
# Import class to setup pointing info
from webbpsf_ext.coords import jwst_point
# Observed and reference apertures (identical here: single-aperture obs)
ap_obs = 'NRCA5_MASK430R'
ap_ref = 'NRCA5_MASK430R'
# Define the RA/Dec of reference aperture and telescope position angle
# Position angle is angle of V3 axis rotated towards East
# (same RA/Dec as the target defined in obj_params below)
ra_ref, dec_ref = (+279.23474, +38.78369)
pos_ang = 0
# Set any baseline pointing offsets (e.g., specified in APT's Special Requirements)
base_offset=(0,0)
# Define a list of nominal dither offsets (single undithered pointing)
dith_offsets = [(0,0)]
# Telescope pointing information
# base_std=0 / dith_std=0 disable random pointing-error draws.
tel_point = jwst_point(ap_obs, ap_ref, ra_ref, dec_ref, pos_ang=pos_ang,
                       base_offset=base_offset, dith_offsets=dith_offsets,
                       base_std=0, dith_std=0)
# Summarize the pointing configuration.
print(f"Reference aperture: {tel_point.siaf_ap_ref.AperName}")
print(f" Nominal RA, Dec = ({tel_point.ra_ref:.6f}, {tel_point.dec_ref:.6f})")
print(f"Observed aperture: {tel_point.siaf_ap_obs.AperName}")
print(f" Nominal RA, Dec = ({tel_point.ra_obs:.6f}, {tel_point.dec_obs:.6f})")
print(f"Relative offsets in 'idl' for each dither position (incl. pointing errors)")
for i, offset in enumerate(tel_point.position_offsets_act):
    print(f" Position {i}: ({offset[0]:.4f}, {offset[1]:.4f}) arcsec")
```
## 1.2 Add central source
Here we define the stellar atmosphere parameters for Fomalhaut, including spectral type, optional values for (Teff, log_g, metallicity), normalization flux and bandpass, as well as RA and Dec.
```
from webbpsf_ext import miri_filter, nircam_filter, bp_2mass
from webbpsf_ext.image_manip import pad_or_cut_to_size
def make_spec(name=None, sptype=None, flux=None, flux_units=None, bp_ref=None, **kwargs):
    """
    Build a pysynphot stellar spectrum from keyword properties.

    The spectrum of spectral type `sptype` is normalized to `flux` in
    `flux_units` through the reference bandpass `bp_ref`; any extra
    keywords (Teff, log_g, metallicity, ...) pass through to
    webbpsf_ext.stellar_spectrum. If `name` is given it is attached to
    the returned spectrum object.
    """
    from webbpsf_ext import stellar_spectrum

    # Generate and renormalize the synthetic spectrum in one call.
    sp = stellar_spectrum(sptype, flux, flux_units, bp_ref, **kwargs)
    if name is not None:
        sp.name = name
    return sp
# Information necessary to create pysynphot spectrum of star
# NOTE(review): the flux normalization (16.09 Jy through MIRI F1550C) is
# carried over from the Fomalhaut/MIRI template even though this notebook
# simulates NIRCam F444W -- confirm the intended bandpass and flux.
obj_params = {
    'name': 'Vega',
    'sptype': 'A0V',
    'Teff': 9602, 'log_g': 4.1, 'metallicity': -0.5,
    'dist': 7.7,
    'flux': 16.09, 'flux_units': 'Jy', 'bp_ref': miri_filter('F1550C'),
    'RA_obj' : +279.23474, # RA (decimal deg) of source
    'Dec_obj' : +38.78369, # Dec (decimal deg) of source
}
# Create stellar spectrum and add to dictionary
sp_star = make_spec(**obj_params)
obj_params['sp'] = sp_star
# Get `sci` coord positions of the target within the observed aperture
coord_obj = (obj_params['RA_obj'], obj_params['Dec_obj'])
xsci, ysci = tel_point.radec_to_frame(coord_obj, frame_out='sci')
# Get sci position shifts from center in units of detector pixels
siaf_ap = tel_point.siaf_ap_obs
xsci_cen, ysci_cen = siaf_ap.reference_point('sci')
xsci_off, ysci_off = (xsci-xsci_cen, ysci-ysci_cen)
# and now oversampled pixel shifts
osamp = inst.oversample
xsci_off_over = xsci_off * osamp
ysci_off_over = ysci_off * osamp
print("Image shifts (oversampled pixels):", xsci_off_over, ysci_off_over)
# Create PSF
# PSFs already includes geometric distortions based on SIAF info
sp = obj_params['sp']
xtel, ytel = siaf_ap.convert(xsci, ysci, 'sci', 'tel')
hdul = inst.calc_psf_from_coeff(sp=sp, coord_vals=(xtel,ytel), coord_frame='tel')
# Expand PSF to full frame and offset to proper position
ny_pix, nx_pix = (siaf_ap.YSciSize, siaf_ap.XSciSize)
ny_pix_over, nx_pix_over = np.array([ny_pix, nx_pix]) * osamp
shape_new = (ny_pix*osamp, nx_pix*osamp)
delyx = (ysci_off_over, xsci_off_over)  # (y, x) ordering for pad_or_cut_to_size
image_full = pad_or_cut_to_size(hdul[0].data, shape_new, offset_vals=delyx)
# Make new HDUList of target (just central source so far)
hdul_full = fits.HDUList(fits.PrimaryHDU(data=image_full, header=hdul[0].header))
# Quick-look plot of the central-source scene.
# NOTE(review): `extent` is computed from fov_pix, but hdul_full holds the
# full-frame image -- the axis scale may not match the displayed data; confirm.
fig, ax = plt.subplots(1,1)
extent = 0.5 * np.array([-1,1,-1,1]) * inst.fov_pix * inst.pixelscale
ax.imshow(hdul_full[0].data, extent=extent)
ax.set_xlabel('Arcsec')
ax.set_ylabel('Arcsec')
ax.tick_params(axis='both', color='white', which='both')
for k in ax.spines.keys():
    ax.spines[k].set_color('white')
ax.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
ax.yaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
fig.tight_layout()
```
## 1.3 Convolve extended disk image
Properly including extended objects is a little more complicated than for point sources. First, we need to properly format the input model to a pixel binning and flux units appropriate for the simulations (i.e., pixels should match the oversampled PSFs, with flux units of counts/sec). Then, the image needs to be rotated relative to the 'idl' coordinate plane and subsequently shifted for any pointing offsets. Once in the appropriate 'idl' system, the image can be distorted onto the detector grid and convolved with the position-dependent PSFs.
### 1.3.1 PSF Grid
```
# Create grid locations for array of PSFs to generate
field_rot = 0 if inst._rotation is None else inst._rotation
# Log-spaced radial offsets (0.01" to 10", 7 steps) at four azimuths,
# plus the on-axis (r=0) position prepended below.
rvals = 10**(np.linspace(-2,1,7))
thvals = np.linspace(0, 360, 4, endpoint=False)
rvals_all = [0]
thvals_all = [0]
for r in rvals:
    for th in thvals:
        rvals_all.append(r)
        thvals_all.append(th)
rvals_all = np.array(rvals_all)
thvals_all = np.array(thvals_all)
# Convert polar offsets to (x, y) offsets in arcsec
xgrid_off, ygrid_off = coords.rtheta_to_xy(rvals_all, thvals_all)
# xyoff_half = 10**(np.linspace(-2,1,5))
# xoff = yoff = np.concatenate([-1*xyoff_half[::-1],[0],xyoff_half])
# # Mask Offset grid positions in arcsec
# xgrid_off, ygrid_off = np.meshgrid(xoff, yoff)
# xgrid_off, ygrid_off = xgrid_off.flatten(), ygrid_off.flatten()
# Science positions in detector pixels
xoff_sci_asec, yoff_sci_asec = coords.xy_rot(-1*xgrid_off, -1*ygrid_off, -1*field_rot)
xsci = xoff_sci_asec / siaf_ap.XSciScale + siaf_ap.XSciRef
ysci = yoff_sci_asec / siaf_ap.YSciScale + siaf_ap.YSciRef
xtel, ytel = siaf_ap.convert(xsci, ysci, 'sci', 'tel')
plt.plot(xtel, ytel, marker='o', ls='none', alpha=0.5)
%%time
# Now, create all PSFs, one for each (xsci, ysci) location
# Only need to do this once. Can be used for multiple dither positions.
hdul_psfs = inst.calc_psf_from_coeff(coord_vals=(xtel, ytel), coord_frame='tel', return_oversample=True)
```
### 1.3.2 Disk Model Image
```
# Disk model information
disk_params = {
    'file': "Vega/Vega_F444Wsccomb.fits",
    'pixscale': inst.pixelscale,
    'wavelength': 4.4,  # micron
    'units': 'Jy/pixel',
    'dist' : 7.7,  # pc
    'cen_star' : False,  # presumably: model image excludes the central star -- confirm
}
# Open model and rebin to PSF sampling
# Scale to instrument wavelength assuming grey scattering function
# Converts to phot/sec/lambda
hdul_disk_model = image_manip.make_disk_image(inst, disk_params, sp_star=obj_params['sp'])
# Rotation necessary to go from sky coordinates to 'idl' frame
rotate_to_idl = -1*(tel_point.siaf_ap_obs.V3IdlYAngle + tel_point.pos_ang)
```
### 1.3.3 Dither Position
```
# Select the first dither location offset
delx, dely = tel_point.position_offsets_act[0]
# Rotate the disk model into the 'idl' frame and apply the dither shift
hdul_out = image_manip.rotate_shift_image(hdul_disk_model, PA_offset=rotate_to_idl,
                                          delx_asec=delx, dely_asec=dely)
sci_cen = (siaf_ap.XSciRef, siaf_ap.YSciRef)
# Distort image on 'sci' coordinate grid
im_sci, xsci_im, ysci_im = image_manip.distort_image(hdul_out, ext=0, to_frame='sci', return_coords=True,
                                                     aper=siaf_ap, sci_cen=sci_cen)
# Distort image onto 'tel' (V2, V3) coordinate grid for plot illustration
im_tel, v2_im, v3_im = image_manip.distort_image(hdul_out, ext=0, to_frame='tel', return_coords=True,
                                                 aper=siaf_ap, sci_cen=sci_cen)
# Plot locations for PSFs that we will generate
fig, ax = plt.subplots(1,1)
# Show image in V2/V3 plane
extent = [v2_im.min(), v2_im.max(), v3_im.min(), v3_im.max()]
ax.imshow(im_tel**0.1, extent=extent)  # **0.1 stretch to reveal faint structure
# Add on SIAF aperture boundaries
tel_point.plot_inst_apertures(ax=ax, clear=False, label=True)
tel_point.plot_ref_aperture(ax=ax)
tel_point.plot_obs_aperture(ax=ax, color='C4')
# Add PSF location points
v2, v3 = siaf_ap.convert(xsci, ysci, 'sci', 'tel')
ax.scatter(v2, v3, marker='.', alpha=0.5, color='C3', edgecolors='none', linewidths=0)
ax.set_title('Model disk image and PSF Locations in SIAF FoV')
fig.tight_layout()
```
This particular disk image is oversized, so we will need to crop the image after convolving PSFs. We may want to consider trimming some of this image prior to convolution, depending on how some of the FoV is blocked before reaching the coronagraphic optics.
```
# If the image is too large, then this process will eat up much of your computer's RAM
# So, crop image to more reasonable size (20% oversized)
osamp = inst.oversample
xysize = int(1.2 * np.max([siaf_ap.XSciSize,siaf_ap.YSciSize]) * osamp)
xy_add = osamp - np.mod(xysize, osamp)  # round size up to a multiple of osamp
xysize += xy_add
im_sci = pad_or_cut_to_size(im_sci, xysize)
hdul_disk_model_sci = fits.HDUList(fits.PrimaryHDU(data=im_sci, header=hdul_out[0].header))
# Convolve image
# NOTE(review): this convolve_image result is immediately overwritten by the
# single-PSF FFT convolution below -- one of the two code paths is redundant.
im_conv = image_manip.convolve_image(hdul_disk_model_sci, hdul_psfs, aper=siaf_ap)
import scipy
from scipy import fftpack
from astropy.convolution import convolve, convolve_fft
# Manual convolution using the last PSF in the generated grid
psf = hdul_psfs[-1].data
im_temp = im_sci.copy()
norm = psf.sum()
psf = psf / norm  # normalize the kernel for convolve_fft
res = convolve_fft(im_temp, psf, fftn=fftpack.fftn, ifftn=fftpack.ifftn, allow_huge=True)
res *= norm  # restore the PSF throughput after normalized convolution
im_conv = res
# Add cropped image to final oversampled image
# NOTE(review): this *assigns* (replacing the central-source image) rather
# than adds; the star is re-added later via im_ref -- confirm intent.
im_conv = pad_or_cut_to_size(im_conv, hdul_full[0].data.shape)
hdul_full[0].data = im_conv
def quick_ref_psf(idl_coord, inst, tel_point, out_shape, sp=None):
    """
    Create a quick reference PSF for subtraction of the science target.

    Parameters
    ----------
    idl_coord : tuple
        (x, y) offset in 'idl' coordinates (arcsec) of the observed aperture.
    inst : webbpsf_ext instrument object
        Used to generate the PSF from precomputed coefficients.
    tel_point : jwst_point
        Pointing info; supplies the observed SIAF aperture.
    out_shape : tuple
        Output (ny, nx) shape to pad/crop the PSF image to.
    sp : pysynphot spectrum, optional
        Source spectrum weighting the PSF.

    Returns
    -------
    ndarray
        Oversampled PSF image shifted to the requested position.
    """
    # Observed SIAF aperture
    siaf_ap = tel_point.siaf_ap_obs
    # Location of observation
    xidl, yidl = idl_coord
    # Get offset in SCI pixels relative to the aperture reference point
    xsci_off, ysci_off = np.array(siaf_ap.convert(xidl, yidl, 'idl', 'sci')) - \
                         np.array(siaf_ap.reference_point('sci'))
    # Get oversampled pixel offsets
    osamp = inst.oversample
    xsci_off_over, ysci_off_over = np.array([xsci_off, ysci_off]) * osamp
    yx_offset = (ysci_off_over, xsci_off_over)
    # Create PSF (temporarily silence logging chatter during generation)
    prev_log = webbpsf_ext.conf.logging_level
    setup_logging('WARN', verbose=False)
    xtel, ytel = siaf_ap.convert(xidl, yidl, 'idl', 'tel')
    hdul_psf_ref = inst.calc_psf_from_coeff(sp=sp, coord_vals=(xtel, ytel), coord_frame='tel')
    setup_logging(prev_log, verbose=False)
    # Expand/crop into the requested frame at the shifted position
    im_psf = pad_or_cut_to_size(hdul_psf_ref[0].data, out_shape, offset_vals=yx_offset)
    return im_psf
# Subtract a reference PSF from the science data
coord_vals = tel_point.position_offsets_act[0]
im_psf = quick_ref_psf(coord_vals, inst, tel_point, hdul_full[0].data.shape, sp=sp_star)
# Rebin the reference PSF to detector sampling
im_ref = image_manip.frebin(im_psf, scale=1/osamp)
# Rebin science data to detector pixels; the central star (im_ref) is added
# back here since hdul_full currently holds only the convolved disk image.
im_sci = image_manip.frebin(hdul_full[0].data, scale=1/osamp) + im_ref
imdiff = im_sci - im_ref  # idealized (noise-free) PSF subtraction
# De-rotate to sky orientation
imrot = image_manip.rotate_offset(imdiff, rotate_to_idl, reshape=False, cval=np.nan)
from matplotlib.colors import LogNorm
from webbpsf_ext.coords import plotAxes
# Three-panel summary: raw image, PSF-subtracted, and de-rotated to sky.
fig, axes = plt.subplots(1,3, figsize=(12,4.5))
############################
# Plot raw image
ax = axes[0]
im = im_sci
# NOTE(review): mn/std/vmin/vmax are computed but unused -- the LogNorm
# below defines the display scaling instead.
mn = np.median(im)
std = np.std(im)
vmin = 0
vmax = mn+10*std
# Axis extents in arcsec, centered on the aperture
xsize_asec = siaf_ap.XSciSize * siaf_ap.XSciScale
ysize_asec = siaf_ap.YSciSize * siaf_ap.YSciScale
extent = [-1*xsize_asec/2, xsize_asec/2, -1*ysize_asec/2, ysize_asec/2]
norm = LogNorm(vmin=im.max()/1e5, vmax=im.max())
ax.imshow(im, extent=extent, norm=norm)
ax.set_title("Raw Image (log scale)")
ax.set_xlabel('XSci (arcsec)')
ax.set_ylabel('YSci (arcsec)')
plotAxes(ax, angle=-1*siaf_ap.V3SciYAngle)
############################
# Basic PSF subtraction
# Subtract a near-perfect reference PSF
ax = axes[1]
norm = LogNorm(vmin=imdiff.max()/1e5, vmax=imdiff.max())
ax.imshow(imdiff, extent=extent, norm=norm)
ax.set_title("PSF Subtracted (log scale)")
ax.set_xlabel('XSci (arcsec)')
ax.set_ylabel('YSci (arcsec)')
plotAxes(ax, angle=-1*siaf_ap.V3SciYAngle)
############################
# De-rotate to sky orientation
ax = axes[2]
ax.imshow(imrot, extent=extent, norm=norm)
ax.set_title("De-Rotated (log scale)")
ax.set_xlabel('RA offset (arcsec)')
ax.set_ylabel('Dec offset (arcsec)')
plotAxes(ax, position=(0.95,0.35), label1='E', label2='N')
for i, ax in enumerate(axes.flatten()):
    ax.xaxis.get_major_locator().set_params(nbins=7, steps=[1, 2, 5, 10])
    ax.yaxis.get_major_locator().set_params(nbins=7, steps=[1, 2, 5, 10])
# NOTE(review): title says "Fomalhaut" but the simulated target is Vega --
# likely left over from the MIRI template.
fig.suptitle(f"Fomalhaut ({siaf_ap.AperName})", fontsize=14)
fig.tight_layout()
# Save image to FITS file
hdu_diff = fits.PrimaryHDU(imdiff)
# Header keywords to propagate from the PSF-coefficient and disk-model headers
copy_keys = [
    'PIXELSCL', 'DISTANCE',
    'INSTRUME', 'FILTER', 'PUPIL', 'CORONMSK',
    'APERNAME', 'MODULE', 'CHANNEL',
    'DET_NAME', 'DET_X', 'DET_Y', 'DET_V2', 'DET_V3'
]
hdr = hdu_diff.header
for head_temp in (inst.psf_coeff_header, hdul_out[0].header):
    for key in copy_keys:
        try:
            hdr[key] = (head_temp[key], head_temp.comments[key])
        except (AttributeError, KeyError):
            # Skip keywords missing from this source header
            pass
hdr['PIXELSCL'] = inst.pixelscale  # detector-sampled scale overrides copied value
name = obj_params['name']
# NOTE(review): trailing underscore before '.fits' looks unintentional
outfile = f'Vega/{name}_{inst.aperturename}_.fits'
hdu_diff.writeto(outfile, overwrite=True)
```
| github_jupyter |
# Deep Neural Networks (DNN) Model Development
## Preparing Packages
```
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn import metrics
from numpy import genfromtxt
from scipy import stats
from sklearn import preprocessing
from keras.callbacks import ModelCheckpoint
from keras.callbacks import Callback
from keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
import keras
from keras.layers import Dense, Flatten, Reshape,Dropout
from keras.layers import Conv2D, MaxPooling2D, LSTM
from keras.models import Sequential
from sklearn.model_selection import train_test_split
import timeit #package for recording the model running time
import time
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, Conv3D, MaxPooling3D, Reshape, BatchNormalization, MaxPooling2D
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.callbacks import ModelCheckpoint
from keras import metrics
from keras.optimizers import Adam
from keras import backend as K
from sklearn.metrics import fbeta_score
from sklearn.model_selection import KFold,StratifiedKFold,ShuffleSplit,StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report,f1_score,accuracy_score
```
## Preparing Functions
```
def win_seg(data, windowsize, overlap):
    """Segment a 4-D array into overlapping windows along the sample axis.

    Parameters
    ----------
    data : ndarray, shape (blocks, block_len, data_dim, layers)
        Input stack; flattened to (blocks*block_len, data_dim, layers)
        before windowing.
    windowsize : int
        Number of consecutive samples per window.
    overlap : float
        Fraction of `windowsize` used as the stride between window starts
        (e.g. 0.5 -> 50% overlap).

    Returns
    -------
    ndarray, shape (n_windows, windowsize, data_dim, layers)
    """
    total = data.shape[0] * data.shape[1]
    length = int((total - windowsize) / (windowsize * overlap) + 1)
    data_dim = data.shape[2]
    layers = data.shape[3]
    # BUG FIX: allocate the last axis with `layers` instead of the original
    # hard-coded 1, which raised for multi-layer input. Behavior for
    # layers == 1 (the only case used in this notebook) is unchanged.
    newdata = np.empty((length, windowsize, data_dim, layers))
    data = data.reshape(-1, data_dim, layers)
    step = windowsize * overlap  # stride between window starts
    for i in range(length):
        start = int(i * step)
        newdata[i] = data[start:start + windowsize]
    return newdata
def lab_vote(data,windowsize):
    """Majority-vote one label per 50%-overlap window, one-hot encoded."""
    # Stack labels as 4-D so win_seg can slide overlapping windows over them.
    windows = win_seg(data.reshape(-1, windowsize, 1, 1), windowsize, 0.5)
    windows = windows.reshape(windows.shape[0], windows.shape[1], windows.shape[2])
    # Majority vote within each window, then flatten to a label column.
    winners = stats.mode(windows, axis=1).mode.reshape(-1, 1)
    # One-hot encode the voted labels as float64.
    return np.float64(keras.utils.to_categorical(winners))
def lab_vote_cat(data,windowsize): # non one-hot coding
    """Majority-vote one integer label per 50%-overlap window (no one-hot)."""
    windows = win_seg(data.reshape(-1, windowsize, 1, 1), windowsize, 0.5)
    windows = windows.reshape(windows.shape[0], windows.shape[1], windows.shape[2])
    # Mode along the window axis gives the voted class per window.
    return stats.mode(windows, axis=1).mode.reshape(-1, 1)
def write_csv(data):
    """Dump *data* to 'check.csv' as comma-separated '%10.5f' values."""
    np.asarray(data).tofile('check.csv', sep=',', format='%10.5f')
def average(lst):
    """Return the arithmetic mean of *lst* (NumPy scalar)."""
    return np.mean(np.array(lst))
class TimeHistory(keras.callbacks.Callback):
    # Keras callback recording wall-clock seconds per training epoch in
    # self.times; read the list after fit() to report per-epoch timing.
    def on_train_begin(self, logs={}):
        self.times = []
    def on_epoch_begin(self, batch, logs={}):
        # Timestamp at epoch start; difference taken at epoch end.
        self.epoch_time_start = time.time()
    def on_epoch_end(self, batch, logs={}):
        self.times.append(time.time() - self.epoch_time_start)
def f1(y_true, y_pred):
    # Macro-averaged F1 metric built from Keras backend ops so it can be
    # monitored during training (used as monitor='val_f1' in checkpoints).
    y_pred = K.round(y_pred)  # threshold predicted probabilities at 0.5
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    # tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
    p = tp / (tp + fp + K.epsilon())  # per-class precision
    r = tp / (tp + fn + K.epsilon())  # per-class recall
    f1 = 2*p*r / (p+r+K.epsilon())
    # Replace NaN (classes absent from the batch) with 0.
    # NOTE(review): tf.is_nan is the TF1 name; TF2 removed it in favor of
    # tf.math.is_nan -- confirm the target TensorFlow version.
    f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
    return K.mean(f1)
```
## Convolutional LSTM Model Development
```
#loading the training and testing data
os.chdir("...") #changing working directory (placeholder path); NOTE(review): requires `import os`, not in this section's import cell
buffer = np.float64(preprocessing.scale(genfromtxt('S3_X.csv', delimiter=','))) # using S3 as an example
x_data=buffer.reshape(-1,40,30,1)
x_data=win_seg(x_data,40,0.5) # data segmentation with 0.5 overlap
#majority vote on training label
buffer = np.float64(genfromtxt('S3_Y.csv', delimiter=','))-1 #0 based index
y_data=lab_vote(buffer,40)
y_data2=lab_vote_cat(buffer,40) # for stratification purposes
#five round Stratified Random Shuffle
SRS=StratifiedShuffleSplit(n_splits=5, test_size=0.1, random_state=42) #split the train and test by 9:1
#model evaluation metrics
acc_score=list()
f_score=list()
eopch_time_record=list() # per-epoch training times (variable name misspells "epoch")
oper_time_record=list() # inference wall-clock times
i=0
for train_index, test_index in SRS.split(x_data,y_data):
    X_train, X_test = x_data[train_index], x_data[test_index]
    y_train, y_test = y_data[train_index], y_data[test_index]
    #split the train data into training (training the model) and validation (tuning hyperparameters) by 8:2
    X_training, X_validation, y_training, y_validation = train_test_split(X_train, y_train, test_size=0.20)
    #setup model parameters
    data_dim = X_train.shape[2] #y of 2D Motion Image
    timesteps = X_train.shape[1] #x of 2D Motion Image
    num_classes = y_train.shape[1]
    batchsize=300
    epcoh=300
    #build model
    model = Sequential()
    #five convolutional layers as an example, adjust the convolutional layer depth if needed
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh',input_shape=(timesteps, data_dim,1)))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    #turn the multilayer tensor into single layer tensor
    model.add(Reshape((40, -1),input_shape=(40,30,64)))
    model.add(Dropout(0.5)) #add dropout layers for controlling overfitting
    model.add(LSTM(128, return_sequences=True, input_shape=(40, 1920))) # returns a sequence of vectors
    model.add(Dropout(0.5)) #add dropout layers for controlling overfitting
    model.add(LSTM(128)) # return a single vector
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),metrics=['accuracy',f1])
    #checkpoint the epoch with the best validation F1
    checkpointer = ModelCheckpoint(filepath="2D_CNN5_LSTM_checkpoint(F1)_sss_%s.h5" % i, monitor='val_f1',verbose=1, mode='max', save_best_only=True)
    time_callback = TimeHistory() #record the model training time for each epoch
    callbacks_list = [checkpointer,time_callback]
    train_history=model.fit(X_training, y_training,
                            batch_size=batchsize, epochs=epcoh,callbacks=callbacks_list,
                            validation_data=(X_validation, y_validation))
    eopch_time=time_callback.times
    eopch_time_record.append(eopch_time) #record the training time of each epoch
    CNN_LSTM_model=load_model("2D_CNN5_LSTM_checkpoint(F1)_sss_%s.h5" % i, custom_objects={'f1': f1})
    #model operation and timing
    start=timeit.default_timer()
    y_pred=CNN_LSTM_model.predict(X_test)
    stop=timeit.default_timer()
    oper_time=stop-start
    oper_time_record.append(oper_time)
    #check the model test result
    y_pred=CNN_LSTM_model.predict(X_test) # NOTE(review): duplicate predict call -- result already computed during timing above
    y_pred = np.argmax(y_pred, axis=1)
    Y_test=np.argmax(y_test, axis=1)
    acc_score.append(accuracy_score(Y_test, y_pred)) # Evaluation of accuracy
    f_score.append(f1_score(Y_test, y_pred,average='macro')) # Evaluation of F1 score
    print("This is the", i+1, "out of ",5, "Shuffle")
    i+=1
    del model #delete the model to retrain the neural network from scratch, instead of starting from a trained model
# record performance
performance=pd.DataFrame(columns=['Acc_score','Macro_Fscore','Average_Epoch','Average_Run'])
performance['Acc_score']=acc_score
performance['Macro_Fscore']=f_score
performance['Average_Epoch']=average(eopch_time_record)
performance['Average_Run']=average(oper_time_record)
performance.to_csv("2DConv5LSTM_Performance_sss_test.csv")
```
## Baseline LSTM Model Development
```
# Reset the evaluation metrics for the baseline LSTM experiments.
acc_score=list()
f_score=list()
eopch_time_record=list()
oper_time_record=list()
#loading data
buffer = np.float64(preprocessing.scale(genfromtxt('S3_X.csv', delimiter=',')))
x_data=buffer.reshape(-1,40,30,1)
x_data=win_seg(x_data,40,0.5) # data segmentation with 0.5 overlap
x_data=x_data.reshape(x_data.shape[0],x_data.shape[1],x_data.shape[2]) #reshape the dataset as LSTM input shape
#majority vote on training label
buffer = np.float64(genfromtxt('S3_Y.csv', delimiter=','))-1 #0 based index
y_data=lab_vote(buffer,40)
i=0
# Reuses the SRS splitter defined in the ConvLSTM cell above.
for train_index, test_index in SRS.split(x_data,y_data):
    X_train, X_test = x_data[train_index], x_data[test_index]
    y_train, y_test = y_data[train_index], y_data[test_index]
    #split the train data into training (training the model) and validation (tuning hyperparameters) by 8:2
    X_training, X_validation, y_training, y_validation = train_test_split(X_train, y_train, test_size=0.20)
    #setup model parameters
    data_dim = X_train.shape[2] #y of figure
    timesteps = X_train.shape[1] #x of figure
    num_classes = y_train.shape[1]
    batchsize=300
    epcoh=300
    #Build Model
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(timesteps, data_dim))) # returns a sequence of vectors
    model.add(Dropout(0.5)) #add dropout layers for controlling overfitting
    model.add(LSTM(128)) # return a single vector
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),metrics=['accuracy',f1])
    checkpointer = ModelCheckpoint(filepath='LSTM_checkpoint(F1)_sss_%s.h5' % i, monitor='val_f1',verbose=1,mode='max', save_best_only=True)
    time_callback = TimeHistory() #record the model training time for each epoch
    callbacks_list = [checkpointer,time_callback]
    model.fit(X_training, y_training,
              batch_size=batchsize, epochs=epcoh,callbacks=callbacks_list,
              validation_data=(X_validation, y_validation))
    eopch_time=time_callback.times
    eopch_time_record.append(eopch_time) #record the training time of each epoch
    LSTM_model=load_model('LSTM_checkpoint(F1)_sss_%s.h5' % i,custom_objects={'f1': f1})
    #model operation and timing
    start=timeit.default_timer()
    y_pred=LSTM_model.predict(X_test)
    stop=timeit.default_timer()
    oper_time=stop-start
    oper_time_record.append(oper_time)
    #check the model test result
    y_pred = np.argmax(y_pred, axis=1)
    Y_test=np.argmax(y_test, axis=1)
    acc_score.append(accuracy_score(Y_test, y_pred))
    f_score.append(f1_score(Y_test, y_pred,average='macro'))
    print("This is the", i+1, "out of ",5, "Shuffle")
    del model #delete the model to retrain the neural network from scratch, instead of starting from a trained model
    i+=1
# record performance
performance=pd.DataFrame(columns=['Acc_score','Macro_Fscore','Average_Epoch','Average_Run'])
performance['Acc_score']=acc_score
performance['Macro_Fscore']=f_score
performance['Average_Epoch']=average(eopch_time_record)
performance['Average_Run']=average(oper_time_record)
performance.to_csv("LSTM_Performance_sss_test.csv")
```
## Baseline CNN Model
```
# Reset the evaluation metrics for the baseline CNN experiments.
acc_score=list()
f_score=list()
eopch_time_record=list()
oper_time_record=list()
i=0
# NOTE(review): x_data was reshaped to 3-D for the LSTM cell above, but the
# Conv2D layers below expect a 4-D (samples, time, dim, 1) input -- confirm
# the intended array shape at this point.
for train_index, test_index in SRS.split(x_data,y_data):
    X_train, X_test = x_data[train_index], x_data[test_index]
    y_train, y_test = y_data[train_index], y_data[test_index]
    #split the train data into training (training the model) and validation (tuning hyperparameters) by 8:2
    X_training, X_validation, y_training, y_validation = train_test_split(X_train, y_train, test_size=0.20)
    #setup model parameters
    data_dim = X_train.shape[2] #y of figure
    timesteps = X_train.shape[1] #x of figure
    num_classes = y_train.shape[1]
    batchsize=300
    epcoh=300
    #Build Model
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh',input_shape=(timesteps, data_dim,1)))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Conv2D(64, kernel_size=(5, 30), strides=(1, 1),padding='same',
              activation='tanh'))
    model.add(Flatten())
    model.add(Dropout(0.5)) #add dropout layers for controlling overfitting
    model.add(Dense(128, activation='tanh'))
    model.add(Dropout(0.5)) #add dropout layers for controlling overfitting
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(num_classes, activation='softmax'))#final fully connected layer with softmax (classification)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),metrics=['accuracy',f1])
    checkpointer = ModelCheckpoint(filepath='2D_CNN_checkpoint(F1)_sss_%s.h5' % i, monitor='val_f1',mode='max',verbose=1, save_best_only=True)
    time_callback = TimeHistory() #record the model training time for each epoch
    callbacks_list = [checkpointer,time_callback]
    model.fit(X_training, y_training,
              batch_size=batchsize, epochs=epcoh,callbacks=callbacks_list,
              validation_data=(X_validation, y_validation))
    eopch_time=time_callback.times
    eopch_time_record.append(eopch_time) #record the training time of each epoch
    CNN_model=load_model('2D_CNN_checkpoint(F1)_sss_%s.h5' % i, custom_objects={'f1': f1})
    #model operation and timing
    start=timeit.default_timer()
    y_pred=CNN_model.predict(X_test)
    stop=timeit.default_timer()
    oper_time=stop-start
    oper_time_record.append(oper_time)
    #check the model test result
    y_pred = np.argmax(y_pred, axis=1)
    Y_test=np.argmax(y_test, axis=1)
    acc_score.append(accuracy_score(Y_test, y_pred))
    f_score.append(f1_score(Y_test, y_pred,average='macro'))
    print("This is the", i+1, "out of ",5, "Shuffle")
    del model #delete the model to retrain the neural network from scratch, instead of starting from a trained model
    i+=1
# record performance
import pandas as pd
performance=pd.DataFrame(columns=['Acc_score','Macro_Fscore','Average_Epoch','Average_Run'])
performance['Acc_score']=acc_score
performance['Macro_Fscore']=f_score
performance['Average_Epoch']=average(eopch_time_record)
performance['Average_Run']=average(oper_time_record)
performance.to_csv("2DConv_Performance_sss_test.csv")
```
# Benchmark Machine Learning-based Model Development
## Packages Preparation
```
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report,f1_score,accuracy_score
import timeit
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
```
## Functions Preparation
```
def win_seg(data, windowsize, overlap):
    """Segment a 4-D array into overlapping windows along the sample axis.

    Parameters
    ----------
    data : ndarray, shape (blocks, block_len, data_dim, layers)
        Input stack; flattened to (blocks*block_len, data_dim, layers)
        before windowing.
    windowsize : int
        Number of consecutive samples per window.
    overlap : float
        Fraction of `windowsize` used as the stride between window starts
        (e.g. 0.5 -> 50% overlap).

    Returns
    -------
    ndarray, shape (n_windows, windowsize, data_dim, layers)
    """
    # NOTE(review): this section's import cell does not import numpy;
    # `np` relies on earlier notebook cells being executed.
    total = data.shape[0] * data.shape[1]
    length = int((total - windowsize) / (windowsize * overlap) + 1)
    data_dim = data.shape[2]
    layers = data.shape[3]
    # BUG FIX: allocate the last axis with `layers` instead of the original
    # hard-coded 1, which raised for multi-layer input. Behavior for
    # layers == 1 (the only case used in this notebook) is unchanged.
    newdata = np.empty((length, windowsize, data_dim, layers))
    data = data.reshape(-1, data_dim, layers)
    step = windowsize * overlap  # stride between window starts
    for i in range(length):
        start = int(i * step)
        newdata[i] = data[start:start + windowsize]
    return newdata
def lab_vote(data,windowsize):
    # Majority-vote one label per 50%-overlap window, one-hot encoded.
    # Duplicate of the lab_vote helper from the DNN section above.
    # NOTE(review): depends on win_seg, scipy.stats, numpy, and keras, none
    # of which are imported in this section's package cell -- relies on
    # earlier notebook cells having been executed.
    y_data=data.reshape(-1,windowsize,1,1)
    y_data=win_seg(y_data,windowsize,0.5)
    y_data=y_data.reshape(y_data.shape[0],y_data.shape[1],y_data.shape[2])
    y_data=stats.mode(y_data,axis=1)
    y_data=y_data.mode
    y_data=y_data.reshape(-1,1)
    y_data=np.float64(keras.utils.to_categorical(y_data))
    return y_data
def lab_vote_cat(data, windowsize):
    """Majority-vote per-window labels, integer-coded (no one-hot).

    Same windowing as lab_vote but returns the raw modal label per
    window as a column vector — used for stratified splitting.
    """
    votes = win_seg(data.reshape(-1, windowsize, 1, 1), windowsize, 0.5)
    votes = votes.reshape(votes.shape[0], votes.shape[1], votes.shape[2])
    majority = stats.mode(votes, axis=1).mode
    return majority.reshape(-1, 1)
def preparation(dataset):
    """Load a labelled CSV and return a standardized train/test split.

    The first column of *dataset* is the label; the remaining columns are
    features, standardized column-wise. Returns
    (X_train, X_test, x_data, y_train, y_test, y_data) with an 80/20 split
    at random_state=42.
    """
    raw = pd.read_csv(dataset)  # read once (original parsed the file twice)
    x_data = preprocessing.scale(raw.iloc[:, 1:])  # column-wise standardization
    y_data = raw.iloc[:, 0]
    X_train, X_test, y_train, y_test = train_test_split(
        x_data, y_data, test_size=0.20, random_state=42)
    return X_train, X_test, x_data, y_train, y_test, y_data
def TrainModels(X_train, X_test, y_train, y_test):
    """Train five benchmark classifiers and report accuracy/F1/timings.

    Fits SVM, Naive Bayes, KNN, Decision Tree and Random Forest on the
    training split, scores each on the test split, and returns a DataFrame
    with one row per model: Accuracy, Macro F1, Micro F1, train time and
    prediction time (seconds).

    Bug fixed: the original referenced `result` and `modelnames` without
    ever defining them (NameError); predictions are now collected and
    concatenated explicitly.
    """
    classifiers = [
        ('SVM', SVC(gamma='auto', random_state=42)),
        ('Naive Bayes', GaussianNB()),
        ('KNN', KNeighborsClassifier(n_neighbors=7)),  # k from a simple grid search
        ('Decision Tree', DecisionTreeClassifier(random_state=42)),
        ('Random Forest', RandomForestClassifier(n_estimators=100)),
    ]
    modelnames = [name for name, _ in classifiers]
    train_time = []
    run_time = []
    predictions = []
    for _, clf in classifiers:
        # Time the fit and the predict phases separately.
        start = timeit.default_timer()
        clf.fit(X_train, y_train)
        train_time.append(timeit.default_timer() - start)
        start = timeit.default_timer()
        predictions.append(pd.DataFrame(data=clf.predict(X_test)))
        run_time.append(timeit.default_timer() - start)
    # One column of predictions per model, in the same order as modelnames.
    result = pd.concat(predictions, axis=1)
    report = pd.DataFrame(columns=['Models', 'Accuracy', 'Macro F1', 'Micro F1',
                                   'Train Time', 'Run Time'])
    report['Models'] = modelnames
    for i in range(len(result.columns)):
        report.iloc[i, 1] = accuracy_score(y_test, result.iloc[:, i])
        report.iloc[i, 2] = f1_score(y_test, result.iloc[:, i], average='macro')
        report.iloc[i, 3] = f1_score(y_test, result.iloc[:, i], average='micro')
        if i < len(train_time):
            report.iloc[i, 4] = train_time[i]
            report.iloc[i, 5] = run_time[i]
    return report
```
## Sliding Window Segmentation
```
# Load raw sensor data, segment it with 0.5 overlap, and export the
# ML-ready feature/label CSVs.
# NOTE(review): `np` and `genfromtxt` (presumably numpy's) are imported in
# an earlier cell — confirm. `buffer` also shadows the builtin of that name.
#loading the training and testing data
os.chdir("...") #changing working directory
buffer = np.float64(genfromtxt('S3_X.csv', delimiter=','))
x_data=buffer.reshape(-1,40,30,1)
x_data=win_seg(x_data,40,0.5) # data segmentation with 0.5 overlap
x_data=x_data.reshape(-1,40,30)
# Flatten windows to rows (one row per timestep, 30 features each).
x_data_pd=x_data.reshape(-1,30)
x_data_pd = pd.DataFrame(data=x_data_pd)
# Tag every row with its 1-based window id (40 rows per window).
adj_win=[i//40+1 for i in range(len(x_data_pd.iloc[:,0]))]
x_data_pd["adjwin"]=adj_win
x_data_pd.to_csv("S3_X_ML.csv")
#majority vote on training label
buffer = np.float64(genfromtxt('S3_Y.csv', delimiter=',')) #0 based index
y_data=lab_vote(buffer,40)
y_data2=lab_vote_cat(buffer,40) # for stratification purposes
y_data_pd = pd.DataFrame(data=y_data2)
y_data_pd.to_csv("S3_Y_ML.csv")
```
## Feature Selection Using Recursive Feature Elimination
```
# Recursive feature elimination with 10-fold stratified CV on a linear SVM,
# then export the retained feature columns alongside the labels.
X, y = X_train, y_train
svc = SVC(kernel="linear")
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(10),scoring='f1_macro')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
#plot number of features VS. cross-validation scores
# NOTE(review): `grid_scores_` was deprecated and removed in scikit-learn
# >= 1.2 in favor of `cv_results_` — this cell will break on new versions.
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
# Export the best features
sel_features=pd.DataFrame()
# NOTE(review): labels come from y_test but features below come from the
# full `fullfeatures.csv` — verify the rows actually line up.
sel_features["label"]=y_test
fullfeatures=pd.read_csv("fullfeatures.csv")
names=list(fullfeatures.columns.values)[1:]  # NOTE(review): unused below
# Keep only the columns RFECV marked as selected (offset +1 skips the label).
for index, val in enumerate(list(rfecv.support_)):
    if val:
        sel_features=pd.concat([sel_features,fullfeatures.iloc[:,index+1]],axis=1)
sel_features.to_csv("S3_Dataset_ML_SelectetedFeatures.csv")
```
## Test on Selected Features
```
# Re-evaluate all benchmark models on the RFE-selected feature subset
# using 5-fold stratified shuffling, and export the combined report.
# Fix: ShuffleSplit / StratifiedShuffleSplit were used but never imported.
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
X_train, X_test, X_data, y_train, y_test, y_data = preparation("S3_Dataset_ML_SelectetedFeatures.csv")
sf = ShuffleSplit(n_splits=5, test_size=0.1, random_state=42)  # Random Shuffle (kept for reference)
SRS = StratifiedShuffleSplit(n_splits=5, test_size=0.1, random_state=42)  # Stratified Shuffle
finalreport = pd.DataFrame(columns=['Models', 'Accuracy', 'Macro F1', 'Micro F1', 'Train Time', 'Run Time'])
for train_index, test_index in SRS.split(X_data, y_data):
    X_train, X_test = X_data[train_index], X_data[test_index]
    y_train, y_test = y_data[train_index], y_data[test_index]
    # DataFrame.append was removed in pandas 2.0; pd.concat is equivalent here.
    finalreport = pd.concat([finalreport, TrainModels(X_train, X_test, y_train, y_test)])
finalreport.to_csv("S3_Dataset_ML_SelectetedFeatures_Evalucation.csv")
```
| github_jupyter |
<h2>Cheat sheet for numpy/scipy factorizations and operations on sparse matrices</h2>
Python's API for manipulating sparse matrices is not as well designed as Matlab's.
In Matlab, you can do (almost) anything to a sparse matrix with the same syntax
as a dense matrix, or any mixture of dense and sparse. In numpy/scipy, you often
have to use different syntax for sparse matrices. Here is my own cheat sheet for
how to do operations that involve sparse matrices in numpy/scipy.
This is also a cheat sheet for some of the dense matrix factorizations in scipy, namely LU, Cholesky, and QR.
```
# These are the standard imports for CS 111.
# This list may change as the quarter goes on.
import os
import math
import numpy as np
import numpy.linalg as npla
import scipy as sp
import scipy.sparse.linalg as spla
from scipy import sparse
from scipy import linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
%matplotlib tk
# Demo script: building, converting, multiplying, indexing, solving with and
# factorizing sparse/dense matrices in numpy/scipy.
# create sparse from list of triples
triples = [
    (0, 0, 4.0),
    (0, 1, -1.0),
    (0, 2, -1.0),
    (1, 0, -1.0),
    (1, 1, 4.0),
    (1, 3, -1.0),
    (2, 0, -1.0),
    (2, 2, 4.0),
    (2, 3, -1.0),
    (3, 1, -1.0),
    (3, 2,-1.0),
    (3, 3, 4.0)
]
rownum = [t[0] for t in triples]
colnum = [t[1] for t in triples]
values = [t[2] for t in triples]
nrows = 4
ncols = 4
A = sparse.csr_matrix((values, (rownum, colnum)), shape = (nrows, ncols))
print('\nA:'); print(A)
# sparse to dense
# NOTE(review): .todense() returns np.matrix, not ndarray; .toarray() is the
# ndarray alternative.
Ad = A.todense()
print('\nAd:'); print(Ad)
# dense to sparse
As = sparse.csr_matrix(Ad)
print('\nAs:'); print(As)
# sparse matrix times dense vector
v = np.array(range(4))
print('\nv:', v)
w = As.dot(v)
print('\nw with As.dot:', w)
w = A @ v
print('\nw with A @ v :', w)
w = Ad @ v
print('\nw with Ad @ v:', w)
# sparse matrix times sparse matrix
Bs = As @ As
print('\nAs @ As:'); print(Bs)
Bd = Ad @ Ad
print('\nAd @ Ad:'); print(Bd)
Bdiff = Bs - Bd
print('\ndiff:'); print(Bdiff)
norm_diff = npla.norm(Bdiff)
print('\nnorm_diff:', norm_diff)
# sparse transpose
Ast = As.T
print('\nAs.T:'); print(Ast)
Adt = Ad.T
print('\nAd.T:'); print(Adt)
norm_diff = npla.norm(Adt - Ast)
print('\nnorm_diff:', norm_diff)
# indexing sparse matrix
print('\nAs[2,3]:', As[2,3])
print('\nAs[2,:]:'); print(As[2,:])
print('\nAs[:2,1:]:'); print(As[:2,1:])
# dense Ax = b solver
x = npla.solve(Ad,v)
print('\nrhs :', v)
print('\nx :', x)
print('\nA @ x:', Ad @ x)
print('\nrelative residual norm:', npla.norm(v - Ad @ x) / npla.norm(v))
# sparse Ax = b solver
x = spla.spsolve(As,v)
print('\nrhs :', v)
print('\nx:', x)
print('\nA @ x:', As @ x)
print('\nrelative residual norm:', npla.norm(v - As @ x) / npla.norm(v))
# dense least squares solver (overdetermined 6x4 system)
B = np.round(10*np.random.rand(6,4))
print('\nB:'); print(B)
b = np.random.rand(6)
solution = npla.lstsq(B, b, rcond = None)
x = solution[0]
print('\nrhs :', b)
print('\nx :', x)
print('\nB @ x:', B @ x)
print('\nrelative residual norm:', npla.norm(b - B @ x) / npla.norm(b))
# dense LU factorization
B = np.round(10*np.random.rand(4,4))
print('\nB:'); print(B)
P, L, U = linalg.lu(B)
print('\nP:'); print(P)
print('\nL:'); print(L)
print('\nU:'); print(U)
norm_diff = npla.norm(B - P @ L @ U)
print('\nnorm_diff:', norm_diff)
# dense Cholesky factorization (Ad is symmetric positive definite)
L = linalg.cholesky(Ad, lower = True) # omit second parameter to get upper triangular factor
print('\nL:'); print(L)
print('\nL @ L.T:'); print(L @ L.T)
print('\nAd:'); print(Ad)
print('\nnorm_diff:', npla.norm(L @ L.T - Ad))
# dense QR factorization
print('\nB:'); print(B)
Q,R = linalg.qr(B)
print('\nQ:'); print(Q)
print('\nQ @ Q.t:'); print(Q @ Q.T)
print('\nR:'); print(R)
print('\nQ @ R:'); print(Q @ R)
print('\nnorm_diff:', npla.norm(Q @ R - B))
# sparse LU factorization (splu requires CSC format)
print('\nB:'); print(B)
Bs = sparse.csc_matrix(B)
print('\nBs:'); print(Bs)
lu = spla.splu(Bs)
print('\nL:'); print(lu.L)
print('\nU:'); print(lu.U)
print('\nperm_r:', lu.perm_r)
print('\nperm_c:', lu.perm_c)
# sparse LU factorization of large temperature matrix
# NOTE(review): `cs111` is a course-local package (not on PyPI) — this cell
# only runs with it on the path.
AA = cs111.make_A(100)
print('\nA dimensions, nonzeros:', AA.shape, AA.size)
AA = sparse.csc_matrix(AA)
lu = spla.splu(AA)
print('\nL dimensions, nonzeros:', lu.L.shape, lu.L.size)
#plt.spy(lu.L)
# sparse Cholesky factorization (hard to do, there's a python wrapper for cholmod somewhere)
```
| github_jupyter |
## Classification - Before and After MMLSpark
### 1. Introduction
<p><img src="https://images-na.ssl-images-amazon.com/images/G/01/img16/books/bookstore/landing-page/1000638_books_landing-page_bookstore-photo-01.jpg" style="width: 500px;" title="Image from https://images-na.ssl-images-amazon.com/images/G/01/img16/books/bookstore/landing-page/1000638_books_landing-page_bookstore-photo-01.jpg" /><br /></p>
In this tutorial, we perform the same classification task in two
different ways: once using plain **`pyspark`** and once using the
**`mmlspark`** library. The two methods yield the same performance,
but one of the two libraries is drastically simpler to use and iterate
on (can you guess which one?).
The task is simple: Predict whether a user's review of a book sold on
Amazon is good (rating > 3) or bad based on the text of the review. We
accomplish this by training LogisticRegression learners with different
hyperparameters and choosing the best model.
### 2. Read the data
We download and read in the data. We show a sample below:
```
# Read the 10K Amazon book-review sample from public Azure blob storage.
# NOTE(review): assumes a live `spark` session provided by the notebook
# runtime — confirm.
rawData = spark.read.parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/BookReviewsFromAmazon10K.parquet")
rawData.show(5)
```
### 3. Extract more features and process data
Real data however is more complex than the above dataset. It is common
for a dataset to have features of multiple types: text, numeric,
categorical. To illustrate how difficult it is to work with these
datasets, we add two numerical features to the dataset: the **word
count** of the review and the **mean word length**.
```
from pyspark.sql.functions import udf
from pyspark.sql.types import *
def wordCount(s):
    """Return the number of whitespace-separated tokens in *s*."""
    tokens = s.split()
    return len(tokens)
def wordLength(s):
    """Return the mean token length of *s*, rounded to 2 decimals.

    Returns 0.0 for an empty or whitespace-only string; the original
    code fed an empty list to np.mean there, producing NaN plus a
    RuntimeWarning, which would surface as a null in the Spark UDF.
    """
    import numpy as np
    lengths = [len(w) for w in s.split()]
    if not lengths:  # guard the empty-review edge case
        return 0.0
    return round(float(np.mean(lengths)), 2)
# Wrap the two Python functions as Spark UDFs with explicit return types.
wordLengthUDF = udf(wordLength, DoubleType())
wordCountUDF = udf(wordCount, IntegerType())
from mmlspark.stages import UDFTransformer
# NOTE: these rebindings deliberately shadow the functions above — from here
# on, wordLength/wordCount are output column names, not callables.
wordLength = "wordLength"
wordCount = "wordCount"
wordLengthTransformer = UDFTransformer(inputCol="text", outputCol=wordLength, udf=wordLengthUDF)
wordCountTransformer = UDFTransformer(inputCol="text", outputCol=wordCount, udf=wordCountUDF)
from pyspark.ml import Pipeline
# Apply both transformers, derive the boolean label (rating > 3), and drop
# the raw rating column.
data = Pipeline(stages=[wordLengthTransformer, wordCountTransformer]) \
    .fit(rawData).transform(rawData) \
    .withColumn("label", rawData["rating"] > 3).drop("rating")
data.show(5)
```
### 4a. Classify using pyspark
To choose the best LogisticRegression classifier using the `pyspark`
library, we need to *explicitly* perform the following steps:
1. Process the features:
* Tokenize the text column
* Hash the tokenized column into a vector using hashing
* Merge the numeric features with the vector in the step above
2. Process the label column: cast it into the proper type.
3. Train multiple LogisticRegression algorithms on the `train` dataset
with different hyperparameters
4. Compute the area under the ROC curve for each of the trained models
and select the model with the highest metric as computed on the
`test` dataset
5. Evaluate the best model on the `validation` set
As you can see below, there is a lot of work involved and a lot of
steps where something can go wrong!
```
from pyspark.ml.feature import Tokenizer, HashingTF
from pyspark.ml.feature import VectorAssembler
# Manual pyspark pipeline: featurize text, assemble features, cast the
# label, split, sweep regParam, and pick the best model by test-set AUC.
# Featurize text column
tokenizer = Tokenizer(inputCol="text", outputCol="tokenizedText")
numFeatures = 10000
hashingScheme = HashingTF(inputCol="tokenizedText",
                          outputCol="TextFeatures",
                          numFeatures=numFeatures)
tokenizedData = tokenizer.transform(data)
featurizedData = hashingScheme.transform(tokenizedData)
# Merge text and numeric features in one feature column
featureColumnsArray = ["TextFeatures", "wordCount", "wordLength"]
assembler = VectorAssembler(
    inputCols = featureColumnsArray,
    outputCol="features")
assembledData = assembler.transform(featurizedData)
# Select only columns of interest
# Convert rating column from boolean to int
# NOTE(review): IntegerType comes from the earlier `from pyspark.sql.types
# import *` cell.
processedData = assembledData \
    .select("label", "features") \
    .withColumn("label", assembledData.label.cast(IntegerType()))
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.classification import LogisticRegression
# Prepare data for learning: 60/20/20 train/test/validation split.
train, test, validation = processedData.randomSplit([0.60, 0.20, 0.20], seed=123)
# Train the models on the 'train' data
lrHyperParams = [0.05, 0.1, 0.2, 0.4]
logisticRegressions = [LogisticRegression(regParam = hyperParam)
                       for hyperParam in lrHyperParams]
evaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction",
                                          metricName="areaUnderROC")
metrics = []
models = []
# Select the best model: fit each candidate, score it on `test`.
for learner in logisticRegressions:
    model = learner.fit(train)
    models.append(model)
    scoredData = model.transform(test)
    metrics.append(evaluator.evaluate(scoredData))
bestMetric = max(metrics)
bestModel = models[metrics.index(bestMetric)]
# Get AUC on the validation dataset
scoredVal = bestModel.transform(validation)
print(evaluator.evaluate(scoredVal))
```
### 4b. Classify using mmlspark
Life is a lot simpler when using `mmlspark`!
1. The **`TrainClassifier`** Estimator featurizes the data internally,
as long as the columns selected in the `train`, `test`, `validation`
dataset represent the features
2. The **`FindBestModel`** Estimator finds the best model from a pool of
trained models by finding the model which performs best on the `test`
dataset given the specified metric
3. The **`ComputeModelStatistics`** Transformer computes the different
metrics on a scored dataset (in our case, the `validation` dataset)
at the same time
```
from mmlspark.train import TrainClassifier, ComputeModelStatistics
from mmlspark.automl import FindBestModel
# Same task via mmlspark: TrainClassifier featurizes internally,
# FindBestModel sweeps the candidates, ComputeModelStatistics scores.
# Prepare data for learning
train, test, validation = data.randomSplit([0.60, 0.20, 0.20], seed=123)
# Train the models on the 'train' data
# NOTE(review): LogisticRegression relies on the import in the earlier
# pyspark cell (4a) — confirm that cell has been run first.
lrHyperParams = [0.05, 0.1, 0.2, 0.4]
logisticRegressions = [LogisticRegression(regParam = hyperParam)
                       for hyperParam in lrHyperParams]
lrmodels = [TrainClassifier(model=lrm, labelCol="label", numFeatures=10000).fit(train)
            for lrm in logisticRegressions]
# Select the best model by AUC on the `test` split
bestModel = FindBestModel(evaluationMetric="AUC", models=lrmodels).fit(test)
# Get AUC on the validation dataset
predictions = bestModel.transform(validation)
metrics = ComputeModelStatistics().transform(predictions)
print("Best model's AUC on validation set = "
      + "{0:.2f}%".format(metrics.first()["AUC"] * 100))
```
| github_jupyter |
# Coordinate Descent
### Lower Bound, Take 4
Ensure feasibility of "Lower Bound, Take 2" by adjusting alpha as necessary.
### Lower Bound, Take 3
Ensure feasibility by allowing a stochastic mixture with the MLE. Doesn't work (not DCP).
Assume $r_{\min} = 0$ for simplicity. Idea for online solving $$
\begin{aligned}
&\!\min_{Q \succeq 0} &\qquad& \sum_{(w,r)} w r Q_{w, r}, \\
&\text{subject to} & & \sum_{(w,r)} w Q_{w,r} = 1, \\
& & & \sum_{(w,r)} Q_{w,r} = 1, \\
& & & \sum_n \log(Q_{w_n, r_n}) \geq \phi
\end{aligned}
$$ where $\phi = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \sum_n \log(Q^{\text{(mle)}}_{w_n, r_n})$. Because the support of $Q$ is at most the empirical support plus $(w_{\min}, 0)$ and $(w_{\max}, 0)$ we will maintain two variables $q_{\min}$ and $q_{\max}$ corresponding to $w_{\min}$ and $w_{\max}$ respectively. Otherwise we need one primal variable for each data point. However we will use two primal variables corresponding to $r = 0$ and $r = 1$ respectively. We will split each datapoint into two points whose fractional counts are proportional to $r$ and $(1 - r)$ respectively.
At time $t$ we receive $(w_t, r_t)$ and we want to determine $q_t$. We are allowed to choose the new $q_{\min}$ and $q_{\max}$ arbitrarily. For points $q_{<t}$ we are allowed to scale them by $\psi_0$ and $\psi_1$, corresponding to points with $r = 0$ and $r = 1$ respectively. We are also allowed to stochastically mix in the maximum likelihood solution according to $\psi_{\text{mle}}$. We assume $(q_{0,<t}, q_{1,<t}, q_{<t,\min}, q_{<t,\max})$ is feasible before receiving $(w_t, r_t)$. Then
$$
\begin{aligned}
&\!\min_{q_{0,t}, q_{1,t}, q_{\min}, q_{\max}, \psi_0, \psi_1 \succeq 0, \psi_{\text{mle}} \in [0, 1]} &\qquad& q_{1,t} w_t + \psi_1 v_{<t} + \psi_{\text{mle}} v_{\text{mle}} \\
&\text{subject to} & & w_t q_{0,t} + w_t q_{1,t} + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi_0 w_{0,<t} q_{0,<t} + \psi_1 w_{1,<t} q_{1,<t} = 1, \\
& & & q_{0,t} + q_{1,t} + q_{\min} + q_{\max} + \psi_0 q_{0,<t} + \psi_1 q_{1,<t} = 1, \\
& & & (1 - r_t) \log(q_{0,t}) + r_t \log(q_{1,t}) + (t - r_{<t}) \log(\psi_0) + r_{<t} \log(\psi_1) \geq \phi - \mathcal{L}_{<t} = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \mathcal{L}^{\text{(mle)}}_t - \mathcal{L}_{<t}
\end{aligned}
$$ where $v_{<t}$ is the previous lower bound, $\mathcal{L}^{\text{(mle)}}_t$ is the mle likelihood of the observed data including point $t$, and $\mathcal{L}_{<t}$ is the previously obtained likelihood of the observed data for the lower bound.
### Lower Bound, Take 2
Better, but still has feasibility issues as the stream progresses.
Assume $r_{\min} = 0$ for simplicity. Idea for online solving $$
\begin{aligned}
&\!\min_{Q \succeq 0} &\qquad& \sum_{(w,r)} w r Q_{w, r}, \\
&\text{subject to} & & \sum_{(w,r)} w Q_{w,r} = 1, \\
& & & \sum_{(w,r)} Q_{w,r} = 1, \\
& & & \sum_n \log(Q_{w_n, r_n}) \geq \phi
\end{aligned}
$$ where $\phi = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \sum_n \log(Q^{\text{(mle)}}_{w_n, r_n})$. Because the support of $Q$ is at most the empirical support plus $(w_{\min}, 0)$ and $(w_{\max}, 0)$ we will maintain two variables $q_{\min}$ and $q_{\max}$ corresponding to $w_{\min}$ and $w_{\max}$ respectively. Otherwise we need one primal variable for each data point.
At time $t$ we receive $(w_t, r_t)$ and we want to determine $q_t$. We are allowed to choose the new $q_{\min}$ and $q_{\max}$ arbitrarily. For points $q_{<t}$ we are allowed to scale them by $\psi_0$ and $\psi_1$, corresponding to points with $r = 0$ and $r = 1$ respectively. We assume $(q_{0,<t}, q_{1,<t}, q_{<t,\min}, q_{<t,\max})$ is feasible before receiving $(w_t, r_t)$. Then
$$
\begin{aligned}
&\!\min_{q_t, q_{\min}, q_{\max},\psi_0, \psi_1 \succeq 0} &\qquad& q_t w_t r_t + \psi_1 v_{<t} \\
&\text{subject to} & & w_t q_t + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi_0 w_{0,<t} q_{0,<t} + \psi_1 w_{1,<t} q_{1,<t} = 1, \\
& & & q_t + q_{\min} + q_{\max} + \psi_0 q_{0,<t} + \psi_1 q_{1,<t} = 1, \\
& & & \log(q_t) + (t - r_{<t}) \log(\psi_0) + r_{<t} \log(\psi_1) \geq \phi - \mathcal{L}_{<t} = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \mathcal{L}^{\text{(mle)}}_t - \mathcal{L}_{<t}
\end{aligned}
$$ where $v_{<t}$ is the previous lower bound, $\mathcal{L}^{\text{(mle)}}_t$ is the mle likelihood of the observed data including point $t$, and $\mathcal{L}_{<t}$ is the previously obtained likelihood of the observed data for the lower bound.
After computing $q_t$ we increment $q_{0,<t}$ and $q_{1,<t}$ proportional to $1 - r_t$ and $r_t$ respectively; and $w_{0,<t}$ and $w_{1,<t}$ by $w_t (1 - r_t)$ and $w_t r_t$ respectively.
### Lower Bound
Starts out good, but then runs into infeasibility.
Assume $r_{\min} = 0$ for simplicity. Idea for online solving $$
\begin{aligned}
&\!\min_{Q \succeq 0} &\qquad& \sum_{(w,r)} w r Q_{w, r}, \\
&\text{subject to} & & \sum_{(w,r)} w Q_{w,r} = 1, \\
& & & \sum_{(w,r)} Q_{w,r} = 1, \\
& & & \sum_n \log(Q_{w_n, r_n}) \geq \phi
\end{aligned}
$$ where $\phi = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \sum_n \log(Q^{\text{(mle)}}_{w_n, r_n})$. Because the support of $Q$ is at most the empirical support plus $(w_{\min}, 0)$ and $(w_{\max}, 0)$ we will maintain two variables $q_{\min}$ and $q_{\max}$ corresponding to $w_{\min}$ and $w_{\max}$ respectively. Otherwise we need one primal variable for each data point.
At time $t$ we receive $(w_t, r_t)$ and we want to determine $q_t$. We are allowed to choose the new $q_{\min}$ and $q_{\max}$ arbitrarily. For points $q_{<t}$ we are only allowed to scale them by $\psi$. We assume $(q_{<t}, q_{<t,\min}, q_{<t,\max})$ is feasible before receiving $(w_t, r_t)$. Then
$$
\begin{aligned}
&\!\min_{q_t, q_{\min}, q_{\max}, \psi \succeq 0} &\qquad& q_t w_t r_t + \psi v_{<t} \\
&\text{subject to} & & w_t q_t + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi w_{<t} q_{<t} = 1, \\
& & & q_t + q_{\min} + q_{\max} + \psi q_{<t} = 1, \\
& & & \log(q_t) + t \log(\psi) \geq \phi - \mathcal{L}_{<t} = -\frac{1}{2} \chi^{2,\alpha}_{(1)} + \mathcal{L}^{\text{(mle)}}_t - \mathcal{L}_{<t}
\end{aligned}
$$ where $v_{<t}$ is the previous lower bound, $\mathcal{L}^{\text{(mle)}}_t$ is the mle likelihood of the observed data including point $t$, and $\mathcal{L}_{<t}$ is the previously obtained likelihood of the observed data for the lower bound. Substituting $q_{<t} + q_{<t,\min} + q_{<t,\max} = 1$ and $w_{<t} q_{<t} + w_{\min} q_{<t,\min} + w_{\max} q_{<t,\max} = 1$ yields
$$
\begin{aligned}
&\!\min_{q_t, q_{\min}, q_{\max} \succeq 0, \psi \in [0, 1]} &\qquad& q_t w_t r_t + \psi v_{<t} \\
&\text{subject to} & & w_t q_t + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi (1 - w_{\min} q_{<t,\min} - w_{\max} q_{<t,\max}) = 1, \\
& & & q_t + q_{\min} + q_{\max} + \psi (1 - q_{<t,\max} - q_{<t,\min}) = 1 \\
& & & -\log(q_t) - t \log(\psi) - \frac{1}{2} \chi^{2,\alpha}_{(1)} + \mathcal{L}^{\text{(mle)}}_t - \mathcal{L}_{<t} \leq 0
\end{aligned}
$$
### MLE
Idea for online solving $$
\begin{aligned}
&\!\max_{Q \succeq 0} &\qquad& \sum_n \log(Q_{w_n, r_n}), \\
&\text{subject to} & & \sum_{(w,r)} w Q_{w,r} = 1, \\
& & & \sum_{(w,r)} Q_{w,r} = 1.
\end{aligned}
$$
Because the support of $Q$ is at most the empirical support plus $w_{\min}$ and $w_{\max}$ we will maintain two variables $q_{\min}$ and $q_{\max}$ corresponding to $w_{\min}$ and $w_{\max}$ respectively. Otherwise we need one primal variable for each data point.
At time $t$ we receive $(w_t, r_t)$ and we want to determine $q_t$. We are allowed to choose the new $q_{\min}$ and $q_{\max}$ arbitrarily. For points $q_{<t}$ we are only allowed to scale them by $\psi$. We assume $(q_{<t}, q_{<t,\min}, q_{<t,\max})$ is feasible before receiving $(w_t, r_t)$. Then
$$
\begin{aligned}
&\!\max_{q_t, q_{\min}, q_{\max}, \psi > 0} &\qquad& t \log(\psi) + \log(q_t), \\
&\text{subject to} & & w_t q_t + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi w_{<t} q_{<t} = 1, \\
& & & q_t + q_{\min} + q_{\max} + \psi q_{<t} = 1
\end{aligned}
$$
Substituting $q_{<t} + q_{<t,\min} + q_{<t,\max} = 1$ and $w_{<t} q_{<t} + w_{\min} q_{<t,\min} + w_{\max} q_{<t,\max} = 1$ yields
$$
\begin{aligned}
&\!\max_{q_t, q_{\min}, q_{\max}, \psi > 0} &\qquad& t \log(\psi) + \log(q_t), \\
&\text{subject to} & & w_t q_t + w_{\min} q_{\min} + w_{\max} q_{\max} + \psi (1 - w_{\min} q_{<t,\min} - w_{\max} q_{<t,\max}) = 1, \\
& & & q_t + q_{\min} + q_{\max} + \psi (1 - q_{<t,\max} - q_{<t,\min}) = 1
\end{aligned}
$$
At the beginning of time we can initialize with $$
\begin{aligned}
q_{0,\min} &= \frac{w_{\max} - 1}{w_{\max} - w_{\min}} \\
q_{0,\max} &= \frac{1 - w_{\min}}{w_{\max} - w_{\min}}
\end{aligned}
$$ (these are the unique values satisfying $w_{\min} q_{0,\min} + w_{\max} q_{0,\max} = 1$ and $q_{0,\min} + q_{0,\max} = 1$; the original text had the two formulas swapped).
### Code
```
class OnlineCoordinateDescentMLE:
    """Online coordinate-descent solver for the MLE problem in the notes above.

    Maintains point masses at w_min (qmin) and w_max (qmax) plus the mass
    assigned to the most recent data point; past points can only be scaled
    by a common factor (the 4th primal variable). Each update() solves one
    small convex program with cvxopt.
    """

    def __init__(self, wmin, wmax):
        from cvxopt import matrix
        assert wmax > 1
        assert wmin >= 0
        assert wmin < wmax
        self.wmin = wmin
        self.wmax = wmax
        # Initial masses: the unique (qmin, qmax) with
        # wmin*qmin + wmax*qmax = 1 and qmin + qmax = 1.
        self.qmin = (wmax - 1) / (wmax - wmin)
        self.qmax = (1 - wmin) / (wmax - wmin)
        self.obj = 0        # accumulated log-likelihood of the solution
        self.vmin = 0       # running estimate of sum w*r*q (the value)
        self.lastphi = 0    # last scaling factor applied to past points
        # G, h encode nonnegativity of the 4 primal vars (-x <= 0).
        self.G = matrix([ [ -1, 0, 0, 0 ],
                          [ 0, -1, 0, 0 ],
                          [ 0, 0, -1, 0 ],
                          [ 0, 0, 0, -1 ],
                        ],
                        tc='d').T
        self.h = matrix([ 0, 0, 0, 0 ], tc='d')
        # RHS of the two equality constraints (scaled by 1/wmax).
        self.b = matrix([ 1 / wmax, 1 ], tc='d')
        self.t = 0          # total (fractional) count of points seen

    def update(self, c, w, r):
        """Incorporate a point with count c, weight w, reward r.

        Returns the probability mass assigned to the new point.
        """
        from cvxopt import matrix, solvers
        assert c > 0
        safet = max(self.t, 1)  # avoids division by zero before any data
        # Feasible starting point: split mass proportionally between the
        # new point and the (scaled) existing solution.
        x0 = matrix([ c / (c + safet),
                      self.qmin * safet / (c + safet),
                      self.qmax * safet / (c + safet),
                      safet / (c + safet) ],
                    tc='d')
        def F(x=None, z=None):
            # cvxopt nonlinear objective: negative (scaled) log-likelihood
            # of (new-point mass x[0], scaling factor x[3]).
            import math
            if x is None: return 0, x0
            if x[0] <= 0 or x[3] <= 0:
                return None
            f = -c * math.log(x[0]) / safet - self.t * math.log(x[3]) / safet
            jf = matrix([ -c / (safet * x[0]), 0, 0, -self.t / (safet * x[3]) ], tc='d').T
            if z is None: return f, jf
            hf = z[0] * matrix([ [ (c / safet) * 1/x[0]**2, 0, 0, 0 ],
                                 [ 0, 0, 0, 0 ],
                                 [ 0, 0, 0, 0 ],
                                 [ 0, 0, 0, (self.t / safet) * 1/x[3]**2 ]
                               ], tc='d')
            return f, jf, hf
        # Equality constraints after substituting the previous solution's
        # feasibility (see the markdown derivation): weighted-mass row and
        # total-mass row, both divided by wmax for conditioning.
        A = matrix([
            [ float(w) / self.wmax,
              self.wmin / self.wmax,
              1,
              (1 / self.wmax - (self.wmin / self.wmax) * self.qmin - self.qmax) ],
            [ 1, 1, 1, (1 - self.qmin - self.qmax) ]
        ],
        tc='d')
        soln = solvers.cp(F=F, G=self.G, h=self.h, A=A.T, b=self.b, options={'show_progress': False})
        from pprint import pformat
        assert soln['status'] == 'optimal', pformat([ soln, self.t ])
        # primal objective is the scaled negative log-likelihood increment.
        self.obj -= safet * soln['primal objective']
        self.lastq = soln['x'][0]
        self.qmin = soln['x'][1]
        self.qmax = soln['x'][2]
        self.lastphi = soln['x'][3]
        # Value update: new point's contribution plus scaled-down history.
        self.vmin = soln['x'][0] * w * r + soln['x'][3] * self.vmin
        self.t += c
        return self.lastq
class OnlineCoordinateDescentLB:
    class Flass:
        """Empty attribute container used as an ad-hoc namespace for the
        cvxpy variables and parameters below."""
        def __init__(self):
            pass
    def __init__(self, wmin, wmax, alpha):
        """Set up state and a parameterized cvxpy problem for the online
        lower-bound solver ("Take 2" in the notes above).

        wmin/wmax bound the importance weights; alpha is the confidence
        level for the chi-squared likelihood-ratio constraint.
        """
        assert wmax > 1
        assert wmin >= 0
        assert wmin < wmax
        self.wmin = wmin
        self.wmax = wmax
        # Initial boundary masses satisfying both moment constraints.
        self.qmin = (wmax - 1) / (wmax - wmin)
        self.qmax = (1 - wmin) / (wmax - wmin)
        self.vlb = 0   # running lower-bound value
        self.wq0 = 0   # accumulated w*q mass for r=0 points
        self.wq1 = 0   # accumulated w*q mass for r=1 points
        self.q0t = 0   # accumulated q mass for r=0 points
        self.q1t = 0   # accumulated q mass for r=1 points
        self.t = 0     # total fractional count seen
        self.rt = 0    # total fractional reward seen
        self.llb = 0   # accumulated log-likelihood of the lower-bound solution
        self.alpha = alpha
        self.mle = OnlineCoordinateDescentMLE(wmin=wmin, wmax=wmax)
        from scipy.stats import chi2  # NOTE(review): imported but unused here
        import cvxpy as cp
        # One decision variable per adjustable quantity in the "Take 2"
        # program: the new point's mass, the boundary masses, and the two
        # history scaling factors.
        self.vars = OnlineCoordinateDescentLB.Flass()
        self.vars.qt = cp.Variable(nonneg=True)
        self.vars.qmin = cp.Variable(nonneg=True)
        self.vars.qmax = cp.Variable(nonneg=True)
        self.vars.psi0 = cp.Variable(nonneg=True)
        self.vars.psi1 = cp.Variable(nonneg=True)
        # Parameters filled in per-step by innersolve().
        self.params = OnlineCoordinateDescentLB.Flass()
        self.params.w = cp.Parameter(nonneg=True)
        self.params.wr = cp.Parameter(nonneg=True)
        self.params.vlb = cp.Parameter(nonneg=True)
        self.params.wq0 = cp.Parameter(nonneg=True)
        self.params.wq1 = cp.Parameter(nonneg=True)
        self.params.q0t = cp.Parameter(nonneg=True)
        self.params.q1t = cp.Parameter(nonneg=True)
        self.params.c = cp.Parameter(nonneg=True)
        self.params.tminusrt = cp.Parameter(nonneg=True)
        self.params.rt = cp.Parameter(nonneg=True)
        self.params.constraintrhs = cp.Parameter()
        # Objective plus: weighted-mass constraint (scaled by 1/wmax),
        # total-mass constraint, and the log-likelihood-ratio constraint.
        self.prob = cp.Problem(cp.Minimize(self.params.wr * self.vars.qt + self.params.vlb * self.vars.psi1), [
            self.params.w * self.vars.qt
            + (self.wmin / self.wmax) * self.vars.qmin
            + self.vars.qmax
            + self.params.wq0 * self.vars.psi0
            + self.params.wq1 * self.vars.psi1
            == 1 / self.wmax,
            self.vars.qt
            + self.vars.qmin
            + self.vars.qmax
            + self.params.q0t * self.vars.psi0
            + self.params.q1t * self.vars.psi1 == 1,
            self.params.c * cp.log(self.vars.qt)
            + self.params.tminusrt * cp.log(self.vars.psi0)
            + self.params.rt * cp.log(self.vars.psi1)
            >= self.params.constraintrhs
        ])
    def innersolve(self, c, w, r, alpha):
        """Fill the cvxpy parameters from current state and solve one step.

        Returns (objective value, solver status, q0t, q1t, qmin, qmax,
        psi0, psi1); the q values are None when the solve failed.
        """
        from scipy.stats import chi2
        safet = max(self.t, 1)  # avoid 0 division before any data
        # Half the chi-squared critical value for the likelihood-ratio bound.
        halfchisq = 0.5 * chi2.isf(q=alpha, df=1)
        # All weighted quantities are scaled by 1/wmax; counts by 1/safet.
        self.params.w.value = w / self.wmax
        self.params.wr.value = w * r
        self.params.vlb.value = self.vlb
        self.params.wq0.value = self.wq0 / self.wmax
        self.params.wq1.value = self.wq1 / self.wmax
        self.params.q0t.value = self.q0t
        self.params.q1t.value = self.q1t
        self.params.c.value = c / safet
        self.params.tminusrt.value = (self.t - self.rt) / safet
        self.params.rt.value = self.rt / safet
        # phi - L_{<t} from the derivation, scaled by 1/safet.
        self.params.constraintrhs.value = (-halfchisq + self.mle.obj - self.llb) / safet
        self.prob.solve(verbose=False)
        # Split the new point's mass by reward into (r=0, r=1) components.
        return (self.prob.value,
                self.prob.status,
                (1 - r) * self.vars.qt.value if self.vars.qt.value is not None else None,
                r * self.vars.qt.value if self.vars.qt.value is not None else None,
                self.vars.qmin.value,
                self.vars.qmax.value,
                self.vars.psi0.value,
                self.vars.psi1.value
                )
    def updatev3(self, c, w, r):
        """One online step of the "Take 4" variant: split the new point
        into r=0/r=1 components and allow a stochastic mixture with the
        MLE solution (psimle).

        Returns (q for this point, diagnostics dict).

        NOTE(review): this method reads self.mle.vhat and self.halfchisq,
        neither of which is set by the visible __init__ /
        OnlineCoordinateDescentMLE code — confirm they are defined
        elsewhere, otherwise this raises AttributeError.
        """
        import cvxpy as cp
        import math  # NOTE(review): unused in this method
        from scipy.special import xlogy
        # Advance the companion MLE solver first; qmle is its mass for
        # this point.
        qmle = self.mle.update(c, w, r)
        # Fresh variables each call (problem structure depends on data).
        q0t = cp.Variable(nonneg=True)
        q1t = cp.Variable(nonneg=True)
        qmin = cp.Variable(nonneg=True)
        qmax = cp.Variable(nonneg=True)
        psi0 = cp.Variable(nonneg=True)
        psi1 = cp.Variable(nonneg=True)
        psimle = cp.Variable(nonneg=True)
        safet = max(self.t, 1)
        # The commented objective below was the non-DCP mixture form.
        # prob = cp.Problem(cp.Minimize((float(w) * q1t + self.vlb * psi1) * (1 - psimle) + self.mle.vhat * psimle), [
        prob = cp.Problem(cp.Minimize(float(w) * q1t + self.vlb * psi1 + self.mle.vhat * psimle), [
            # Weighted-mass constraint, scaled by 1/wmax.
            float(w / self.wmax) * q0t
            + float(w / self.wmax) * q1t
            + (self.wmin / self.wmax) * qmin
            + qmax
            + psi0 * (self.wq0 / self.wmax)
            + psi1 * (self.wq1 / self.wmax)
            == 1 / self.wmax,
            # Total-mass constraint.
            q0t + q1t + qmin + qmax + psi0 * self.q0t + psi1 * self.q1t == 1,
            # Likelihood-ratio constraint (scaled by 1/safet); the inv_pos
            # form below was the non-DCP original, replaced by (1 + psimle).
            float(c * (1 - r) / safet) * cp.log(q0t)
            + float(c * r / safet) * cp.log(q1t)
            + float((self.t - self.rt) / safet) * cp.log(psi0)
            + float(self.rt / safet) * cp.log(psi1)
            # >= ((-self.halfchisq + self.mle.obj) / safet) * cp.inv_pos(1 - psimle) - (self.llb / safet),
            >= ((-self.halfchisq + self.mle.obj) / safet) * (1 + psimle) - (self.llb / safet),
            psimle <= 1,
            psimle >= 0.9
        ])
        prob.solve(verbose=False)
        # On failure, re-solve verbosely so the assertion message shows why.
        assert prob.status[:7] == 'optimal', prob.solve(verbose=True)
        # Mix the lower-bound value with the MLE value by psimle.
        self.vlb = (w * q1t.value + self.vlb * psi1.value) * (1 - psimle.value) + psimle.value * self.mle.vhat
        self.qmin = qmin.value
        self.qmax = qmax.value
        # Log-likelihood bookkeeping; xlogy handles 0*log(0) = 0.
        self.llb += ( xlogy(c * (1 - r), q0t.value)
                      + xlogy(c * r, q1t.value)
                      + xlogy(self.t - self.rt, psi0.value)
                      + xlogy(self.rt, psi1.value)
                    )
        self.llb = (1 - psimle.value) * self.llb + psimle.value * self.mle.obj
        self.t += c
        self.rt += c * r
        # Fold the new point into the scaled running aggregates.
        self.q0t = q0t.value + psi0.value * self.q0t
        self.wq0 = w * q0t.value + psi0.value * self.wq0
        self.q1t = q1t.value + psi1.value * self.q1t
        self.wq1 = w * q1t.value + psi1.value * self.wq1
        myq = (1 - r) * q0t.value + r * q1t.value
        myq = (1 - psimle.value) * myq + psimle.value * qmle
        return myq, { 'self.q0t': self.q0t,
                      'self.q1t': self.q1t,
                      'qmin': qmin.value,
                      'qmax': qmax.value,
                      'vlb': self.vlb,
                      'llb': self.llb,
                      'vmle': self.mle.vhat,
                      'self.rt': self.rt,
                      'psimle': psimle.value,
                    }
    def __initturg__(self, wmin, wmax, alpha):
        """Alternative constructor variant kept for reference (the mangled
        name means Python never calls it as __init__).

        Differs from __init__ by splitting the new point's mass into
        separate q0t/q1t variables (r=0 vs r=1 components) in the
        parameterized problem.
        """
        assert wmax > 1
        assert wmin >= 0
        assert wmin < wmax
        self.wmin = wmin
        self.wmax = wmax
        self.qmin = (wmax - 1) / (wmax - wmin)
        self.qmax = (1 - wmin) / (wmax - wmin)
        self.vlb = 0
        self.wq0 = 0
        self.wq1 = 0
        self.q0t = 0
        self.q1t = 0
        self.t = 0
        self.rt = 0
        self.llb = 0
        self.alpha = alpha
        self.mle = OnlineCoordinateDescentMLE(wmin=wmin, wmax=wmax)
        from scipy.stats import chi2  # NOTE(review): imported but unused here
        import cvxpy as cp
        self.vars = OnlineCoordinateDescentLB.Flass()
        self.vars.q0t = cp.Variable(nonneg=True)
        self.vars.q1t = cp.Variable(nonneg=True)
        self.vars.qmin = cp.Variable(nonneg=True)
        self.vars.qmax = cp.Variable(nonneg=True)
        self.vars.psi0 = cp.Variable(nonneg=True)
        self.vars.psi1 = cp.Variable(nonneg=True)
        self.params = OnlineCoordinateDescentLB.Flass()
        self.params.w = cp.Parameter(nonneg=True)
        self.params.wcost = cp.Parameter(nonneg=True)
        self.params.vlb = cp.Parameter(nonneg=True)
        self.params.wq0 = cp.Parameter(nonneg=True)
        self.params.wq1 = cp.Parameter(nonneg=True)
        self.params.q0t = cp.Parameter(nonneg=True)
        self.params.q1t = cp.Parameter(nonneg=True)
        self.params.coneminusr = cp.Parameter(nonneg=True)
        self.params.cr = cp.Parameter(nonneg=True)
        self.params.tminusrt = cp.Parameter(nonneg=True)
        self.params.rt = cp.Parameter(nonneg=True)
        self.params.constraintrhs = cp.Parameter()
        # Same three constraints as __init__, but with the new point split
        # into its r=0 and r=1 mass components.
        self.prob = cp.Problem(cp.Minimize(self.params.wcost * self.vars.q1t + self.params.vlb * self.vars.psi1), [
            self.params.w * self.vars.q0t
            + self.params.w * self.vars.q1t
            + (self.wmin / self.wmax) * self.vars.qmin
            + self.vars.qmax
            + self.params.wq0 * self.vars.psi0
            + self.params.wq1 * self.vars.psi1
            == 1 / self.wmax,
            self.vars.q0t
            + self.vars.q1t
            + self.vars.qmin
            + self.vars.qmax
            + self.params.q0t * self.vars.psi0
            + self.params.q1t * self.vars.psi1 == 1,
            self.params.coneminusr * cp.log(self.vars.q0t)
            + self.params.cr * cp.log(self.vars.q1t)
            + self.params.tminusrt * cp.log(self.vars.psi0)
            + self.params.rt * cp.log(self.vars.psi1)
            >= self.params.constraintrhs
        ])
def innersolveflass(self, c, w, r, alpha):
    """Solve the lower-bound subproblem with cvxopt's nonlinear solver.

    Primal variable order: (q0t, q1t, qmin, qmax, psi0, psi1).
    c: observation count, w: importance weight, r: reward in [0, 1],
    alpha: confidence level for the chi-square constraint.

    BUGFIX: the historical comment said "doesn't work, not sure why" —
    the log-likelihood constraint used x[3]/x[4] (qmax/psi0) for the psi
    terms while the Jacobian jf and Hessian hf use x[4]/x[5] (psi0/psi1).
    The objective indices are now aligned with the derivative code.

    NOTE(review): this returns a 9-tuple (x[0] is split by reward) while
    innersolveturg returns an 8-tuple — confirm against the caller before
    switching `innersolve` to this implementation.
    """
    from cvxopt import matrix, spdiag, solvers
    import numpy as np
    from scipy.stats import chi2
    assert 0 < c
    assert 0 <= r
    assert r <= 1
    safet = max(self.t, 1)  # avoid division by zero before any data
    halfchisq = 0.5 * chi2.isf(q=alpha, df=1)
    # Diagnostic dump of the problem coefficients.
    print([
        w / self.wmax,
        w,
        self.vlb,
        self.wq0 / self.wmax,
        self.wq1 / self.wmax,
        self.q0t,
        self.q1t,
        c * (1 - r) / safet,
        c * r / safet,
        (self.t - self.rt) / safet,
        self.rt / safet,
        (-halfchisq + self.mle.obj - self.llb) / safet
    ])
    # Nonnegativity: -I x <= 0.
    G = matrix(-np.eye(6), tc='d')
    h = matrix(0, size=(6,1), tc='d')
    # Equality constraints (cvxopt matrices are column-major; transposed
    # at the call site): expected-weight and total-mass normalization.
    A = matrix([ [ w / self.wmax,
                   w / self.wmax,
                   self.wmin / self.wmax,
                   1,
                   self.wq0 / self.wmax,
                   self.wq1 / self.wmax
                 ],
                 [ 1, 1, 1, 1, self.q0t, self.q1t ]
               ],
               tc='d')
    b = matrix([ 1 / self.wmax, 1 ], tc='d')
    # Objective: w * q1t + vlb * psi1 (indices 1 and 5).
    cost = matrix(0, size=(6,1), tc='d')
    cost[1] = float(w)
    cost[5] = self.vlb
    # Interior starting point mixing the new observation with past mass.
    x0 = matrix([ c * (1 - r) / (c + safet),
                  c * r / (c + safet),
                  self.qmin * safet / (c + safet),
                  self.qmax * safet / (c + safet),
                  safet / (c + safet),
                  safet / (c + safet)
                ], tc='d')
    def F(x=None, z=None):
        # cvxopt cpl callback: f(x) <= 0 encodes the scaled constraint
        # loglik(x) >= -halfchisq + mle.obj - llb.
        from scipy.special import xlogy
        if x is None: return 1, x0
        if any(z < 0 for z in x):
            return None
        # psi terms live at x[4] (psi0) and x[5] (psi1), matching jf/hf.
        f = ( xlogy(c * (1 - r), x[0])
            + xlogy(c * r, x[1])
            + xlogy(self.t - self.rt, x[4])
            + xlogy(self.rt, x[5])
            + halfchisq
            - self.mle.obj
            + self.llb)
        f *= -1 / safet
        jf = matrix(0, size=(1, 6), tc='d')
        jf[0] = c * (1 - r) / x[0] if c * (1 - r) > 0 else 0
        jf[1] = c * r / x[1] if c * r > 0 else 0
        jf[4] = (self.t - self.rt) / x[4] if self.t > self.rt else 0
        jf[5] = self.rt / x[5] if self.rt > 0 else 0
        jf *= -1 / safet
        if z is None: return f, jf
        hf = spdiag([
            -c * (1 - r) / x[0]**2 if c * (1 - r) > 0 else 0,
            -c * r / x[1]**2 if c * r > 0 else 0,
            0,
            0,
            -(self.t - self.rt) / x[4]**2 if self.t > self.rt else 0,
            -self.rt / x[5]**2 if self.rt > 0 else 0
        ])
        hf *= -z[0] / safet
        return f, jf, hf
    soln = solvers.cpl(c=cost, F=F, G=G, h=h, A=A.T, b=b)
    from pprint import pformat
    import numpy
    assert soln['status'][:7] == 'optimal', pformat({ 'soln': soln,
                                                      'solnx': [ z for z in soln['x'] ],
                                                      'datum': (c, w, r),
                                                      'F(x=x0)': F(x=x0),
                                                      'A': numpy.matrix(A),
                                                      'b': [ z for z in b ],
                                                      'A.x0 - b': [ z for z in A.T*x0 - b ],
                                                      'G.x0 - h': [ z for z in G*x0 - h ],
                                                      'F(x=soln)': F(x=soln['x']),
                                                      'A.x - b': [ z for z in A.T*soln['x'] - b ],
                                                      'G.x - h': [ z for z in G*soln['x'] - h ],
                                                    })
    return (soln['primal objective'], soln['status'],
            (1 - r) * soln['x'][0],
            r * soln['x'][0]
           ) + tuple(soln['x'][1:])
def innersolveturg(self, c, w, r, alpha):
    """Refresh the cached cvxpy parameters and re-solve the prebuilt problem.

    Returns (objective, status, q0t, q1t, qmin, qmax, psi0, psi1).
    """
    from scipy.stats import chi2

    horizon = max(self.t, 1)
    crit = 0.5 * chi2.isf(q=alpha, df=1)
    # Map each cvxpy parameter name to its fresh value, then assign in bulk.
    refreshed = {
        'w': w / self.wmax,
        'wcost': w,
        'vlb': self.vlb,
        'wq0': self.wq0 / self.wmax,
        'wq1': self.wq1 / self.wmax,
        'q0t': self.q0t,
        'q1t': self.q1t,
        'coneminusr': c * (1 - r) / horizon,
        'cr': c * r / horizon,
        'tminusrt': (self.t - self.rt) / horizon,
        'rt': self.rt / horizon,
        'constraintrhs': (-crit + self.mle.obj - self.llb) / horizon,
    }
    for name, value in refreshed.items():
        getattr(self.params, name).value = value
    self.prob.solve(verbose=False)
    v = self.vars
    return (self.prob.value,
            self.prob.status,
            v.q0t.value,
            v.q1t.value,
            v.qmin.value,
            v.qmax.value,
            v.psi0.value,
            v.psi1.value)
def update(self, c, w, r):
    """Process one observation (count c, importance weight w, reward r).

    Updates the MLE, solves the lower-bound subproblem (relaxing alpha by
    bisection if the solver fails at the requested level), then folds the
    solution into the running sufficient statistics.

    Returns (estimated instantaneous mass, diagnostics dict).
    """
    import math
    from scipy.special import xlogy
    self.mle.update(c, w, r)
    alpha = self.alpha
    (pvalue, pstatus, q0t, q1t, qmin, qmax, psi0, psi1) = self.innersolve(c, w, r, alpha)
    if pstatus[:7] != 'optimal':
        # Solver failed at the requested confidence level: bisect on alpha
        # to find the largest level (to ~1e-3) the solver accepts.
        alphalb = 0
        alphaub = alpha
        while alphaub - alphalb >= 1e-3:
            alphatest = 0.5 * (alphalb + alphaub)
            (pvalue, pstatus, q0t, q1t, qmin, qmax, psi0, psi1) = self.innersolve(c, w, r, alphatest)
            if pstatus[:7] == 'optimal':
                alphalb = alphatest
            else:
                alphaub = alphatest
        alpha = alphalb
        (pvalue, pstatus, q0t, q1t, qmin, qmax, psi0, psi1) = self.innersolve(c, w, r, alpha)
        assert pstatus[:7] == 'optimal', { 'alpha': alpha, 'pstatus': pstatus }
    self.vlb = pvalue
    self.qmin = qmin
    self.qmax = qmax
    # NOTE: llb deliberately uses the PRE-update t and rt; they are
    # incremented only afterwards.
    self.llb += ( xlogy(c * (1 - r), q0t)
                + xlogy(c * r, q1t)
                + xlogy(self.t - self.rt, psi0)
                + xlogy(self.rt, psi1)
                )
    self.t += c
    self.rt += c * r
    # Past mass is shrunk by psi0/psi1 before adding the new solution.
    self.q0t = q0t + psi0 * self.q0t
    self.wq0 = w * q0t + psi0 * self.wq0
    self.q1t = q1t + psi1 * self.q1t
    self.wq1 = w * q1t + psi1 * self.wq1
    # NOTE(review): reports self.mle.vmin here, while other code paths in
    # this file report self.mle.vhat — confirm which is intended.
    return (1 - r) * q0t + r * q1t, { # 'self.q0t': self.q0t,
                                      # 'self.q1t': self.q1t,
                                      # 'qmin': qmin.value,
                                      # 'qmax': qmax.value,
                                      'vlb': self.vlb,
                                      'vmle': self.mle.vmin,
                                      # 'self.rt': self.rt,
                                      'alpha': alpha,
                                    }
def __initv1__(self, wmin, wmax, alpha):
    """Initialize the first (v1) variant of the online lower bound.

    Caches the constant cvxopt matrices (nonnegativity constraints and
    equality right-hand sides) used by the abandoned cvxopt path in
    updatev1, plus the running statistics for the cvxpy path.
    """
    from scipy.stats import chi2
    from cvxopt import matrix
    assert wmax > 1
    assert wmin >= 0
    assert wmin < wmax
    self.wmin = wmin
    self.wmax = wmax
    # Initial mass on the weight extremes: qmin*wmin + qmax*wmax == 1,
    # qmin + qmax == 1.
    self.qmin = (wmax - 1) / (wmax - wmin)
    self.qmax = (1 - wmin) / (wmax - wmin)
    # G is -I (4x4): written as columns then transposed (cvxopt matrices
    # are column-major); encodes x >= 0 as G x <= h.
    self.G = matrix([ [ -1, 0, 0, 0 ],
                      [ 0, -1, 0, 0 ],
                      [ 0, 0, -1, 0 ],
                      [ 0, 0, 0, -1 ],
                    ],
                    tc='d').T
    self.h = matrix([ 0, 0, 0, 0 ], tc='d')
    self.b = matrix([ 1 / wmax, 1 ], tc='d')
    self.t = 0                                       # total count seen
    self.halfchisq = 0.5 * chi2.isf(q=alpha, df=1)   # LR-test threshold
    self.llb = 0                                     # accumulated log-likelihood
    self.vlb = 0                                     # running value lower bound
    self.mle = OnlineCoordinateDescentMLE(wmin=wmin, wmax=wmax)
def updatev1(self, c, w, r):
    """First (v1) online lower-bound update: a single shrinkage factor psi
    instead of separate psi0/psi1.

    Solves the per-observation cvxpy problem, folds the solution into the
    running statistics, and returns a diagnostics dict.
    """
    import cvxpy as cp
    import math
    self.mle.update(c, w, r)
    qt = cp.Variable(nonneg=True)
    qmin = cp.Variable(nonneg=True)
    qmax = cp.Variable(nonneg=True)
    psi = cp.Variable(nonneg=True)
    safet = max(self.t, 1)  # avoid division by zero before any data
    prob = cp.Problem(cp.Minimize(float(w * r) * qt + self.vlb * psi), [
        float(w / self.wmax) * qt
        + (self.wmin / self.wmax) * qmin
        + qmax
        + (1 / self.wmax - (self.wmin / self.wmax) * self.qmin - self.qmax) * psi == 1 / self.wmax,
        qt + qmin + qmax + (1 - self.qmin - self.qmax) * psi == 1,
        float(c / safet) * cp.log(qt) + (self.t / safet) * cp.log(psi) >= (-self.halfchisq + self.mle.obj - self.llb) / safet
    ])
    prob.solve(verbose=False)
    if prob.status[:7] != 'optimal':
        # just maximize likelihood to recover (?)
        pass
    from pprint import pformat
    assert prob.status[:7] == 'optimal', pformat({ 'datum': [ c, w, r ],
                                                   'x': [ z.value for z in [ qt, qmin, qmax, psi ] ],
                                                   'prob': prob.status,
                                                   't': self.t,
                                                   'lmle': self.mle.obj,
                                                   'llb': self.llb,# + c * math.log(qt.value) + self.t * math.log(psi.value),
                                                   'halfchisq': self.halfchisq,
                                                 })
    self.vlb = prob.value
    self.qmin = qmin.value
    self.qmax = qmax.value
    self.lastphi = psi.value
    # llb uses the PRE-update t; t is incremented afterwards.
    self.llb += c * math.log(qt.value) + self.t * math.log(psi.value)
    self.t += c
    return { 'soln': [ z.value for z in [ qt, qmin, qmax, psi ] ], 'lmle': self.mle.obj,
             'llb': self.llb, 'halfchisq': self.halfchisq, 'vlb': self.vlb, 'vhat': self.mle.vhat }
    # NOTE: everything below is unreachable (after the return, and guarded
    # by `if False`); kept as a record of the abandoned cvxopt attempt.
    if False:
        from cvxopt import matrix, solvers
        import math
        assert c > 0
        self.mle.update(c, w, r)
        lmle = self.mle.obj
        safet = max(self.t, 1)
        x0 = matrix([ c / (c + safet),
                      self.qmin * safet / (c + safet),
                      self.qmax * safet / (c + safet),
                      safet / (c + safet) ], tc='d')
        def F(x=None, z=None):
            if x is None: return 1, x0
            if x[0] <= 0 or x[3] <= 0:
                return None
            f = -c * math.log(x[0]) - self.t * math.log(x[3]) - self.halfchisq + lmle - self.llb
            f /= safet
            jf = matrix([ -c / (safet * x[0]), 0, 0, -self.t / (safet * x[3]) ], tc='d').T
            if z is None: return f, jf
            hf = z[0] * matrix([ [ (c / safet) * 1/x[0]**2, 0, 0, 0 ],
                                 [ 0, 0, 0, 0 ],
                                 [ 0, 0, 0, 0 ],
                                 [ 0, 0, 0, (self.t / safet) * 1/x[3]**2 ]
                               ], tc='d')
            return f, jf, hf
        cost = matrix([ float(w * r), 0, 0, self.vlb ], tc='d')
        A = matrix([
            [ float(w) / self.wmax,
              self.wmin / self.wmax,
              1,
              (1 / self.wmax - (self.wmin / self.wmax) * self.qmin - self.qmax) ],
            [ 1, 1, 1, (1 - self.qmin - self.qmax) ]
        ],
        tc='d')
        soln = solvers.cpl(c=cost, F=F, G=self.G, h=self.h, A=A.T, b=self.b, options={'show_progress': True,
                                                                                      'maxiters': 100 })
        from pprint import pformat
        assert soln['status'] == 'optimal', pformat({ 'datum': [ c, w, r ],
                                                      'x': [ z for z in soln['x'] ],
                                                      'soln': soln,
                                                      't': self.t,
                                                      'lmle': lmle,
                                                      'llb': self.llb,
                                                      'F(x0)': F(x=x0),
                                                      'F(x)': F(soln['x'])
                                                    })
        print(pformat((F(x=x0), F(x=soln['x']))))
        self.vlb = soln['primal objective']
        self.qmin = soln['x'][1]
        self.qmax = soln['x'][2]
        self.lastphi = soln['x'][3]
        self.llb += c * math.log(soln['x'][0]) + self.t * math.log(soln['x'][3])
        self.t += c
        return { 'soln': [ z for z in soln['x'] ], 'lmle': lmle,
                 'llb': self.llb, 'halfchisq': self.halfchisq, 'vlb': self.vlb, 'vhat': self.mle.vhat }
class Test:
    """Smoke test driving OnlineCoordinateDescentLB on a synthetic environment."""
    def flass():
        import environments.ControlledRangeVariance
        import MLE.MLE
        def batchtoonline(samples, seed=45):
            # Convert batch (count, w, r) triples into a unit-count online
            # stream, sampling proportionally to the remaining counts.
            # NOTE: consumes `samples` in place.
            import numpy as np
            state = np.random.RandomState(seed)
            n = sum(c for c, w, r in samples)
            while n > 0:
                p = np.array([ c for c, w, r in samples ], dtype='float64') / n
                what = state.choice(len(samples), p=p)
                c = min(samples[what][0], 1)
                yield (c, samples[what][1], samples[what][2])
                samples[what] = (samples[what][0] - c, samples[what][1], samples[what][2])
                n -= c
        env = environments.ControlledRangeVariance.ControlledRangeVariance(seed=45, wsupport=[0,2,1000], expwsq=100)
        _, samples = env.sample(1 + (1 << 16))
        ocd = OnlineCoordinateDescentLB(wmin=0, wmax=1000, alpha=0.05)
        from pprint import pformat
        t = 0
        for n, (c, w, r) in enumerate(batchtoonline(samples)):
            qt, qex = ocd.update(c, w, r)
            t += c
            # Report on a sparse power-of-two schedule, plus whenever the
            # rare large weight appears.
            if (n & (n - 1) == 0 and n & 0xAAAAAAAA == 0) or w == 1000:
                vlb = '{:.5}'.format(qex['vlb'])
                vmle = '{:.5}'.format(qex['vmle'])
                print([ c, w, r, t, '{:.3g}'.format(qt), '{:.3f}'.format(t * qt), { 'vlb': vlb, 'vmle': vmle, 'alpha': qex['alpha'] } ],
                      flush=True)
Test.flass()
```
# Histogram Based
```
from importlib import reload
import environments.ControlledRangeVariance
import MLE.MLE
reload(environments.ControlledRangeVariance)
reload(MLE.MLE)
def batchtoonline(samples, seed=45):
    """Yield unit-count (count, w, r) events from batch triples, picking the
    next source sample in proportion to its remaining count.

    Consumes `samples` (a mutable list of (count, w, r)) in place.
    """
    import numpy as np

    rng = np.random.RandomState(seed)
    remaining = sum(count for count, _, _ in samples)
    while remaining > 0:
        counts = np.array([ count for count, _, _ in samples ], dtype='float64')
        idx = rng.choice(len(samples), p=counts / remaining)
        count, w, r = samples[idx]
        emit = min(count, 1)
        yield (emit, w, r)
        samples[idx] = (count - emit, w, r)
        remaining -= emit
# Drive the histogram-approximation online estimators on the same synthetic
# stream and compare against batch estimates recomputed from the histogram.
env = environments.ControlledRangeVariance.ControlledRangeVariance(seed=45, wsupport=[0,2,1000], expwsq=100)
happrox = MLE.MLE.Online.HistApprox(wmin=0, wmax=1000, numbuckets=10)
onlineci = MLE.MLE.Online.CI(wmin=0, wmax=1000, rmin=0, rmax=1, alpha=0.05)
onlinemle = MLE.MLE.Online.MLE(wmin=0, wmax=1000, rmin=0, rmax=1)
_, samples = env.sample(1 + (1 << 16))
from pprint import pformat
print(pformat(samples), flush=True)
t = 0
for n, (c, w, r) in enumerate(batchtoonline(samples)):
    happrox.update(c, w, r)
    onlineci.update(happrox.iterator)
    onlinemle.update(happrox.iterator)
    t += c
    # Report on a sparse power-of-two schedule, plus whenever the rare
    # large weight appears.
    if (n & (n - 1) == 0 and n & 0xAAAAAAAA == 0) or w == 1000:
        # Batch estimates recomputed from the histogram for comparison.
        vmle = MLE.MLE.estimate(happrox.iterator, wmin=0, wmax=1000)[1]['vmin']
        vlb = MLE.MLE.asymptoticconfidenceinterval(happrox.iterator, wmin=0, wmax=1000)[0][0]
        print([ c, w, r, t, 'n/a', 'n/a', { 'vlb': '{:.3f}'.format(vlb), 'vmle': '{:.3f}'.format(vmle), 'alpha': 0.05 } ],
              flush=True)
#                from pprint import pformat
#                print(pformat(
#                    {
#                        'n': n,
#                        'onlineci': onlineci.getqfunc(),
#                        'onlinemle': onlinemle.getqfunc(),
#                        'batchmle': MLE.MLE.estimate(happrox.iterator, wmin=0, wmax=1000),
#                        'batchci': MLE.MLE.asymptoticconfidenceinterval(happrox.iterator, wmin=0, wmax=1000)
#                    }),
#                    flush=True)
```
# Forest Covertype
### Full Information Online Learning
```
class OnlineDRO:
    """Full-information online learning baseline on Forest Covertype."""

    class EasyAcc:
        """Running-mean accumulator: `acc += x` records a sample."""
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            return self.sum / max(self.n, 1)

    def flass():
        """Train an SGD classifier online on PCA-whitened covtype features
        (quadratic expansion), reporting progressive-validation loss and a
        bootstrapped test accuracy."""
        # BUGFIX: Counter is used below but was never imported in this cell.
        from collections import Counter
        from sklearn.datasets import fetch_covtype
        from sklearn.decomposition import PCA
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import ceil
        import numpy as np
        cov = fetch_covtype()
        cov.data = PCA(whiten=True).fit_transform(cov.data)
        classes = np.unique(cov.target - 1)
        ndata = len(cov.target)
        order = np.random.RandomState(seed=42).permutation(ndata)
        ntrain = ceil(0.2 * ndata)
        Object = lambda **kwargs: type("Object", (), kwargs)()
        train = Object(data = cov.data[order[:ntrain]], target = cov.target[order[:ntrain]] - 1)
        test = Object(data = cov.data[order[ntrain:]], target = cov.target[order[ntrain:]] - 1)
        for lr in (1, ):
            print("**** lr = {} ****".format(lr))
            print('{:8.8s}\t{:8.8s}\t{:10.10s}'.format('n', 'emp loss', 'since last'))
            # Uniform per-class weight acts as the learning-rate knob.
            classweights = { k: lr for k, _ in Counter(train.target).items() }
            cls = SGDClassifier(loss='log', class_weight=classweights, shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            blocksize = 32
            for pno in range(1):
                order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                    # Quadratic (outer-product with bias) feature expansion.
                    v = np.array([ np.outer(t, np.append(t, [1])).ravel() for z in ind for t in ( train.data[z], ) ])
                    actual = [ train.target[z] for z in ind ]
                    if n > 0:
                        # Progressive validation: score before training on the block.
                        pred = cls.predict(v)
                        for p, a in zip(pred, actual):
                            loss += 0 if p == a else 1
                            sincelast += 0 if p == a else 1
                        if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                            print('{:<8d}\t{:<8.3f}\t{:<10.3f}'.format(loss.n, loss.mean(), sincelast.mean()), flush=True)
                            sincelast = OnlineDRO.EasyAcc()
                    cls.partial_fit(v, actual, classes=classes)
            print('{:<8d}\t{:<8.3f}\t{:<10.3f}'.format(loss.n, loss.mean(), sincelast.mean()), flush=True)
            sincelast = OnlineDRO.EasyAcc()
            preds = cls.predict(np.array([np.outer(d, np.append(d, [1])).ravel() for d in test.data]))
            ascores = []
            # Bootstrap the test accuracy for a rough confidence interval.
            for b in range(16):
                bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
            print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
OnlineDRO.flass()
```
### Partial Information Online Learning, Softmax Logging Policy
Uniform $(\tau = 0)$ and softmax $(\tau = 4)$ are pretty similar for off-policy learning but uniform has larger regret.
```
class OnlineDRO:
    """Partial-information (bandit) off-policy learning with a softmax
    logging policy on Forest Covertype; learning uses IPS weights."""

    class EasyAcc:
        """Running-mean accumulator: `acc += x` records a sample."""
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            return self.sum / max(self.n, 1)

    def flass():
        from scipy.special import softmax
        from sklearn.datasets import fetch_covtype
        from sklearn.decomposition import PCA
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import ceil
        import numpy as np
        cov = fetch_covtype()
        cov.data = PCA(whiten=True).fit_transform(cov.data)
        classes = np.unique(cov.target - 1)
        ndata = len(cov.target)
        order = np.random.RandomState(seed=42).permutation(ndata)
        ntrain = ceil(0.2 * ndata)
        Object = lambda **kwargs: type("Object", (), kwargs)()
        train = Object(data = cov.data[order[:ntrain]], target = cov.target[order[:ntrain]] - 1)
        test = Object(data = cov.data[order[ntrain:]], target = cov.target[order[ntrain:]] - 1)
        blocksize = 32
        # tau == 0 is a uniform logger; tau == 4 is a peaked softmax logger.
        for lr, tau in ( (x, y) for x in np.logspace(-2.5, -2, 1) for y in (0, 4, ) ):
            print("*** lr = {} tau = {} ***".format(lr, tau), flush=True)
            print('{:8.8s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}'.format(
                'n', 'emp loss', 'since last', 'log pv', 'since last')
            )
            cls = SGDClassifier(loss='log', shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            logpv = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            loggerrand = np.random.RandomState(seed=2112)
            # Logged action and its propensity, fixed per example on first visit.
            logchoices = [None]*len(train.data)
            pchoices = [None]*len(train.data)
            for pno in range(1):
                order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                    # Quadratic (outer-product with bias) feature expansion.
                    v = np.array([ np.outer(t, np.append(t, [1])).ravel() for z in ind for t in ( train.data[z], ) ])
                    if n == 0 and pno == 0:
                        # First batch: uniform logging policy.
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = 1.0 / len(classes)
                    else:
                        # Softmax over the current model's probabilities.
                        predlogp = cls.predict_proba(v)
                        soft = softmax(tau * predlogp, axis=1)
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = soft[i, choice[0]]
                    # NOTE(review): on the very first batch (n == 0, pno == 0)
                    # `cls` has not been fitted yet (predict would raise) and
                    # `soft` is undefined below — confirm this cell ran as-is.
                    pred = cls.predict(v)
                    actual = [ train.target[z] for z in ind ]
                    for i, (p, a) in enumerate(zip(pred, actual)):
                        loss += 0 if p == a else 1
                        sincelast += 0 if p == a else 1
                        logpv += soft[i, a]
                        logpvsl += soft[i, a]
                    if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                        print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                            loss.n, loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                            flush=True)
                        sincelast = OnlineDRO.EasyAcc()
                        logpvsl = OnlineDRO.EasyAcc()
                    # Train only on examples where the logged action was correct
                    # (reward 1), weighted by inverse propensity.
                    x = np.array([ v[i] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    y = np.array([ logchoices[z] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    w = np.array([ (lr / len(classes)) * (1 / pchoices[z])
                                   for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    if np.any(x):
                        cls.partial_fit(x, y, classes=classes, sample_weight=w)
            print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                loss.n, loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                flush=True)
            sincelast = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            preds = cls.predict(np.array([np.outer(d, np.append(d, [1])).ravel() for d in test.data]))
            ascores = []
            # Bootstrap the test accuracy for a rough confidence interval.
            for b in range(16):
                bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
            print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
OnlineDRO.flass()
```
### Bound Online Learning
The learned policy can be optimized against either the MLE, the lower bound, or the upper bound. Some count decay seems better than no count decay. The upper bound seems to want less count decay than the MLE or the lower bound. All forms of bound learning have lower regret than IPS learning.
TODO: "delayed batch" online learning.
```
class OnlineDRO:
    """Bound-based off-policy learning: sample weights come from the
    Cressie-Read MLE / lower-bound / upper-bound q-function instead of
    raw inverse propensities."""

    class EasyAcc:
        """Running-mean accumulator: `acc += x` records a sample."""
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            return self.sum / max(self.n, 1)

    class OnlineCressieReadLB:
        """Online Cressie-Read (lambda = -2) estimator with exponential
        count decay; exposes q-functions for the MLE and both CI endpoints."""
        from math import inf
        def __init__(self, alpha, gamma=1, wmin=0, wmax=inf):
            import numpy as np
            self.alpha = alpha      # CI confidence level
            self.gamma = gamma      # per-unit-count decay factor
            self.n = 0
            self.sumw = 0
            self.sumwsq = 0
            self.sumwr = 0
            self.sumwsqr = 0
            self.sumwsqrsq = 0
            self.wmin = wmin
            self.wmax = wmax
            self.duals = None       # cached CI duals; None means stale
            self.mleduals = None    # cached MLE duals; None means stale
        def update(self, c, w, r):
            """Fold in count c of (w, r); decays old statistics by gamma**c."""
            if c > 0:
                assert w + 1e-6 >= self.wmin and w <= self.wmax + 1e-6, 'w = {} < {} < {}'.format(self.wmin, w, self.wmax)
                assert r >= 0 and r <= 1, 'r = {}'.format(r)
                decay = self.gamma ** c
                self.n = decay * self.n + c
                self.sumw = decay * self.sumw + c * w
                self.sumwsq = decay * self.sumwsq + c * w**2
                self.sumwr = decay * self.sumwr + c * w * r
                self.sumwsqr = decay * self.sumwsqr + c * (w**2) * r
                self.sumwsqrsq = decay * self.sumwsqrsq + c * (w**2) * (r**2)
                # Invalidate cached duals; recomputed lazily on next query.
                self.duals = None
                self.mleduals = None
            return self
        def recomputeduals(self):
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.duals = CrMinusTwo.intervalimpl(self.n, self.sumw, self.sumwsq,
                                                 self.sumwr, self.sumwsqr, self.sumwsqrsq,
                                                 self.wmin, self.wmax, self.alpha, raiseonerr=True)
        def recomputedualsmle(self):
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.mleduals = CrMinusTwo.estimateimpl(self.n, self.sumw, self.sumwsq,
                                                    self.sumwr, self.sumwsqr, None, None,
                                                    self.wmin, self.wmax, raiseonerr=True)
        def qlb(self, c, w, r):
            # Lower-CI q-function; defaults to 1 when duals are unavailable.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][0]['qfunc'](c, w, r) if self.duals[1][0] is not None else 1
        def qub(self, c, w, r):
            # Upper-CI q-function; defaults to 1 when duals are unavailable.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][1]['qfunc'](c, w, r) if self.duals[1][1] is not None else 1
        def qmle(self, c, w, r):
            # MLE q-function; defaults to 1 when duals are unavailable.
            if self.mleduals is None:
                self.recomputedualsmle()
            assert self.mleduals is not None
            return self.mleduals[1]['qfunc'](c, w, r) if self.mleduals[1] is not None else 1

    def flass():
        from scipy.special import softmax
        from sklearn.datasets import fetch_covtype
        from sklearn.decomposition import PCA
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import ceil, exp
        import numpy as np
        cov = fetch_covtype()
        cov.data = PCA(whiten=True).fit_transform(cov.data)
        classes = np.unique(cov.target - 1)
        ndata = len(cov.target)
        order = np.random.RandomState(seed=42).permutation(ndata)
        ntrain = ceil(0.2 * ndata)
        Object = lambda **kwargs: type("Object", (), kwargs)()
        train = Object(data = cov.data[order[:ntrain]], target = cov.target[order[:ntrain]] - 1)
        test = Object(data = cov.data[order[ntrain:]], target = cov.target[order[ntrain:]] - 1)
        blocksize = 32
        # Sweep estimator flavor ('mle'/'ub'/'lb') and decay gamma.
        for lr, tau, what, gamma in ( (x, y, z, g) for x in np.logspace(-2.5, -2, 1) for y in (4, )
                                      for z in ('mle', 'ub', 'lb') for g in (1, 0.9999, 0.999, 0.99, ) ):
            print("*** lr = {} tau = {} what = {} gamma = {} ***".format(lr, tau, what, gamma), flush=True)
            print('{:8.8s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}'.format(
                'n', 'eff n', 'since last', 'emp loss', 'since last', 'log pv', 'since last')
            )
            cls = SGDClassifier(loss='log', shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            logpv = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            effn = OnlineDRO.EasyAcc()
            effnsl = OnlineDRO.EasyAcc()
            loggerrand = np.random.RandomState(seed=2112)
            # Logged action and its propensity, fixed per example on first visit.
            logchoices = [None]*len(train.data)
            pchoices = [None]*len(train.data)
            # wmax bounds the softmax importance weight: exp(tau) + K - 1.
            ocrl = OnlineDRO.OnlineCressieReadLB(alpha=0.05,
                                                 gamma=gamma,
                                                 wmin=0,
                                                 wmax=exp(tau) + len(classes) - 1
                                                )
            qfunc = ocrl.qmle if what == 'mle' else ocrl.qlb if what == 'lb' else ocrl.qub
            for pno in range(1):
                order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                    # Quadratic (outer-product with bias) feature expansion.
                    v = np.array([ np.outer(t, np.append(t, [1])).ravel() for z in ind for t in ( train.data[z], ) ])
                    if n == 0 and pno == 0:
                        # First batch: uniform logging policy.
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = 1.0 / len(classes)
                    else:
                        predlogp = cls.predict_proba(v)
                        soft = softmax(tau * predlogp, axis=1)
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = soft[i, choice[0]]
                    # NOTE(review): on the very first batch (n == 0, pno == 0)
                    # `cls` has not been fitted yet (predict would raise) and
                    # `soft` is undefined below — confirm this cell ran as-is.
                    pred = cls.predict(v)
                    actual = [ train.target[z] for z in ind ]
                    for i, (p, a) in enumerate(zip(pred, actual)):
                        loss += 0 if p == a else 1
                        sincelast += 0 if p == a else 1
                        logpv += soft[i, a]
                        logpvsl += soft[i, a]
                    if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                        print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                            loss.n, effn.mean(), effnsl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                            flush=True)
                        sincelast = OnlineDRO.EasyAcc()
                        logpvsl = OnlineDRO.EasyAcc()
                        effnsl = OnlineDRO.EasyAcc()
                    # Feed the estimator with every logged example.
                    for i, z in enumerate(ind):
                        r = 1 if logchoices[z] == train.target[z] else 0
                        w = 1 / pchoices[z]
                        ocrl.update(1, w, r)
                    if n == 0 and pno == 0:
                        sampweight = np.array([ lr for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    else:
                        # Bound-based weights: w * n * q(w) replaces raw IPS.
                        sampweight = np.array([ lr * w * ocrl.n
                                                * max(0, qfunc(1, w, 1))
                                                for i, z in enumerate(ind)
                                                if logchoices[z] == train.target[z]
                                                for w in (1 / pchoices[z],)# if logchoices[z] == pred[i] else 0,)
                                              ])
                    effn += sampweight.sum() / (lr * blocksize)
                    effnsl += sampweight.sum() / (lr * blocksize)
                    x = np.array([ v[i] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    y = np.array([ logchoices[z] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    if np.any(x):
                        cls.partial_fit(x, y, classes=classes, sample_weight=sampweight)
            print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                loss.n, effn.mean(), effnsl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                flush=True)
            sincelast = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            effnsl = OnlineDRO.EasyAcc()
            preds = cls.predict(np.array([np.outer(d, np.append(d, [1])).ravel() for d in test.data]))
            ascores = []
            # Bootstrap the test accuracy for a rough confidence interval.
            for b in range(16):
                bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
            print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
OnlineDRO.flass()
```
### Delayed Batch Bound Online Learning
Process larger batches as sets of smaller batches to emulate time delay in policy updates without changing optimization properties (i.e., SGD batch size).
Everything still seems to work.
```
class OnlineDRO:
    """Delayed-batch bound-based off-policy learning: each logged batch is
    split into `delay` sub-blocks for SGD to emulate policy-update delay
    without changing the optimization batch size."""

    class EasyAcc:
        """Running-mean accumulator: `acc += x` records a sample."""
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            return self.sum / max(self.n, 1)

    class OnlineCressieReadLB:
        """Online Cressie-Read (lambda = -2) estimator with exponential
        count decay; exposes q-functions for the MLE and both CI endpoints."""
        from math import inf
        def __init__(self, alpha, gamma=1, wmin=0, wmax=inf):
            import numpy as np
            self.alpha = alpha      # CI confidence level
            self.gamma = gamma      # per-unit-count decay factor
            self.n = 0
            self.sumw = 0
            self.sumwsq = 0
            self.sumwr = 0
            self.sumwsqr = 0
            self.sumwsqrsq = 0
            self.wmin = wmin
            self.wmax = wmax
            self.duals = None       # cached CI duals; None means stale
            self.mleduals = None    # cached MLE duals; None means stale
        def update(self, c, w, r):
            """Fold in count c of (w, r); decays old statistics by gamma**c."""
            if c > 0:
                assert w + 1e-6 >= self.wmin and w <= self.wmax + 1e-6, 'w = {} < {} < {}'.format(self.wmin, w, self.wmax)
                assert r >= 0 and r <= 1, 'r = {}'.format(r)
                decay = self.gamma ** c
                self.n = decay * self.n + c
                self.sumw = decay * self.sumw + c * w
                self.sumwsq = decay * self.sumwsq + c * w**2
                self.sumwr = decay * self.sumwr + c * w * r
                self.sumwsqr = decay * self.sumwsqr + c * (w**2) * r
                self.sumwsqrsq = decay * self.sumwsqrsq + c * (w**2) * (r**2)
                # Invalidate cached duals; recomputed lazily on next query.
                self.duals = None
                self.mleduals = None
            return self
        def recomputeduals(self):
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.duals = CrMinusTwo.intervalimpl(self.n, self.sumw, self.sumwsq,
                                                 self.sumwr, self.sumwsqr, self.sumwsqrsq,
                                                 self.wmin, self.wmax, self.alpha, raiseonerr=True)
        def recomputedualsmle(self):
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.mleduals = CrMinusTwo.estimateimpl(self.n, self.sumw, self.sumwsq,
                                                    self.sumwr, self.sumwsqr, None, None,
                                                    self.wmin, self.wmax, raiseonerr=True)
        def qlb(self, c, w, r):
            # Lower-CI q-function; defaults to 1 when duals are unavailable.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][0]['qfunc'](c, w, r) if self.duals[1][0] is not None else 1
        def qub(self, c, w, r):
            # Upper-CI q-function; defaults to 1 when duals are unavailable.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][1]['qfunc'](c, w, r) if self.duals[1][1] is not None else 1
        def qmle(self, c, w, r):
            # MLE q-function; defaults to 1 when duals are unavailable.
            if self.mleduals is None:
                self.recomputedualsmle()
            assert self.mleduals is not None
            return self.mleduals[1]['qfunc'](c, w, r) if self.mleduals[1] is not None else 1

    def flass():
        from scipy.special import softmax
        from sklearn.datasets import fetch_covtype
        from sklearn.decomposition import PCA
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import ceil, exp
        import numpy as np
        cov = fetch_covtype()
        cov.data = PCA(whiten=True).fit_transform(cov.data)
        classes = np.unique(cov.target - 1)
        ndata = len(cov.target)
        order = np.random.RandomState(seed=42).permutation(ndata)
        ntrain = ceil(0.2 * ndata)
        Object = lambda **kwargs: type("Object", (), kwargs)()
        train = Object(data = cov.data[order[:ntrain]], target = cov.target[order[:ntrain]] - 1)
        test = Object(data = cov.data[order[ntrain:]], target = cov.target[order[ntrain:]] - 1)
        subblocksize = 32
        delay = 8
        blocksize = delay * subblocksize
        for lr, tau, what, gamma in ( (x, y, z, g) for x in np.logspace(-2.5, -2, 1) for y in (4, )
                                      for z in ('mle', 'ub', 'lb') for g in (1, 0.9999, 0.999, 0.99,) ):
            print("*** lr = {} tau = {} what = {} gamma = {} ***".format(lr, tau, what, gamma), flush=True)
            print('{:8.8s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}'.format(
                'n', 'eff n', 'since last', 'emp loss', 'since last', 'log pv', 'since last')
            )
            cls = SGDClassifier(loss='log', shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            logpv = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            effn = OnlineDRO.EasyAcc()
            effnsl = OnlineDRO.EasyAcc()
            loggerrand = np.random.RandomState(seed=2112)
            # Logged action and its propensity, fixed per example on first visit.
            logchoices = [None]*len(train.data)
            pchoices = [None]*len(train.data)
            # wmax bounds the softmax importance weight: exp(tau) + K - 1.
            ocrl = OnlineDRO.OnlineCressieReadLB(alpha=0.05,
                                                 gamma=gamma,
                                                 wmin=0,
                                                 wmax=exp(tau) + len(classes) - 1
                                                )
            qfunc = ocrl.qmle if what == 'mle' else ocrl.qlb if what == 'lb' else ocrl.qub
            for pno in range(1):
                order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                    # Quadratic (outer-product with bias) feature expansion.
                    v = np.array([ np.outer(t, np.append(t, [1])).ravel() for z in ind for t in ( train.data[z], ) ])
                    if n == 0 and pno == 0:
                        # First batch: uniform logging policy.
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = 1.0 / len(classes)
                    else:
                        predlogp = cls.predict_proba(v)
                        soft = softmax(tau * predlogp, axis=1)
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = soft[i, choice[0]]
                    # BUGFIX: on the very first batch the classifier is
                    # unfitted (predict raises NotFittedError) and `soft` is
                    # undefined, so progressive-validation metrics start from
                    # the second batch.
                    if n > 0 or pno > 0:
                        pred = cls.predict(v)
                        actual = [ train.target[z] for z in ind ]
                        for i, (p, a) in enumerate(zip(pred, actual)):
                            loss += 0 if p == a else 1
                            sincelast += 0 if p == a else 1
                            logpv += soft[i, a]
                            logpvsl += soft[i, a]
                    if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                        print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                            loss.n, effn.mean(), effnsl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                            flush=True)
                        sincelast = OnlineDRO.EasyAcc()
                        logpvsl = OnlineDRO.EasyAcc()
                        effnsl = OnlineDRO.EasyAcc()
                    # Feed the estimator with every logged example.
                    for i, z in enumerate(ind):
                        r = 1 if logchoices[z] == train.target[z] else 0
                        w = 1 / pchoices[z]
                        ocrl.update(1, w, r)
                    # Replay the batch as `delay` consecutive SGD sub-blocks.
                    # BUGFIX: sub-block d covers indices [d*sub, (d+1)*sub);
                    # the original used [(d-1)*sub, d*sub), which made d == 0
                    # empty and silently dropped the final sub-block.
                    for d in range(delay):
                        x = np.array([ v[i]
                                       for i, z in enumerate(ind)
                                       if d * subblocksize <= i < (d + 1) * subblocksize
                                       if logchoices[z] == train.target[z] ])
                        y = np.array([ logchoices[z]
                                       for i, z in enumerate(ind)
                                       if d * subblocksize <= i < (d + 1) * subblocksize
                                       if logchoices[z] == train.target[z] ])
                        if n == 0 and pno == 0:
                            sampweight = np.array([ lr
                                                    for i, z in enumerate(ind)
                                                    if d * subblocksize <= i < (d + 1) * subblocksize
                                                    if logchoices[z] == train.target[z] ])
                        else:
                            # Bound-based weights: w * n * q(w) replaces raw IPS.
                            sampweight = np.array([ lr * w * ocrl.n
                                                    * max(0, qfunc(1, w, 1))
                                                    for i, z in enumerate(ind)
                                                    if d * subblocksize <= i < (d + 1) * subblocksize
                                                    if logchoices[z] == train.target[z]
                                                    for w in (1 / pchoices[z],)
                                                  ])
                        effn += sampweight.sum() / (lr * subblocksize)
                        effnsl += sampweight.sum() / (lr * subblocksize)
                        if np.any(x):
                            cls.partial_fit(x, y, classes=classes, sample_weight=sampweight)
            print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                loss.n, effn.mean(), effnsl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                flush=True)
            # from pprint import pformat
            # print(pformat(ocrl.__dict__))
            sincelast = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            effnsl = OnlineDRO.EasyAcc()
            preds = cls.predict(np.array([np.outer(d, np.append(d, [1])).ravel() for d in test.data]))
            ascores = []
            # Bootstrap the test accuracy for a rough confidence interval.
            for b in range(16):
                bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
            print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
OnlineDRO.flass()
```
### Auto-Temperature + Delayed Batch Bound Online Learning
Best regret achieved with lower bound optimization.
```
class OnlineDRO:
    """Covertype contextual-bandit experiment: softmax logging with an
    auto-tuned temperature, plus delayed-batch importance-weighted updates
    scaled by an online Cressie-Read bound on the policy value."""
    class EasyAcc:
        # Running count/sum accumulator; `acc += x` records one observation.
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            # Safe mean: returns 0 before any observation is recorded.
            return self.sum / max(self.n, 1)
    class OnlineCressieReadLB:
        # Exponentially-decayed online sufficient statistics for the
        # Cressie-Read (chi-squared) interval/MLE estimators of the value of
        # importance-weighted observations.
        from math import inf
        def __init__(self, alpha, gamma=1, wmin=0, wmax=inf):
            import numpy as np
            self.alpha = alpha  # confidence level for the interval
            self.gamma = gamma  # per-observation decay factor (1 = no forgetting)
            self.n = 0
            self.sumw = 0
            self.sumwsq = 0
            self.sumwr = 0
            self.sumwsqr = 0
            self.sumwsqrsq = 0
            self.wmin = wmin  # admissible importance-weight range
            self.wmax = wmax
            self.duals = None  # cached interval duals; invalidated on update
            self.mleduals = None  # cached MLE duals; invalidated on update
        def update(self, c, w, r):
            # Fold in `c` observations with importance weight `w` and reward `r`.
            if c > 0:
                assert w + 1e-6 >= self.wmin and w <= self.wmax + 1e-6, 'w = {} < {} < {}'.format(self.wmin, w, self.wmax)
                assert r >= 0 and r <= 1, 'r = {}'.format(r)
                decay = self.gamma ** c
                self.n = decay * self.n + c
                self.sumw = decay * self.sumw + c * w
                self.sumwsq = decay * self.sumwsq + c * w**2
                self.sumwr = decay * self.sumwr + c * w * r
                self.sumwsqr = decay * self.sumwsqr + c * (w**2) * r
                self.sumwsqrsq = decay * self.sumwsqrsq + c * (w**2) * (r**2)
                # New data invalidates the cached dual solutions.
                self.duals = None
                self.mleduals = None
            return self
        def recomputeduals(self):
            # Lazily solve for the confidence-interval dual variables.
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.duals = CrMinusTwo.intervalimpl(self.n, self.sumw, self.sumwsq,
                                                 self.sumwr, self.sumwsqr, self.sumwsqrsq,
                                                 self.wmin, self.wmax, self.alpha, raiseonerr=True)
        def recomputedualsmle(self):
            # Lazily solve for the maximum-likelihood dual variables.
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.mleduals = CrMinusTwo.estimateimpl(self.n, self.sumw, self.sumwsq,
                                                    self.sumwr, self.sumwsqr, None, None,
                                                    self.wmin, self.wmax, raiseonerr=True)
        def qlb(self, c, w, r):
            # Lower-bound reweighting factor; defaults to 1 when no dual exists.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][0]['qfunc'](c, w, r) if self.duals[1][0] is not None else 1
        def qub(self, c, w, r):
            # Upper-bound reweighting factor; defaults to 1 when no dual exists.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][1]['qfunc'](c, w, r) if self.duals[1][1] is not None else 1
        def qmle(self, c, w, r):
            # MLE reweighting factor; defaults to 1 when no dual exists.
            if self.mleduals is None:
                self.recomputedualsmle()
            assert self.mleduals is not None
            return self.mleduals[1]['qfunc'](c, w, r) if self.mleduals[1] is not None else 1
    def autotune(pre, target, taumax):
        # Find a softmax temperature tau in [0, taumax] such that the mean
        # minimum action probability equals `target` (exploration floor);
        # returns taumax when even taumax explores at least that much.
        from scipy.optimize import root_scalar
        def f(tau):
            from scipy.special import softmax
            import numpy as np
            soft = softmax(tau * pre, axis=1)
            minsoft = np.min(soft, axis=1)
            return np.mean(minsoft) - target
        fmax = f(taumax)
        if fmax >= 0:
            return taumax
        taumin = 0
        fmin = f(taumin)
        # tau = 0 is uniform logging, so f(0) should always exceed the target.
        assert fmin > 0, { 'fmin': fmin, 'target': target }
        root = root_scalar(f, bracket=(taumin, taumax))
        assert root.converged, root
        return root.root
    def flass():
        # Run the hyperparameter sweep on the covertype dataset.
        from scipy.special import softmax
        from sklearn.datasets import fetch_covtype
        from sklearn.decomposition import PCA
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import ceil, exp
        import numpy as np
        cov = fetch_covtype()
        cov.data = PCA(whiten=True).fit_transform(cov.data)
        classes = np.unique(cov.target - 1)  # relabel targets to 0-based
        ndata = len(cov.target)
        order = np.random.RandomState(seed=42).permutation(ndata)
        ntrain = ceil(0.2 * ndata)
        # Minimal anonymous record type for (data, target) pairs.
        Object = lambda **kwargs: type("Object", (), kwargs)()
        train = Object(data = cov.data[order[:ntrain]], target = cov.target[order[:ntrain]] - 1)
        test = Object(data = cov.data[order[ntrain:]], target = cov.target[order[ntrain:]] - 1)
        subblocksize = 32
        delay = 8  # number of delayed sub-batches per block
        blocksize = delay * subblocksize
        for lr, taumax, target, what, gamma in ( (x, 8, y, z, g) for x in np.logspace(-2.5, -2, 1)
                                                 for z in ('mle', 'ub', 'lb') for g in (0.9999, 0.999,) for y in (0.025, 0.05, 0.1, 0.2, ) ):
            print("*** lr = {} taumax = {} target = {} what = {} gamma = {} ***".format(lr, taumax, target, what, gamma), flush=True)
            print('{:8.8s}\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]'.format(
                'n', 'eff n', 'since last', 'av tau', 'since last', 'emp loss', 'since last', 'log pv', 'since last')
            )
            try:
                target /= len(classes)  # per-action exploration floor
                cls = SGDClassifier(loss='log', shuffle=False)
                loss = OnlineDRO.EasyAcc()
                sincelast = OnlineDRO.EasyAcc()
                logpv = OnlineDRO.EasyAcc()
                logpvsl = OnlineDRO.EasyAcc()
                effn = OnlineDRO.EasyAcc()
                effnsl = OnlineDRO.EasyAcc()
                avtau = OnlineDRO.EasyAcc()
                avtausl = OnlineDRO.EasyAcc()
                loggerrand = np.random.RandomState(seed=2112)
                logchoices = [None]*len(train.data)  # action logged per example
                pchoices = [None]*len(train.data)    # logging probability of that action
                ocrl = OnlineDRO.OnlineCressieReadLB(alpha=0.05,
                                                     gamma=gamma,
                                                     wmin=0,
                                                     # wmax=exp(taumax) + len(classes) - 1
                                                     )
                qfunc = ocrl.qmle if what == 'mle' else ocrl.qlb if what == 'lb' else ocrl.qub
                for pno in range(1):
                    order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                    for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                        # Quadratic (outer-product-with-bias) feature expansion.
                        v = np.array([ np.outer(t, np.append(t, [1])).ravel() for z in ind for t in ( train.data[z], ) ])
                        if n == 0 and pno == 0:
                            # No model yet: log uniformly at random.
                            pred = np.zeros(blocksize)
                            for i, z in enumerate(ind):
                                if logchoices[z] is None:
                                    choice = loggerrand.choice(a=classes, size=1)
                                    logchoices[z] = choice[0]
                                    pchoices[z] = 1.0 / len(classes)
                        else:
                            # Log from a softmax policy with auto-tuned temperature.
                            predlogp = cls.predict_proba(v)
                            tau = OnlineDRO.autotune(predlogp, target, taumax)
                            avtau += tau
                            avtausl += tau
                            soft = softmax(tau * predlogp, axis=1)
                            for i, z in enumerate(ind):
                                if logchoices[z] is None:
                                    choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                    logchoices[z] = choice[0]
                                    pchoices[z] = soft[i, choice[0]]
                            pred = cls.predict(v)
                        actual = [ train.target[z] for z in ind ]
                        for i, (p, a) in enumerate(zip(pred, actual)):
                            loss += 0 if p == a else 1
                            sincelast += 0 if p == a else 1
                            # NOTE(review): `soft` is only assigned in the else
                            # branch above, so on the very first block this line
                            # raises NameError, which the bare `except` below
                            # silently swallows -- confirm intent.
                            logpv += soft[i, a]
                            logpvsl += soft[i, a]
                        if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                            # Report at power-of-two block counts.
                            print('{:<8d}\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]'.format(
                                loss.n, effn.mean(), effnsl.mean(), avtau.mean(), avtausl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                                flush=True)
                            sincelast = OnlineDRO.EasyAcc()
                            logpvsl = OnlineDRO.EasyAcc()
                            effnsl = OnlineDRO.EasyAcc()
                            avtausl = OnlineDRO.EasyAcc()
                        # Feed the bound estimator: reward and (clipped) weight.
                        for i, z in enumerate(ind):
                            r = 1 if logchoices[z] == train.target[z] else 0
                            w = 1 / pchoices[z] if pred[i] == logchoices[z] else 0
                            ocrl.update(1, w, r)
                        # Delayed updates: train on one sub-block at a time.
                        for d in range(delay):
                            # NOTE(review): for d == 0 this filter selects nothing
                            # and indices >= (delay-1)*subblocksize are never
                            # selected; looks like an off-by-one ((d-1) vs d) --
                            # confirm intent.
                            x = np.array([ v[i]
                                           for i, z in enumerate(ind)
                                           if (d-1)*subblocksize <= i and i < d*subblocksize
                                           if logchoices[z] == train.target[z] ])
                            y = np.array([ logchoices[z]
                                           for i, z in enumerate(ind)
                                           if (d-1)*subblocksize <= i and i < d*subblocksize
                                           if logchoices[z] == train.target[z] ])
                            if n == 0 and pno == 0:
                                # First block: plain learning-rate weights.
                                sampweight = np.array([ lr
                                                        for i, z in enumerate(ind)
                                                        if (d-1)*subblocksize <= i and i < d*subblocksize
                                                        if logchoices[z] == train.target[z] ])
                            else:
                                # Importance weight scaled by the selected bound's q-factor.
                                sampweight = np.array([ lr * w * ocrl.n
                                                        * max(0, qfunc(1, w, 1))
                                                        for i, z in enumerate(ind)
                                                        if (d-1)*subblocksize <= i and i < d*subblocksize
                                                        if logchoices[z] == train.target[z]
                                                        for w in (1 / pchoices[z],)
                                                      ])
                            effn += sampweight.sum() / (lr * subblocksize)
                            effnsl += sampweight.sum() / (lr * subblocksize)
                            if np.any(x):
                                cls.partial_fit(x, y, classes=classes, sample_weight=sampweight)
                    # Final report for this pass over the data.
                    print('{:<8d}\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]'.format(
                        loss.n, effn.mean(), effnsl.mean(), avtau.mean(), avtausl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                        flush=True)
                    sincelast = OnlineDRO.EasyAcc()
                    logpvsl = OnlineDRO.EasyAcc()
                    effnsl = OnlineDRO.EasyAcc()
                    avtausl = OnlineDRO.EasyAcc()
                # Bootstrap (16 resamples) the held-out accuracy.
                preds = cls.predict(np.array([np.outer(d, np.append(d, [1])).ravel() for d in test.data]))
                ascores = []
                for b in range(16):
                    bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                    ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
                print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
            except KeyboardInterrupt:
                raise
            except:
                # NOTE(review): bare except silently skips any failed
                # hyperparameter setting -- at minimum this should log.
                pass
# Run the covertype auto-temperature experiment sweep defined above.
OnlineDRO.flass()
```
# 20 Newsgroups
Really hard ... 20 actions and only 22K examples.
### Full Information Online Learning
```
class OnlineDRO:
    """Full-information online learning baseline on 20 Newsgroups.

    `EasyAcc` tracks running means of metrics; `flass()` streams the
    training set in fixed-size blocks through an SGD logistic classifier,
    reporting progressive-validation loss and a bootstrapped test accuracy.
    """
    class EasyAcc:
        """Running count/sum accumulator supporting `acc += value`."""
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            # Guard against division by zero before the first update.
            return self.sum / max(self.n, 1)
    def flass():
        """Run the full-feedback progressive-validation experiment."""
        from scipy.sparse import vstack
        from sklearn.datasets import fetch_20newsgroups
        from sklearn.feature_extraction.text import HashingVectorizer
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        import numpy as np
        train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
        classes = np.unique(train.target)
        test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
        for ngram, norm, lr, nfeat in ( (2, 'l2', 1, 20), ):
            print("**** ngram = {} norm = {} lr = {} nfeat = {} ****".format(ngram, norm, lr, nfeat))
            print('{:8.8s}\t{:8.8s}\t{:10.10s}'.format('n', 'emp loss', 'since last'))
            hasher = HashingVectorizer(n_features = 1 << nfeat, norm=norm, ngram_range=(1, ngram), alternate_sign=True)
            traindocs = hasher.transform(train.data)
            testdocs = hasher.transform(test.data)
            weights = { label: lr for label in classes }
            learner = SGDClassifier(loss='log', class_weight=weights, shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            blocksize = 32
            for passno in range(1):
                perm = np.random.RandomState(seed=42 + passno).permutation(len(train.data))
                for blockno, block in enumerate(zip(*(iter(perm),) * blocksize)):
                    feats = vstack([ traindocs[idx] for idx in block ])
                    truth = [ train.target[idx] for idx in block ]
                    if blockno > 0:
                        # Progressive validation: score before training on it.
                        guesses = learner.predict(feats)
                        for guess, answer in zip(guesses, truth):
                            miss = int(guess != answer)
                            loss += miss
                            sincelast += miss
                        if blockno & (blockno - 1) == 0:
                            # Report at power-of-two block counts.
                            print('{:<8d}\t{:<8.3f}\t{:<10.3f}'.format(loss.n, loss.mean(), sincelast.mean()), flush=True)
                            sincelast = OnlineDRO.EasyAcc()
                    learner.partial_fit(feats, truth, classes=classes)
                # Final report for this pass.
                print('{:<8d}\t{:<8.3f}\t{:<10.3f}'.format(loss.n, loss.mean(), sincelast.mean()), flush=True)
                sincelast = OnlineDRO.EasyAcc()
            # Bootstrap (16 resamples) the held-out accuracy.
            finalpreds = learner.predict(testdocs)
            bootscores = []
            for rep in range(16):
                resample = np.random.RandomState(90210 + rep).choice(len(test.target), replace=True, size=len(test.target))
                bootscores.append(accuracy_score(y_true=test.target[resample], y_pred=finalpreds[resample]))
            print("test accuracy: {}".format(np.quantile(bootscores, [0.05, 0.5, 0.95])))
# Run the full-information 20 Newsgroups baseline defined above.
OnlineDRO.flass()
```
### Partial Information Online Learning, Softmax Logging Policy
Uniform $(\tau = 0)$ and softmax $(\tau = 4)$ are pretty similar for off-policy learning but uniform has larger regret.
```
class OnlineDRO:
    """Partial-information (bandit) online learning on 20 Newsgroups with a
    fixed-temperature softmax logging policy and inverse-propensity-weighted
    updates."""
    class EasyAcc:
        # Running count/sum accumulator; `acc += x` records one observation.
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            # Safe mean: returns 0 before any observation is recorded.
            return self.sum / max(self.n, 1)
    def flass():
        # Sweep softmax logging temperatures (tau = 0 is uniform logging).
        from scipy.sparse import vstack
        from scipy.special import softmax
        from sklearn.datasets import fetch_20newsgroups
        from sklearn.feature_extraction.text import HashingVectorizer
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        import numpy as np
        train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
        classes = np.unique(train.target)
        test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
        blocksize = 32
        for ngram, norm, lr, nfeat, tau in ( (2, 'l2', z, 20, x) for x in (0, 4,) for z in (0.5, ) ):
            print("**** ngram = {} norm = {} lr = {} nfeat = {} tau = {} ****".format(ngram, norm, lr, nfeat, tau))
            print('{:8.8s}\t{:8.8s}\t{:10.10s}\t{:8.8s}\t{:10.10s}'.format(
                'n', 'emp loss', 'since last', 'log pv', 'since last')
            )
            vectorizer = HashingVectorizer(n_features = 1 << nfeat, norm=norm, ngram_range=(1, ngram), alternate_sign=True)
            docs = vectorizer.transform(train.data)
            testdocs = vectorizer.transform(test.data)
            classweights = { k: lr for k in classes }
            cls = SGDClassifier(loss='log', class_weight=classweights, shuffle=False)
            loss = OnlineDRO.EasyAcc()
            sincelast = OnlineDRO.EasyAcc()
            logpv = OnlineDRO.EasyAcc()
            logpvsl = OnlineDRO.EasyAcc()
            loggerrand = np.random.RandomState(seed=2112)
            logchoices = [None]*len(train.data)  # action logged per example
            pchoices = [None]*len(train.data)    # logging probability of that action
            for pno in range(1):
                order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                    v = vstack([ docs[z] for z in ind ])
                    if n == 0 and pno == 0:
                        # No model yet: log uniformly at random.
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = 1.0 / len(classes)
                    else:
                        # Log from a softmax over the model's class probabilities.
                        predlogp = cls.predict_proba(v)
                        soft = softmax(tau * predlogp, axis=1)
                        for i, z in enumerate(ind):
                            if logchoices[z] is None:
                                choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                logchoices[z] = choice[0]
                                pchoices[z] = soft[i, choice[0]]
                        pred = cls.predict(v)
                        actual = [ train.target[z] for z in ind ]
                        for i, (p, a) in enumerate(zip(pred, actual)):
                            loss += 0 if p == a else 1
                            sincelast += 0 if p == a else 1
                            logpv += soft[i, a]
                            logpvsl += soft[i, a]
                    if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                        # Report at power-of-two block counts.
                        print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                            loss.n, loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                            flush=True)
                        sincelast = OnlineDRO.EasyAcc()
                        logpvsl = OnlineDRO.EasyAcc()
                    # IPS update on the examples where the logged action was correct.
                    # y = np.array([ logchoices[z] for i, z in enumerate(ind) ])
                    # w = np.array([ (lr / len(classes)) * (1 / pchoices[z])
                    #                if logchoices[z] == train.target[z]
                    #                else -(lr / len(classes)**2) * (1/pchoices[z])
                    #                for i, z in enumerate(ind) ])
                    y = np.array([ logchoices[z] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    w = np.array([ (lr / len(classes)) * (1 / pchoices[z])
                                   for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                    if np.any(y):
                        x = vstack([ v[i] for i, z in enumerate(ind) if logchoices[z] == train.target[z] ])
                        # x = vstack([ v[i] for i, z in enumerate(ind) ])
                        cls.partial_fit(x, y, classes=classes, sample_weight=w)
                # Final report for this pass over the data.
                print('{:<8d}\t{:<8.3f}\t{:<10.3f}\t{:<8.3f}\t{:<10.3f}'.format(
                    loss.n, loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                    flush=True)
                sincelast = OnlineDRO.EasyAcc()
                logpvsl = OnlineDRO.EasyAcc()
            # Bootstrap (16 resamples) the held-out accuracy.
            preds = cls.predict(testdocs)
            ascores = []
            for b in range(16):
                bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
            print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
# Run the partial-information softmax-logging experiment defined above.
OnlineDRO.flass()
```
### Auto-Temperature and Bound Optimization
Nothing working great, but regret is better than other approaches.
```
class OnlineDRO:
    """20 Newsgroups contextual-bandit experiment: softmax logging with an
    auto-tuned temperature, plus delayed-batch importance-weighted updates
    scaled by an online Cressie-Read bound on the policy value."""
    class EasyAcc:
        # Running count/sum accumulator; `acc += x` records one observation.
        def __init__(self):
            self.n = 0
            self.sum = 0
        def __iadd__(self, other):
            self.n += 1
            self.sum += other
            return self
        def mean(self):
            # Safe mean: returns 0 before any observation is recorded.
            return self.sum / max(self.n, 1)
    class OnlineCressieReadLB:
        # Exponentially-decayed online sufficient statistics for the
        # Cressie-Read (chi-squared) interval/MLE estimators of the value of
        # importance-weighted observations.
        from math import inf
        def __init__(self, alpha, gamma=1, wmin=0, wmax=inf):
            import numpy as np
            self.alpha = alpha  # confidence level for the interval
            self.gamma = gamma  # per-observation decay factor (1 = no forgetting)
            self.n = 0
            self.sumw = 0
            self.sumwsq = 0
            self.sumwr = 0
            self.sumwsqr = 0
            self.sumwsqrsq = 0
            self.wmin = wmin  # admissible importance-weight range
            self.wmax = wmax
            self.duals = None  # cached interval duals; invalidated on update
            self.mleduals = None  # cached MLE duals; invalidated on update
        def update(self, c, w, r):
            # Fold in `c` observations with importance weight `w` and reward `r`.
            if c > 0:
                assert w + 1e-6 >= self.wmin and w <= self.wmax + 1e-6, 'w = {} < {} < {}'.format(self.wmin, w, self.wmax)
                assert r >= 0 and r <= 1, 'r = {}'.format(r)
                decay = self.gamma ** c
                self.n = decay * self.n + c
                self.sumw = decay * self.sumw + c * w
                self.sumwsq = decay * self.sumwsq + c * w**2
                self.sumwr = decay * self.sumwr + c * w * r
                self.sumwsqr = decay * self.sumwsqr + c * (w**2) * r
                self.sumwsqrsq = decay * self.sumwsqrsq + c * (w**2) * (r**2)
                # New data invalidates the cached dual solutions.
                self.duals = None
                self.mleduals = None
            return self
        def recomputeduals(self):
            # Lazily solve for the confidence-interval dual variables.
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.duals = CrMinusTwo.intervalimpl(self.n, self.sumw, self.sumwsq,
                                                 self.sumwr, self.sumwsqr, self.sumwsqrsq,
                                                 self.wmin, self.wmax, self.alpha, raiseonerr=True)
        def recomputedualsmle(self):
            # Lazily solve for the maximum-likelihood dual variables.
            from MLE.MLE import CrMinusTwo as CrMinusTwo
            self.mleduals = CrMinusTwo.estimateimpl(self.n, self.sumw, self.sumwsq,
                                                    self.sumwr, self.sumwsqr, None, None,
                                                    self.wmin, self.wmax, raiseonerr=True)
        def qlb(self, c, w, r):
            # Lower-bound reweighting factor; defaults to 1 when no dual exists.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][0]['qfunc'](c, w, r) if self.duals[1][0] is not None else 1
        def qub(self, c, w, r):
            # Upper-bound reweighting factor; defaults to 1 when no dual exists.
            if self.duals is None:
                self.recomputeduals()
            assert self.duals is not None
            return self.duals[1][1]['qfunc'](c, w, r) if self.duals[1][1] is not None else 1
        def qmle(self, c, w, r):
            # MLE reweighting factor; defaults to 1 when no dual exists.
            if self.mleduals is None:
                self.recomputedualsmle()
            assert self.mleduals is not None
            return self.mleduals[1]['qfunc'](c, w, r) if self.mleduals[1] is not None else 1
    def autotune(pre, target, taumax):
        # Find a softmax temperature tau in [0, taumax] such that the mean
        # minimum action probability equals `target` (exploration floor);
        # returns taumax when even taumax explores at least that much.
        from scipy.optimize import root_scalar
        def f(tau):
            from scipy.special import softmax
            import numpy as np
            soft = softmax(tau * pre, axis=1)
            minsoft = np.min(soft, axis=1)
            return np.mean(minsoft) - target
        fmax = f(taumax)
        if fmax >= 0:
            return taumax
        taumin = 0
        fmin = f(taumin)
        # tau = 0 is uniform logging, so f(0) should always exceed the target.
        assert fmin > 0, { 'fmin': fmin, 'target': target }
        root = root_scalar(f, bracket=(taumin, taumax))
        assert root.converged, root
        return root.root
    def flass():
        # Run the hyperparameter sweep on 20 Newsgroups.
        from scipy.sparse import vstack
        from scipy.special import softmax
        from sklearn.datasets import fetch_20newsgroups
        from sklearn.feature_extraction.text import HashingVectorizer
        from sklearn.linear_model import SGDClassifier
        from sklearn.metrics import accuracy_score
        from math import exp
        import numpy as np
        train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
        classes = np.unique(train.target)
        test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
        subblocksize = 32
        delay = 8  # number of delayed sub-batches per block
        blocksize = delay * subblocksize
        for lr, taumax, target, what, gamma in ( (x, 8, y, z, g) for x in (0.5,)
                                                 for z in ('mle', 'ub', 'lb')
                                                 for g in (0.9999, 0.999,)
                                                 for y in (0.025, 0.05, 0.1, 0.2, ) ):
            ngram = 2
            norm = 'l2'
            nfeat = 20
            print("*** lr = {} taumax = {} target = {} what = {} gamma = {} ***".format(lr, taumax, target, what, gamma), flush=True)
            print('{:8.8s}\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]\t{:8.8s} [{:10.10s}]'.format(
                'n', 'eff n', 'since last', 'av tau', 'since last', 'emp loss', 'since last', 'log pv', 'since last')
            )
            try:
                target /= len(classes)  # per-action exploration floor
                vectorizer = HashingVectorizer(n_features = 1 << nfeat, norm=norm, ngram_range=(1, ngram), alternate_sign=True)
                docs = vectorizer.transform(train.data)
                testdocs = vectorizer.transform(test.data)
                classweights = { k: lr for k in classes }
                cls = SGDClassifier(loss='log', class_weight=classweights, shuffle=False)
                loss = OnlineDRO.EasyAcc()
                sincelast = OnlineDRO.EasyAcc()
                logpv = OnlineDRO.EasyAcc()
                logpvsl = OnlineDRO.EasyAcc()
                effn = OnlineDRO.EasyAcc()
                effnsl = OnlineDRO.EasyAcc()
                avtau = OnlineDRO.EasyAcc()
                avtausl = OnlineDRO.EasyAcc()
                loggerrand = np.random.RandomState(seed=2112)
                logchoices = [None]*len(train.data)  # action logged per example
                pchoices = [None]*len(train.data)    # logging probability of that action
                ocrl = OnlineDRO.OnlineCressieReadLB(alpha=0.05,
                                                     gamma=gamma,
                                                     wmin=0,
                                                     wmax=exp(taumax) + len(classes) - 1
                                                     )
                qfunc = ocrl.qmle if what == 'mle' else ocrl.qlb if what == 'lb' else ocrl.qub
                for pno in range(1):
                    order = np.random.RandomState(seed=42+pno).permutation(len(train.data))
                    for n, ind in enumerate(zip(*(iter(order),)*blocksize)):
                        v = vstack([ docs[z] for z in ind ])
                        if n == 0 and pno == 0:
                            # No model yet: log uniformly at random.
                            pred = np.zeros(blocksize)
                            for i, z in enumerate(ind):
                                if logchoices[z] is None:
                                    choice = loggerrand.choice(a=classes, size=1)
                                    logchoices[z] = choice[0]
                                    pchoices[z] = 1.0 / len(classes)
                        else:
                            # Log from a softmax policy with auto-tuned temperature.
                            predlogp = cls.predict_proba(v)
                            tau = OnlineDRO.autotune(predlogp, target, taumax)
                            avtau += tau
                            avtausl += tau
                            soft = softmax(tau * predlogp, axis=1)
                            for i, z in enumerate(ind):
                                if logchoices[z] is None:
                                    choice = loggerrand.choice(a=classes, p=soft[i,:], size=1)
                                    logchoices[z] = choice[0]
                                    pchoices[z] = soft[i, choice[0]]
                            pred = cls.predict(v)
                        actual = [ train.target[z] for z in ind ]
                        for i, (p, a) in enumerate(zip(pred, actual)):
                            loss += 0 if p == a else 1
                            sincelast += 0 if p == a else 1
                            # NOTE(review): `soft` is only assigned in the else
                            # branch above, so on the very first block this line
                            # raises NameError, which the bare `except` below
                            # silently swallows -- confirm intent.
                            logpv += soft[i, a]
                            logpvsl += soft[i, a]
                        if (n & (n - 1) == 0): # and n & 0xAAAAAAAA == 0):
                            # Report at power-of-two block counts.
                            print('{:<8d}\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]'.format(
                                loss.n, effn.mean(), effnsl.mean(), avtau.mean(), avtausl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                                flush=True)
                            sincelast = OnlineDRO.EasyAcc()
                            logpvsl = OnlineDRO.EasyAcc()
                            effnsl = OnlineDRO.EasyAcc()
                            avtausl = OnlineDRO.EasyAcc()
                        # Feed the bound estimator: reward and (clipped) weight.
                        for i, z in enumerate(ind):
                            r = 1 if logchoices[z] == train.target[z] else 0
                            w = 1 / pchoices[z] if pred[i] == logchoices[z] else 0
                            ocrl.update(1, w, r)
                        # Delayed updates: train on one sub-block at a time.
                        for d in range(delay):
                            # NOTE(review): for d == 0 this filter selects nothing
                            # and indices >= (delay-1)*subblocksize are never
                            # selected; looks like an off-by-one ((d-1) vs d) --
                            # confirm intent.
                            y = np.array([ logchoices[z]
                                           for i, z in enumerate(ind)
                                           if (d-1)*subblocksize <= i and i < d*subblocksize
                                           if logchoices[z] == train.target[z] ])
                            if n == 0 and pno == 0:
                                # First block: plain learning-rate weights.
                                sampweight = np.array([ lr
                                                        for i, z in enumerate(ind)
                                                        if (d-1)*subblocksize <= i and i < d*subblocksize
                                                        if logchoices[z] == train.target[z] ])
                            else:
                                # Importance weight scaled by the selected bound's q-factor.
                                sampweight = np.array([ lr * w * ocrl.n
                                                        * max(0, qfunc(1, w, 1))
                                                        for i, z in enumerate(ind)
                                                        if (d-1)*subblocksize <= i and i < d*subblocksize
                                                        if logchoices[z] == train.target[z]
                                                        for w in (1 / pchoices[z],)
                                                      ])
                            effn += sampweight.sum() / (lr * subblocksize)
                            effnsl += sampweight.sum() / (lr * subblocksize)
                            if np.any(y):
                                x = vstack([ v[i]
                                             for i, z in enumerate(ind)
                                             if (d-1)*subblocksize <= i and i < d*subblocksize
                                             if logchoices[z] == train.target[z] ])
                                cls.partial_fit(x, y, classes=classes, sample_weight=sampweight)
                    # Final report for this pass over the data.
                    print('{:<8d}\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]\t{:<8.3f} [{:<10.3f}]'.format(
                        loss.n, effn.mean(), effnsl.mean(), avtau.mean(), avtausl.mean(), loss.mean(), sincelast.mean(), logpv.mean(), logpvsl.mean()),
                        flush=True)
                    sincelast = OnlineDRO.EasyAcc()
                    logpvsl = OnlineDRO.EasyAcc()
                    effnsl = OnlineDRO.EasyAcc()
                    avtausl = OnlineDRO.EasyAcc()
                # Bootstrap (16 resamples) the held-out accuracy.
                preds = cls.predict(testdocs)
                ascores = []
                for b in range(16):
                    bootie = np.random.RandomState(90210+b).choice(len(test.target), replace=True, size=len(test.target))
                    ascores.append(accuracy_score(y_true=test.target[bootie], y_pred=preds[bootie]))
                print("test accuracy: {}".format(np.quantile(ascores, [0.05, 0.5, 0.95])))
            except KeyboardInterrupt:
                raise
            except:
                # NOTE(review): bare except silently skips any failed
                # hyperparameter setting -- at minimum this should log.
                pass
# Run the auto-temperature bound-optimization experiment defined above.
OnlineDRO.flass()
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
# Classify Flowers with Transfer Learning
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/image_feature_vector"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a>
</td>
</table>
Have you ever seen a beautiful flower and wondered what kind of flower it is? Well, you're not the first, so let's build a way to identify the type of flower from a photo!
For classifying images, a particular type of *deep neural network*, called a *convolutional neural network* has proved to be particularly powerful. However, modern convolutional neural networks have millions of parameters. Training them from scratch requires a lot of labeled training data and a lot of computing power (hundreds of GPU-hours or more). We only have about three thousand labeled photos and want to spend much less time, so we need to be more clever.
We will use a technique called *transfer learning* where we take a pre-trained network (trained on about a million general images), use it to extract features, and train a new layer on top for our own task of classifying images of flowers.
## Setup
```
import collections
import io
import math
import os
import random
from six.moves import urllib
from IPython.display import clear_output, Image, display, HTML
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sk_metrics
import time
```
## The flowers dataset
The flowers dataset consists of images of flowers with 5 possible class labels.
When training a machine learning model, we split our data into training and test datasets. We will train the model on our training data and then evaluate how well the model performs on data it has never seen - the test set.
Let's download our training and test examples (it may take a while) and split them into train and test sets.
Run the following two cells:
```
# Directory where the flower photos are (or will be) stored on disk.
FLOWERS_DIR = './flower_photos'
# Fraction of each class's images assigned to the training split.
TRAIN_FRACTION = 0.8
# Fixed seed so the train/test split is reproducible across runs.
RANDOM_SEED = 2018
def download_images():
    """If the images aren't already downloaded, save them to FLOWERS_DIR."""
    if not os.path.exists(FLOWERS_DIR):
        DOWNLOAD_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
        print('Downloading flower images from %s...' % DOWNLOAD_URL)
        urllib.request.urlretrieve(DOWNLOAD_URL, 'flower_photos.tgz')
        # IPython/Colab shell magic: extracts the archive into FLOWERS_DIR.
        # (Only runs inside a notebook, not as a plain Python script.)
        !tar xfz flower_photos.tgz
    print('Flower photos are located in %s' % FLOWERS_DIR)
def make_train_and_test_sets():
    """Split the data into train and test sets and get the label classes.

    Walks FLOWERS_DIR: the root directory's subdirectories name the classes,
    and each subdirectory's files are examples of that class. Per class, the
    first TRAIN_FRACTION of the (shuffled) files go to train, the rest to test.

    Returns:
        A (train_examples, test_examples, classes) tuple, where each example
        is a (file_path, label_class) pair and classes maps label -> name.
    """
    train_examples, test_examples = [], []
    shuffler = random.Random(RANDOM_SEED)  # seeded for a reproducible split
    is_root = True
    for (dirname, subdirs, filenames) in tf.gfile.Walk(FLOWERS_DIR):
        # The root directory gives us the classes
        if is_root:
            subdirs = sorted(subdirs)
            classes = collections.OrderedDict(enumerate(subdirs))
            label_to_class = dict([(x, i) for i, x in enumerate(subdirs)])
            is_root = False
        # The sub directories give us the image files for training.
        else:
            filenames.sort()
            shuffler.shuffle(filenames)
            full_filenames = [os.path.join(dirname, f) for f in filenames]
            # Fix: use os.path.basename instead of splitting on '/' so the
            # class label is extracted correctly on any platform's separator.
            label = os.path.basename(dirname)
            label_class = label_to_class[label]
            # An example is the image file and its label class.
            examples = list(zip(full_filenames, [label_class] * len(filenames)))
            num_train = int(len(filenames) * TRAIN_FRACTION)
            train_examples.extend(examples[:num_train])
            test_examples.extend(examples[num_train:])
    shuffler.shuffle(train_examples)
    shuffler.shuffle(test_examples)
    return train_examples, test_examples, classes
# Download the images and split the images into train and test sets.
download_images()
TRAIN_EXAMPLES, TEST_EXAMPLES, CLASSES = make_train_and_test_sets()
# Number of distinct flower classes (one output unit per class later on).
NUM_CLASSES = len(CLASSES)
print('\nThe dataset has %d label classes: %s' % (NUM_CLASSES, CLASSES.values()))
print('There are %d training images' % len(TRAIN_EXAMPLES))
print('there are %d test images' % len(TEST_EXAMPLES))
```
## Explore the data
The flowers dataset consists of examples which are labeled images of flowers. Each example contains a JPEG flower image and the class label: what type of flower it is. Let's display a few images together with their labels.
```
#@title Show some labeled images
def get_label(example):
    """Get the label (number) for given example."""
    label = example[1]
    return label
def get_class(example):
    """Get the class (string) of given example."""
    label = get_label(example)
    return CLASSES[label]
def get_encoded_image(example):
    """Get the image data (encoded jpg) of given example.

    Returns the raw bytes of the image file referenced by the example.
    """
    image_path = example[0]
    # Fix: use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with tf.gfile.GFile(image_path, 'rb') as f:
        return f.read()
def get_image(example):
    """Get image as np.array of pixels for given example."""
    encoded = get_encoded_image(example)
    return plt.imread(io.BytesIO(encoded), format='jpg')
def display_images(images_and_classes, cols=5):
    """Display given images and their labels in a grid."""
    count = len(images_and_classes)
    rows = int(math.ceil(count / cols))
    fig = plt.figure()
    fig.set_size_inches(cols * 3, rows * 3)
    # Subplot indices are 1-based, so enumerate from 1.
    for position, (image, flower_class) in enumerate(images_and_classes, start=1):
        plt.subplot(rows, cols, position)
        plt.axis('off')
        plt.imshow(image)
        plt.title(flower_class)
NUM_IMAGES = 15 #@param {type: 'integer'}
# Preview the first NUM_IMAGES training examples with their class names.
display_images([(get_image(example), get_class(example))
                for example in TRAIN_EXAMPLES[:NUM_IMAGES]])
```
## Build the model
We will load a [TF-Hub](https://tensorflow.org/hub) image feature vector module, stack a linear classifier on it, and add training and evaluation ops. The following cell builds a TF graph describing the model and its training, but it doesn't run the training (that will be the next step).
```
# Step size for gradient descent on the classifier layer.
LEARNING_RATE = 0.01
tf.reset_default_graph()
# Load a pre-trained TF-Hub module for extracting features from images. We've
# chosen this particular module for speed, but many other choices are available.
image_module = hub.Module('https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2')
# Preprocessing images into tensors with size expected by the image module.
encoded_images = tf.placeholder(tf.string, shape=[None])
image_size = hub.get_expected_image_size(image_module)
def decode_and_resize_image(encoded):
    """Decode one JPEG string into a float32 image resized for the module."""
    image = tf.image.decode_jpeg(encoded, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    return tf.image.resize_images(image, image_size)
# Decode and resize every image in the batch via map_fn.
batch_images = tf.map_fn(decode_and_resize_image, encoded_images, dtype=tf.float32)
# The image module can be applied as a function to extract feature vectors for a
# batch of images.
features = image_module(batch_images)
def create_model(features):
    """Build a model for classification from extracted features."""
    # Currently, the model is just a single linear layer. You can try to add
    # another layer, but be careful... two linear layers (when activation=None)
    # are equivalent to a single linear layer. You can create a nonlinear layer
    # like this:
    # layer = tf.layers.dense(inputs=..., units=..., activation=tf.nn.relu)
    return tf.layers.dense(inputs=features, units=NUM_CLASSES, activation=None)
# For each class (kind of flower), the model outputs some real number as a score
# how much the input resembles this class. This vector of numbers is often
# called the "logits".
logits = create_model(features)
# One-hot ground-truth labels fed in at train/eval time.
labels = tf.placeholder(tf.float32, [None, NUM_CLASSES])
# Mathematically, a good way to measure how much the predicted probabilities
# diverge from the truth is the "cross-entropy" between the two probability
# distributions. For numerical stability, this is best done directly from the
# logits, not the probabilities extracted from them.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# Let's add an optimizer so we can train the network.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)
train_op = optimizer.minimize(loss=cross_entropy_mean)
# The "softmax" function transforms the logits vector into a vector of
# probabilities: non-negative numbers that sum up to one, and the i-th number
# says how likely the input comes from class i.
probabilities = tf.nn.softmax(logits)
# We choose the highest one as the predicted class.
prediction = tf.argmax(probabilities, 1)
correct_prediction = tf.equal(prediction, tf.argmax(labels, 1))
# The accuracy will allow us to eval on our test set.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
```
## Train the network
Now that our model is built, let's train it and see how it performs on our test set.
```
# How long will we train the network (number of batches).
NUM_TRAIN_STEPS = 100 #@param {type: 'integer'}
# How many training examples we use in each step.
TRAIN_BATCH_SIZE = 10 #@param {type: 'integer'}
# How often to evaluate the model performance (in training steps).
EVAL_EVERY = 10 #@param {type: 'integer'}
def get_batch(batch_size=None, test=False):
    """Return a batch of examples.

    Draws `batch_size` examples uniformly at random from the test split when
    `test` is True, otherwise from the training split. With batch_size=None
    (or 0) the whole split is returned unchanged.
    """
    pool = TEST_EXAMPLES if test else TRAIN_EXAMPLES
    if batch_size:
        return random.sample(pool, batch_size)
    return pool
def get_images_and_labels(batch_examples):
    """Map each example to its encoded image and one-hot label vector."""
    encoded_images = []
    one_hot_labels = []
    for example in batch_examples:
        encoded_images.append(get_encoded_image(example))
        one_hot_labels.append(get_label_one_hot(example))
    return encoded_images, one_hot_labels
def get_label_one_hot(example):
    """Return a length-NUM_CLASSES one-hot encoding of the example's label."""
    vec = np.zeros(NUM_CLASSES)
    # Set the single position corresponding to this example's class index.
    vec[get_label(example)] = 1
    return vec
# TF1 graph-mode training: initialize all variables, then alternate gradient
# steps on random minibatches, evaluating on the full test split every
# EVAL_EVERY steps and on the final step.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(NUM_TRAIN_STEPS):
# Get a random batch of training examples.
train_batch = get_batch(batch_size=TRAIN_BATCH_SIZE)
batch_images, batch_labels = get_images_and_labels(train_batch)
# Run the train_op to train the model.
train_loss, _, train_accuracy = sess.run(
[cross_entropy_mean, train_op, accuracy],
feed_dict={encoded_images: batch_images, labels: batch_labels})
is_final_step = (i == (NUM_TRAIN_STEPS - 1))
if i % EVAL_EVERY == 0 or is_final_step:
# Get a batch of test examples (batch_size=None -> the whole test split).
test_batch = get_batch(batch_size=None, test=True)
batch_images, batch_labels = get_images_and_labels(test_batch)
# Evaluate how well our model performs on the test set.
# test_prediction / correct_predicate are kept for the later
# confusion-matrix and error-analysis cells.
test_loss, test_accuracy, test_prediction, correct_predicate = sess.run(
[cross_entropy_mean, accuracy, prediction, correct_prediction],
feed_dict={encoded_images: batch_images, labels: batch_labels})
print('Test accuracy at step %s: %.2f%%' % (i, (test_accuracy * 100)))
def show_confusion_matrix(test_labels, predictions):
    """Plot the row-normalized confusion matrix of the test predictions.

    Args:
        test_labels: one-hot label vectors, shape (n_examples, NUM_CLASSES).
        predictions: predicted class indices, shape (n_examples,).
    """
    confusion = sk_metrics.confusion_matrix(
        np.argmax(test_labels, axis=1), predictions)
    # Normalize each ROW by its own total so each row sums to 1.
    # BUG FIX: without [:, np.newaxis] the (n,) row-sum vector broadcasts
    # across rows, i.e. entry [i, j] was divided by the sum of row j
    # instead of row i.
    confusion_normalized = (
        confusion.astype("float") / confusion.sum(axis=1)[:, np.newaxis])
    axis_labels = list(CLASSES.values())
    ax = sns.heatmap(
        confusion_normalized, xticklabels=axis_labels, yticklabels=axis_labels,
        cmap='Blues', annot=True, fmt='.2f', square=True)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")

show_confusion_matrix(batch_labels, test_prediction)
```
## Incorrect predictions
Let's take a closer look at the test examples that our model got wrong.
- Are there any mislabeled examples in our test set?
- Is there any bad data in the test set - images that aren't actually pictures of flowers?
- Are there images where you can understand why the model made a mistake?
```
# Collect the misclassified test examples, paired with the human-readable
# class name the model (wrongly) predicted for each.
incorrect = [
(example, CLASSES[prediction])
for example, prediction, is_correct in zip(test_batch, test_prediction, correct_predicate)
if not is_correct
]
# Show up to 20 of the mistakes, annotated "prediction vs. true label".
display_images(
[(get_image(example), "prediction: {0}\nlabel:{1}".format(incorrect_prediction, get_class(example)))
for (example, incorrect_prediction) in incorrect[:20]])
```
## Exercises: Improve the model!
We've trained a baseline model, now let's try to improve it to achieve better accuracy. (Remember that you'll need to re-run the cells when you make a change.)
### Exercise 1: Try a different image model.
With TF-Hub, trying a few different image models is simple. Just replace the `"https://tfhub.dev/google/imagenet/mobilenet_v2_050_128/feature_vector/2"` handle in the `hub.Module()` call with a handle of different module and rerun all the code. You can see all available image modules at [tfhub.dev](https://tfhub.dev/s?module-type=image-feature-vector).
A good choice might be one of the other [MobileNet V2 modules](https://tfhub.dev/s?module-type=image-feature-vector&network-architecture=mobilenet-v2). Many of the modules -- including the MobileNet modules -- were trained on the [ImageNet dataset](http://image-net.org/challenges/LSVRC/2012/index#task) which contains over 1 million images and 1000 classes. Choosing a network architecture provides a tradeoff between speed and classification accuracy: models like MobileNet or NASNet Mobile are fast and small, more traditional architectures like Inception and ResNet were designed for accuracy.
For the larger Inception V3 architecture, you can also explore the benefits of pre-training on a domain closer to your own task: it is also available as a [module trained on the iNaturalist dataset](https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/1) of plants and animals.
### Exercise 2: Add a hidden layer.
Stack a hidden layer between extracted image features and the linear classifier (in function `create_model()` above). To create a non-linear hidden layer with e.g. 100 nodes, use [tf.layers.dense](https://www.tensorflow.org/api_docs/python/tf/compat/v1/layers/dense) with units set to 100 and activation set to `tf.nn.relu`. Does changing the size of the hidden layer affect the test accuracy? Does adding second hidden layer improve the accuracy?
### Exercise 3: Change hyperparameters.
Does increasing the *number of training steps* improve final accuracy? Can you *change the learning rate* to make your model converge more quickly? Does the training *batch size* affect your model's performance?
### Exercise 4: Try a different optimizer.
Replace the basic GradientDescentOptimizer with a more sophisticated optimizer, e.g. [AdagradOptimizer](https://www.tensorflow.org/api_docs/python/tf/compat/v1/train/AdagradOptimizer). Does it make a difference to your model training? If you want to learn more about the benefits of different optimization algorithms, check out [this post](http://ruder.io/optimizing-gradient-descent/).
## Want to learn more?
If you are interested in a more advanced version of this tutorial, check out the [TensorFlow image retraining tutorial](https://www.tensorflow.org/hub/tutorials/image_retraining) which walks you through visualizing the training using TensorBoard, advanced techniques like dataset augmentation by distorting images, and replacing the flowers dataset to learn an image classifier on your own dataset.
You can learn more about TensorFlow at [tensorflow.org](http://tensorflow.org) and see the TF-Hub API documentation is available at [tensorflow.org/hub](https://www.tensorflow.org/hub/). Find available TensorFlow Hub modules at [tfhub.dev](http://tfhub.dev) including more image feature vector modules and text embedding modules.
Also check out the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/) which is Google's fast-paced, practical introduction to machine learning.
| github_jupyter |
# DRF of CNS-data
```
%load_ext autoreload
%autoreload 2
%matplotlib notebook
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from matplotlib.colors import LogNorm
from sklearn.utils import shuffle
from itertools import groupby
from matplotlib.figure import figaspect
# package developed for the analysis
from world_viewer.cns_world import CNSWorld
from world_viewer.glasses import Glasses
# DTU Data Wrapper
from sensible_raw.loaders import loader
```
## 1) Load and Prepare Data
```
# load data for analysis
cns = CNSWorld()
cns.load_world(opinions = ['fitness'], read_cached = False, stop=False, write_pickle = False, continous_op=False)
# load analysis tools
cns_glasses = Glasses(cns)
# remove not needed data in order to save memory
cns.d_ij = None
# set analysis parameters
analysis = 'expo_frac'
opinion_type = "op_fitness"
binning = True
n_bins = 10
save_plots = True
show_plot = True
# load previously calculated exposure instead of recalculating it
# (FIX: a stray notebook file name "4.1_CopenhagenDataRelativeExposure.ipynb"
# had been fused onto this comment line, making the cell a syntax error)
exposure = pd.read_pickle("tmp/fitness_exposure_tx7.pkl")
# alternative: recalculate exposure value
# exposure = cns_glasses.calc_exposure("expo_frac", "op_fitness", exposure_time = 7)
# filter by degree: keep only nodes with an average of >= 4 neighbours
degree = exposure.groupby("node_id").n_nbs.mean().to_frame("avg").reset_index()
exposure = exposure.loc[degree.loc[degree.avg >= 4,"node_id"]]
exposure = exposure.loc[exposure.n_nbs_mean > 1/7]
# cut time series in time slices: spring + summer
exposure.reset_index(inplace=True)
start_spring = "2014-02-01"
end_spring = "2014-04-30"
exposure_spring = exposure.loc[(exposure.time >= pd.to_datetime(start_spring)) & (exposure.time <= pd.to_datetime(end_spring))].copy()
start_summer = "2014-07-01"
end_summer = "2014-09-30"
exposure_summer = exposure.loc[(exposure.time >= pd.to_datetime(start_summer)) & (exposure.time <= pd.to_datetime(end_summer))].copy()
exposure_spring.set_index(['node_id','time'],inplace=True)
exposure_summer.set_index(['node_id','time'],inplace=True)
exposure.set_index(['node_id','time'],inplace=True)
# column "exposure" equals relative exposure
# column "n_influencer_summed" equals absolute exposure
# use absolute exposure for further calculations
exposure.rename(columns={"exposure":"exposure_old", "n_influencer_summed":"exposure"},inplace=True)
exposure_spring.rename(columns={"exposure":"exposure_old", "n_influencer_summed":"exposure"},inplace=True)
exposure_summer.rename(columns={"exposure":"exposure_old", "n_influencer_summed":"exposure"},inplace=True)
# calculate if nodes changed trait after experiencing a certain exposure
# save value as column "op_change" (bool)
data_spring, expo_agg_spring = cns_glasses.opinion_change_per_exposure(exposure_spring, opinion_type, opinion_change_time = 1)
data_summer, expo_agg_summer = cns_glasses.opinion_change_per_exposure(exposure_summer, opinion_type, opinion_change_time = 1)
data_full, expo_agg_full = cns_glasses.opinion_change_per_exposure(exposure, opinion_type, opinion_change_time = 1)
# save calculated values on hard drive
expo_agg_spring.to_pickle("tmp/final/exposure_filtered_spring.pkl")
```
## 2) Plot Dose Response Functions (FIG.: 4.9)
```
# plot drf for full timeseries
fig, ax = plt.subplots(1,2,subplot_kw = {"adjustable":'box', "aspect":200/0.25})
cns_glasses.output_folder = "final/"
suffix = "_full"
# restrict to absolute exposures up to 200 for the full-period plot
data = data_full[data_full.exposure <= 200]
# NOTE(review): bin_width=1 here is immediately overwritten by bin_width=5 below
bin_width=1
q_binning=True
bin_width=5
n_bins=15
# left panel: probability of BECOMING active as a function of exposure
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == True], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=False, show_plot=show_plot, y_lower_lim=0, y_upper_lim = 0.2, fig=fig, ax=ax[0], label="become active", q_binning = q_binning, \
loglog=False, step_plot=True, color="forestgreen", suffix=suffix,x_lim=200)
# right panel: probability of BECOMING passive; save_plots=True writes the figure
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == False], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=True, show_plot=show_plot, y_lower_lim=0, y_upper_lim = 0.2, fig=fig, ax=ax[1], label="become passive", loglog=False, \
q_binning=q_binning, step_plot=True, color="darkorange", suffix=suffix,x_lim=200)
# plot drf for summer timeseries
fig, ax = plt.subplots(1,2,subplot_kw = {"adjustable":'box', "aspect":200/0.25})
cns_glasses.output_folder = "final/"
suffix = "_summer"
data = data_summer[data_summer.exposure <= 200]
# summer uses fixed-width bins (no quantile binning)
q_binning=False
bin_width=15
n_bins=20
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == True].dropna(), "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=False, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.25, fig=fig, ax=ax[0], label="become active", q_binning = q_binning, loglog=False, step_plot=True, color="forestgreen", suffix=suffix,x_lim=200)
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == False].dropna(), "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=True, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.25, fig=fig, ax=ax[1], label="become passive", loglog=False, q_binning=q_binning, step_plot=True, color="darkorange", suffix=suffix,x_lim=200)
# plot drf for spring timeseries (wider exposure range, up to 330)
x_max = 330
w, h = figaspect(0.5)
fig, ax = plt.subplots(1,2,figsize=(w,h))
cns_glasses.output_folder = "final/"
suffix = "_spring"
data = data_spring[data_spring.exposure <= x_max]
q_binning=False
bin_width=15
n_bins=15
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == True], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=False, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.2, fig=fig, ax=ax[0], label="become active", \
q_binning = q_binning, loglog=False, step_plot=True, color="forestgreen", suffix=suffix, x_lim=x_max)
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == False], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=True, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.2, fig=fig, ax=ax[1], label="become passive", loglog=False, \
q_binning=q_binning, step_plot=True, color="darkorange", suffix=suffix, x_lim=x_max)
fig.savefig("tmp/final/empirical_drfs.pdf" , bbox_inches='tight')
# single-axis spring variant: both curves overlaid on one axis, saved twice
# (once after the first curve, once after both) for the thesis figures
x_max = 330
fig, ax = plt.subplots()
cns_glasses.output_folder = "final/"
suffix = "_spring"
data = data_spring[data_spring.exposure <= x_max]
q_binning=False
bin_width=15
n_bins=15
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == True], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=False, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.2, fig=fig, ax=ax, label="become active", \
q_binning = q_binning, loglog=False, step_plot=True, color="forestgreen", suffix=suffix, x_lim=x_max,marker="^", markersize=5)
fig.savefig("tmp/final/empirical_drf_1.pdf" , bbox_inches='tight')
cns_glasses.plot_opinion_change_per_exposure_number(data[data.op_fitness == False], "expo_nmb", binning, n_bins=n_bins, bin_width=bin_width, \
save_plots=True, show_plot=show_plot, y_lower_lim=-0.01, y_upper_lim = 0.2, fig=fig, ax=ax, label="become passive", loglog=False, \
q_binning=q_binning, step_plot=True, color="darkorange", suffix=suffix, x_lim=x_max, marker=".")
fig.savefig("tmp/final/empirical_drf_2.pdf" , bbox_inches='tight')
```
## 3) Plot Distribution of the Absolute Exposure (FIG.: 4.10)
```
# 2D histogram of absolute exposure over time for nodes that were active,
# with the per-timestep mean exposure overlaid as a dashed line.
expo = expo_agg_spring[expo_agg_spring.op_fitness==True].reset_index()
#expo = expo.loc[(expo.time > "2013-09-01") & (expo.time < "2014-09-01")]
#expo.time = expo.time.dt.dayofyear
# convert timestamps to integer nanoseconds so hist2d can bin them
expo.time = expo.time.astype("int")
mean_expo = expo.groupby("time").exposure.mean().to_frame("mean exposure").reset_index()
mean_expo.set_index("time",inplace=True)
fig,ax = plt.subplots()
expo.dropna(inplace=True)
#expo = expo[expo.exposure < 250]
# log-scaled counts; one time bin per distinct timestamp, 120 exposure bins
plot = ax.hist2d(expo.time,expo.exposure,norm=LogNorm(), bins = [len(expo.time.unique())
,120])#, vmin=1, vmax=100)
expo.groupby("time").exposure.mean().plot(label=r"mean exposure $<K>$",color="red",linestyle="--")
ax.legend(loc="upper left")
ax.set_xlabel("time")
ax.set_ylabel(r"absolute exposure $K$")
# relabel the integer-nanosecond ticks as readable dates
ax.set_xticklabels(pd.to_datetime(ax.get_xticks()).strftime('%d. %B %Y'), rotation=40, ha="right")
cbar = fig.colorbar(plot[3])
cbar.set_label('# number of occurrences')
fig.savefig("tmp/final/abs_expo_distrib_spring.pdf",bbox_inches='tight')
```
| github_jupyter |
# Example data analysis notebook
This notebook downloads and analyses some surface air temperature anomaly data from [Berkeley Earth](http://berkeleyearth.org/).
Import the required libraries.
```
import matplotlib.pyplot as plt
import pandas as pd
import requests
```
Use the [requests](http://docs.python-requests.org/) library to download the data file for Australia.
```
# URL to the data
url = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/australia-TAVG-Trend.txt'
```
## Open & Clean the Data
To begin there are some data cleaning steps that you need to implement here.
Often when you are building a workflow you build it out in it's entirety first
to get the pieces working. And then you turn those pieces into functions to
modularize and scale your workflow
```
# Read the whitespace-delimited Berkeley Earth file, skipping the 69-line
# text header.
temp_df = pd.read_csv(url,
skiprows=69,
delim_whitespace=True)
# Column names are shifted by one because the header row starts with '%';
# keep everything after the first token.
all_cols = temp_df.columns[1:]
# Remove the last column (misaligned by the shifted header)
temp_df = temp_df.iloc[:, :-1]
# CLEANUP: Drop the commas from the column names & Add a day column
temp_df.columns = [acol.replace(',', '') for acol in all_cols]
temp_df = temp_df.assign(Day=1)
# Finally create a date time column
temp_df["date"] = pd.to_datetime(temp_df[['Year', 'Month', 'Day']])
temp_df.set_index("date", inplace=True)
temp_df
```
# Initial Visualization
Plot the data to explore it!
```
# Line plot of the raw monthly temperature anomaly for Australia.
f, ax = plt.subplots(figsize=(10, 6))
temp_df.plot(y="Anomaly",
ax=ax,
legend=False,
color="Purple")
ax.set(title="Temperature anomaly for Australia",
xlabel='Date',
ylabel='Temperature anomaly (C)')
plt.grid()
plt.show()
```
## Calculate Moving Average
Next you calculate a 12-month moving average for a smoother time series.
```
# 12-month rolling mean of the anomaly (first 11 values are NaN by design).
temp_moving_avg = temp_df["Anomaly"].rolling(window=12).mean()
temp_moving_avg
```
## Plot the Data
```
# Overlay the raw anomaly (grey) with its 12-month moving average (purple).
f, ax = plt.subplots(figsize=(10, 6))
temp_df.plot(y="Anomaly",
ax=ax,
legend=True,
color="DarkGrey",
label="Anomaly")
temp_moving_avg.plot(ax=ax,
legend=True,
color="Purple",
linewidth=2,
label="Moving average")
ax.set(title="Temperature Anomaly for Australia",
xlabel='Date',
ylabel='Temperature anomaly (C)')
plt.grid()
plt.show()
```
## Modularize Your Workflow
The function below may be bigger than you want. It is a
starting place.
```
# Re-run the same download/clean/plot workflow via the packaged helper
# function instead of the inline steps above.
from agu_oss import open_and_clean
help(open_and_clean)
url = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/australia-TAVG-Trend.txt'
temp_australia = open_and_clean(url)
temp_australia.head()
f, ax = plt.subplots(figsize=(10, 6))
temp_australia.plot(y="Anomaly",
color="purple",
ax=ax)
plt.show()
```
## Try this on another location
http://berkeleyearth.lbl.gov/country-list/
http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/belize-TAVG-Trend.txt
```
# Apply the modular workflow to a second location (Belize) to show reuse.
url_belize = "http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/belize-TAVG-Trend.txt"
temp_belize = open_and_clean(url_belize)
f, ax = plt.subplots(figsize=(10, 6))
temp_belize.plot(y="Anomaly",
color="purple",
ax=ax)
plt.show()
```
| github_jupyter |
# Policy Evaluation in Contextual Bandits
** *
This IPython notebook illustrates the usage of the [contextualbandits](https://www.github.com/david-cortes/contextualbandits) package's `evaluation` module through a simulation with public datasets.
** Small note: if the TOC here is not clickable or the math symbols don't show properly, try visualizing this same notebook from nbviewer following [this link](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/policy_evaluation.ipynb). **
** *
### Sections
[1. Problem description](#p1)
[2. Methods](#p2)
[3. Experiments](#p3)
[4. References](#p4)
** *
<a id="p1"></a>
## 1. Problem description
For a general description of the contextual bandits problem, see the first part of the package's guide [Online Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/online_contextual_bandits.ipynb).
The previous two guides [Online Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/online_contextual_bandits.ipynb) and [Off-policy Learning in Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/offpolicy_learning.ipynb) evaluated the performance of different policies by looking at the actions they would have chosen in a fully-labeled dataset for multi-label classification.
However, in contextual bandits settings one doesn't have access to fully-labeled data, and the data that one has is usually very biased, as it is collected through some policy that aims to maximize rewards. In this situation, it is a lot more difficult to evaluate the performance of a new policy. This module deals with such problem.
** *
<a id="p2"></a>
## 2. Methods
This module implements two policy evaluation methods:
* `evaluateRejectionSampling` (see _"A contextual-bandit approach to personalized news article recommendation"_), for both online and offline policies.
* `evaluateDoublyRobust` (see _"Doubly Robust Policy Evaluation and Learning"_).
Both of these are based on a train-test split - that is, the policy is trained with some data and evaluated on different data.
The best way to obtain a good estimate of the performance of a policy is to collect some data on which actions are chosen at random. When such data is available, one can iterate through it, let the policy choose an action for each observation, and if it matches with what was chosen, take it along with its rewards for evaluation purposes, skip it if not. This simple rejection sampling method is unbiased and let's you evaluate both online and offline algorithms. **It must be stressed that evaluating data like this only works when the actions of this test sample are chosen at random, otherwise the estimates will be biased (and likely very wrong)**.
When such data is not available and there is reasonable variety of actions chosen, another option is doubly-robust estimates. These are meant for the case of continuous rewards, and don't work as well with discrete rewards though, especially when there are many labels, but they can still be tried.
The doubly-robust estimate requires, as it names suggests, two estimates: one of the reward that each arm will give, and another of the probability or score that the policy that collected the data gave to each arm it chose for each observation.
In a scenario such as online advertising, we don't need the second estimate if we record the scores that the models output along with the covariates-action-reward history. When using the functions from this package's `online` module, you can get such estimates for some of the policies by using their `predict_proba_separate` function.
For the first estimate, there are different options to obtain it. One option is to fit a (non-online) model to both the train and test sets to make reward estimates on the test set, or fit it only on the test set (while the policy to be evaluated is fitted to the training set); or perhaps even use the score estimates from the old policy (which chose the actions on the training and test data) or from the new policy. The function `evaluateDoublyRobust` provides an API that can accomodate all these methods.
** *
<a id="p3"></a>
## 3. Experiments
Just like in the previous guide [Off-policy Learning in Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/offpolicy_learning.ipynb), I will simulate data generated from a policy by fitting a logistic regression model with a sample of the **fully-labeled** data, then let it choose actions for some more data, and take those actions and rewards as input for a new policy, along with the estimated reward probabilities for the actions that were chosen.
The new policy will then be evaluated on a test sample with actions already pre-selected, and the estimates from the methods here will be compared with the real rewards, which we can know because the data is fully labeled.
The data are again the Bibtext and Mediamill datasets.
** *
Loading the Bibtex dataset again:
```
import pandas as pd, numpy as np, re
from sklearn.preprocessing import MultiLabelBinarizer
def parse_data(file_name):
    """Parse a Bibtex/Mediamill-style multi-label dataset file.

    Each data line is "<label,label,...> <feat>:<val> <feat>:<val> ...";
    lines that do not start with a digit carry no labels. The first line
    of the file is a header and is skipped.

    Args:
        file_name: path to the dataset text file.

    Returns:
        Tuple (features, y): features is an (n_samples, n_features)
        ndarray (missing features filled with 0), y is the binary
        label-indicator matrix from MultiLabelBinarizer.
    """
    features = list()
    labels = list()
    with open(file_name, 'rt') as f:
        f.readline()  # skip the header line
        for l in f:
            if bool(re.search("^[0-9]", l)):
                # leading digits -> comma-separated label list, then features
                g = re.search("^(([0-9]{1,2},?)+)\s(.*)$", l)
                labels.append([int(i) for i in g.group(1).split(",")])
                # SECURITY NOTE: eval on file content — only run on trusted
                # dataset files, never on untrusted input.
                features.append(eval("{" + re.sub("\s", ",", g.group(3)) + "}"))
            else:
                l = l.strip()
                labels.append([])
                features.append(eval("{" + re.sub("\s", ",", l) + "}"))
    # FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
    # in pandas 1.0; to_numpy() is the supported equivalent.
    features = pd.DataFrame.from_dict(features).fillna(0).to_numpy()
    mlb = MultiLabelBinarizer()
    y = mlb.fit_transform(labels)
    return features, y
# Parse the Bibtex dataset and report the feature/label matrix shapes.
features, y = parse_data("Bibtex_data.txt")
print(features.shape)
print(y.shape)
```
Simulating a stationary exploration policy and a test set:
```
from sklearn.linear_model import LogisticRegression
# the 'explorer' policy will be fit with this small sample of the rows
st_seed = 0
end_seed = 2000
# then it will choose actions for this larger sample, which will be the input for the new policy
st_exploration = 0
end_exploration = 3000
# the new policy will be evaluated with a separate test set
st_test = 3000
end_test = 7395
# separating the covariates data for each case
Xseed = features[st_seed:end_seed, :]
Xexplore_sample = features[st_exploration:end_exploration, :]
Xtest = features[st_test:end_test, :]
nchoices = y.shape[1]
# now constructing an exploration policy as explained above, with fully-labeled data
explorer = LogisticRegression()
np.random.seed(100)
explorer.fit(Xseed, np.argmax(y[st_seed:end_seed], axis=1))
# letting the exploration policy choose actions for the new policy input
np.random.seed(100)
actions_explore_sample=explorer.predict(Xexplore_sample)
# reward = 1 iff the chosen action's label is present for that row
rewards_explore_sample=y[st_exploration:end_exploration, :]\
[np.arange(end_exploration - st_exploration), actions_explore_sample]
# extracting the probabilities it estimated
# (map class labels to the classifier's internal column order first)
ix_internal_actions = {j:i for i,j in enumerate(explorer.classes_)}
ix_internal_actions = [ix_internal_actions[i] for i in actions_explore_sample]
ix_internal_actions = np.array(ix_internal_actions)
prob_actions_explore = explorer.predict_proba(Xexplore_sample)[np.arange(Xexplore_sample.shape[0]),
ix_internal_actions]
# generating a test set with random actions
actions_test = np.random.randint(nchoices, size=end_test - st_test)
rewards_test = y[st_test:end_test, :][np.arange(end_test - st_test), actions_test]
```
Rejection sampling estimate:
```
from contextualbandits.online import SeparateClassifiers
from contextualbandits.evaluation import evaluateRejectionSampling
# Fit the new policy on the exploration data, then estimate its mean reward
# by rejection sampling on the randomly-acted test set, and compare with the
# true mean reward (known here because the data is fully labeled).
new_policy = SeparateClassifiers(LogisticRegression(C=0.1), y.shape[1])
np.random.seed(100)
new_policy.fit(Xexplore_sample, actions_explore_sample, rewards_explore_sample)
np.random.seed(100)
est_r, ncases = evaluateRejectionSampling(new_policy, X=Xtest, a=actions_test, r=rewards_test, online=False)
np.random.seed(100)
real_r = np.mean(y[st_test:end_test,:][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print('Test set Rejection Sampling mean reward estimate (new policy)')
print('Estimated mean reward: ',est_r)
print('Sample size: ', ncases)
print('----------------')
print('Real mean reward: ', real_r)
```
We can also evaluate the exploration policy with the same method:
```
# Same rejection-sampling evaluation, applied to the exploration policy.
np.random.seed(100)
est_r, ncases = evaluateRejectionSampling(explorer, X=Xtest, a=actions_test, r=rewards_test, online=False)
real_r = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), explorer.predict(Xtest)])
print('Test set Rejection Sampling mean reward estimate (old policy)')
print('Estimated mean reward: ', est_r)
print('Sample size: ', ncases)
print('----------------')
print('Real mean reward: ', real_r)
```
_(Remember that the exploration policy was fit with a smaller set of fully-labeled data, thus it's no surprise it performs a lot better)_
The estimates are not exact, but they are somewhat close to the real values as expected. They get better the more cases are successfully sampled, and their estimate should follow the central limit theorem.
** *
To be stressed again, such an evaluation method only works when the data was collected by choosing actions at random. **If we evaluate it with the actions chosen by the exploration policy, the results will be totally biased as demonstrated here:**
```
# Deliberately WRONG usage: the test-set actions are chosen by the old
# policy (not at random), so rejection sampling gives a biased estimate.
actions_test_biased = explorer.predict(Xtest)
rewards_test_biased = y[st_test:end_test, :][np.arange(end_test - st_test), actions_test_biased]
est_r, ncases = evaluateRejectionSampling(new_policy, X=Xtest, a=actions_test_biased,\
r=rewards_test_biased, online=False)
real_r = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print('Biased Test set Rejection Sampling mean reward estimate (new policy)')
print('Estimated mean reward: ', est_r)
print('Sample size: ', ncases)
print('----------------')
print('Real mean reward: ', real_r)
print("(Don't try rejection sampling on a biased test set)")
```
We can also try Doubly-Robust estimates, but these work poorly for a dataset like this:
```
from contextualbandits.evaluation import evaluateDoublyRobust
# getting estimated probabilities for the biased test sample chosen by the old policy
ix_internal_actions = {j:i for i,j in enumerate(explorer.classes_)}
ix_internal_actions = [ix_internal_actions[i] for i in actions_test_biased]
ix_internal_actions = np.array(ix_internal_actions)
prob_actions_test_biased = explorer.predict_proba(Xtest)[np.arange(Xtest.shape[0]), ix_internal_actions]
# actions that the new policy will choose
np.random.seed(1)
pred = new_policy.predict(Xtest)
# method 1: estimating rewards by fitting another model to the whole data (train + test)
model_fit_on_all_data = SeparateClassifiers(LogisticRegression(), y.shape[1])
np.random.seed(1)
model_fit_on_all_data.fit(np.r_[Xexplore_sample, Xtest],
np.r_[actions_explore_sample, actions_test_biased],
np.r_[rewards_explore_sample, rewards_test_biased])
np.random.seed(1)
est_r_dr_whole = evaluateDoublyRobust(pred, X=Xtest, a=actions_test_biased, r=rewards_test_biased,\
p=prob_actions_test_biased, reward_estimator = model_fit_on_all_data)
# method 2: estimating rewards by fitting another model to the test data only
np.random.seed(1)
est_r_dr_test_only = evaluateDoublyRobust(pred, X=Xtest, a=actions_test_biased, r=rewards_test_biased,\
p=prob_actions_test_biased, reward_estimator = LogisticRegression(), nchoices=y.shape[1])
# Compare both doubly-robust estimates against the known true mean reward.
print('Biased Test set mean reward estimates (new policy)')
print('DR estimate (reward estimator fit on train+test): ', est_r_dr_whole)
print('DR estimate (reward estimator fit on test only): ', est_r_dr_test_only)
print('----------------')
print('Real mean reward: ', real_r)
```
Both estimates are very wrong, but they are still less wrong than the wrongly-conducted rejection sampling from before.
** *
Finally, rejection sampling can also be used to evaluate online policies - in this case though, be aware that the estimate will only be considered up to a certain number of rounds (as many as it accepts, but it will end up rejecting the majority), but online policies keep improving with time.
Here I will use the Mediamill dataset instead, as it has a lot more data:
```
from contextualbandits.online import BootstrappedUCB
# Evaluate an ONLINE policy via rejection sampling on the larger Mediamill
# dataset; actions in the log are uniformly random, as the method requires.
features, y = parse_data("Mediamill_data.txt")
nchoices = y.shape[1]
Xall=features
actions_random = np.random.randint(nchoices, size = Xall.shape[0])
rewards_actions = y[np.arange(y.shape[0]), actions_random]
online_policy = BootstrappedUCB(LogisticRegression(), y.shape[1])
evaluateRejectionSampling(online_policy,
X = Xall,
a = actions_random,
r = rewards_actions,
online = True,
start_point_online = 'random',
batch_size = 5)
```
** *
<a id="p4"></a>
## 4. References
* Li, L., Chu, W., Langford, J., & Schapire, R. E. (2010, April). A contextual-bandit approach to personalized news article recommendation. In Proceedings of the 19th international conference on World wide web (pp. 661-670). ACM.
* Dudík, M., Langford, J., & Li, L. (2011). Doubly robust policy evaluation and learning. arXiv preprint arXiv:1103.4601.
| github_jupyter |
```
!pip install scikit-learn==1.0
!pip install xgboost==1.4.2
!pip install catboost==0.26.1
!pip install pandas==1.3.3
!pip install radiant-mlhub==0.3.0
!pip install rasterio==1.2.8
!pip install numpy==1.21.2
!pip install pathlib==1.0.1
!pip install tqdm==4.62.3
!pip install joblib==1.0.1
!pip install matplotlib==3.4.3
!pip install Pillow==8.3.2
!pip install torch==1.9.1
!pip install plotly==5.3.1
# Query the attached GPU via the nvidia-smi shell command (IPython magic);
# if the command failed there is no GPU, so prompt the user to enable one.
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
import pandas as pd
import numpy as np
import random
import torch
def seed_all(seed_value):
    """Seed every RNG in use (Python, NumPy, PyTorch) for reproducibility."""
    random.seed(seed_value)        # Python stdlib RNG
    np.random.seed(seed_value)     # NumPy (CPU) RNG
    torch.manual_seed(seed_value)  # PyTorch CPU RNG
    if torch.cuda.is_available():
        # Seed the current GPU and all others, and pin cuDNN to
        # deterministic (non-benchmarked) kernels.
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

seed_all(13)
# from google.colab import drive
# drive.mount('/content/drive')
import warnings
warnings.filterwarnings("ignore")
import gc
import pandas as pd
import numpy as np
from sklearn.metrics import *
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from indices_creation import *
```
## Data Load Step
1. We load the mean aggregations for both train and test. The mean aggregations contain the labels and field IDs.
2. The quantile aggregations contain the field IDs.
```
import os
os.getcwd()
# Per-field MEAN band aggregations; also carries the target 'label' and 'field_id'.
train_df_mean = pd.read_csv('train_mean.csv')
#### we need to drop 'label' and 'field_id' later in the code
test_df_mean = pd.read_csv('test_mean.csv')
#### we need to drop 'field_id' later in the code
# Per-field MEDIAN band aggregations; the main modelling features.
train_df_median = pd.read_csv('train_median.csv')
#### we need to drop 'field_id' later in the code
test_df_median = pd.read_csv('test_median.csv')
#### we need to drop 'field_id' later in the code
# Field-size feature; the source file capitalises the id column differently,
# so normalise it to 'field_id' before merging.
train_size = pd.read_csv('size_of_field_train.csv')
test_size = pd.read_csv('size_of_field_test.csv')
train_size = train_size.rename({'Field_id':'field_id'},axis=1)
test_size = test_size.rename({'Field_id':'field_id'},axis=1)
train_df_median = train_df_median.merge(train_size, on =['field_id'],how='left')
test_df_median = test_df_median.merge(test_size, on =['field_id'],how='left')
# K-means style cluster assignment (7 clusters) used as a categorical feature.
cluster_df = pd.read_csv('seven_cluster.csv')
cluster_df = cluster_df.rename({'cluster_label':'cluster_label_7'},axis=1)
train_df_median = train_df_median.merge(cluster_df,on=['field_id'],how='left')
test_df_median = test_df_median.merge(cluster_df,on=['field_id'],how='left')
gc.collect()
# Neighbourhood features computed within a 0.25 radius of each field.
full_nearest=pd.read_csv('full_nearest_radius_0.25.csv')
full_nearest
train_df_median = train_df_median.merge(full_nearest,on=['field_id'],how='left')
print(train_df_median.shape)
test_df_median = test_df_median.merge(full_nearest,on=['field_id'],how='left')
```
## Removing Erroneous data points
We observed some data points for which the labels were floats, we will remove them (they are few in number) to make sure our model is learning from correctly labelled data points
```
print(f'The shape of train data before outlier removal - {train_df_mean.shape}')
# Keep only integer class labels 1..9; a few rows carry float labels and are dropped.
train_df_mean = train_df_mean[train_df_mean.label.isin(list(range(1,10)))]
print(f'The shape of train data after outlier removal - {train_df_mean.shape}')
# Propagate the same row filter to the median-aggregated frame via field ids.
relevant_fids = train_df_mean['field_id'].values.tolist()
train_df_median = train_df_median[train_df_median['field_id'].isin(relevant_fids)]
print(f'The shape of median train data - {train_df_median.shape} and mean train data {train_df_mean.shape}' )
### two extra columns in train_df_mean being 'label' and 'size_of_field'
```
### Extract date list
We extract the list of all dates where observations were seen for index generation
```
# Sentinel-2 band prefixes; every column is named <band-prefix><date>.
cols = ['B01_','B02_','B03_','B04_','B05_','B06_','B07_','B08_','B09_','B8A_','B11_','B12_']
columns_available = train_df_mean.columns.tolist()
# Collect every column that belongs to one of the bands above.
cols2consider = []
for col in cols:
    cols2consider.extend( [c for c in columns_available if col in c])
# Use band B01 columns to recover the list of observation dates
# (every band is observed on the same dates).
bands_with_dates = [c for c in columns_available if 'B01_' in c]
dates = [c.replace('B01_','') for c in bands_with_dates]
print(f'The sample showing the commencement dates where observations were seen is {dates[:10]}')
print(f'The sample showing the ending dates where observations were seen is {dates[-10:]}')
```
### Removal of field ID column
We consider only the relevant columns to be considered for the next step
```
# Restrict each frame to the band columns (plus target / engineered features);
# this also drops 'field_id' from the mean frames.
train_df_mean = train_df_mean[cols2consider+['label']]
test_df_mean = test_df_mean[cols2consider]
train_df_median = train_df_median[cols2consider+['size_of_field']+['cluster_label_7']+full_nearest.columns.tolist()]
test_df_median = test_df_median[cols2consider+['size_of_field']+['cluster_label_7']+full_nearest.columns.tolist()]
```
### Indices Creation
We will create the indices for train and test data for mean aggregates using the indices coded in indices_creation.py module
```
# train_df_mean = get_band_ndvi_red(train_df_mean,dates)
# train_df_mean = get_band_afri(train_df_mean,dates)
# train_df_mean = get_band_evi2(train_df_mean,dates)
# train_df_mean = get_band_ndmi(train_df_mean,dates)
# train_df_mean = get_band_ndvi(train_df_mean,dates)
# train_df_mean = get_band_evi(train_df_mean,dates)
# train_df_mean = get_band_bndvi(train_df_mean,dates)
# train_df_mean = get_band_nli(train_df_mean,dates)
# train_df_mean = get_band_lci(train_df_mean,dates)
# test_df_mean = get_band_ndvi_red(test_df_mean,dates)
# test_df_mean = get_band_afri(test_df_mean,dates)
# test_df_mean = get_band_evi2(test_df_mean,dates)
# test_df_mean = get_band_ndmi(test_df_mean,dates)
# test_df_mean = get_band_ndvi(test_df_mean,dates)
# test_df_mean = get_band_evi(test_df_mean,dates)
# test_df_mean = get_band_bndvi(test_df_mean,dates)
# test_df_mean = get_band_nli(test_df_mean,dates)
# test_df_mean = get_band_lci(test_df_mean,dates)
```
We will create the indices for train and test data for median aggregates using the indices coded in indices_creation.py module
```
# Derive spectral indices (NDVI, AFRI, EVI, ... from indices_creation.py) for the
# median-aggregated train frame, one index per call, over every observation date.
train_df_median = get_band_ndvi_red(train_df_median,dates)
train_df_median = get_band_afri(train_df_median,dates)
train_df_median = get_band_evi2(train_df_median,dates)
train_df_median = get_band_ndmi(train_df_median,dates)
train_df_median = get_band_ndvi(train_df_median,dates)
train_df_median = get_band_evi(train_df_median,dates)
train_df_median = get_band_bndvi(train_df_median,dates)
train_df_median = get_band_nli(train_df_median,dates)
# train_df_median = get_band_lci(train_df_median,dates)
# Same indices for the test frame (must match train feature-for-feature).
test_df_median = get_band_ndvi_red(test_df_median,dates)
test_df_median = get_band_afri(test_df_median,dates)
test_df_median = get_band_evi2(test_df_median,dates)
test_df_median = get_band_ndmi(test_df_median,dates)
test_df_median = get_band_ndvi(test_df_median,dates)
test_df_median = get_band_evi(test_df_median,dates)
test_df_median = get_band_bndvi(test_df_median,dates)
test_df_median = get_band_nli(test_df_median,dates)
# test_df_median = get_band_lci(test_df_median,dates)
# train_df_median = train_df_median.drop(cols2consider,axis=1)
# test_df_median = test_df_median.drop(cols2consider,axis=1)
train_df_mean.shape,train_df_median.shape,test_df_mean.shape,test_df_median.shape
######### Saving the label variable and dropping it from the data
train_y = train_df_mean['label'].values
train_df_mean = train_df_mean.drop(['label'],axis=1)
# Ratio-based indices can produce +-inf on zero denominators; convert to NaN so
# the tree models treat them as missing values.
train_df_mean.replace([np.inf, -np.inf], np.nan, inplace=True)
test_df_mean.replace([np.inf, -np.inf], np.nan, inplace=True)
train_df_median.replace([np.inf, -np.inf], np.nan, inplace=True)
test_df_median.replace([np.inf, -np.inf], np.nan, inplace=True)
# train_df_slope.replace([np.inf, -np.inf], np.nan, inplace=True)
# test_df_slope.replace([np.inf, -np.inf], np.nan, inplace=True)
train = train_df_median.values
test = test_df_median.values
# train = pd.concat([train_df_median,train_df_slope],axis=1).values
# test = pd.concat([test_df_median,test_df_slope],axis=1).values
print(f'The shape of model ready train data is {train.shape} and model ready test data is {test.shape}')
print(f'The shape of target is {train_y.shape}')
# Precomputed per-band slope features; concatenated column-wise with the
# engineered median features (row order assumed identical -- hence reset_index).
train1 = pd.read_csv('train_with_slopes.csv')
test1 = pd.read_csv('test_with_slopes.csv')
train1.replace([np.inf, -np.inf], np.nan, inplace=True)
test1.replace([np.inf, -np.inf], np.nan, inplace=True)
train2=pd.concat([pd.DataFrame(train1.values,columns=train1.columns),train_df_median[['size_of_field','cluster_label_7']+full_nearest.columns.tolist()].reset_index(drop=True)],axis=1)
test2=pd.concat([pd.DataFrame(test1.values,columns=test1.columns),test_df_median[['size_of_field','cluster_label_7']+full_nearest.columns.tolist()].reset_index(drop=True)],axis=1)
train2.head()
del train2['field_id']
del test2['field_id']
# Cluster-level target statistics joined on the 7-cluster assignment.
pivot=pd.read_csv('pivottable.csv')
pivot
train2=train2.merge(pivot,how='left',on='cluster_label_7')
test2=test2.merge(pivot,how='left',on='cluster_label_7')
train2
# Free the intermediate frames before training.
del train_df_mean,train_df_median,train1,train_size,test_df_mean,test_df_median,test1,test_size
import gc
gc.collect()
train = train2.values
test = test2.values
train.shape,test.shape
# (1616-1520)/8
# Stratified 15-fold CV over a 3-model ensemble (1x XGBoost, 2x CatBoost).
# Out-of-fold predictions for the 9 crop classes, plus accumulated test predictions.
oof_pred = np.zeros((len(train), 9))
y_pred_final = np.zeros((len(test), 9))
num_models = 3
temperature = 50   # sharpness of the exponential per-fold model weighting
n_splits = 15
error = []
kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=13)
for fold, (tr_ind, val_ind) in enumerate(kf.split(train, train_y)):
    wghts = [0]*num_models
    logloss = []          # validation log-loss of each model on this fold
    X_train, X_val = train[tr_ind], train[val_ind]
    y_train, y_val = train_y[tr_ind], train_y[val_ind]
    model1 = XGBClassifier(n_estimators=2000,random_state=13,learning_rate=0.04,colsample_bytree=0.95,reg_lambda=11,
                           tree_method='gpu_hist',eval_metric='mlogloss')
    model2 = CatBoostClassifier(task_type='GPU',verbose=False,n_estimators=5000,random_state=13,auto_class_weights='SqrtBalanced',max_depth=9,learning_rate=0.06)
    model3 = CatBoostClassifier(task_type='GPU',verbose=False,n_estimators=5000,random_state=13,auto_class_weights='SqrtBalanced',max_depth=10,learning_rate=0.04)
    model1.fit(X_train,y_train)
    val_pred1 = model1.predict_proba(X_val)
    logloss.append(log_loss(y_val,val_pred1))
    print('validation logloss model1 fold-',fold+1,': ',log_loss(y_val,val_pred1))
    model2.fit(X_train,y_train)
    val_pred2 = model2.predict_proba(X_val)
    logloss.append(log_loss(y_val,val_pred2))
    print('validation logloss model2 fold-',fold+1,': ',log_loss(y_val,val_pred2))
    model3.fit(X_train,y_train)
    val_pred3 = model3.predict_proba(X_val)
    logloss.append(log_loss(y_val,val_pred3))
    print('validation logloss model3 fold-',fold+1,': ',log_loss(y_val,val_pred3))
    # BUG FIX: `logloss` is a plain Python list, so `logloss/sum(logloss)` raised
    # a TypeError; convert to an ndarray before dividing.  Lower log-loss ->
    # exponentially larger weight, then normalise to sum to 1.
    wghts = np.exp(-temperature*np.array(logloss)/sum(logloss))
    wghts = wghts/sum(wghts)
    print(wghts)
    val_pred = wghts[0]*val_pred1+wghts[1]*val_pred2+wghts[2]*val_pred3
    print('Validation logloss for fold- ',fold+1,': ',log_loss(y_val,val_pred))
    oof_pred[val_ind] = val_pred
    # Average the weighted test predictions over all folds.
    y_pred_final += (wghts[0]*model1.predict_proba(test)+
                     wghts[1]*model2.predict_proba(test)+wghts[2]*model3.predict_proba(test)
                     )/(n_splits)
print('OOF LogLoss :- ',(log_loss(train_y,oof_pred)))
# Build the submission file: one probability column per crop class, keyed by field_id.
outputs = y_pred_final.copy()
test_df = pd.read_csv('test_mean.csv')
field_ids_test = test_df['field_id'].values.tolist()
data_test = pd.DataFrame(outputs)
data_test['field_id'] = field_ids_test
# Drop padding rows carrying the sentinel field id 0.
data_test = data_test[data_test.field_id != 0]
data_test
# Map the 0-8 class indices to the competition's crop names.
data_test = data_test.rename(columns={
    0:'Lucerne/Medics',
    1:'Planted pastures (perennial)',
    2:'Fallow',
    3:'Wine grapes',
    4:'Weeds',
    5:'Small grain grazing',
    6:'Wheat',
    7:'Canola',
    8:'Rooibos'
})
# FIX: take an explicit copy of the column subset so the astype assignment below
# operates on this frame rather than a view (avoids pandas SettingWithCopyWarning
# and guarantees the cast actually sticks).
pred_df = data_test[['field_id', 'Lucerne/Medics', 'Planted pastures (perennial)', 'Fallow', 'Wine grapes', 'Weeds', 'Small grain grazing', 'Wheat', 'Canola', 'Rooibos']].copy()
pred_df['field_id'] = pred_df['field_id'].astype(int)
pred_df = pred_df.sort_values(by=['field_id'],ascending=True)
pred_df
pred_df.to_csv('trial1_sep_salim.csv',index=False)
```
| github_jupyter |
## 1 卷积神经网络
在之前的神经网络学习过程中,使用的都是全连接神经网络,全连接神经网络对识别和预测都有非常好的效果。在之前使用 MNIST 数据集的实践过程中,输入神经网络的是一幅 28 行 28 列的 784 个像素点的灰度值,但是仅两层神经网络就有十多万个待训练参数(第一层$784\times128$个$\omega+128个b$,第二层$128\times10$个$\omega+10个b$,共 101770 个参数)。
在实际项目中,输入神经网络的是具有更高分辨率的彩色图片,使得送入全连接网络的输入特征数特别多,随着隐藏层数的增加,网络规模过大,待优化参数过多,很容易造成过拟合。**为了减少待训练参数,在实际应用时会先对原始图片进行特征提取,再把提取到的特征送给全连接网络**。而卷积计算就是一种有效的提取图像特征的方法。
### 1.1 卷积层
#### 1.1.1 卷积过程
卷积(Convolutional)的计算过程:
(1)一般会用一个正方形的卷积核,按指定步长,在输入特征图上滑动,遍历输入特征图中的每个像素点。滑动过程如下所示:

(2)每移动一个步长,卷积核会与输入特征图出现重合区域,重合区域对应元素相乘、求和再加上偏置项得到输出特征的每一个像素点。如果输入特征是单通道灰度图,那么使用的就是深度为 1 的单通道卷积核。那么计算过程如下:

**单通道与卷积核深度为 1 有什么关系呢**?首先要知道图片除去长度和宽度,还有一个表示图片色彩的通道(channel)数,灰度图就是单通道,RGB 彩色图就是三通道。要想让卷积核与输入特征图对应点匹配上,就必须让卷积核的深度与输入特征图的通道数保持一致,所以**输入特征图的通道数(深度)决定了当前层卷积核的深度**。那么对于单通道的特征图而言,通道数为 1 自然卷积核的深度也为 1。
综上所述,如果输入特征是三通道彩色图,则需要使用一个 $3\times3\times3$ 的卷积核,结合上面两幅图片,去理解下面的卷积计算过程:

在上面的动图中,使用了两个卷积核 $\text{filter } \omega_0$ 和 $\text{filter } \omega_1$,分别得到了最右边 output 下的两个输出特征图,所以当前层使用了几个卷积核,就有几个输出特征图,即**当前卷积核的个数决定了当前层输出特征图的深度**。如果觉得某层模型的特征提取能力不足,可以在这一层多用几个卷积核提高这一层的特征提取能力。卷积核的深度是固定的,但卷积核的个数是任意的。
#### 1.1.2感受野
感受野(Receptive Field):卷积神经网络各输出特征图中的每个像素点,在原始输入图片上映射区域的大小。简单点说就是,输出特征图上的一个点对应输入图上的区域。有一张图来理解就是:

根据上面这个图可以发现,对于一个 $5\times5$ 的输入特征图,经过两个 $3\times3$ 的卷积核的运算,可以得到一个像素点,那么对于这一个像素点来说,映射到输入特征图的感受野就是 5, 正好与输入特征图的大小一致。那么进一步再思考一下,如果对一个 $5\times5$ 的输入特征图,使用一个 $5\times5$ 的卷积核进行运算,得到也会是一个像素点,而对于这一个像素点来说它映射到输入特征图的感受野也是 5。
那么问题来了,既然对于一个相同的输入特征图,应用两个 $3\times3$ 的卷积核和应用一个 $5\times5$ 卷积核的特征提取能力是一样的,那么应该使用那种方案比较好?
这个时候就要考虑,两个方案所承载的待训练参数量和计算量了:
+ 对于两个 $3\times3$ 卷积核,参数量为 $3\times3 + 3\times3 = 18$ 个
+ 对于一个 $5\times5$ 卷积核,参数量为 $5 \times 5 = 25$ 个
在计算量上,假设输入特征图宽、高为 $x$,卷积核宽、高为 $k$,卷积计算步长为 1,输出图边长计算公式为 `输出图边长=(输入图边长-卷积核长+1)/步长`,那么输出特征图的像素个数就是输出图边长的平方,每个像素点都需要进行 $k^2$ 次乘加运算。因此总的计算量就是像素个数乘以乘加运算的次数:
+ 对于两个 $3\times3$ 卷积核,计算量为 $3\times3\times(x-3+1)^2 + 3\times3\times(x-3+1-3+1)^2=18x^2-108x+180$ 个
+ 对于一个 $5\times5$ 卷积核,计算量为 $5\times5(x-5+1)^2=25x^2-200x+400$ 个
直接给出结论,当 $x>10$ 时,两层 $3\times3$ 卷积核会比一个 $5\times5$ 卷积核的计算量少,也就是说前者会比后者的性能要好。这也是为什么现在的神经网络在卷积计算中常使用两层 $3\times3$ 卷积核替换一层 $5\times5$ 卷积核。
#### 1.1.3 填充
填充(padding):为了保持输出图像尺寸与输入图像一致,经常会在输入图像周围进行全零填充。参考下面的动图:

可以发现,输入特征图经过 $3\times3$ 卷积核的运算后,得到的输出特征图的大小与输入特征图的大小一致,其原因就在于,在进行卷积运算之前,在输入特征图周围进行了全 0 填充。此外,是否使用全 0 填充,对输出特征图尺寸的大小也有影响:
$$
L_{o}=\begin{cases}
\lceil \frac{L_{i}}{Step} \rceil & \text{ if } padding=same \\
\lceil \frac{L_{i} - L_{k}+1}{Step} \rceil & \text{ if } padding=valid
\end{cases}
$$
+ $L_{o}$:输出特征图边长
+ $L_{i}$:输入特征图边长
+ $L_{k}$:卷积核边长
+ $Step$:步长
+ $padding=same$:全 0 填充
+ $padding=valid$:不使用全 0 填充
如果计算结果为小数,需要向上取整,比如计算结果若为 2.2,则结果应该变为 3。后面的条件 `padding=same` 和 `padding=valid` 就是与 TensorFlow 的 API 相对应,在使用 `tf.keras.layers.Conv2D()` 来设置卷积层时,对于参数 `padding` 的取值就是 `same` 或者 `valid`。
在 TensorFlow 中,使用 `tf.keras.layers.Conv2D` 来定义卷积层
```python
tf.keras.layers.Conv2D (
filters=卷积核个数,
kernel_size=卷积核尺寸, #正方形写核长整数,或(核高h,核宽w)
strides=滑动步长, #横纵向相同写步长整数,或(纵向步长h,横向步长w),默认1
padding='same'或'valid', #使用全零填充是“same”,不使用是“valid”(默认)
activation='relu'或'sigmoid'或'tanh'或'softmax'等 , #如卷积层之后还有批标准化操作,则不在这里使用激活函数
input_shape=(高,宽,通道数) #输入特征图维度,可省略
)
```
### 1.2 批标准化层
神经网络对 0 附近的数据更敏感,但随着网络层数的增加,特征数据会出现偏离 0 均值的情况。标准化可以使数据符合以 0 为均值 1 为标准差的标准正态分布,把偏移的特征数据重新拉回到 0 附近。
批标准化(Batch Normalization,BN)就是对一小批(batch)数据进行标准化处理,使数据回归标准正态分布。常用在卷积操作和激活操作之间。
批标准化后,第 k 个卷积核的输出特征图中第 i 个像素点的值:
$$H_i^{'k}=\frac{H_i^k-\mu_{batch}^k}{\sigma_{batch}^k}$$
+ $H_i^k$:批标准化前,第 k 个卷积核,输出特征图中第 i 个像素点的值
+ $\mu_{batch}^k$:批标准化前,第 k 个卷积核,batch 张输出特征图中所有像素点平均值 $\mu_{batch}^k=\frac{1}{m}\sum_{i=1}^mH_i^k$
+ $\sigma_{batch}^k$:批标准化前,第 k 个卷积核,batch 张输出特征图中所有像素点标准差 $\sigma_{batch}^k=\sqrt{\delta+\frac{1}{m}\sum_{i=1}^m(H_i^k-\mu_{batch}^k)^2}$
BN 操作将原本偏移的特征数据重新拉回到 0 均值,使进入激活函数的数据分布在激活函数线性区,使得输入数据的微小变化,更明显的体现到激活函数的输出,提升了激活函数对输入数据的区分力。但是这种简单的特征数据标准化,使特征数据完全满足标准正态分布,集中在激活函数中心的线性区域。使激活函数丧失了非线性特性。因此在 BN 操作中为每个卷积核引入了两个可训练参数:缩放因子$\gamma$ 和 偏移因子$\beta$,在反向传播时会与其他参数一同被训练优化,使标准正态分布后的特征数据通过缩放因子和偏移因子优化了特征数据分布的宽窄和偏移量,保证了网络的非线性表达力。特征数据分布 $X_i^k$ 表示如下
$$X_i^k=\gamma H_i^{'k}+\beta_k$$
在 TensorFlow 中,使用 `tf.keras.layers.BatchNormalization()` 来表示批标准化层
### 1.3 池化层
池化(Pooling)操作用于减少卷积神经网络中的特征数量。主要方法有最大池化和均值池化。最大值池化可以提取图片纹理,均值池化可以保留背景特征。如果用 $2\times2$ 的池化核对输入图片以 2 为步长进行池化,输出图片将变为输入图片的四分之一。

最大值池化就是把与池化核覆盖区域的最大值输出,如下图(a);均值池化就是把与池化核覆盖区域的均值输出,如下图(b)。

在 Tensorflow 中分别使用 的是`tf.keras.layers.MaxPool2D` 函数和 `tf.keras.layers.AveragePooling2D` 函数,具体的使用方法如下:
最大值池化
```python
tf.keras.layers.MaxPool2D(
pool_size=池化核尺寸, #正方形写核长整数,或(核高h,核宽w)
strides=池化步长, #步长整数, 或(纵向步长h,横向步长w),默认为pool_size
padding='same'或'valid')#使用全零填充是“same”,不使用是“valid”(默认)
```
均值池化
```python
tf.keras.layers.AveragePooling2D(
pool_size=池化核尺寸, #正方形写核长整数,或(核高h,核宽w)
strides=池化步长, #步长整数, 或(纵向步长h,横向步长w),默认为pool_size
padding='same'或'valid')#使用全零填充是“same”,不使用是“valid”(默认)
```
### 1.4 Dropout 层
为了缓解神经网络过拟合,在训练过程中,将一部分隐藏层的神经元按照一定比例从神经网络中临时舍弃,在使用时,再把舍弃的神经元恢复链接。如下图所示,图(a)为标准的全连接神经网络,图(b)为使用 dropout 后的网络:

在 TensorFlow 中,使用 `tf.keras.layers.Dropout(舍弃的概率)` 来表示 dropout 层。如果要临时舍弃 20% 的神经元,可以把舍弃的概率设为 0.2。
### 1.5 总结
以上简单的介绍了卷积神经网络基本操作步骤。卷积神经网络在完成特征提取后,就会把数据送入到全连接网络中,因此对于卷积神经网络的整个基本流程可以概括如下:
> 卷积 —》 批标准化 -》 激活 -》 池化 -》 Dropout -》 全连接
>
> **C**onvolutional —》 **B**atch Normal -》 **A**ctivation -》 **P**ooling -》 **D**ropout -》 全连接
说完了卷积神经网络,那如何用一句话总结“卷积”到底是什么呢?
说白了,**卷积就是特征提取器**,就是将数据送入全连接网络之前,对图像进行特征提取的操作。依据上面的执行流程,就是大写加粗的 **CBAPD**。
体现到用 TensorFlow 来搭建网络结构模型中,就如下所示:
```python
model = tf.keras.models.Sequential([
Conv2D(filters=6, kernel_size=(5, 5), padding='same'), # 卷积层 C
BatchNormalization(), # BN 层 B
Activation('relu'), # 激活层 A
MaxPool2D(pool_size=(2, 2), strides=2, padding='same'),# 池化层 P
Dropout(0.2), # Dropout层 D
])
```
| github_jupyter |
# Data Prediction
#### Importing Libraries
```
import tensorflow as tf
from tensorflow.keras import models
import numpy as np
from PIL import Image
import cv2
import imutils
```
#### Global Variables
```
# Running-average background model; seeded lazily by run_avg().
bg = None
# Scratch file used to hand the thresholded ROI to the classifier.
temp_image = 'temp.png'
```
### Resize Image
Used to resize the image given as input.
```
def resizeImage(imageName):
    """Resize the image at *imageName* in place to a width of 100 px, preserving aspect ratio."""
    basewidth = 100
    img = Image.open(imageName)
    wpercent = (basewidth/float(img.size[0]))
    hsize = int((float(img.size[1])*float(wpercent)))
    # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # resampling filter and has existed for many major versions, so this stays
    # backward compatible.
    img = img.resize((basewidth, hsize), Image.LANCZOS)
    img.save(imageName)
```
## Running Average
```
def run_avg(image, aWeight):
    """Maintain the running-average background model stored in the global ``bg``."""
    global bg
    if bg is not None:
        # Background already seeded: blend the new frame into the accumulator.
        cv2.accumulateWeighted(image, bg, aWeight)
    else:
        # First call: a float copy of the frame becomes the initial background.
        bg = image.copy().astype("float")
```
## Segimentation
```
def segment(image, threshold=25):
    """Segment the hand in *image* against the global background model.

    Returns (thresholded, segmented) — the binary foreground mask and the
    largest contour — or None when no contour is detected.
    """
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)
    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff,
                                threshold,
                                255,
                                cv2.THRESH_BINARY)[1]
    # FIX: cv2.findContours returns a 2-tuple on OpenCV 2.x/4.x but a 3-tuple on
    # 3.x, so the old `(cnts, _) = ...` unpacking broke on 3.x.
    # imutils.grab_contours normalises both signatures.
    cnts = imutils.grab_contours(cv2.findContours(thresholded.copy(),
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_SIMPLE))
    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)
```
## Getting Predicted Class
```
def getPredictedClass():
    """Classify the saved temp image with the loaded model and return the raw prediction array."""
    # Load the thresholded frame that the capture loop wrote to disk.
    image = cv2.imread(temp_image)
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Single grayscale sample in NHWC layout: shape (1, 89, 100, 1).
    batch = np.array([grayscale.reshape(89, 100, 1)])
    # One row of per-gesture scores.
    return model.predict(batch)
```
## Displaying Result
```
def showStatistics(prediction):
    """Render per-gesture scores, the winning gesture, and a confidence % in an OpenCV window.

    *prediction* is the model output: shape (1, 4), one score per gesture.
    """
    gestures = ['fist', 'palm', 'swing', 'ok']
    n = len(gestures)
    x = 30
    y = 30
    # Enough rows for every gesture line plus the gesture/confidence footer.
    height = (n+3)*y
    width = 500
    textImage = np.zeros((height, width, 3), np.uint8)
    # One text line per gesture with its raw score.
    for i in range(0, len(gestures)):
        cv2.putText(textImage,
                    gestures[i] + ' : ' + f"{prediction[0][i]:.2f}" ,
                    (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (255, 255, 255),
                    2)
        y = y + 30
    predicted_gesture = gestures[np.argmax(prediction)]
    # FIX: the original accumulated into a variable named `sum`, shadowing the
    # builtin; np.sum computes the same total without shadowing.
    total = np.sum(prediction[0])
    # Confidence = winning score as a share of all scores.
    confidence = (np.amax(prediction) / total) * 100
    cv2.putText(textImage,
                "Gesture: " + predicted_gesture,
                (x, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 255, 255),
                2)
    y += 30
    cv2.putText(textImage,
                "Confidence: " + str(confidence) + "%",
                (x, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 255, 255),
                2)
    cv2.imshow("Statistics ", textImage)
```
#### (BUG) GPU memory overflow
> this is a workaround
```
# Three alternative workarounds for TF grabbing all GPU memory; flip exactly one
# flag to True to enable it.
method1 = False
if method1:
    import os
    # Ask TF to grow GPU allocations on demand instead of reserving everything.
    os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
    tf.compat.v1.reset_default_graph()
    # FIX: the original line `gpu_options.allow_growth = True` referenced a name
    # that was never defined and raised a NameError whenever this branch was
    # enabled; the env var above already requests growable memory, and hiding
    # the GPU below makes it moot anyway.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#
method2 = False
if method2:
    # Cap the TF1-style session at 70% of GPU memory.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    tf.compat.v1.keras.backend.set_session(
        tf.compat.v1.Session(config=config))
#
method3 = False
if method3:
    # TF2-native approach: enable memory growth on every visible GPU.
    physical_devices = tf.config.list_physical_devices('GPU')
    for device in physical_devices:
        tf.config.experimental.set_memory_growth(device, True)
```
## Loading Model
```
# Start from a clean graph, then load the trained gesture model from disk.
tf.compat.v1.reset_default_graph()
model = models.load_model('./TrainedModel/')
```
## Predicting
```
# initialize weight for running average
aWeight = 0.5
# region of interest (ROI) coordinates
# NOTE: these are pixel coordinates in the resized (width=700) frame.
top, right, bottom, left = 10, 350, 225, 590
# initialize num of frames
num_frames = 0
# Recording only starts after the user presses 's' in the capture loop.
start_recording = False
# get the reference to the webcam
camera = cv2.VideoCapture(0)
```
Press `s` to start recording, and press `q` to quit.
Please wait for some time till black background is formed.
In case you get camera not found error, intialize the camera again.
```
# Main capture loop: the first 30 frames calibrate the background model, after
# which each frame's ROI is segmented and (once recording starts) classified.
# keep looping, until interrupted
while(True):
    # get the current frame
    grabbed, frame = camera.read()
    if grabbed:
        # resize the frame
        frame = imutils.resize(frame, width=700)
        # flip the frame so that it is not the mirror view
        frame = cv2.flip(frame, 1)
        # clone the frame
        clone = frame.copy()
        # get the ROI
        roi = frame[top:bottom, right:left]
        # convert the roi to grayscale and blur it
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        # to get the background, keep looking till a threshold is reached
        # so that our running average model gets calibrated
        if num_frames < 30:
            run_avg(gray, aWeight)
        else:
            # segment the hand region
            hand = segment(gray)
            # check whether hand region is segmented
            if hand is not None:
                # if yes, unpack the thresholded image and
                # segmented region
                (thresholded, segmented) = hand
                # draw the segmented region and display the frame
                # (contour is offset back into full-frame coordinates)
                cv2.drawContours(
                    clone, [segmented + (right, top)], -1, (0, 0, 255))
                if start_recording:
                    # Persist the mask, shrink it, and classify it.
                    cv2.imwrite(temp_image, thresholded)
                    resizeImage(temp_image)
                    # predictedClass, confidence = getPredictedClass()
                    prediction = getPredictedClass()
                    showStatistics(prediction)
                # NOTE(review): window title is misspelled ("Thesholded") but is
                # a runtime identifier — renaming would create a second window.
                cv2.imshow("Thesholded", thresholded)
        # draw the segmented hand
        cv2.rectangle(clone, (left, top), (right, bottom), (0, 255, 0), 2)
        # increment the number of frames
        num_frames += 1
        # display the frame with segmented hand
        cv2.imshow("Video Feed", clone)
        # observe the keypress by the user
        keypress = cv2.waitKey(1) & 0xFF
        # if the user pressed "q", then stop looping
        if keypress == ord("q"):
            break
        if keypress == ord("s"):
            start_recording = True
    else:
        print("Error, Please check your camera")
        print(camera)
        break
# relaease the resources
camera.release()
cv2.destroyAllWindows()
# remove temporary image file
import os
os.remove(temp_image)
```
| github_jupyter |
# Import libraries needed to plot data
```
import math
import numpy as np
import pandas as pd
import scipy.special
from bokeh.layouts import gridplot
from bokeh.io import show, output_notebook, save, output_file
from bokeh.plotting import figure
from bokeh.models import BoxAnnotation, HoverTool, ColumnDataSource, NumeralTickFormatter
from scipy.stats import lognorm, norm
```
Set plots to ouput in notebook instead of as a new tab in the browser, comment out or delete if you want the output as a new browser tab
```
# Bokeh output to notebook setting
output_notebook()
```
# Create main functions used to plot the different outputs, CHANGE AT YOUR OWN RISK
```
# Find P10, P50, and P90
def find_nearest(array, value):
    """Return a ``(element, index)`` pair for the entry of *array* nearest to *value*."""
    arr = np.asarray(array)
    # Index of the smallest absolute distance to the target value.
    idx = np.abs(arr - value).argmin()
    return (arr[idx], idx)
def make_plot_cdf(title, hist, edges, x, pdf, cdf, x_label):
    """Plot the data histogram with the fitted log-normal CDF overlaid (log-scale x axis)."""
    p = figure(title=title, background_fill_color="#fafafa", x_axis_type='log')
    # Histogram bars from np.histogram output (hist heights, bin edges).
    p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
           fill_color="navy", line_color="white", alpha=0.5)
    # NOTE(review): the `legend=` keyword was deprecated/removed in newer Bokeh
    # releases in favour of `legend_label=` — confirm against the pinned version.
    p.line(x, cdf, line_color="orange", line_width=2, alpha=0.7, legend="CDF")
    p.x_range.start = 1
    p.y_range.start = 0
    p.legend.location = "center_right"
    p.legend.background_fill_color = "#fefefe"
    p.xaxis.axis_label = x_label
    p.yaxis.axis_label = 'Pr(x)'
    p.grid.grid_line_color = "white"
    # Plain (non-scientific), comma-grouped tick labels.
    p.left[0].formatter.use_scientific = False
    p.xaxis[0].formatter = NumeralTickFormatter(format="0,0")
    return p
def make_plot_probit(title, input_data, x_label):
    '''Creates Probit plot for EUR and data that has a log-normal distribution.
    '''
    # Calculate log-normal distribtion for input data
    # (floc=0 pins the location parameter; scale = exp(mu)).
    sigma, floc, scale = lognorm.fit(input_data, floc=0)
    mu = math.log(scale)
    x = np.linspace(0.001, np.max(input_data) + np.mean(input_data), 1000)
    pdf = 1/(x * sigma * np.sqrt(2*np.pi)) * \
        np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))
    cdf = (1+scipy.special.erf((np.log(x)-mu)/(np.sqrt(2)*sigma)))/2
    p = figure(title=title, background_fill_color="#fafafa", x_axis_type='log')
    # Prepare input data for plot
    input_data_log = np.log(input_data)
    # Get percentile of each point by getting rank/len(data)
    input_data_log_sorted = np.argsort(input_data_log)
    ranks = np.empty_like(input_data_log_sorted)
    ranks[input_data_log_sorted] = np.arange(len(input_data_log))
    # Add 1 to length of data because norm._ppf(1) is infinite, which will occur for highest ranked value
    input_data_log_perc = [(x + 1)/(len(input_data_log_sorted) + 1)
                           for x in ranks]
    # Map each empirical percentile to its z-score (probit transform).
    input_data_y_values = norm._ppf(input_data_log_perc)
    # Prepare fitted line for plot
    x_y_values = norm._ppf(cdf)
    # Values to display on y axis instead of z values from ppf
    y_axis = [1 - x for x in cdf]
    # Plot input data values
    # NOTE(review): `legend=` keyword is version-sensitive in Bokeh (newer
    # releases expect `legend_label=`) — confirm against the pinned version.
    p.scatter(input_data, input_data_y_values, size=15,
              line_color="navy", legend="Input Data", marker='circle_cross')
    p.line(x, x_y_values, line_width=3, line_color="red", legend="Best Fit")
    # calculate P90, P50, P10
    # (petroleum convention: P90 = value exceeded 90% of the time, i.e. CDF 0.1)
    p10_param = find_nearest(cdf, 0.9)
    p10 = round(x[p10_param[1]])
    p50_param = find_nearest(cdf, 0.5)
    p50 = round(x[p50_param[1]])
    p90_param = find_nearest(cdf, 0.1)
    p90 = round(x[p90_param[1]])
    # Add P90, P50, P10 markers
    p.scatter(p90, norm._ppf(0.10), size=15, line_color="black",
              fill_color='darkred', legend=f"P90 = {int(p90)}", marker='square_x')
    p.scatter(p50, norm._ppf(0.50), size=15, line_color="black",
              fill_color='blue', legend=f"P50 = {int(p50)}", marker='square_x')
    p.scatter(p10, norm._ppf(0.90), size=15, line_color="black",
              fill_color='red', legend=f"P10 = {int(p10)}", marker='square_x')
    # Add P90, P50, P10 segments
    # p.segment(1, norm._ppf(0.10), np.max(x), norm._ppf(0.10), line_dash='dashed', line_width=2, line_color='black', legend="P90")
    # p.segment(1, norm._ppf(0.50), np.max(x), norm._ppf(0.50), line_dash='dashed', line_width=2, line_color='black', legend="P50")
    # p.segment(1, norm._ppf(0.90), np.max(x), norm._ppf(0.90), line_dash='dashed', line_width=2, line_color='black', legend="P10")
    p.segment(p90, -4, p90, np.max(x_y_values), line_dash='dashed',
              line_width=2, line_color='darkred', legend=f"P90 = {int(p90)}")
    p.segment(p50, -4, p50, np.max(x_y_values), line_dash='dashed',
              line_width=2, line_color='blue', legend=f"P50 = {int(p50)}")
    p.segment(p10, -4, p10, np.max(x_y_values), line_dash='dashed',
              line_width=2, line_color='red', legend=f"P10 = {int(p10)}")
    # Find min for x axis
    x_min = int(np.log10(np.min(input_data)))
    power_of_10 = 10**(x_min)
    # Plot Styling
    p.x_range.start = power_of_10
    p.y_range.start = -3
    p.legend.location = "top_left"
    p.legend.background_fill_color = "#fefefe"
    p.xaxis.axis_label = x_label
    p.yaxis.axis_label = 'Z'
    p.left[0].formatter.use_scientific = False
    p.xaxis[0].formatter = NumeralTickFormatter(format="0,0")
    p.yaxis.visible = False
    p.title.text = title
    p.title.align = 'center'
    p.legend.click_policy = "hide"
    return p
def make_plot_pdf(title, hist, edges, x, pdf, x_label):
    """Plot the data histogram with the fitted PDF and shaded P90/P50/P10 bands.

    NOTE(review): unlike make_plot_cdf, this function reads the module-level
    global ``cdf`` to locate the percentiles instead of taking it as a
    parameter — it only works after the notebook cell defining ``cdf`` has run.
    """
    source = ColumnDataSource(data = {
        'x' : x,
        'pdf': pdf,
    })
    p = figure(background_fill_color="#fafafa", )
    p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
           fill_color="navy", line_color="white", alpha=0.5)
    p.line('x', 'pdf', line_color="black", line_width=4, alpha=0.8, legend="PDF",
           hover_alpha=0.4, hover_line_color="black", source=source)
    # calculate P90, P50, P10
    # (`cdf` here is the module-level global — see docstring note above)
    p10_param = find_nearest(cdf, 0.9)
    p10 = round(x[p10_param[1]])
    p50_param = find_nearest(cdf, 0.5)
    p50 = round(x[p50_param[1]])
    p90_param = find_nearest(cdf, 0.1)
    p90 = round(x[p90_param[1]])
    p.line((p90, p90), [0, np.max(pdf)],
           line_color='darkred', line_width=3, legend=f"P90 = {int(p90)}")
    p.line((p50, p50), [0, np.max(pdf)],
           line_color='blue', line_width=3, legend=f"P50 = {int(p50)}")
    p.line((p10, p10), [0, np.max(pdf)],
           line_color='red', line_width=3, legend=f"P10 = {int(p10)}")
    # Shade the P90-P50, P50-P10 and P10+ regions.
    lower = BoxAnnotation(left=p90, right=p50,
                          fill_alpha=0.1, fill_color='darkred')
    middle = BoxAnnotation(left=p50, right=p10,
                           fill_alpha=0.1, fill_color='blue')
    upper = BoxAnnotation(
        left=p10, right=x[-1], fill_alpha=0.1, fill_color='darkred')
    # Hover Tool
    p.add_tools(HoverTool(
        tooltips=[
            ( x_label, '@x{f}' ),
            ( 'Probability', '@pdf{%0.6Ff}' ), # use @{ } for field names with spaces
        ]))
    # Plot Styling
    p.add_layout(lower)
    p.add_layout(middle)
    p.add_layout(upper)
    p.y_range.start = 0
    p.x_range.start = 0
    p.legend.location = "center_right"
    p.legend.background_fill_color = "#fefefe"
    p.xaxis.axis_label = x_label
    p.yaxis.axis_label = 'Pr(x)'
    p.grid.grid_line_color = "white"
    p.left[0].formatter.use_scientific = False
    p.xaxis[0].formatter = NumeralTickFormatter(format="0,0")
    p.title.text = title
    p.title.align = 'center'
    return p
```
## The data you want to analyze needs to be set equal to the **input_data** variable below. This example uses the dataset supplied in the /Test_Data tab. The input data can be a list, numpy array, pandas series, or DataFrame Column.
```
data = pd.read_csv(
"https://raw.githubusercontent.com/mwentzWW/petrolpy/master/petrolpy/Test_Data/EUR_Data.csv")
data
input_data = data["CUM_MBO"]
```
The **input_data** is fit to a log normal model
```
# lognorm.fit returns (shape, floc, scale)
# shape is sigma or the standard deviation, scale = exp(median)
# floc=0 pins the location parameter so the fit is a two-parameter log-normal.
sigma, floc, scale = lognorm.fit(input_data, floc=0)
mu = math.log(scale)
```
The model parameters are used to construct the histogram, probability density function (pdf) and cumulative density function (cdf)
```
# Density-normalised histogram of the raw data.
hist, edges = np.histogram(input_data, density=True, bins='auto')
# Evaluation grid from near zero past the data maximum.
x = np.linspace(0.001, np.max(input_data) + np.mean(input_data), 1000)
# Log-normal PDF and CDF evaluated analytically from the fitted (mu, sigma).
pdf = 1/(x * sigma * np.sqrt(2*np.pi)) * \
    np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((np.log(x)-mu)/(np.sqrt(2)*sigma)))/2
# Mean of a log-normal: exp(mu + sigma^2 / 2).
mean = np.exp(mu + 0.5*(sigma**2))
```
Now we create one of each plot, for basic use the only thing you will want to change is the label argument. Replace 'Cum MBO' with whatever label you want for your data.
```
# Build the three figures; the title embeds n, the fitted mean and sigma.
plot_cdf = make_plot_cdf("Log Normal Distribution (n = {}, mean = {}, σ = {})".format(round(len(
    input_data), 2), int(mean), round(sigma, 2)), hist, edges, x, pdf, cdf, 'Cum MBO')
plot_pdf = make_plot_pdf("Log Normal Distribution (n = {}, mean = {}, σ = {})".format(round(
    len(input_data), 2), int(mean), round(sigma, 2)), hist, edges, x, pdf, 'Cum MBO')
plot_dist = make_plot_probit("Log Normal Distribution (n = {}, mean = {}, σ = {})".format(
    round(len(input_data), 2), int(mean), round(sigma, 2)), input_data, 'Cum MBO')
show(plot_cdf)
```
# The show function will return the plot generated. If you want to save the output as an html file, remove the # from the lines below.
```
#output_file("plot_pdf.html")
#save(plot_pdf)
show(plot_pdf)
#output_file("plot_dist.html")
#save(plot_dist)
show(plot_dist)
```
Below are examples of how to calculate the value of each percentile in the cdf. The P50, P10, and P90 are calculated below.
```
# P50 value
# Percentiles follow the petroleum convention: P10 is the optimistic (high)
# case, so it sits at CDF = 0.9; P90 is the conservative case at CDF = 0.1.
p50_param = find_nearest(cdf, 0.5)
p50_value = round(x[p50_param[1]])
p50_value
# P10 value, only 10% of values will have this value or more
p10_param = find_nearest(cdf, 0.9)
p10_value = round(x[p10_param[1]])
p10_value
# P90 value, 90% of values will have this value or more
p90_param = find_nearest(cdf, 0.1)
p90_value = round(x[p90_param[1]])
p90_value
```
| github_jupyter |
# TreeDLib
```
%load_ext autoreload
%autoreload 2
%load_ext sql
#from treedlib import *
# Note: reloading for submodules doesn't work, so we load directly here
from treedlib.util import *
from treedlib.structs import *
from treedlib.templates import *
from treedlib.features import *
import lxml.etree as et
import numpy as np
```
We define three classes of operators:
* _NodeSets:_ $S : 2^T \mapsto 2^T$
* _Indicators:_ $I : 2^T \mapsto \{0,1\}^F$
* _Combinators:_ $C : \{0,1\}^F \times \{0,1\}^F \mapsto \{0,1\}^F$
where $T$ is a given input tree, and $F$ is the dimension of the feature space.
## Binning
```
%sql postgresql://ajratner@localhost:6432/genomics_ajratner2
res_seq = %sql SELECT * FROM genepheno_features WHERE feature LIKE '%SEQ%'
res_dep = %sql SELECT * FROM genepheno_features WHERE feature NOT LIKE '%SEQ%'
%matplotlib inline
import matplotlib.pyplot as plt
seq_lens = [len(rs.feature.split('_')) for rs in res_seq]
n, bins, patches = plt.hist(seq_lens, 50, normed=1, facecolor='green', alpha=0.75)
print([np.percentile(seq_lens, p) for p in [25,50,75]])
n, bins, patches = plt.hist(dep_lens, 50, normed=1, facecolor='green', alpha=0.75)
dep_lens = [len(rs.feature.split('_')) for rs in res_dep]
print([np.percentile(dep_lens, p) for p in [25,50,75]])
```
## Adding new feature types...
```
# NOTE(review): `ds` appears unused in this cell — presumably consumed by a
# dictionary-based feature template elsewhere; verify before removing
ds = {'GENE': ['TFB1M']}
# Build the relation feature generator and print every feature it emits for
# the gene/phenotype index pair on the current tree (xt, gidxs, pidxs are
# defined in an earlier cell)
gen_feats = compile_relation_feature_generator()
for f in gen_feats(xt.root, gidxs, pidxs):
print(f)
```
# Genomics Debugging Pipeline
* Fix this!
* _Features to add:_
* modifiers of VBs in between
* candidates in between?
* Better way to do siblings, when siblings have children...?
* LeftAll / RightAll
* Also, get unigrams, etc.
* **Use wildcard, e.g. "ABC now causes" --> WORD:LEFT-OF-MENTION[?_causes]**?
* Modifiers before e.g. "We investigated whether..." / NEGATIONS (see Johannes's email / list)
* Handle negation words explicitly?
```
from random import shuffle
RESULTS_ROOT = '/lfs/raiders7/hdd/ajratner/dd-genomics/alex-results/'

def get_exs(header, rel_path, root=RESULTS_ROOT):
    """Extract the relation ids listed under `header` in a stats file.

    Stats files contain sections introduced by an exact header line (e.g.
    'False Positives:'), followed by one relation id per line, terminated by
    a blank line (or end of file).  Ids are re-capitalized token-by-token —
    the first character of each '_'-separated token is upper-cased — to match
    the database's id convention.

    Args:
        header: exact header line marking the start of the wanted section.
        rel_path: path of the stats file, relative to `root`.
        root: directory prefix prepended to rel_path (default RESULTS_ROOT).

    Returns:
        List of re-capitalized relation id strings (empty if header absent).
    """
    rids = []
    in_section = False
    # Text mode (was 'rb'): the str comparisons below would fail on bytes
    # under Python 3
    with open(root + rel_path, 'r') as f:
        for line in f:
            stripped = line.strip()
            if in_section and len(stripped) == 0:
                break  # blank line terminates the section
            elif in_section:
                # Guard against empty tokens (e.g. consecutive underscores),
                # which would raise IndexError on t[0]
                tokens = [(t[0].upper() + t[1:]) if t else t
                          for t in stripped.split('_')]
                rids.append('_'.join(tokens))
            elif stripped == header:
                in_section = True
    return rids
# Pull the FP/FN relation ids from this round's stats file
false_pos = get_exs('False Positives:', '02-01-16/stats_causation_1500.tsv')
false_negs = get_exs('False Negatives:', '02-01-16/stats_causation_1500.tsv')
# Either sample a random error example (commented out) or pin a specific id
#shuffle(false_pos)
#shuffle(false_negs)
#relation_id = false_negs[12]
#print(relation_id)
#relation_id = '20396601_Body.0_287_0_20396601_Body.0_287_25'
relation_id = '18697824_Abstract.0_1_24_18697824_Abstract.0_1_6'
# Connect to correct db
%sql postgresql://ajratner@localhost:6432/genomics_ajratner
# Getting the component IDs
# NOTE(review): `id` shadows the builtin of the same name (left as-is here)
id = relation_id.split('_')
doc_id = id[0]
# Re-capitalize the section id (e.g. 'abstract.0' -> 'Abstract.0')
section_id = id[1][0].upper() + id[1][1:]
sent_id = int(id[2])
# Gene / phenotype word indices: the '-'-separated tail of each half of the id
gidxs = map(int, relation_id.split(doc_id)[1].strip('_').split('_')[-1].split('-'))
pidxs = map(int, relation_id.split(doc_id)[2].strip('_').split('_')[-1].split('-'))
cids = [gidxs, pidxs]
# Get sentence from db + convert to XMLTree
res = %sql SELECT words, lemmas, poses, ners, dep_paths AS "dep_labels", dep_parents FROM sentences_input WHERE doc_id = :doc_id AND section_id = :section_id AND sent_id = :sent_id;
# Each column is stored '|^|'-delimited; split into per-token lists
rows = [dict((k, v.split('|^|')) for k,v in dict(row).items()) for row in res]
# NOTE(review): xts[0] on a map() result implies Python 2 (map returns a list)
xts = map(corenlp_to_xmltree, rows)
xt = xts[0]
# Show XMLTree
xt.render_tree(highlight=[gidxs, pidxs])
# Print TreeDLib features
#print_gen(get_relation_features(xt.root, gidxs, pidxs))
RightNgrams(RightSiblings(Mention(0)), 'lemma').print_apply(xt.root, cids)
# NOTE(review): `seen` must be defined in an earlier cell — not visible here
seen.add("blah")
"blah" in seen
# Dictionary substitution from Brown clusters, applied before ngram features
dict_sub = compile_dict_sub(brown_clusters_path="clusters_VB_NN.lemma.tsv")
Ngrams(Between(Mention(0), Mention(1)), 'word', 2).print_apply(xt.root, cids, dict_sub=dict_sub)
# Direct xpath over the rendered dependency tree
xt.root.xpath("//*[@dep_label='dobj']/@word")
Indicator(Between(Mention(0), Mention(1)), 'dep_label').print_apply(xt.root, cids)
Ngrams(Between(Mention(0), Mention(1)), 'word', 2).print_apply(xt.root, cids)
# Manual dictionary substitution: collapse synonym sets to canonical tags
dict_sub = compile_dict_sub([
('FOUND', set(['found', 'identified', 'discovered'])),
('CAUSES', set(['causes']))
])
Ngrams(Between(Mention(0), Mention(1)), 'word', 2).print_apply(xt.root, cids, dict_sub=dict_sub)
# Unigrams of children of VB-tagged nodes on the between-mentions path
# (run twice in the original notebook; preserved as-is)
Ngrams(Children(Filter(Between(Mention(0), Mention(1)), 'pos', 'VB')), 'word', 1).print_apply(xt.root, cids)
Ngrams(Children(Filter(Between(Mention(0), Mention(1)), 'pos', 'VB')), 'word', 1).print_apply(xt.root, cids)
```
## Error analysis round 4
### False negatives:
* [0] `24065538_Abstract.0_2_8_24065538_Abstract.0_2_14`:
* **Should this be association instead?**
* "... have been found... however studies of the association between ... and OSA risk have reported inconsistent findings"
* [1] `8844207_Abstract.0_5_6_8844207_Abstract.0_5_1`:
* **"known {{G}} mutations"**
* [2] `24993959_Abstract.1_3_36_24993959_Abstract.1_3_46`:
* `UnicodeDecodeError`!
* [3] `22653594_Abstract.0_1_5_22653594_Abstract.0_1_25-26-27`:
* **Incorrectly labeled**
* [4] `21282350_Abstract.0_1_13_21282350_Abstract.0_1_20`:
* `UnicodeDecodeError`!
* [5] `11461952_Abstract.0_10_8_11461952_Abstract.0_10_15-16`:
* "This study demonstrates that ... can be responsible for ..."
* "{{G}} responsible for {{P}}"
* [6] `25110572_Body.0_103_42_25110572_Body.0_103_18-19`:
* **Incorrectly labeled??**
* [7] `22848613_Body.0_191_7_22848613_Body.0_191_15`:
* **Incorrectly labeled??**
* [8] `19016241_Abstract.0_2_29_19016241_Abstract.0_2_34-35`:
* **Incorrectly labeled??**
* "weakly penetrant"
* [9] `19877056_Abstract.0_2_37_19877056_Abstract.0_2_7`:
* **"{{P}} attributable to {{G}}"**
* [10] `11079449_Abstract.0_5_48_11079449_Abstract.0_5_41`:
* **_Tough example: ref to a list of pairs!_**
* [11] `11667976_Body.0_6_31_11667976_Body.0_6_34-35`:
* **Is this correctly labeled...?**
* [12] `11353725_Abstract.0_7_13_11353725_Abstract.0_7_9`:
* **Is this correctly labeled...?**
* [13] `20499351_Body.0_120_6_20499351_Body.0_120_10-11-12`:
* "Patients homozygous for {{g}} mutation had"
* "had" on path between
* [14] `10511432_Abstract.0_1_12_10511432_Abstract.0_1_23`:
* **Incorrectly labeled...??**
* [15] `17033686_Abstract.0_4_4_17033686_Abstract.0_4_12`:
* "missense mutation in {{G}} was described in a family with {{P}}"
* **_Incorrectly labeled...?_**
* [16] `23288328_Body.0_179_20_23288328_Body.0_179_24-25`:
* **{{G}} - related {{P}}**
* [17] `21203343_Body.0_127_4_21203343_Body.0_127_19`:
* "have been reported in"- **Incorrectly labeled?**
* [18] `9832037_Abstract.0_2_13_9832037_Abstract.0_2_26-27-28`:
* "{{G}} sympotms include {{P}}", "include"
* [19] `18791638_Body.0_8_6_18791638_Body.0_8_0`:
* "{{P}} results from {{G}}"
```
%%sql
-- Get the features + weights for an example
-- (weight descriptions are prefixed with the inference rule name)
SELECT f.feature, w.weight
FROM
genepheno_features f,
dd_inference_result_variables_mapped_weights w
WHERE
f.relation_id = :relation_id
AND w.description = 'inf_istrue_genepheno_causation_inference--' || f.feature
ORDER BY w.weight DESC;
# `_` is IPython's last-cell output, i.e. the result set of the query above
res = _
# Total learned weight over this example's active features
sum(r[1] for r in res)
# Marginal probability the inference engine assigned to this relation
%sql SELECT expectation FROM genepheno_causation_inference_label_inference WHERE relation_id = :relation_id;
```
## Error analysis round 3
### False Positives:
* [0] `18478198_Abstract.0_2_29_18478198_Abstract.0_2_11-12`:
* "our aim was to establish whether"
* [1] `17508172_Abstract.0_4_21_17508172_Abstract.0_4_32`:
* "role"
* "sodium ion channel"
* [2] `19561293_Abstract.0_3_7_19561293_Abstract.0_3_10-11`:
* "are currently unknown"
* [3] `19956409_Abstract.0_1_8_19956409_Abstract.0_1_21`:
* r'^To evaluate'
* "the possible role"
* [4] `19714249_Body.0_130_10_19714249_Body.0_130_18`:
* '^Although"
* "potential role"
* "needs to be replicated"
* "suggests", "possible", "role"
* [5] `16297188_Title.0_1_5_16297188_Title.0_1_14`:
* "role"
* **Incorrectly supervised...?**
* [6] `24412566_Body.0_70_72_24412566_Body.0_70_6`:
* **_Long one with other genes in between..._**
* [7] `16837472_Abstract.3_1_19_16837472_Abstract.3_1_10`:
* "needs to be further studied"
* "associated"
* [8] `14966353_Abstract.0_1_41_14966353_Abstract.0_1_5`:
* `UnicodeError`!
* [9] `15547491_Abstract.0_1_23_15547491_Abstract.0_1_7-8-9-10`:
* r'^To analyze'
## Error analysis round 2
With new DSR code:
### False Positives
* [0] `17183713_Body.0_111_12_17183713_Body.0_111_25`:
* **"unlikely" on path between**
* [1] `19561293_Abstract.0_3_7_19561293_Abstract.0_3_10-11`:
* _"are unknown"- not on dep path between..._
* **Labeling error- doesn't this imply that there is a causal relation??**
* [2] `17167409_Abstract.3_2_5_17167409_Abstract.3_2_13`:
* **"is _not_ a common cause of..." - NEG modifying primary VB on path between!!!**
* [3] `18538017_Body.0_12_5_18538017_Body.0_12_17`:
* **Labeling error!? (marked because only partial P...?)**
* [4] `20437121_Abstract.0_1_30_20437121_Abstract.0_1_15`:
* "to determine" - in phrase between
* [5] `10435725_Abstract.0_1_14_10435725_Abstract.0_1_20`:
* "in mice" - off the main VB
* [6] `23525542_Abstract.0_7_12_23525542_Abstract.0_7_24`:
* **is _not_ due to..."- NEG modifying primary VB on path between!!!**
* [7] `19995275_Abstract.0_1_2_19995275_Abstract.0_1_18`:
* "has been implicated... in various studies with conflicting results"
### False Negatives
* [0] `23874215_Body.0_172_3_23874215_Body.0_172_23-24-25-26`:
* "role", "detected" - dep path between
* [1] `17507029_Abstract.0_2_13_17507029_Abstract.0_2_6-7-8-9-10`:
* "caused by" but also "association"... should do dep path in between...?
* _a tough one..._
* [2] `15219231_Body.0_121_8_15219231_Body.0_121_35`:
* **Incorrect label**
* [3] `25110572_Body.0_103_42_25110572_Body.0_103_18-19`:
* **Incorrect label- should be association?**
* [4] `17909190_Abstract.0_3_16_17909190_Abstract.0_3_25`:
* **Incorrectly labeled...?**
* [5] `22803640_Abstract.0_3_14_22803640_Abstract.0_3_24-25`:
* **Incorrectly labeled- should be association?**
* [6] `11170071_Abstract.0_1_3_11170071_Abstract.0_1_21`:
* **Incorrectly labeled- wrong mention**
* [7] `10511432_Abstract.0_1_12_10511432_Abstract.0_1_23`:
* "A variety of mutations have been detected in patients with..."- should this be association?
* [8] `10797440_Abstract.0_3_16_10797440_Abstract.0_3_3`:
* _This one seems like should be straight-forwards..._
* **{{P}} are due to {{G}}**
* [9] `23275784_Body.0_82_29_23275784_Body.0_82_13`:
* _This one seems like should be straight-forwards..._
* **{{P}} result of / due to mutations in {{G}}**
```
# Filler
```
### To investigate:
1. Correlation with length of sentence? - **_No._**
2. Low-MI words like '\_', 'the', 'gene'?
3. _[tdl] Include sequence patterns too?_
### FNs / recall analysis notes
* `10982191_Title.0_1_8_10982191_Title.0_1_21-22-23`:
* Shorter sentence
* neg. weight from "gene" in between... is this just super common?
* `19353431_Abstract.0_2_12_19353431_Abstract.0_2_1`:
* Shorter sentence
* neg. weight from "gene" in between... is this just super common?
* `23285148_Body.0_4_32_23285148_Body.0_4_3`:
* **Incorrectly labeled: should be false**
* `23316347_Body.0_202_25_23316347_Body.0_202_54`:
* _Longer sentence..._
* **BUG: Missing a left-of-mention (G: "mutation")!**
* neg. weight from "\_" in between
* **BUG: left-of-mention[delay] happens twice!**
* A lot of negative weight from "result"...?
* `21304894_Body.0_110_4_21304894_Body.0_110_9-10-11`:
* Shorter sentence
* A lot of negative weight from "result"...?
* **Is this just from a low-quality DSR?**
* Duplicated features again!
* `21776272_Body.0_60_46_21776272_Body.0_60_39-40`:
* Longer sentence
* A slightly tougher example: an inherited disorder ... with mutations in gene...
* neg. weight from "gene" in between... is this just super common?
* `19220582_Abstract.0_2_20_19220582_Abstract.0_2_5`:
* 'We identified a mutation in a family with...' - should this be a positive example??
* neg. weight from "gene" in between... is this just super common?
* neg. weight from "identify" and "affect"...?
* **'c. mutation' - mutation doesn't get picked up as it's a child off the path...**
* `23456818_Body.0_148_9_23456818_Body.0_148_21-22`:
* `LEMMA:PARENTS-OF-BETWEEN-MENTION-and-MENTION[determine]` has huge negative weight
* gene, patient, distribution, etc. - neg weight
* negative impact from `PARENTS OF`...
* `20429427_Abstract.0_1_2_20429427_Abstract.0_1_14`:
* **Key word like "mutation" is off main path... ("responsible -> mutation -> whose")**
* **STOPWORDS: "the"**
* **BUG: dep_path labels are all None...**, **BUG: left-siblings doubled**
* `21031598_Body.0_24_25_21031598_Body.0_24_9`:
* Need a feature like `direct parent of mention`
* NEG: 'site', 'gene'
* `INV_`
* `22670894_Title.0_1_16_22670894_Title.0_1_7-8`:
* NEG: 'the', 'gene', 'locus'
* **'due to' just dropped from the dep tree!**
* `22887726_Abstract.0_5_33_22887726_Abstract.0_5_54-55`:
* **Incorrectly labeled for causation?**
* `19641605_Abstract.0_3_14_19641605_Abstract.0_3_22`:
* This one has "cause", exp = 0.89, seems like dead match...
* **BUG: doubles of stuff!!!!!**
* `23879989_Abstract.0_1_3_23879989_Abstract.0_1_12-13`:
* This one has "cause", exp = 0.87, seems like dead match...
* **BUG: doubles of stuff!!!!!**
* `LEMMA:FILTER-BY(pos=NN):BETWEEN-MENTION-and-MENTION[_]`
* 'distinct', 'mutation _ cause'...
* **_Why does '\_' have such negative weight??_**
* `21850180_Body.0_62_14_21850180_Body.0_62_26-27`:
* This one again seems like should be a dead match...
* **BUG: Double of word "three"!**
* Key word "responsible" not included...?
* NEG: 'identify', 'i.e.', '_ _ _'
* `20683840_Abstract.0_4_12_20683840_Abstract.0_4_33`:
* UnicodeError!
* `17495019_Title.0_1_5_17495019_Title.0_1_18`:
* **Incorrectly labeled for causation?**
* _Why is '% patients' positive...?_
* `18283249_Abstract.0_3_2_18283249_Abstract.0_3_16-17-18`:
* **'are one of the factors' - is this correctly labeled for causation?**
* `21203343_Body.0_10_3_21203343_Body.0_10_20`:
* **'are described in...' - this at least seems on the border of "causation"**
* expectation 0.85
* **BUG: doubles**
* NEG: `_`
* `24312213_Body.0_110_66_24312213_Body.0_110_73`:
* **Interesting example of isolated subtree which should be direct match!**
* Expectation 0.42???
* NEG: 'mutation result', `_`, 'result', 'influence'
### Final tally:
* 55%: Negative weight from features that seem like they should be stop words
* 25%: Incorrectly labeled or on the border
* 40%: Bug of some sort in TreeDLib
* 30%: Features that seem surprisingly weighted - due to low-quality DSRs?
## TODO:
1. Fix bugs in treedlib - DONE
2. Filter "stopwords" i.e. low-Chi-squared features - DONE
3. Add manual weights to DSRs in `config.py`
## Testing the low-Chi-squared hypothesis
```
# Size of the feature table / number of distinct features
%sql SELECT COUNT(*) FROM genepheno_features;
%sql SELECT COUNT(DISTINCT(feature)) FROM genepheno_features;
%%sql
SELECT
gc.is_correct, COUNT(*)
FROM
genepheno_causation gc,
genepheno_features gf
WHERE
gc.relation_id = gf.relation_id
AND gf.feature LIKE '%the%'
GROUP BY
gc.is_correct;
%sql SELECT is_correct, COUNT(*) FROM genepheno_causation GROUP BY is_correct;
# Base rates of true/false labels (counts hard-coded from the query above)
P_T = 40022.0/(116608.0+40022.0)
P_F = 116608.0/(116608.0+40022.0)
print(P_T)
print(P_F)
from collections import defaultdict
# feats[feature] = [true_count, false_count]
feats = defaultdict(lambda : [0,0])
# NOTE(review): 'rb' + str operations implies Python 2; use 'r' under Python 3
with open('/lfs/raiders7/hdd/ajratner/dd-genomics/alex-results/chi-sq/chi-sq-gp.tsv', 'rb') as f:
for line in f:
feat, label, count = line.split('\t')
b = 0 if label == 't' else 1
feats[feat][b] = int(count)
# Spot-check one feature's counts
feats['INV_DEP_LABEL:BETWEEN-MENTION-and-MENTION[nsubj_vmod_prepc_by]']
# NOTE(review): this is a squared deviation of per-feature class proportions
# from the base rates — not a textbook chi-squared statistic
chi_sqs = []
for feat, counts in feats.items():
total = float(counts[0] + counts[1])
chi_sqs.append([
(P_T-(counts[0]/total))**2 + (P_F-(counts[1]/total))**2,
feat
])
chi_sqs.sort()
# Dump (feature, score) pairs sorted ascending, i.e. least informative first
with open('/lfs/raiders7/hdd/ajratner/dd-genomics/alex-results/chi-sq/chi-sq-gp-computed.tsv', 'wb') as f:
for x in chi_sqs:
f.write('\t'.join(map(str, x[::-1]))+'\n')
len(chi_sqs)
chi_sqs[500000]
# Inspect 'the'-containing features among the low scorers
# NOTE(review): len()/slicing on a filter() result implies Python 2
thes = filter(lambda x : 'the' in x[1], chi_sqs)
len(thes)
thes[:100]
```
## Testing the length-bias hypothesis
Is there a bias towards longer sentences (because of more high-weight keywords)?
```
# rows[i] = [expectation, sentence length in tokens]
rows = []
# NOTE(review): 'rb' + str operations implies Python 2; use 'r' under Python 3
with open('/lfs/raiders7/hdd/ajratner/dd-genomics/alex-results/test-len-corr/all_rel_sents.tsv', 'rb') as f:
for line in f:
r = line.rstrip().split('\t')
rows.append([float(r[1]), len(r[2].split('|^|'))])
print(len(rows))
from scipy.stats import pearsonr
# Correlation between expectation and length, restricted to confident rows
exps, lens = zip(*filter(lambda r : r[0] > 0.7, rows))
pearsonr(exps, lens)
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
# Scatter a 1000-row sample (random.sample over filter() implies Python 2)
exps, lens = zip(*random.sample(filter(lambda r : r[0] > 0.5, rows), 1000))
plt.scatter(lens, exps)
```
## Debugging pipeline
We'll debug here, also to show the general most current procedure for debugging treedlib on examples in a SQL database (e.g. from DeepDive)
```
# Connect to the DeepDive spouse-example DB instead of the genomics DB
%sql postgresql://ajratner@localhost:5432/deepdive_spouse
%%sql
SELECT sentence_text
FROM sentences
WHERE doc_id = '79205745-b593-4b98-8a94-da6b8238fefc' AND sentence_index = 32;
# Fetch the CoreNLP columns, aliased to the names corenlp_to_xmltree expects
res = %sql SELECT tokens AS "words", lemmas, pos_tags, ner_tags, dep_types AS "dep_labels", dep_tokens AS "dep_parents" FROM sentences WHERE doc_id = '79205745-b593-4b98-8a94-da6b8238fefc' AND sentence_index = 32;
# NOTE(review): xts[0] on a map() result implies Python 2
xts = map(corenlp_to_xmltree, res)
xt = xts[0]
# Highlight the two candidate mention spans and print their features
xt.render_tree(highlight=[[21,22], [33,34]])
print_gen(get_relation_features(xt.root, [21,22], [33,34]))
```
## Feature focus: Preceding statements which nullify or negate meaning
Example:
> _Ex1:_ To investigate whether mutations in the SURF1 gene are a cause of Charcot-Marie-Tooth -LRB- CMT -RRB- disease
> _Ex2:_ To investigate the genetic effect of a new mutation found in exon 17 of the myophosphorylase -LRB- PYGM -RRB- gene as a cause of McArdle disease -LRB- also known as type 5 glycogenosis -RRB-.
Notes:
* These seem to mostly be **_modifiers of the primary verb_**?
* We are only sampling from a limited set of patterns of sentences (due to narrow DSR set) currently...
* Modifiers in general...?
* _I know how RNNs claim to / do handle this phenomenon..._ *
```
%%sql
-- Look up the relation id for the example sentence used below
SELECT relation_id
FROM genepheno_causation
WHERE doc_id = '15262743' AND section_id = 'Abstract.0' AND sent_id = 1;
# (doc_id, section_id, sent_id) of example 1
ex1_id = ('24027061', 'Abstract.0', 1)
ex1_raw="""
<node dep_parent="0" lemma="investigate" ner="O" pos="VB" word="investigate" word_idx="1"><node dep_parent="2" dep_path="aux" lemma="to" ner="O" pos="TO" word="To" word_idx="0"/><node dep_parent="2" dep_path="ccomp" lemma="cause" ner="O" pos="NN" word="cause" word_idx="10"><node dep_parent="11" dep_path="mark" lemma="whether" ner="O" pos="IN" word="whether" word_idx="2"/><node dep_parent="11" dep_path="nsubj" lemma="mutation" ner="O" pos="NNS" word="mutations" word_idx="3"><node dep_parent="4" dep_path="prep_in" lemma="gene" ner="O" pos="NN" word="gene" word_idx="7"><node dep_parent="8" dep_path="det" lemma="the" ner="O" pos="DT" word="the" word_idx="5"/><node dep_parent="8" dep_path="nn" lemma="surf1" ner="O" pos="NN" word="SURF1" word_idx="6"/></node></node><node dep_parent="11" dep_path="cop" lemma="be" ner="O" pos="VBP" word="are" word_idx="8"/><node dep_parent="11" dep_path="det" lemma="a" ner="O" pos="DT" word="a" word_idx="9"/><node dep_parent="11" dep_path="prep_of" lemma="Charcot-Marie-Tooth" ner="O" pos="NNP" word="Charcot-Marie-Tooth" word_idx="12"/><node dep_parent="11" dep_path="dep" lemma="disease" ner="O" pos="NN" word="disease" word_idx="16"><node dep_parent="17" dep_path="appos" lemma="CMT" ner="O" pos="NNP" word="CMT" word_idx="14"/></node></node></node>
"""
# Parse the serialized dependency tree of example 1 into an XMLTree
xt1 = XMLTree(et.fromstring(ex1_raw))
# (doc_id, section_id, sent_id) of example 2
ex2_id = ('15262743', 'Abstract.0', 1)
ex2_raw="""
<node dep_parent="0" lemma="investigate" ner="O" pos="VB" word="investigate" word_idx="1"><node dep_parent="2" dep_path="aux" lemma="to" ner="O" pos="TO" word="To" word_idx="0"/><node dep_parent="2" dep_path="dobj" lemma="effect" ner="O" pos="NN" word="effect" word_idx="4"><node dep_parent="5" dep_path="det" lemma="the" ner="O" pos="DT" word="the" word_idx="2"/><node dep_parent="5" dep_path="amod" lemma="genetic" ner="O" pos="JJ" word="genetic" word_idx="3"/><node dep_parent="5" dep_path="prep_of" lemma="mutation" ner="O" pos="NN" word="mutation" word_idx="8"><node dep_parent="9" dep_path="det" lemma="a" ner="O" pos="DT" word="a" word_idx="6"/><node dep_parent="9" dep_path="amod" lemma="new" ner="O" pos="JJ" word="new" word_idx="7"/><node dep_parent="9" dep_path="vmod" lemma="find" ner="O" pos="VBN" word="found" word_idx="9"><node dep_parent="10" dep_path="prep_in" lemma="exon" ner="O" pos="NN" word="exon" word_idx="11"><node dep_parent="12" dep_path="num" lemma="17" ner="NUMBER" pos="CD" word="17" word_idx="12"/><node dep_parent="12" dep_path="prep_of" lemma="gene" ner="O" pos="NN" word="gene" word_idx="19"><node dep_parent="20" dep_path="det" lemma="the" ner="O" pos="DT" word="the" word_idx="14"/><node dep_parent="20" dep_path="nn" lemma="myophosphorylase" ner="O" pos="NN" word="myophosphorylase" word_idx="15"/><node dep_parent="20" dep_path="nn" lemma="pygm" ner="O" pos="NN" word="PYGM" word_idx="17"/></node></node><node dep_parent="10" dep_path="prep_as" lemma="cause" ner="O" pos="NN" word="cause" word_idx="22"><node dep_parent="23" dep_path="det" lemma="a" ner="O" pos="DT" word="a" word_idx="21"/><node dep_parent="23" dep_path="prep_of" lemma="disease" ner="O" pos="NN" word="disease" word_idx="25"><node dep_parent="26" dep_path="nn" lemma="McArdle" ner="PERSON" pos="NNP" word="McArdle" word_idx="24"/><node dep_parent="26" dep_path="vmod" lemma="know" ner="O" pos="VBN" word="known" word_idx="28"><node dep_parent="29" dep_path="advmod" lemma="also" ner="O" 
pos="RB" word="also" word_idx="27"/><node dep_parent="29" dep_path="prep_as" lemma="glycogenosis" ner="O" pos="NN" word="glycogenosis" word_idx="32"><node dep_parent="33" dep_path="nn" lemma="type" ner="O" pos="NN" word="type" word_idx="30"/><node dep_parent="33" dep_path="num" lemma="5" ner="NUMBER" pos="CD" word="5" word_idx="31"/></node></node></node></node></node></node></node></node>
"""
# Parse example 2's serialized tree, then render both trees inline
xt2 = XMLTree(et.fromstring(ex2_raw))
xt1.render_tree()
xt2.render_tree()
```
### Testing XML speeds
How does it compare between:
* parse to XML via this python code, store as string, then parse from string at runtime
* just parse to XML at runtime via this python code?
```
# Compare: building XMLTrees from CoreNLP rows at runtime...
# Map sentence to xmltree
%time xts = map(corenlp_to_xmltree, rows)
# ...vs. pre-serializing to XML strings and re-parsing with lxml at runtime
# Pre-process to xml string
xmls = [xt.to_str() for xt in map(corenlp_to_xmltree, rows)]
# Parse @ runtime using lxml
%time roots = map(et.fromstring, xmls)
```
### Table example
```
# Mock-up of the desired extraction output, rendered as an inline HTML table
# Some wishful thinking...
table_xml = """
<div class="table-wrapper">
<h3>Causal genomic relationships</h3>
<table>
<tr><th>Gene</th><th>Variant</th><th>Phenotype</th></tr>
<tr><td>ABC</td><td><i>AG34</i></td><td>Headaches during defecation</td></tr>
<tr><td>BDF</td><td><i>CT2</i></td><td>Defecation during headaches</td></tr>
<tr><td>XYG</td><td><i>AT456</i></td><td>Defecasomnia</td></tr>
</table>
</div>
"""
from IPython.core.display import display_html, HTML
display_html(HTML(table_xml))
```
| github_jupyter |
# Week 10 - Create and manage a digital bookstore collection
*© 2021 Colin Conrad*
Welcome to Week 10 of INFO 6270! Last week marked an important milestone, in the sense that you completed the second course unit on core data science skills. Starting this week, we will have three labs on "other skills" that are valuable to data scientists but do not constitute the core practices. This week we will change gears into a topic that is important to most data-related careers: SQL. Many former students have stressed the importance of having basic knowledge of this topic in the workforce.
Many of you are likely to have covered SQL in other courses and this lab assumes that you have seen it before. If you have not explored SQL before, I strongly recommend that you spend a few hours working your way through this week's reading. The Khan Academy [Intro to SQL: Querying and managing data](https://www.khanacademy.org/computing/computer-programming/sql) is an excellent course and may be interesting to you even if you have covered it before. It is important to qualify that they use MySQL, which is ever-so slightly different from the tool that we will use this week: SQLite. The principles are the same regardless.
**This week, we will achieve the following objectives:**
- Create a SQL table with Python
- Conduct simple SQL queries
- Create relations between tables
- Conduct a more complex SQL query
- Create and query a relational table
# Case: Kobo Inc.
[Rakuten Kobo Inc.]( https://www.kobo.com/) is a Canadian company which sells e-books, audiobooks and e-readers and is a competitor to Amazon's bookselling business. Founded as a subsidiary of the Indigo Books and Music Inc., in 2010 Kobo was spun off as an independent company in order to form partnerships with other book retailers which wished to compete with Amazon. The company was later acquired by Japanese e-commerce giant Rakuten and today competes with Amazon for e-book dominance in Canada and Japan.
Like many e-commerce companies, Rakuten Kobo keeps a database of products for download. Though it is unclear whether they use an SQL database specifically, it is likely that the company stores its inventories using a relational database framework. As an e-commerce company, they maintain an inventory of thousands of items, many of which are related to transactions which happen every day. In this lab we will explore hypothetical data structures which may be similar to the technology that Rakuten Kobo uses on the backend.
# Objective 1: Create an SQL table with Python
If you have [kept up with this week's readings](https://www.khanacademy.org/computing/computer-programming/sql), you likely already know that SQL stands for _Structured Query Language_ and is the standard method for communicating with a relational database. Databases are the backbone of virtually every information system used in businesses and organizations. Relational databases are the most common variety of database; it is not difficult to see why using and managing SQL is important. In fact, **_MI, MDI and MBA students who work for companies or governments consistently express to me that this skill is among the most important skills for securing gainful employment_**. If you have not done a module on SQL, I strongly recommend that you at least look through the Khan Academy materials!
It will probably be no surprise to you by now that you can also connect to an SQL database using Python. In this lab we use a simple SQL database called [SQLite](https://www.sqlite.org/index.html). Without going into the details of SQLite, just know that this is a fully functional SQL database that is optimized for small datasets. We will use Python's `sqlite3` library to create, query and retrieve data from an SQLite database. You can read the [documentation for this library here](https://docs.python.org/2/library/sqlite3.html).
As usual, we will start by importing the `sqlite3` library. This library will allow us to connect to an SQLite database.
```
import sqlite3 # import SQLite
```
It is important to remember that `sqlite3` is not the database itself but the means to connect to the database. To make `sqlite3` work we must first create a *connection* to a database. Though it does not yet exist, we will connect to `kobo.db` in your data folder. When `sqlite3` connects to a database which does not yet exist, it will automatically create a new database for you.
If you are interested, you can open the `/data` folder and see the database that was just created!
```
conn = sqlite3.connect('data/kobo.db') # this is your database connection. We will create a database in the data folder
```
Normally we would connect to an SQL database using a command line, which would allow us to execute SQL commands, similarly to the Khan Academy example. Similarly, SQLite must connect to the database and it does so using a series of `cursor()` methods. We can create a cursor object by declaring it as below. This will allow us to use python to execute SQL queries of our database.
```
c = conn.cursor() # this is the tool for interfacing between SQLite and Python
```
Good work! We are now connected to a database. Let's talk about that a bit before proceeding.
### A bit more about relational databases
Relational databases have been around a long time (in computer years) and were originally proposed by E. F. Codd (1970). What makes relational databases different from data tables (which we have explored so far) is that they actually consist of many tables which *relate* to one another in a variety of ways. For example:
- The Government of Canada may maintain a table of citizens and a table of passports. Each passport belongs to exactly one citizen and each citizen may have a passport. This is an example of a one-to-one relationship.
- A library database could have a table of patrons and a table of books. This is a one-to-many relationship. Each patron could borrow many books, though each book is only lent to one patron at a given time.
- A university database may have a table consisting of courses and a table consisting of students. This is a many-to-many relationship. Each course contains many students and each student takes many courses.
Each entity in a relational database is typically represented by a key. For now, we will only focus on the `primary key`, a unique indicator of each entry in a table. Primary keys are the way that you can navigate relationships between tables.
Let's start by creating a table. In SQL you can create a table using the CREATE TABLE command. We will create a table for e-readers called *readers* which will contain the following information:
- id (integer, primary key)
- date_added
- model (unique)
- description
- quantity
- price
### SQLite data types
The following command executes the order to create this table. Note that the command to create table column must also specify the data type. The following are some SQL data types for your reference with their python equivalent in brackets:
- integer (integer ... aka complete numbers)
- real (float ... aka decimal value)
- text (string)
- blob (binary data ... aka Python bytes)
- null (an empty value)
```
# Create the "readers" inventory table: one row per e-reader model.
# "id" is the integer primary key; "model" is declared UNIQUE, so inserting
# the same model name twice raises sqlite3.IntegrityError.
c.execute('''CREATE TABLE readers
(id integer primary key, date_added text, model text unique, description text, quantity integer, price real)''')
```
Finally, after executing the table we must *commit* the change to the database and *close* our connection. The following code accomplishes this.
```
conn.commit()  # commits the pending CREATE TABLE to the database file
# Bug fix: close() must be *called*. The original `conn.close` was a bare
# attribute reference that did nothing, leaving the connection open.
conn.close()
```
The data will be saved for subsequent sessions.
## *Challenge Question 1 (2 points)*
Modify the code below to create a table for *books* which contains the information below. We will return to this later.
- id (integer, primary key)
- date_added (text)
- author (text)
- title (text)
- publisher (text)
- ISBN (text, unique)
- price (real)
```
# insert your code here
```
# Objective 2: Conduct simple SQL queries
Once we have some tables up and running we can start to add values to the tables. Similarly to other SQL databases, we do this by using the `INSERT INTO` command. Adding values to an SQLite database is a matter of using this command as well as the corresponding values.
The following command will `INSERT INTO` the database information about the [Kobo Forma](https://ca.kobobooks.com/products/kobo-forma?store=ca-en&utm_source=Kobo&utm_medium=TopNav&utm_campaign=Forma). Execute the cell below to add the command to the cursor.
```
conn = sqlite3.connect('data/kobo.db') # connect to the DB
c = conn.cursor() # create the cursor
c.execute("INSERT INTO readers VALUES (1, '2020-02-22', 'Forma', 'To make the reading experience better for ravenous booklovers who read for hours on end, and want a lightweight, portable alternative to heavy print books, we’ve delivered our most comfortable eReader yet with waterproof reliability, the choice of landscape or portrait mode, and the expanded access of book borrowing. Storage size available in 8GB and 32GB.',10000, 299.99)")
```
The `INSERT INTO` command will add an entry to the table as long as the entry exactly matches the requirements of the table columns. However, the way that we inserted the value is not ideal. Typically it is not advisable to specify the primary key value for an entry. Primary keys are designed to auto increment.
Let's delete the value that we just created by using the `DELETE` command. This command will remove a value from a table depending on whether that value meets the specified condition. The simplest way to delete the value is to tell SQL to delete all values from readers where id = 1.
```
c.execute('''DELETE FROM readers WHERE id=1''')
```
To properly add a value to the readers table we would typically specify the values that we wish to add using the INSERT INTO command. By doing this there is less room for error.
```
c.execute('''INSERT INTO readers(date_added, model, description, quantity, price)
VALUES ('2020-02-22', 'Forma', 'To make the reading experience better for ravenous booklovers who read for hours on end, and want a lightweight, portable alternative to heavy print books, we’ve delivered our most comfortable eReader yet with waterproof reliability, the choice of landscape or portrait mode, and the expanded access of book borrowing. Storage size available in 8GB and 32GB.',10000, 299.99)''')
```
So far so good, however manually specifying an INSERT INTO command can become burdensome. In SQL it is often important to enter many entries into the database. In SQLite and Python it is a matter of creating a list of queries. For example, we could add information about Kobo's other reading products by creating one such list.
```
readers_to_add = [
('2020-02-22', 'Libra H20', 'The perfect balance between innovative digital technology, a comfortable reading experience, and modern design is here. Offered in black or white, and with four colourful accessories to pair with, Kobo Libra H2O lets you choose the model that best suits you. Read longer than ever before with its comfortable ergonomic design, the option for landscape orientation, and easy-to-use page-turn buttons. With ComfortLight PRO and full waterproofing, Kobo Libra H2O lets you enjoy your books day or night, in any environment. Kobo Libra H2O is designed for a better reading life.',10000, 199.99),
('2020-02-22', 'Clara HD', 'Kobo Clara HD is the perfect reading companion for any booklover. It always provides the best light to read by with ComfortLight PRO, and a natural, print-like reading experience on its superior 6” HD screen. Easily customizable features help customers new to eReading to read the way they prefer. With 8 GB of on-board memory and the ability to carry up to 6,000 eBooks, Kobo Clara HD always has room for your next escape',10000, 139.99)
]
```
We then want to use the sqlite3 cursor's `executemany` method to execute multiple queries. The following code achieves this.
```
c.executemany('INSERT INTO readers(date_added, model, description, quantity, price) VALUES (?,?,?,?,?)', readers_to_add)
conn.commit() # commits the query to the database
```
## Selecting data
When working as a data scientist or analyst, perhaps the most important SQL skill to have is to query the database. In SQL, queries are typically executed using the `SELECT` command. The command `SELECT * FROM readers`, for instance will retrieve all of the entries from the `readers` table. Try executing the cell below.
```
c.execute("SELECT * FROM readers")
print(c.fetchone())
```
**Gotcha!** This code indeed retrieves all of the entries, but only prints one of them. This is because I used the `fetchone()` method to print only a single entry from the database. This is a surprisingly helpful function in most circumstances because you will not want to print all of the contents of a hundred-thousand entry database!
When the cursor retrieves query entries it saves them in a list behind the scenes, similarly to the `csv` library previously explored. If we wanted to print multiple entries we would loop through them using a `for` loop, just like in previous weeks. Try executing the cell below to retrieve all of the readers entered so far.
```
# execute() returns the cursor itself, which yields one tuple per row;
# iterate it directly to print every reader entered so far.
result_cursor = c.execute("SELECT * FROM readers")
for record in result_cursor:
    print(record)
```
In addition to executing entire entries, SQL can be used to select only specific columns. To do this, you would replace the `*` with the fields that you desire to retrieve. The following code retrieves the `id` and `model` from the `readers` table.
```
for row in c.execute("SELECT id, model FROM readers"):
print(row)
```
SQL does not order itself the same way as a CSV spreadsheet, so often you need to specify the order that you desire to retrieve the information in. You can use the ORDER BY command to achieve this.
```
for row in c.execute("SELECT id, model FROM readers ORDER BY id"):
print(row)
```
## *Challenge Question 2 (1 point)*
Write a script that [inserts information about this book](https://www.kobo.com/ca/en/ebook/pride-and-prejudice-32) into your database. Consider today to be the `date_added`; you can retrieve the rest of the necessary data from the web page using the link provided.
```
# insert your code here
```
## *Challenge Question 3 (1 point)*
Print a line that retrieves the `author` and `title` of the book that you just entered. Refer back to the examples for more information on how to do this.
```
# insert your code here
```
## *Challenge Question 4 (1 point)*
Using a list, add two more books to add to this table. They can be any books from the Kobo website.
```
# insert your code here
c.executemany('INSERT INTO books VALUES (?,?,?,?,?,?,?)', books_to_add)
conn.commit() # commits the query to the database
```
# Objective 3: Create relations between tables
As mentioned earlier, perhaps the most powerful feature of relational databases is the relationships that tables have to one another. So far, we have not specified relations between the `readers` and `books` tables. We do not need to because these two entities do not interact in a meaningful way.
Users with Kobo accounts will contain information about the e-readers that they own. If we wanted to create a table for users, we would probably create a table that looks something like the following.
```
c.execute('''CREATE TABLE users(id integer primary key, date_joined text, email text)''')
```
However, this does not contain any information about other tables! What we need to relate to other tables is a `FOREIGN KEY`, a value from another table. Foreign keys are usually primary keys from another table that can be used to link two tables together. They need to be specified when creating a table that relates to another.
Let's drop that bad table before proceeding.
```
c.execute('''DROP TABLE users''')
```
To specify a foreign key in SQLite, you must first specify the value in your table and then declare it as a foreign key. The code below creates an integer called userreader, and then declares it to be a foreign key and `REFERENCES` it to the id column in the `readers` table.
```
c.execute('''CREATE TABLE users(
id integer primary key,
date_joined text,
email text,
userreader integer,
FOREIGN KEY(userreader) REFERENCES readers(id))''')
```
We can now enter an entry into the users table which identifies the users' primary e-reader. The following line creates a user entry and connects that user to the e-reader with the ID 2 (aka Libra H20).
```
c.execute('''INSERT INTO users VALUES (1, '2020-02-22','colin.conrad@dal.ca', 2)''')
```
We can also query the user table to check our sanity. Let's do that before proceeding.
```
c.execute('''SELECT * FROM users''')
for row in c:
print(row)
```
## *Challenge Question 5 (1 point)*:
Take a moment to generate some more data. Create three more users and add them to the database. At least one of the users should have the Clara HD reader assigned to them. We will return to this later.
```
# insert your code here
```
# Objective 4: Conduct a complex SQL query
Great! It's now time to move on to something slightly more complex. Let's start by adding a few more users. I am sure that these email addresses may be familiar to some of you!
```
users_to_add = [
('2010-01-26','harry@hogwarts.co.uk', 1),
('2010-01-26','hermione@hogwarts.co.uk', 3),
('2010-01-26','ron@hogwarts.co.uk', 2),
('2010-01-26','ginny@hogwarts.co.uk', 2),
('2010-01-26','severus@hogwarts.co.uk', 2),
('2010-01-26','dumbledore@hogwarts.co.uk', 2),
('2010-01-26','luna@hogwarts.co.uk', 3)
]
c.executemany('INSERT INTO users(date_joined, email, userreader ) VALUES (?,?,?)', users_to_add)
c.execute('''select * from users''')
for r in c:
print(r)
```
We are now ready to see relational tables in action. So far we have only executed queries from a single table. Now that we have a table with a foreign key we can create a JOIN query. These types of queries draw data from multiple tables.
Let's create a query that shows us the `date_joined`, `email` and the name of the `model` that they own. The query below achieves this.
```
c.execute('''SELECT users.date_joined, users.email, readers.model
FROM users
JOIN readers
ON users.userreader = readers.id''')
for r in c: # print the query results
print(r)
```
_Magic right?_ The beauty of relational databases is that you do not have to duplicate data because you can retrieve the necessary data from other tables. This is extremely helpful when managing larger databases.
Let's unpack this query a bit. The query contained:
- `SELECT` which specified the data that you wished to retrieve and the table the data belongs to
- `FROM` specifies the main table (we could have chosen the `readers` table here as well in this case)
- `JOIN` specifies the table that you wish to match
- `ON` specifies the relation between the two tables, in this case they are linked by the `userreader` foreign key
Take a while to study this before proceeding. Consider trying different `JOIN` queries.
### Constraining your query with `WHERE`
There is one more thing that we should discuss before wrapping up this objective. Nearly every `SELECT` query can be limited by specifying a `WHERE` condition, which helps you limit the amount of data retrieved. For instance, we might wish to query only those users who own the Clara HD model of reader. To do this we would change the query to something like the following:
```
c.execute('''SELECT users.email, readers.model
FROM users
JOIN readers
ON users.userreader = readers.id
WHERE readers.id=3''') # you could also specify readers.model="Clara HD" in this instance
for r in c:
print(r)
```
`SELECT` queries constrained by `JOIN` and `WHERE` are among the most common type of queries used by business analysts in industry. Using these queries, you can retrieve desired data and generate reports for analysis.
## *Challenge Question 6 (2 points)*:
Create a query which retrieves the following data:
- The users' email address
- The model that they purchased
- The price of the model that they purchased
- Only select users who have purchased the Libra H20 model
Be sure to print your results for easy grading!
```
# insert your code here
```
# Objective 5: Create and query a relational table
The final thing worth mentioning is that SQL databases do not always manage so-called "one-to-many" relationships, such as the relationship between users and e-readers. These types of relationships can be managed with a foreign key and they are much simpler.
Often however, you will be faced with a "many-to-many" relationship. In our Kobo example, we could envision a scenario where each user owns many books, but each book is owned by many people. A foreign key alone will not help us here. To illustrate this issue, let's start by adding some more books. Execute the cell below.
```
books_to_add = [
('2020-02-22','J. K. Rowling','Harry Potter and the Philosophers Stone', 'Pottermore Publishing','9781781100219', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Chamber of Secrets', 'Pottermore Publishing','9781781100226', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Prisoner of Azkaban', 'Pottermore Publishing','9781781100233', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Goblet of Fire', 'Pottermore Publishing','9781781105672', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Order of the Phoenix', 'Pottermore Publishing','9781781100240', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Half-Blood Prince', 'Pottermore Publishing','9781781100257', 10.99),
('2020-02-22','J. K. Rowling','Harry Potter and the Deathly Hallows', 'Pottermore Publishing','9781781100264', 10.99)
]
c.executemany('INSERT INTO books(date_added, author, title, publisher, isbn, price ) VALUES (?,?,?,?,?,?)', books_to_add)
c.execute('''select * from books''')
for r in c:
print(r)
```
Many-to-many relationships cannot be expressed with two tables. When faced with these sorts of situations, you must create an intermediary table that contains records of the relationships between the entities. Let's create a new table called `userbooks`. This table can consist of a series of `user.id` and `book.id` pairings held as foreign keys. We can keep this one simple because its entire purpose is to hold those relationships.
```
# creates the intermediary table
c.execute('''CREATE TABLE userbooks(
userid integer,
bookid integer,
FOREIGN KEY(userid) REFERENCES users(id),
FOREIGN KEY(bookid) REFERENCES books(id))''')
```
Just like any other table, we can `INSERT INTO` the userbooks table values which correspond to the users' books. The following code should assign a relationship between user 1 (likely `colin.conrad@dal.ca`) and book 4, likely to be Harry Potter, if implemented correctly.
```
c.execute('''INSERT INTO userbooks VALUES (1, 4)''') # Colin owns Harry Potter and the Philosophers Stone
```
Similarly you can express the books which you would like to add using a list.
```
# (userid, bookid) pairings for the intermediary table. Both columns are
# declared integer, so pass ints directly — the original passed strings
# like '1' and silently relied on SQLite's type affinity to coerce them.
userbooks_to_add = [
    (1, 5),  # Colin owns Harry Potter and the Chamber of Secrets
    (2, 4),  # Justin Trudeau owns Harry Potter and the Philosophers Stone
    (2, 5),  # ... etc
    (2, 6),
    (2, 7),
    (2, 8),
    (3, 4),
    (3, 5)
]
c.executemany('INSERT INTO userbooks(userid, bookid ) VALUES (?,?)', userbooks_to_add)
c.execute('''select * from userbooks''')
for r in c:
    print(r)
```
Finally, we can retrieve records from a many-to-many relationship by `SELECTING` from the intermediary table and `JOIN`ing on the other two tables. The code below should retrieve records for `colin.conrad@dal.ca`.
```
# Join through the intermediary table: each (userid, bookid) row in
# userbooks links one user to one book, so every matching pairing
# contributes one (email, title) result row.
c.execute('''SELECT users.email, books.title
FROM userbooks
JOIN users
ON users.id = userbooks.userid
JOIN books
ON books.id = userbooks.bookid
WHERE users.id=1''') # restrict to user 1 (colin.conrad@dal.ca); filtering by users.email would also work
for r in c:
    print(r)
```
## *Challenge Question 7 (2 points)*
Let's try another complex query. Select the `user.email` and `books.title` for all users who own the Forma reader. Execute the cell below before conducting this query. **Hint:** the Forma reader's ID is 1.
### Execute this cell
```
userbooks_to_add = [
('4', '1'),
('4', '2'),
('4', '3')
]
c.executemany('INSERT INTO userbooks(userid, bookid ) VALUES (?,?)', userbooks_to_add)
```
### Enter your code here
```
# insert your code here
```
## Other Stuff - Connecting to MySQL (not just SQLite)
Finally, a note on connecting to MySQL databases or other related tools. You can use Python to connect to MySQL environments easily using similar skills to what you have explored here. Python provides a [MySQL connector](https://www.w3schools.com/python/python_mysql_getstarted.asp) to establish a connection to a remote MySQL server. For example, the call below could be used to establish a connection to a remote SQL server.
I will provide a video demonstration of a connection to one of our research servers. You could similarly use this code to connect to your own MySQL server if you have one.
```
# connect to a MySQL db by entering a host name. Be sure to install the MySQL connector before attempting it
import mysql.connector
import os  # used to read the password from the environment

# establish the connection
# SECURITY: never hard-code a live password in a notebook or source file —
# the original cell embedded a real credential. Set it in your shell first,
# e.g. `export MYSQL_PASSWORD=...`, and read it here instead.
mydb = mysql.connector.connect(
    host="qsslab.mysql.database.azure.com",
    user="conradc@qsslab.mysql.database.azure.com",
    password=os.environ["MYSQL_PASSWORD"],
    database="userdb_conradc"
)
print(mydb)
```
With the remote connection established, you could then execute commands as you normally would. The code below was used to select all of the `tweets` from Colin's `tweets` table.
```
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM tweets")
myresult = mycursor.fetchall()
for x in myresult:
print(x)
```
## References
Codd, E. F. (1970). A Relational Model of Data for Large Shared Data Banks. *Communications of the ACM. 13*(6), 377–387. doi:10.1145/362384.362685.
W3SChools (2021). Python MySQL. Retrieved from: https://www.w3schools.com/python/python_mysql_getstarted.asp
Khan Academy (2019). Intro to SQL: Querying and managing data. Retrieved from: https://www.khanacademy.org/computing/computer-programming/sql
| github_jupyter |
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/hbayes_binom_rats_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
We fit a hierarchical beta-binomial model to some count data derived from rat survival. (In the book, we motivate this in terms of covid incidence rates.)
Based on https://docs.pymc.io/notebooks/GLM-hierarchical-binominal-model.html
```
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
#!pip install pymc3 # colab uses 3.7 by default (as of April 2021)
# arviz needs 3.8+
#!pip install pymc3>=3.8 # fails to update
!pip install pymc3==3.11
import pymc3 as pm
print(pm.__version__)
import arviz as az
print(az.__version__)
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import pandas as pd
#import seaborn as sns
import pymc3 as pm
import arviz as az
import theano.tensor as tt
np.random.seed(123)
# Rat tumor data (BDA3, p. 102): y[i] positives observed out of n[i]
# animals in experiment i; N is the number of experiments.
y = np.array([
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
    1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
    5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
    10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
    15, 9, 4,
])
n = np.array([
    20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
    20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
    46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
    48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
    47, 24, 14,
])
N = n.size  # 71 experiments
def logp_ab(value):
    '''Log of the improper hyperprior p(a, b) proportional to (a + b)^(-5/2).

    `value` is the length-2 tensor [alpha, beta]; summing it gives a + b.
    This is the diffuse hyperprior used for the rat-tumor beta-binomial
    hierarchy in BDA3 — NOTE(review): confirm against the referenced text.
    '''
    return tt.log(tt.pow(tt.sum(value), -5/2))
with pm.Model() as model:
    # Uninformative prior for alpha and beta: HalfFlat keeps both positive,
    # and the Potential below adds the log p(a, b) density from logp_ab.
    ab = pm.HalfFlat('ab',
                     shape=2,
                     testval=np.asarray([1., 1.]))
    pm.Potential('p(a, b)', logp_ab(ab))
    # Expose alpha and beta (and two transformed coordinates of the
    # hyperparameters) as Deterministic nodes so they appear in the trace.
    alpha = pm.Deterministic('alpha', ab[0])
    beta = pm.Deterministic('beta', ab[1])
    X = pm.Deterministic('X', tt.log(ab[0]/ab[1]))
    Z = pm.Deterministic('Z', tt.log(tt.sum(ab)))
    # One success probability per experiment, drawn from the shared Beta —
    # this is what produces shrinkage toward the population mean.
    theta = pm.Beta('theta', alpha=ab[0], beta=ab[1], shape=N)
    # Likelihood: y[i] ~ Binomial(n[i], theta[i]) for the observed counts.
    p = pm.Binomial('y', p=theta, observed=y, n=n)
    #trace = pm.sample(1000, tune=2000, target_accept=0.95)
    trace = pm.sample(1000, tune=500)
#az.plot_trace(trace)
#plt.savefig('../figures/hbayes_binom_rats_trace.png', dpi=300)
print(az.summary(trace))
J = len(n)
post_mean = np.zeros(J)
samples = trace[theta]
post_mean = np.mean(samples, axis=0)
print('post mean')
print(post_mean)
alphas = trace['alpha']
betas = trace['beta']
alpha_mean = np.mean(alphas)
beta_mean = np.mean(betas)
hyper_mean = alpha_mean/(alpha_mean + beta_mean)
print('hyper mean')
print(hyper_mean)
mle = y / n
pooled_mle = np.sum(y) / np.sum(n)
print('pooled mle')
print(pooled_mle)
#axes = az.plot_forest(
# trace, var_names='theta', credible_interval=0.95, combined=True, colors='cycle')
axes = az.plot_forest(
trace, var_names='theta', hdi_prob=0.95, combined=True, colors='cycle')
y_lims = axes[0].get_ylim()
axes[0].vlines(hyper_mean, *y_lims)
#plt.savefig('../figures/hbayes_binom_rats_forest95.pdf', dpi=300)
J = len(n)
fig, axs = plt.subplots(4,1, figsize=(10,10))
plt.subplots_adjust(hspace=0.3)
axs = np.reshape(axs, 4)
xs = np.arange(J)
ax = axs[0]
ax.bar(xs, y)
ax.set_title('number of positives')  # typo fix: was "postives"
ax = axs[1]
ax.bar(xs, n)
ax.set_title('popn size')
ax = axs[2]
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
ax = axs[3]
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
#plt.savefig('../figures/hbayes_binom_rats_barplot.pdf', dpi=300)
J = len(n)
xs = np.arange(J)
fig, ax = plt.subplots(1,1)
ax.bar(xs, y)
ax.set_title('number of positives')  # typo fix: was "postives"
#plt.savefig('../figures/hbayes_binom_rats_outcomes.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, n)
ax.set_title('popn size')
#plt.savefig('../figures/hbayes_binom_rats_popsize.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
#plt.savefig('../figures/hbayes_binom_rats_MLE.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
#plt.savefig('../figures/hbayes_binom_rats_postmean.pdf', dpi=300)
```
| github_jupyter |
# Basics and Package Structure
If you're just interested in pulling data, you will primarily be using `nba_api.stats.endpoints`.
This submodule contains a class for each API endpoint supported by stats.nba.com.
For example, [the PlayerCareerStats class](https://github.com/swar/nba_api/blob/master/nba_api/stats/endpoints/playercareerstats.py) is initialized with a player ID and returns some career statistics for the player.
```
from nba_api.stats.endpoints import playercareerstats
# Anthony Davis
career = playercareerstats.PlayerCareerStats(player_id='203076')
career.get_data_frames()[0]
```
`career`, above, is a `PlayerCareerStats` object.
This class (and the other endpoint classes) supports several methods of accessing the data: `get_dict()`, `get_json()`, `get_data_frames()`, and more.
`get_data_frames()` returns a list of pandas DataFrames, and when working in notebooks, this is often your best option for viewing data.
In general, the first DataFrame in this list is the primary returned data structure and the one you'll want to look at.
Almost all of the endpoint classes take at least one required argument, along with several optional ones.
In the case of `PlayerCareerStats`, a player ID is required, but the user may also specify a league ID.
At the time of writing this notebook, these are the endpoints available:
<table><tr></tr><tr><td>boxscoreadvancedv2</td><td>boxscorefourfactorsv2</td><td>boxscoremiscv2</td><td>boxscoreplayertrackv2</td></tr><tr><td>boxscorescoringv2</td><td>boxscoresummaryv2</td><td>boxscoretraditionalv2</td><td>boxscoreusagev2</td></tr><tr><td>commonallplayers</td><td>commonplayerinfo</td><td>commonplayoffseries</td><td>commonteamroster</td></tr><tr><td>commonteamyears</td><td>defensehub</td><td>draftcombinedrillresults</td><td>draftcombinenonstationaryshooting</td></tr><tr><td>draftcombineplayeranthro</td><td>draftcombinespotshooting</td><td>draftcombinestats</td><td>drafthistory</td></tr><tr><td>franchisehistory</td><td>homepageleaders</td><td>homepagev2</td><td>infographicfanduelplayer</td></tr><tr><td>leaderstiles</td><td>leaguedashlineups</td><td>leaguedashplayerbiostats</td><td>leaguedashplayerclutch</td></tr><tr><td>leaguedashplayerptshot</td><td>leaguedashplayershotlocations</td><td>leaguedashplayerstats</td><td>leaguedashptdefend</td></tr><tr><td>leaguedashptstats</td><td>leaguedashptteamdefend</td><td>leaguedashteamclutch</td><td>leaguedashteamptshot</td></tr><tr><td>leaguedashteamshotlocations</td><td>leaguedashteamstats</td><td>leaguegamefinder</td><td>leaguegamelog</td></tr><tr><td>leagueleaders</td><td>leaguestandings</td><td>playbyplay</td><td>playbyplayv2</td></tr><tr><td>playerawards</td><td>playercareerstats</td><td>playercompare</td><td>playerdashboardbyclutch</td></tr><tr><td>playerdashboardbygamesplits</td><td>playerdashboardbygeneralsplits</td><td>playerdashboardbylastngames</td><td>playerdashboardbyopponent</td></tr><tr><td>playerdashboardbyshootingsplits</td><td>playerdashboardbyteamperformance</td><td>playerdashboardbyyearoveryear</td><td>playerdashptpass</td></tr><tr><td>playerdashptreb</td><td>playerdashptshotdefend</td><td>playerdashptshots</td><td>playerfantasyprofile</td></tr><tr><td>playerfantasyprofilebargraph</td><td>playergamelog</td><td>playergamestreakfinder</td><td>playernextngames</td></tr><tr><td>playerprofilev2</
td><td>playersvsplayers</td><td>playervsplayer</td><td>playoffpicture</td></tr><tr><td>scoreboard</td><td>scoreboardv2</td><td>shotchartdetail</td><td>shotchartlineupdetail</td></tr><tr><td>teamdashboardbyclutch</td><td>teamdashboardbygamesplits</td><td>teamdashboardbygeneralsplits</td><td>teamdashboardbylastngames</td></tr><tr><td>teamdashboardbyopponent</td><td>teamdashboardbyshootingsplits</td><td>teamdashboardbyteamperformance</td><td>teamdashboardbyyearoveryear</td></tr><tr><td>teamdashlineups</td><td>teamdashptpass</td><td>teamdashptreb</td><td>teamdashptshots</td></tr><tr><td>teamdetails</td><td>teamgamelog</td><td>teamgamestreakfinder</td><td>teamhistoricalleaders</td></tr><tr><td>teaminfocommon</td><td>teamplayerdashboard</td><td>teamplayeronoffdetails</td><td>teamplayeronoffsummary</td></tr><tr><td>teamvsplayer</td><td>teamyearbyyearstats</td><td>videodetails</td><td>videoevents</td></tr><tr><td>videostatus</td></tr></table>
### Getting Team and Player IDs
The package also includes utilities for fetching player and team information available under `nba_api.stats.static`.
You can use this to fetch player IDs and team IDs, which are often used as inputs to API endpoints.
```
from nba_api.stats.static import teams
# get_teams returns a list of 30 dictionaries, each an NBA team.
nba_teams = teams.get_teams()
print('Number of teams fetched: {}'.format(len(nba_teams)))
nba_teams[:3]
from nba_api.stats.static import players
# get_players returns a list of dictionaries, each representing a player.
nba_players = players.get_players()
print('Number of players fetched: {}'.format(len(nba_players)))
nba_players[:5]
```
To search for an individual team or player by its name (or other attribute), dictionary comprehensions are your friend.
```
# Filter the static team list down to the entries whose full_name matches,
# then take the first (and only) hit.
matching_teams = [team for team in nba_teams if team['full_name'] == 'San Antonio Spurs']
spurs = matching_teams[0]
spurs
# Same pattern for an individual player.
matching_players = [player for player in nba_players if player['full_name'] == 'Tim Duncan']
big_fundamental = matching_players[0]
big_fundamental
```
| github_jupyter |
# Think Bayes
This notebook presents example code and exercise solutions for Think Bayes.
Copyright 2018 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import classes from thinkbayes2
# MakeBinomialPmf is also imported here because the solution cells below
# (dinner party, Gluten.Likelihood) call it directly; without it they would
# raise NameError.
from thinkbayes2 import Hist, Pmf, Suite, Beta, MakeBinomialPmf
import thinkplot
import numpy as np
```
## The dinner party
Suppose you are having a dinner party with 10 guests and 4 of them are allergic to cats. Because you have cats, you expect 50% of the allergic guests to sneeze during dinner. At the same time, you expect 10% of the non-allergic guests to sneeze. What is the distribution of the total number of guests who sneeze?
```
# Solution
# NOTE(review): MakeBinomialPmf is not among the names imported earlier in
# this notebook (only Hist, Pmf, Suite, Beta are), so import it explicitly
# to avoid a NameError.
from thinkbayes2 import MakeBinomialPmf

# Group sizes and per-guest sneeze probabilities.
n_allergic = 4
n_non = 6
p_allergic = 0.5
p_non = 0.1

# Total sneezers = sum of two independent binomials; Pmf addition performs
# the convolution of the two distributions.
pmf = MakeBinomialPmf(n_allergic, p_allergic) + MakeBinomialPmf(n_non, p_non)
thinkplot.Hist(pmf)

# Solution
# Expected number of sneezers: 4*0.5 + 6*0.1 = 2.6
pmf.Mean()
```
## The Gluten Problem
[This study from 2015](http://onlinelibrary.wiley.com/doi/10.1111/apt.13372/full) showed that many subjects diagnosed with non-celiac gluten sensitivity (NCGS) were not able to distinguish gluten flour from non-gluten flour in a blind challenge.
Here is a description of the study:
>"We studied 35 non-CD subjects (31 females) that were on a gluten-free diet (GFD), in a double-blind challenge study. Participants were randomised to receive either gluten-containing flour or gluten-free flour for 10 days, followed by a 2-week washout period and were then crossed over. The main outcome measure was their ability to identify which flour contained gluten.
>"The gluten-containing flour was correctly identified by 12 participants (34%)..."
Since 12 out of 35 participants were able to identify the gluten flour, the authors conclude "Double-blind gluten challenge induces symptom recurrence in just one-third of patients fulfilling the clinical diagnostic criteria for non-coeliac gluten sensitivity."
This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So the results are consistent with the hypothesis that none of the subjects are actually gluten sensitive.
We can use a Bayesian approach to interpret the results more precisely. But first we have to make some modeling decisions.
1. Of the 35 subjects, 12 identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 subjects wrongly identified the gluten-free flour based on their symptoms, and 6 subjects were unable to distinguish. So each subject gave one of three responses. To keep things simple I follow the authors of the study and lump together the second two groups; that is, I consider two groups: those who identified the gluten flour and those who did not.
2. I assume (1) people who are actually gluten sensitive have a 95% chance of correctly identifying gluten flour under the challenge conditions, and (2) subjects who are not gluten sensitive have only a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish).
Using this model, estimate the number of study participants who are sensitive to gluten. What is the most likely number? What is the 95% credible interval?
```
# Solution
# Here's a class that models the study
class Gluten(Suite):
    """Suite whose hypotheses are counts of gluten-sensitive participants."""

    def Likelihood(self, data, hypo):
        """Computes the probability of the data under the hypothesis.

        data: tuple of (number who identified, number who did not)
        hypo: number of participants who are gluten sensitive
        """
        num_sensitive = hypo
        correct, incorrect = data
        num_insensitive = (correct + incorrect) - num_sensitive
        # Sensitive subjects identify the gluten flour with p=0.95; the
        # rest guess it by chance with p=0.4.  The total number of correct
        # identifications is the sum (convolution) of the two binomials.
        dist = MakeBinomialPmf(num_sensitive, 0.95) + MakeBinomialPmf(num_insensitive, 0.4)
        return dist[correct]
# Solution
prior = Gluten(range(0, 35+1))
thinkplot.Pdf(prior)
# Solution
posterior = prior.Copy()
data = 12, 23
posterior.Update(data)
# Solution
thinkplot.Pdf(posterior)
thinkplot.Config(xlabel='# who are gluten sensitive',
ylabel='PMF', legend=False)
# Solution
posterior.CredibleInterval(95)
```
| github_jupyter |
# The DataFetcher
The DataFetcher class is used by detex to serve seismic data to other functions and classes. It is designed to use data from local directories as well as remote clients (like the [obspy FDSN client](https://docs.obspy.org/packages/obspy.fdsn.html)). In the future I hope to add functionality to the DataFetcher to allow it to check data availability and quality. We will start by looking at the DataFetcher class docs, using the DataFetcher on local data directories, and then setting up a DataFetcher to use a remote client.
## DataFetcher docs
Let's print the current version of detex and the docstring associated with the DataFetcher class in order to get an idea of what it does and what options are available.
```
import detex
# Print the installed detex version and the DataFetcher docstring so the
# reader can see which options are available in their installation.
print('Current detex version is %s' % (detex.__version__))
print ('-------------------------------')
print (detex.getdata.DataFetcher.__doc__)
```
Some of the more important parameters to pay attention to are the ones controlling the duration of files and the response removal.
* Parameters that control data duration, number of files, and file type:
1. timeBeforeOrigin
2. timeAfterOrigin
3. conDatDuration
4. secBuf
* Parameters that control response removal (more on obspy response removal [here](https://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.remove_response.html#obspy.core.stream.Stream.remove_response))
1. removeResponse (True or False)
2. opType ("DISP" (m), "VEL" (m/s), or "ACC" (m/s^2))
3. prefilt
Also, for less than perfect data, the fillZeros parameter can be very important to avoid discarding data with small gaps. More on this in the [clustering section](../Clustering/clustering.md).
## DataFetcher with local directories
Often it can be faster to download, perform some processing, and save data once rather than using clients each time detex needs seismic data. This is not always the case, however, if a database on the same network as your workstation is the remote client you wish to use. As an example, let's create a local data directory and then set up a DataFetcher instance to pull data from it.
### Create local directories
In order to create the data directories we first need to let Detex know which stations and events it should look for. To do this we use the template key and station key files (more on that in the [required files section](../RequiredFiles/required_files.md)).
For this example lets use a subset of the template key and station key used in the intro tutorial.
```
import detex
stakey = detex.util.readKey('StationKey.csv', key_type='station')
stakey
temkey = detex.util.readKey('TemplateKey.csv', key_type='template')
temkey
```
Next we need to call makeDataDirectories (or getAllData which was kept for backward compatibility).
```
%time detex.getdata.makeDataDirectories() # make directories and time how long it takes (the %time magic only works in ipython)
```
Before we look at the downloaded data lets discuss some of the parameters that you should pay attention to when calling the makeDataDirectories function. You should notice that many of the makeDataDirectories function's input arguments are similar to DataFetchers arguments. This is because under the hood the makeDataDirectories function is simply using a DataFetcher attached to a client (IRIS by default). If you wanted to use something besides IRIS you would just need to pass a DataFetcher instance attached to another client as the fetch argument.
One unique argument that makeDataDirectories needs is the formatOut, which is the format to use when saving the data to disk. Any format obspy can read/write should be acceptable. Options are: 'mseed', 'sac', 'GSE2', 'sacxy', 'q', 'sh_asc', 'slist', 'tspair', 'segy', 'su', 'pickle', 'h5' (if obspyh5 is installed). Default is mseed, although the makeDataDirectories call by default will remove instrument response thus necessitating that the data are in a float format and therefore devaluing the mseed compression advantage.
I recommend you look at the entire doc string of the function, but I won't print it here. You should think about what parameters will work best for your data set before just using the defaults.
Now let's take a look at the newly created data directories. This is most easily accomplished by reading the SQLite database that was created to index the directories.
```
import os
# The download step writes a hidden SQLite index of the data directory;
# load its 'ind' table into a DataFrame for inspection.
index_file = os.path.join('ContinuousWaveForms', '.index.db')
ind = detex.util.loadSQLite(index_file, 'ind')
ind
```
The fields in the database table "ind" are as follows:
| Field | Description |
|:-----:| :---------: |
| Path | A list of indices to reference values in the indkey table for building absolute paths|
| FileName | The name of the particular file represented by the current row |
| Starttime | time stamp of the start time in the file |
| Endtime | time stamp of the end time in the file |
| Gaps | The total number of gaps in the file |
| Nc | The number of unique channels |
| Nt | The number of traces (without gaps Nc = Nt) |
| Duration | Duration of seismic data in seconds |
| Station | network.station |
When the DataFetcher loads files from a directory it first reads the index to find the paths to load. Because of this, the directory structure is not important. For example, if you already have a directory that contains some files in an obspy readable format you can index it with the detex.util.indexDirectory function. Once indexed the directory can be used by the DataFetcher class.
It can be useful to use the index for data quality checks. For example, let's look for files that are shorter than expected, that are missing channels, or that have gaps (even though we can see these TA data don't have any such issues).
```
# Quality checks on the index DataFrame `ind` loaded above.

# look for gaps
ind_gaps = ind[ind.Gaps > 0]
print("There are %d files with gaps" % len(ind_gaps))

# Look for durations at least 2 minutes less than the expected duration.
# FIX: use the expected_duration variable rather than repeating the
# magic number 3720 in the expression below.
expected_duration = 3720
ind_short = ind[expected_duration - ind.Duration > 120]
print("There are %d files with shorter than expected durations" % len(ind_short))

# look for missing channels
expected_channels = 3
ind_missing = ind[ind.Nc < expected_channels]
print("There are %d files with less than %d channels" % (len(ind_missing), expected_channels))
```
### Initiate DataFetcher
Now we are ready to create a DataFetcher instance and point it at the newly created directory. We will also explore some of the DataFetcher methods.
```
# Create two fetchers, one pointed at the continuous data and the other at the event data
con_fetcher = detex.getdata.DataFetcher('dir', directoryName='ContinuousWaveForms', removeResponse=False)
eve_fetcher = detex.getdata.DataFetcher('dir', directoryName='EventWaveForms', removeResponse=False)
```
As of version 1.0.4 the DataFetcher has 3 public methods:
1. getStream - fetches a stream from an arbitrary network, station, channel, location (which the user must define). If no data are fetchable then None is returned.
2. getConData - creates a generator for fetching all data available for the stations, channels, and date ranges found in a station key.
3. getTemData - fetches data related to those described by the template key, but also needs a station key to know which stations to look for.
Let's look at an example use of each method:
```
# getStream example
import obspy
## set variables
# NOTE(review): '2009-091T04-13-00' mixes an ordinal date with dashes in the
# time part; obspy's UTCDateTime parser appears to accept it -- confirm.
utc1 = obspy.UTCDateTime('2009-091T04-13-00') - 5
utc2 = utc1 + 60
net = 'TA'
sta = 'M17A'
chan = 'BH?'
## fetch
st = con_fetcher.getStream(utc1, utc2, net, sta, chan)
## plot the boring data
%pylab inline
st.plot()
# getConData example
## Read station key and use only TA M17A
stakey = detex.util.readKey('StationKey.csv', key_type='station')
stakey = stakey[stakey.STATION=='M17A']
## Create a generator for fetching continuous data
congen = con_fetcher.getConData(stakey) # note if we dont pass a duration the default is used
## loop over generator and calculate sta/lta values to see if we can find an event
from obspy.signal.trigger import classicSTALTA # for simplicity let's use the basic sta/lta
from obspy.signal.trigger import plotTrigger
# NOTE(review): this rebinds `sta` (previously the station code string) to
# the short-term-average window length; the string was already used above,
# so this is harmless but worth renaming eventually.
sta = 0.5 # short term average in seconds
lta = 2 # long term average in seconds
# Accumulators for the best (largest) STA/LTA ratio found in the loop below.
ratio_max = 0 # int variables to keep track of max and time it occurs
time_max = 0
trace_max = None
cft_max = None
# Iterate through the generator until it is exhausted, tracking the window
# with the largest STA/LTA ratio seen so far.
for st in congen:
    trace = st.select(component='z')[0]  # select vertical component
    trace.filter('bandpass', freqmin=1, freqmax=10, zerophase=True, corners=2)
    sr = trace.stats.sampling_rate  # get sampling rate
    starttime = trace.stats.starttime
    cft = classicSTALTA(trace.data, int(sta * sr), int(lta * sr))  # run sta/lta
    # FIX: use a separate name for the scalar peak; the original reused
    # cft_max for both the scalar max and the saved characteristic function.
    peak = max(cft)
    if peak > ratio_max:
        ratio_max = peak
        # convert the sample index of the peak into an absolute time
        time_max = starttime + cft.argmax() / float(sr)
        trace_max = trace.copy()
        cft_max = cft
print("The max sta/lta was %.2f occurred at %s" % (ratio_max, time_max))
# FIX: plot the trace/characteristic function of the *maximum* window; the
# original plotted whatever the last loop iteration left in trace/cft.
plotTrigger(trace_max, cft_max, ratio_max * .92, ratio_max / 1.5)
## Let's get a closer look
# Fetch a 45 s window around the detected peak time and plot it filtered.
st = con_fetcher.getStream(time_max-10, time_max+35, 'TA', 'M17A', 'BHZ')
st.filter('bandpass', freqmin=1, freqmax=5, zerophase=True, corners=2)
st.plot()
# getTemData example
# FIX: the original comment said "getConData example", but this cell
# demonstrates getTemData (fetching the event waveforms).
## Create stream generator
evegen = eve_fetcher.getTemData("TemplateKey.csv", "StationKey.csv")
# note: the temkey and stakey parameters can either be paths to csv files or DataFrames
## iterate through each of the known events and plot a spectrogram of the
## one with the highest amplitude on the vertical component
amp_max = 0
tr_max = None
for st, evename in evegen:
    trace = st.select(component='z')[0]
    trace.detrend('linear')
    trace.filter('bandpass', freqmin=1, freqmax=10, zerophase=True, corners=2)
    # NOTE(review): max() of the raw samples ignores negative swings; if
    # absolute peak amplitude is intended, abs() would be needed -- confirm.
    z_max = max(trace.data)
    if z_max > amp_max:
        amp_max = z_max
        tr_max = trace.copy()
# FIX: guard against the degenerate case where no trace exceeded the
# initial amplitude of 0; tr_max would still be None and .plot() would crash.
if tr_max is not None:
    tr_max.plot()
    tr_max.spectrogram()
```
## DataFetcher with clients
Detex should be able to handle a wide variety of obspy client types, including FDSN, NEIC, EARTHWORM, etc. However, as of version 1.0.4 I have only tested IRIS extensively so using other clients may take a bit of debugging. More tests and bug fixes will follow in future versions.
### IRIS FDSN client
In order to use the DataFetcher we first need to set up a client object. We will create an FDSN client then initiate an instance of the DataFetcher class and use the getStream function to fetch an obspy stream.
```
import detex
import obspy
# NOTE(review): `obspy.fdsn` is the pre-1.0 import path; in modern obspy
# the client lives at `obspy.clients.fdsn` -- confirm against the obspy
# version this detex release targets before changing.
from obspy.fdsn import Client
#setup client
client = Client("IRIS")
# setup fetcher -- pulls waveforms from IRIS instead of local directories
fetcher = detex.getdata.DataFetcher(method='client', client=client)
# set info (a one-minute window starting 5 s before the event origin)
utc1 = obspy.UTCDateTime('2009-03-19T19-06-07') - 5
utc2 = utc1 + 60
net = 'TA'
sta = 'M17A'
chan = 'BH?'
# fetch a stream
st = fetcher.getStream(utc1, utc2, net, sta, chan)
# plot waveforms
%pylab inline
st.filter('bandpass', freqmin=1, freqmax=10, corners=2, zerophase=True)
st.plot()
```
If the waveforms look strange it is because this event is actually a blast at a surface coal mine.
The other methods demonstrated in previous sections also work with the DataFetcher attached to IRIS, so I won't illustrate them again here.
It should be noted that by default the instrument responses have been removed. This can be controlled with the removeResponse input argument which is either set to True or False.
This should give you all the information you need on how detex gets its data and how to set up a custom DataFetcher to be used by other detex classes.
# Next Section
The [next section](../Clustering/clustering.md) covers how to perform waveform similarity analysis in preparation for subspace detection.
| github_jupyter |
```
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
# Load the raw survey export and give the columns short, code-friendly names
# (the order must match the column order of the exported CSV).
df = pd.read_csv("./data/users/user_survey_raw.csv")
col_names = ["ts","version","application","switch","share","choice","cyclone_confidence","cyclone_nps","cyclone_narrative",
"cyclone_over_fast","fast_confidence","fast_nps","fast_narrative","fast_over_cyclone"]
df.columns = col_names
df.head()
# Scratch cell exploring the groupby API before building the charts.
# NOTE(review): `t` and `test` are assigned but never used afterwards.
t = df["version"]
out = df.groupby(["choice"])
cyclone = out.get_group("Cyclone DDS")
test = cyclone.groupby("version").count()["ts"].tolist()
# Last expression displays the list of ROS versions used by Cyclone users.
cyclone.groupby("version").count()["ts"].keys().tolist()
# Let's start with a pie chart for ROS version, one for All, Cyclone, Fast
fig, ax = plt.subplots(1,3,figsize=(20, 10))
fig.tight_layout()
# Assign each ROS version a fixed color so the same version gets the same
# color in all three pies.
# NOTE(review): assumes at most 7 distinct versions appear -- confirm.
names = df.groupby("version").count()["ts"].keys().tolist()
colors = ["firebrick","darkorange","gold","olivedrab","dodgerblue","royalblue","purple"]
color_assign = {}
for idx,name in enumerate(names):
    color_assign[name] = colors[idx]
################################################
title = "ROS 2 Version Used"
ax[0].set_title(title)
# FIX: `names` was recomputed identically here in the original; reuse it.
vals = df.groupby("version").count()["ts"].values
c_list = [color_assign[name] for name in names]
patches,text,auto = ax[0].pie(vals, autopct='%1.1f%%', colors=c_list, shadow=True, radius=0.9)
# FIX: the original called legend() twice back-to-back; the second call
# replaces the first, so only the positioned call is kept.
ax[0].legend(names, loc='lower right')
################################################
title = "ROS 2 Version Used -- Cyclone DDS Users"
ax[1].set_title(title)
out = df.groupby(["choice"])
cyclone = out.get_group("Cyclone DDS")
names = cyclone.groupby("version").count()["ts"].keys().tolist()
vals = cyclone.groupby("version").count()["ts"].tolist()
c_list = [color_assign[name] for name in names]
patches,text,auto = ax[1].pie(vals, colors=c_list, autopct='%1.1f%%', shadow=True, radius=0.9)
ax[1].legend(names, loc='lower right')
################################################
title = "ROS 2 Version Used -- Fast DDS Users"
ax[2].set_title(title)
out = df.groupby(["choice"])
fast = out.get_group("Fast DDS")
names = fast.groupby("version").count()["ts"].keys().tolist()
vals = fast.groupby("version").count()["ts"].tolist()
c_list = [color_assign[name] for name in names]
patches,text,auto = ax[2].pie(vals, colors=c_list, autopct='%1.1f%%', shadow=True, radius=0.9)
# FIX: duplicate legend() call removed here as well.
ax[2].legend(names, loc='lower right')
plt.suptitle("ROS Distro Used By Preferred DDS Implementation",fontsize="xx-large")
plt.savefig("./plots/ROS2Version.png")
plt.show()
# Now do the user choice
# FIX: removed a dead `cm = plt.get_cmap('Reds')` here -- it was
# unconditionally overwritten below before any use.
fig, ax = plt.subplots(1,2,figsize=(20, 10))
plt.suptitle("Preferred DDS Implementation and Confidence in Selected Implementation",fontsize="xx-large")
################################################
title = "Preferred DDS Implementation"
ax[0].set_title(title,fontsize="x-large")
names = df.groupby("choice").count()["ts"].keys().tolist()
vals = df.groupby("choice").count()["ts"].values
print(vals)
patches,text,auto =ax[0].pie(vals,autopct='%1.1f%%', colors=["red","blue"], shadow=True,radius=0.9)
ax[0].legend(names)
################################################
# FIX: removed a stale set_title("ROS 2 Version Used -- Cyclone DDS Users")
# here -- it was wrong for this panel and was overwritten at the end anyway.
out = df.groupby(["choice"])
cyclone = out.get_group("Cyclone DDS")
names = cyclone.groupby("cyclone_confidence").count()["ts"].keys().tolist()
vals = cyclone.groupby("cyclone_confidence").count()["ts"].tolist()
# Shade each confidence score on a 0-10 scale: Reds for Cyclone users,
# Blues for Fast users, darker = higher confidence.
cm = plt.get_cmap('Reds')
cs = []
for v in names:
    cs.append(cm((float(v)/10.0)))
out = df.groupby(["choice"])
fast = out.get_group("Fast DDS")
vals2 = fast.groupby("fast_confidence").count()["ts"].tolist()
names2 = fast.groupby("fast_confidence").count()["ts"].keys().tolist()
cm = plt.get_cmap('Blues')
for v in names2:
    cs.append(cm((float(v)/10.0)))
# Concatenate the two groups into a single pie.
vals = vals + vals2
names = names + names2
patches,text,auto =ax[1].pie(vals, autopct='%1.1f%%', colors=cs,shadow=True,radius=0.9)
ax[1].legend(names)
title = "Preferred DDS Implementation by Type and Confidence Score"
ax[1].set_title(title)
plt.savefig("./plots/ROS2Choice.png")
plt.show()
# Now do the user choice
# FIX: removed a dead `cm = plt.get_cmap('Reds')` here -- `cm` is never
# used in this cell.
fig, ax = plt.subplots(1,2,figsize=(20, 10))
plt.suptitle("Percentage of Respondents that Tried both Implementations and Their Preferences",fontsize="xx-large")
################################################
title = "Did you try more than one DDS / RMW vendor this year?"
ax[0].set_title(title,fontsize="x-large")
names = df.groupby("switch").count()["ts"].keys().tolist()
vals = df.groupby("switch").count()["ts"].values
print(vals)
patches,text,auto =ax[0].pie(vals,autopct='%1.1f%%', colors=["red","blue"], shadow=True,radius=0.9)
ax[0].legend(names)
################################################
# Split the "tried both" respondents by their final preference...
out = df.groupby(["switch"])
switch = out.get_group("Yes")
names = switch.groupby("choice").count()["ts"].keys().tolist()
vals = switch.groupby("choice").count()["ts"].tolist()
new_names = []
for name in names:
    new_names.append("Tried both implementations, preferred " + name)
names = new_names
# ...and likewise the respondents who only ever used one implementation.
out = df.groupby(["switch"])
no_switch = out.get_group("No")
vals2 = no_switch.groupby("choice").count()["ts"].tolist()
names2 = no_switch.groupby("choice").count()["ts"].keys().tolist()
new_names2 = []
for name in names2:
    new_names2.append("Has only tried " + name)
names2 = new_names2
vals = vals + vals2
names = names + names2
colors = ["darkred","royalblue","indianred","cornflowerblue"]
patches,text,auto =ax[1].pie(vals, autopct='%1.1f%%',colors=colors, shadow=True,radius=0.9)
ax[1].legend(names)
title = "User Preference and Experimentation"
ax[1].set_title(title,fontsize="x-large")
plt.savefig("./plots/SwitchChoice.png")
plt.show()
# Histograms of Net Promoter Score (0-10) for each preference group.
fig, ax = plt.subplots(2,1,figsize=(20, 20))
plt.suptitle("Net Promoter Score by Preferred DDS Implementation",fontsize="xx-large")
out = df.groupby(["choice"])
cyclone = out.get_group("Cyclone DDS")
# Print mean/median NPS for the Cyclone group for reference.
print(np.mean(cyclone["cyclone_nps"]))
print(np.median(cyclone["cyclone_nps"]))
ax[0].hist(cyclone["cyclone_nps"],density=False,align="mid",range=[0,10],bins=10, color="red")
ax[0].grid()
ax[0].set_xlim(0,11)
ticks = np.arange(0,11,1)
tick_names = ["{0}".format(t) for t in ticks]
ax[0].set_xticks(ticks)
ax[0].set_xticklabels(tick_names)
ax[0].set_ylabel("Respondents",fontsize="x-large")
# FIX: corrected the "Net Promotoer Score" typo in the axis label.
ax[0].set_xlabel("Net Promoter Score",fontsize="x-large")
ax[0].set_title("Cyclone DDS Net Promoter Score",fontsize="x-large")
out = df.groupby(["choice"])
fast = out.get_group("Fast DDS")
print(np.mean(fast["fast_nps"]))
print(np.median(fast["fast_nps"]))
# FIX: removed unused `bottom = np.arange(0,10,1)` left over from an
# earlier draft.
ax[1].hist(fast["fast_nps"],density=False,align="mid",range=[0,10],bins=10, color="blue")
ax[1].grid()
ax[1].set_xlim(0,11)
ticks = np.arange(0,11,1)
tick_names = ["{0}".format(t) for t in ticks]
ax[1].set_xticks(ticks)
ax[1].set_xticklabels(tick_names)
ax[1].set_ylabel("Respondents",fontsize="x-large")
# FIX: same typo correction as above.
ax[1].set_xlabel("Net Promoter Score",fontsize="x-large")
ax[1].set_title("Fast DDS Net Promoter Score",fontsize="x-large")
plt.savefig("./plots/DDSNPS.png")
plt.show()
```
| github_jupyter |
<img src='./img/logoline_12000.png' align='right' width='100%'></img>
# Tutorial on creating a climate index for wind chill
In this tutorial we will plot a map of wind chill over Europe using regional climate reanalysis data (UERRA) of wind speed and temperature. From the WEkEO Jupyterhub we will download this data from the Climate Data Store (CDS) of the Copernicus Climate Change Service (C3S). The tutorial comprises the following steps:
1. [Search and download](#search_download) regional climate reanalysis data (UERRA) of 10m wind speed and 2m temperature.
2. [Read data](#read_data): Once downloaded, we will read and understand the data, including its variables and coordinates.
3. [Calculate wind chill index](#wind_chill): We will calculate the wind chill index from the two parameters of wind speed and temperature, and view a map of average wind chill over Europe.
4. [Calculate wind chill with ERA5](#era5): In order to assess the reliability of the results, repeat the process with ERA5 reanalysis data and compare the results with those derived with UERRA.
<img src='./img/climate_indices.png' align='center' width='100%'></img>
## <a id='search_download'></a>1. Search and download data
Before we begin we must prepare our environment. This includes installing the Application Programming Interface (API) of the CDS, and importing the various python libraries that we will need.
#### Install CDS API
To install the CDS API, run the following command. We use an exclamation mark to pass the command to the shell (not to the Python interpreter).
```
!pip install cdsapi
```
#### Import libraries
We will be working with data in NetCDF format. To best handle this data we need a number of libraries for working with multidimensional arrays, in particular Xarray. We will also need libraries for plotting and viewing data, in particular Matplotlib and Cartopy.
```
# CDS API
import cdsapi
# Libraries for working with multidimensional arrays
import numpy as np
import xarray as xr
# Libraries for plotting and visualising data
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
```
#### Enter your CDS API key
Please follow the steps at this link to obtain a User ID and a CDS API key:
https://cds.climate.copernicus.eu/api-how-to
Once you have these, please enter them in the fields below by replacing "UID" with your User ID, and "API_KEY" with your API key.
```
# CDS endpoint and credentials used by cdsapi.Client below.
CDS_URL = "https://cds.climate.copernicus.eu/api/v2"
# enter your CDS authentication key (replace UID and API_KEY with the
# values from your CDS profile page):
CDS_KEY = "UID:API_KEY"
```
#### Search for climate data to calculate wind chill index
The wind chill index we will calculate takes two parameters as input, these are 2m near-surface air temperature, and 10m wind speed. Data for these parameters are available as part of the UERRA regional reanalysis dataset for Europe for the period 1961 to 2019. We will search for this data on the CDS website: http://cds.climate.copernicus.eu. The specific dataset we will use is the UERRA regional reanalysis for Europe on single levels from 1961 to 2019.
<img src='./img/CDS.jpg' align='left' width='45%'></img> <img src='./img/CDS_UERRA.png' align='right' width='45%'></img>
Having selected the dataset, we now need to specify what product type, variables, temporal and geographic coverage we are interested in. These can all be selected in the **"Download data"** tab. In this tab a form appears in which we will select the following parameters to download:
- Origin: `UERRA-HARMONIE`
- Variable: `10m wind speed` and `2m temperature` (these will need to be selected one at a time)
- Year: `1998 to 2019`
- Month: `December`
- Day: `15`
- Time: `12:00`
- Format: `NetCDF`
<img src='./img/CDS_UERRA_download.png' align='center' width='45%'></img>
At the end of the download form, select **"Show API request"**. This will reveal a block of code, which you can simply copy and paste into a cell of your Jupyter Notebook (see cells below). You will do this twice: once for 10m wind speed and again for 2m temperature.
#### Download data
Having copied the API requests into the cells below, run these to retrieve and download the data you requested into your local directory.
```
# Download UERRA-HARMONIE 10 m wind speed: 15 December at 12:00 for each
# year, saved locally as NetCDF.
# NOTE(review): the years list covers 1998-2018 although the surrounding
# text mentions 2019 -- confirm which range is intended.
c = cdsapi.Client(url=CDS_URL, key=CDS_KEY)
c.retrieve(
'reanalysis-uerra-europe-single-levels',
{
'origin': 'uerra_harmonie',
'variable': '10m_wind_speed',
'year': [
'1998', '1999', '2000',
'2001', '2002', '2003',
'2004', '2005', '2006',
'2007', '2008', '2009',
'2010', '2011', '2012',
'2013', '2014', '2015',
'2016', '2017', '2018',
],
'month': '12',
'day': '15',
'time': '12:00',
'format': 'netcdf',
},
'UERRA_ws10m.nc')
# Same request for 2 m temperature over the identical dates.
c = cdsapi.Client(url=CDS_URL, key=CDS_KEY)
c.retrieve(
'reanalysis-uerra-europe-single-levels',
{
'origin': 'uerra_harmonie',
'variable': '2m_temperature',
'year': [
'1998', '1999', '2000',
'2001', '2002', '2003',
'2004', '2005', '2006',
'2007', '2008', '2009',
'2010', '2011', '2012',
'2013', '2014', '2015',
'2016', '2017', '2018',
],
'month': '12',
'day': '15',
'time': '12:00',
'format': 'netcdf',
},
'UERRA_t2m.nc')
```
## <a id='read_data'></a>2. Read Data
Now that we have downloaded the data, we can start to play ...
We have requested the data in NetCDF format. This is a commonly used format for array-oriented scientific data.
To read and process this data we will make use of the Xarray library. Xarray is an open source project and Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! We will read the data from our NetCDF file into an Xarray **"dataset"**
```
# Paths to the NetCDF files downloaded above.
fw = 'UERRA_ws10m.nc'
ft = 'UERRA_t2m.nc'
# Create Xarray Dataset
dw = xr.open_dataset(fw)
dt = xr.open_dataset(ft)
```
Now we can query our newly created Xarray datasets ...
```
dw
dt
```
We see that dw (dataset for wind speed) has one variable called **"si10"**. If you view the documentation for this dataset on the CDS you will see that this is the wind speed valid for a grid cell at the height of 10m above the surface. It is computed from both the zonal (u) and the meridional (v) wind components by $\sqrt{(u^{2} + v^{2})}$. The units are m/s.
The other dataset, dt (2m temperature), has a variable called **"t2m"**. According to the documentation on the CDS this is air temperature valid for a grid cell at the height of 2m above the surface, in units of Kelvin.
While an Xarray **dataset** may contain multiple variables, an Xarray **data array** holds a single multi-dimensional variable and its coordinates. To make the processing of the **si10** and **t2m** data easier, we will convert them into Xarray data arrays.
```
# Create Xarray Data Arrays
# si10 = 10 m wind speed (m/s); t2m = 2 m air temperature (K), per the
# CDS documentation quoted above.
aw = dw['si10']
at = dt['t2m']
```
## <a id='wind_chill'></a>3. Calculate wind chill index
There are several indices to calculate wind chill based on air temperature and wind speed. Until recently, a commonly applied index was the following:
$\textit{WCI} = (10 \sqrt{\upsilon}-\upsilon + 10.5) \cdot (33 - \textit{T}_{a})$
where:
- WCI = wind chill index, $kg*cal/m^{2}/h$
- $\upsilon$ = wind velocity, m/s
- $\textit{T}_{a}$ = air temperature, °C
We will use the more recently adopted North American and United Kingdom wind chill index, which is calculated as follows:
$\textit{T}_{WC} = 13.12 + 0.6215\textit{T}_{a} - 11.37\upsilon^{0.16} + 0.3965\textit{T}_{a}\upsilon^{0.16}$
where:
- $\textit{T}_{WC}$ = wind chill index
- $\textit{T}_{a}$ = air temperature in degrees Celsius
- $\upsilon$ = wind speed at 10 m standard anemometer height, in kilometres per hour
To calculate $\textit{T}_{WC}$ we first have to ensure our data is in the right units. For the wind speed we need to convert from m/s to km/h, and for air temperature we need to convert from Kelvin to degrees Celsius:
```
# wind speed, convert from m/s to km/h: si10 * (60*60) / 1000, i.e. * 3.6
# (the original comment had the factor inverted; the code below is correct)
w = aw * 3600 / 1000
# air temperature, convert from Kelvin to Celsius: t2m - 273.15
t = at - 273.15
```
Now we can calculate the North American and United Kingdom wind chill index:
$\textit{T}_{WC} = 13.12 + 0.6215\textit{T}_{a} - 11.37\upsilon^{0.16} + 0.3965\textit{T}_{a}\upsilon^{0.16}$
```
# North American / UK wind chill index: t in degrees Celsius, w in km/h.
twc = 13.12 + (0.6215*t) - (11.37*(w**0.16)) + (0.3965*t*(w**0.16))
```
Let's calculate the average wind chill for 12:00 on 15 December for the 20 year period from 1998 to 2019:
```
twc_mean = twc.mean(dim='time')
```
Now let's plot the average wind chill for this time over Europe:
```
# create the figure panel
fig = plt.figure(figsize=(10,10))
# create the map using the cartopy Orthographic projection centred on Europe
ax = plt.subplot(1,1,1, projection=ccrs.Orthographic(central_longitude=8., central_latitude=42.))
# add coastlines
ax.coastlines()
ax.gridlines(draw_labels=False, linewidth=1, color='gray', alpha=0.5, linestyle='--')
# provide a title
ax.set_title('Wind Chill Index 12:00, 15 Dec, 1998 to 2019')
# plot twc (data coordinates are lat/lon, hence the PlateCarree transform)
im = plt.pcolormesh(twc_mean.longitude, twc_mean.latitude,
twc_mean, cmap='viridis', transform=ccrs.PlateCarree())
# add colourbar
cbar = plt.colorbar(im)
cbar.set_label('Wind Chill Index')
```
Can you identify areas where frostbite may occur (see chart below)?
<img src='./img/Windchill_effect_en.svg' align='left' width='60%'></img>
RicHard-59, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons
## <a id='era5'></a>4. Exercise: Repeat process with ERA5 data and compare results
So far you have plotted wind chill using the UERRA regional reanalysis dataset, but how accurate is this plot? One way to assess a dataset is to compare it with an alternative independent one to see what differences there may be. An alternative to UERRA is the ERA5 reanalysis data that you used in the previous tutorials. Repeat the steps above with ERA5 and compare your results with those obtained using UERRA.
<hr>
| github_jupyter |
# Automatic music generation system (AMGS) - Pop genre
An affective rule-based generative music system that generates retro pop music.
```
import numpy as np
import pandas as pd
import mido
import scipy.io
import time
import statistics
from numpy.random import choice
from IPython.display import clear_output
import math
import json
# set up midi ports
# list the available MIDI output ports so the IAC bus names below can be verified
print(mido.get_output_names())
# NOTE(review): assumes two macOS IAC Driver buses are configured in the DAW — confirm setup
percussion = mido.open_output('IAC Driver Bus 1')
piano = mido.open_output('IAC Driver Bus 2')
# read in composed progressions
# loads the hand-composed progression/motive/range data used throughout the generator
with open('composed_progressions.txt') as json_file:
data = json.load(json_file)
```
# Scales, progressions and patterns
This section determines the scales, chord progressions, melodic patterns and rhythmic patterns used by the system.
```
import playerContainer
import progressionsContainer as progs
# initialize helper functions
player = playerContainer.PlayerContainer()
# set relative positions of notes in major and parallel minor scales
# MIDI note numbers for C major: 60 (C4), 62 (D), 64 (E), 65 (F), 67 (G), 69 (A), 71 (B)
tonic = 60
# semitone offsets from the tonic for the major scale...
majorScale = [tonic, tonic+2, tonic+4, tonic+5, tonic+7, tonic+9, tonic+11]
# ...and for the parallel natural minor scale (flattened 3rd, 6th, 7th)
minorScale = [tonic, tonic+2, tonic+3, tonic+5, tonic+7, tonic+8, tonic+10]
# test sound -> should hear note being played through audio workstation
ichannel = 1
ivelocity = 64
msg = mido.Message('note_on',channel=ichannel,note=tonic,velocity=ivelocity)
piano.send(msg)
# hold the test note for half a second before releasing it
time.sleep(0.50)
msg = mido.Message('note_off',channel=ichannel,note=tonic,velocity=ivelocity)
piano.send(msg)
# draft: percussion
# Ableton's drum pads are mapped by default to MIDI notes 36-51
ichannel = 10
ivelocity = 64
inote = 51
# NOTE(review): no matching note_off is sent for this percussion hit — confirm intended
msg = mido.Message('note_on',channel=ichannel,note=inote,velocity=ivelocity)
percussion.send(msg)
```
# Player (Main)
This section puts together all the functions and generates music based on the current arousal and valence values.
**Arousal-based params**
1. roughness. Lower roughness -> higher note density.
2. loudness
3. tempo. Minimum = 60bpm, maximum = 160bpm
**Valence-based params**
1. voicing
2. chord progression
```
# artificially determine arousal-valence trajectory
#np.array([0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2])
# NOTE: each assignment below overwrites the previous one, so only the
# final 4-bars-per-step ramp at the end of this cell is in effect
input_arousal = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
input_arousal = np.repeat(input_arousal, 8)
input_valence = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
input_valence = np.repeat(input_valence, 8)
# or randomly generate a trajectory
rng = np.random.default_rng()
# low arousal, low valence, 40-bar progression
input_arousal = rng.integers(50, size=40)/100
input_valence = rng.integers(50, size=40)/100
# high arousal, low valence, 40-bar progression
input_arousal = rng.integers(50, high=100, size=40)/100
input_valence = rng.integers(50, size=40)/100
# low arousal, high valence, 40-bar progression
input_arousal = rng.integers(50, size=40)/100
input_valence = rng.integers(50, high=100, size=40)/100
# high arousal, high valence, 40-bar progression
input_arousal = rng.integers(50, high=100, size=40)/100
input_valence = rng.integers(50, high=100, size=40)/100
# display the current arousal trajectory
input_arousal
# final choice: a 0.2 -> 0.9 ramp in both dimensions, 4 bars per step
input_arousal = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
input_arousal = np.repeat(input_arousal, 4)
input_valence = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
input_valence = np.repeat(input_valence, 4)
print(input_valence)
print(input_arousal)
```
* melody generator, harmony generator, bass generator
* implement voice leading logic
**POSSIBLE CHANGES**
* maybe we can do moving average tempo instead -> but is that sacrificing accuracy of emotion feedback?
```
# Main generation loop: walks the arousal/valence trajectory one bar at a
# time, mapping arousal to rhythm density / loudness / tempo and valence to
# chord choice / register, and streams the result out over MIDI.
# NOTE(review): `instruments` is assumed to be a list [piano, clarinet, strings]
# defined elsewhere in the notebook — confirm before running.

# initialize params: next_chord, minimal loudness, velocity, current_motive
next_chord = []
current_motive = 0
# initialize memory of previous harmony and melody notes (partially determines current harmony/melody notes)
prev_noteset, melody_note = [], []
# keep track of current bar
for bar in range(len(input_arousal)):
    # set arousal and valence, keep track of current bar in 8-bar progressions
    arousal = input_arousal[bar]
    valence = input_valence[bar]
    bar = bar % 8
    print("arousal: ", arousal, "---valence: ", valence, "---bar: ", bar)
    # set simple params: roughness, voicing, loudness, tempo
    roughness = 1 - arousal
    low_loudness = 40 + (arousal * 40)
    loudness = (round(arousal * 10)) / 10 * 40 + 60
    bpm = 60 + arousal * 100  # tempo: 60bpm at arousal 0 up to 160bpm at arousal 1
    volume = int(50 + (arousal * 30))
    # allocate note densities
    n_subdivisions = 8
    if arousal >= 0.75:
        arousal_cat = 'high'
    elif arousal >= 0.40:
        arousal_cat = 'moderate'
    else:
        arousal_cat = 'low'
    # activate1: composed rhythmic motive for this bar/arousal category
    # activate2: stochastic melody-density vector derived from roughness
    activate1 = [x for x in data['rhythmic_motives'] if x['bar'] == bar if x['arousal'] == arousal_cat][0]['motive']
    activate2 = player.setRoughness(n_subdivisions, roughness + 0.3)
    # change volume of instruments
    # instruments[0]: piano, instruments[1]: clarinet, instruments[2]: strings
    msg = mido.Message('control_change', channel=ichannel, control=7, value=volume)
    # FIX: the volume message was previously sent to instruments[1] twice and
    # never to instruments[2]; send it to each of the three instruments once
    instruments[0].send(msg), instruments[1].send(msg), instruments[2].send(msg)
    # select chord to be sounded
    if next_chord == []:
        # if next chord has not already been determined, then select randomly as usual
        chord, next_chord = progs.selectChord(data['progressions'], valence, bar)
    else:
        chord = next_chord.pop(0)
    # generate set of all valid notes within range (based on current valence)
    noteset = progs.createChord(chord, majorScale)
    n_notes = len(noteset)
    # valence is floored to one decimal so it matches the keys in data['range']
    midi_low = [x for x in data['range'] if x['valence'] == math.floor(valence * 10) / 10][0]['midi_low']
    midi_high = [x for x in data['range'] if x['valence'] == math.floor(valence * 10) / 10][0]['midi_high']
    range_noteset = player.setRange(midi_low, midi_high, noteset)
    print("chord: ", chord[3], "---notes in noteset: ", noteset, "----notes in full range: ", range_noteset)
    # initialize memory of previous chord
    if prev_noteset == []:
        prev_noteset = noteset
    # allocate probabilities of register for each note in chord.
    bright = player.setPitch(n_notes, valence)
    # determine if scale patterns should be drawn from major or minor scale
    if valence < 0.4:
        scale = player.setRange(midi_low, midi_high, minorScale)
    else:
        scale = player.setRange(midi_low, midi_high, majorScale)
    scale.sort()
    # do we want to add in a percussion instrument?
    # play bass (root note) -> want to try bassoon? instruments = [piano, clarinet, strings]
    current_velocity = np.random.randint(low_loudness, loudness)
    note = mido.Message('note_on', channel=1, note=min(noteset) - 12, velocity=current_velocity)
    instruments[2].send(note)
    # play "accompaniment"/harmony chords
    chord_voicing = progs.harmonyVL(prev_noteset, noteset, range_noteset)
    print("chord voicing: ", chord_voicing)
    for i in range(len(chord_voicing)):
        note = mido.Message('note_on',
                            channel=1,
                            note=int(chord_voicing[i] + bright[i] * 12),
                            velocity=current_velocity)
        instruments[0].send(note)
    # update value of prev_noteset
    prev_noteset = chord_voicing
    # plays "foreground" melody
    for beat in range(0, n_subdivisions):
        # determine which extensions to sound and create tone
        if (activate1[beat] == 1):
            note1 = int(noteset[0] + bright[0] * 12)
            msg = mido.Message('note_on',
                               channel=1,
                               note=note1,
                               velocity=current_velocity)
            instruments[0].send(msg)
        if (activate2[beat] == 1):
            # use melodic motives for voice leading logic
            current_motive = player.selectMotive(data['melodic_motives'], current_motive, arousal)
            melody_note = player.melodyVL_motives(current_motive, melody_note, noteset, scale)
            print('melody note is: ', melody_note)
            msg = mido.Message('note_on',
                               channel=1,
                               note=melody_note,
                               velocity=current_velocity + 10)
            instruments[0].send(msg)
        # length of pause determined by tempo.
        time.sleep((60 / bpm) / (n_subdivisions / 4))
# shut all down (the reset block was previously duplicated; once is enough)
instruments[0].reset()
instruments[1].reset()
instruments[2].reset()
#clear_output()
```
Three voices: bass, harmony and melody
* Bass - String ensemble, Harmony and melody - Piano
* Bass - String ensemble, Harmony and melody - Piano, melody - Clarinet (doubling)
* Bass - Clarinet, Harmony and melody - Piano
```
# pair each note of the last chord voicing with its bright*12 register offset for inspection
[x for x in zip(chord_voicing, bright*12)]
# show the last melody note that was generated
melody_note
```
# Archive
```
# probabilistic doubling: the higher the arousal, the more likely the message is sent
# NOTE(review): 'violin' is not defined anywhere in this notebook — archive snippet
if np.random.rand(1)[0] < arousal:
violin.send(msg)
# write control change (cc) message. Controller number 7 maps to volume.
volume = 80
msg = mido.Message('control_change',channel=ichannel,control=7,value=volume)
piano.send(msg)
# initial idea for melody voice leading - pick closest note
# note how this doesn't depend on arousal or valence at all, basically only controls musicality
# initial idea for melody voice leading - pick closest note
# note how this doesn't depend on arousal or valence at all, basically only controls musicality
def melodyVL_closestNote(melody_note, noteset, range_noteset):
    """Pick the next melody note by nearest-neighbour voice leading.

    On the first call (melody_note == []) the melody is seeded on a random
    upper chord tone; afterwards the closest available pitch to the previous
    note is chosen, so the melody tends to stay around the same register.

    melody_note: previous melody MIDI note, or [] on the first call.
    noteset: chord tones for the current bar.
    range_noteset: the chord tones replicated across the playable register.
    Returns the chosen MIDI note (int).
    """
    # FIX: was np.random.randint(2, n_notes), which read a module-level
    # n_notes set elsewhere; use the noteset's own length so the function
    # does not crash when called before the main loop has run
    rand_idx = np.random.randint(2, len(noteset))
    # randomly initialize melody
    if melody_note == []:
        # NOTE(review): `bright` is still read from the enclosing scope — confirm
        melody_note = int(noteset[rand_idx] + bright[rand_idx] * 12)
    else:
        # move to the pitch closest to the previous melody note
        melody_note = min(range_noteset, key=lambda x: abs(x - melody_note))
    return melody_note
# Archived earlier version of the generation loop (piano-only, 4 subdivisions,
# no control of the harmony's voice leading). Kept for reference.

# initialize params: next_chord, minimal loudness, standard velocity, current_motive
next_chord = []
low_loudness = 50
default_velocity = 80
current_motive = 0
# initialize memory of previous harmony and melody notes (partially determines current harmony/melody notes)
prev_chord, melody_note = [], []
# keep track of current bar
for bar in range(len(input_arousal)):
    # set arousal and valence, keep track of current bar in 8-bar progressions
    arousal = input_arousal[bar]
    valence = input_valence[bar]
    bar = bar % 8
    print("arousal: ", arousal, "---valence: ", valence, "---bar: ", bar)
    # set simple params: roughness, voicing, loudness, tempo
    roughness = 1 - arousal
    voicing = valence
    loudness = (round(arousal * 10)) / 10 * 40 + 60
    bpm = 60 + arousal * 100
    # first vector (activate1) determines density of background chords
    # second vector (activate2) determines density of melody played by piano
    # TBC: n_subdivisions should eventually be determined by rhythmic pattern
    n_subdivisions = 4
    activate1 = player.setRoughness(n_subdivisions, roughness + 0.4)
    activate2 = player.setRoughness(n_subdivisions, roughness + 0.2)
    # select chord to be sounded
    if next_chord == []:
        # if next chord has not already been determined, then select randomly as usual
        chord, next_chord = progs.selectChord(data['progressions'], valence, bar)
    else:
        chord = next_chord.pop(0)
    # generate set of all valid notes within range (based on current valence)
    noteset = progs.createChord(chord, majorScale)
    n_notes = len(noteset)
    midi_low = [x for x in data['range'] if x['valence'] == valence][0]['midi_low']
    midi_high = [x for x in data['range'] if x['valence'] == valence][0]['midi_high']
    range_noteset = player.setRange(midi_low, midi_high, noteset)
    print("chord: ", chord[3], "---notes in noteset: ", noteset, "----notes in full range: ", range_noteset)
    # allocate probabilities of register for each note in chord.
    bright = player.setPitch(n_notes, voicing)
    # determine if scale patterns should be drawn from major or minor scale
    if valence < 0.4:
        scale = player.setRange(midi_low, midi_high, minorScale)
    else:
        scale = player.setRange(midi_low, midi_high, majorScale)
    scale.sort()
    # play "accompaniment"/harmony chords
    # TO CHANGE: if all notes in noteset above C4 octave, transpose whole noteset down an octave.
    # Create tone for each note in chord. Serves as the harmony of the generated music
    for n in noteset:
        note = mido.Message('note_on',
                            channel=1,
                            #note=int(noteset[i]+bright[i]*12),
                            note=n,
                            velocity=np.random.randint(low_loudness, loudness))
        piano.send(note)
    # NEW: added in bass (taking lowest value in noteset and transpose down 1-2 octaves)
    # this should probably be played by cello, not piano
    note = mido.Message('note_on', channel=1, note=min(noteset) - 24, velocity=default_velocity)
    piano.send(note)
    # plays "foreground" melody [0, 0, 0, 0] [0, 1, 1, 0]
    for beat in range(0, n_subdivisions):
        # determine which extensions to sound and create tone
        #activate1 = player.setRoughness(n_subdivisions, roughness) -> moving this here lets us change subdivision every beat
        # alternatively: determine downbeat probability separately.
        if (activate1[beat] == 1):
            note1 = int(noteset[0] + bright[0] * 12)
            msg = mido.Message('note_on',
                               channel=1,
                               note=note1,
                               velocity=np.random.randint(low_loudness, loudness))
            piano.send(msg)
            # add note_off message
        if (activate2[beat] == 1):
            # use "closest note" voice leading logic
            #melody_note = melodyVL_closestNote(melody_note)
            # use melodic motives for voice leading logic
            # FIX: these helpers live on the player container (as in the active
            # loop); the bare names were undefined here
            current_motive = player.selectMotive(data['melodic_motives'], current_motive, arousal)
            melody_note = player.melodyVL_motives(current_motive, melody_note, noteset, scale)
            print('melody note is: ', melody_note)
            msg = mido.Message('note_on',
                               channel=1,
                               note=melody_note,
                               velocity=np.random.randint(low_loudness, loudness))
            piano.send(msg)
        # length of pause determined by tempo. This formula works when smallest subdivision = eighth notes
        time.sleep(0.50 / (bpm / 60))
        #piano.send(mido.Message('note_off', channel=1, note=note1, velocity=64))
        #piano.send(mido.Message('note_off', channel=1, note=note2, velocity=64))
# shut all down
# see if you can change the release param
piano.reset()
# generate scale for maximum range of player (C1-C6, MIDI note numbers 24-84)
range_majorScale = player.setRange(24, 84, majorScale)
range_majorScale.sort()
range_minorScale = player.setRange(24, 84, minorScale)
range_minorScale.sort()
# position of middle C (60) within the full-range major scale
range_majorScale.index(60)
# scratch: check last-element indexing
temp = [1, 2, 3, 4]
temp[-1]
# look up the motive-weight vector for low arousal with current motive 'CT'
[x for x in data['melodic_motives'] if x['arousal']=='low' if x['current_motive']=='CT'][0]['motive_weights']
# scratch: weighted random draw of a motive index using numpy's choice
motives = [1, -1, 0, 'CT']
motive_weights=[0.15, 0.15, 0.3, 0.4]
choice(len(motives), 1, p=motive_weights)[0]
def inversion(noteset, inversion):
    """Raise the chord (noteset) to a higher inversion, in place.

    The lowest `inversion` notes are each lifted by whole octaves until
    they are no longer below the chord's top note. The list is sorted
    first, mutated in place, and returned.
    """
    noteset.sort()
    idx = 0
    while idx < inversion:
        top = noteset[-1]
        # lift note idx octave by octave until it reaches the top note
        while noteset[idx] < top:
            noteset[idx] += 12
        idx += 1
    return noteset
def decrease_inversion(noteset, inversion):
    """Lower the chord (noteset) to a lower inversion, in place.

    The highest `inversion` notes are each dropped by whole octaves until
    they are no longer above the chord's bottom note. The list is sorted
    first, mutated in place, and returned.
    """
    noteset.sort()
    idx = 0
    while idx < inversion:
        bottom = noteset[0]
        # drop the idx-th note from the top octave by octave
        while noteset[-1 - idx] > bottom:
            noteset[-1 - idx] -= 12
        idx += 1
    return noteset
# implement voice leading logic for bass
temp = 61
print(range_noteset)
# this chooses the closest available note
min(range_noteset, key=lambda x:abs(x-temp))
# I think another possibility is to min. total distance moved for the harmony chords (which is more human)
print(noteset)
# NOTE(review): setRange is undefined at top level (elsewhere it is called as
# player.setRange) and this argument list differs from other call sites — archive scratch
setRange(data['range'], 0.1, noteset)
```
| github_jupyter |
```
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Getting started: Training and prediction with Keras in AI Platform
<img src="https://storage.googleapis.com/cloud-samples-data/ai-platform/census/keras-tensorflow-cmle.png" alt="Keras, TensorFlow, and AI Platform logos" width="300px">
<table align="left">
<td>
<a href="https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-keras">
<img src="https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png"
alt="Google Cloud logo" width="32px"> Read on cloud.google.com
</a>
</td>
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/cloudml-samples/blob/main/notebooks/tensorflow/getting-started-keras.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/cloudml-samples/blob/main/notebooks/tensorflow/getting-started-keras.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
## Overview
This tutorial shows how to train a neural network on AI Platform
using the Keras sequential API and how to serve predictions from that
model.
Keras is a high-level API for building and training deep learning models.
[tf.keras](https://www.tensorflow.org/guide/keras) is TensorFlow’s
implementation of this API.
The first two parts of the tutorial walk through training a model on Cloud
AI Platform using prewritten Keras code, deploying the trained model to
AI Platform, and serving online predictions from the deployed model.
The last part of the tutorial digs into the training code used for this model and shows how to ensure it's compatible with AI Platform. To learn more about building
machine learning models in Keras more generally, read [TensorFlow's Keras
tutorials](https://www.tensorflow.org/tutorials/keras).
### Dataset
This tutorial uses the [United States Census Income
Dataset](https://archive.ics.uci.edu/ml/datasets/census+income) provided by the
[UC Irvine Machine Learning
Repository](https://archive.ics.uci.edu/ml/index.php). This dataset contains
information about people from a 1994 Census database, including age, education,
marital status, occupation, and whether they make more than $50,000 a year.
### Objective
The goal is to train a deep neural network (DNN) using Keras that predicts
whether a person makes more than $50,000 a year (target label) based on other
Census information about the person (features).
This tutorial focuses more on using this model with AI Platform than on
the design of the model itself. However, it's always important to think about
potential problems and unintended consequences when building machine learning
systems. See the [Machine Learning Crash Course exercise about
fairness](https://developers.google.com/machine-learning/crash-course/fairness/programming-exercise)
to learn about sources of bias in the Census dataset, as well as machine
learning fairness more generally.
### Costs
This tutorial uses billable components of Google Cloud Platform (GCP):
* AI Platform
* Cloud Storage
Learn about [AI Platform
pricing](https://cloud.google.com/ml-engine/docs/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Before you begin
You must do several things before you can train and deploy a model in
AI Platform:
* Set up your local development environment.
* Set up a GCP project with billing and the necessary
APIs enabled.
* Authenticate your GCP account in this notebook.
* Create a Cloud Storage bucket to store your training package and your
trained model.
### Set up your local development environment
**If you are using Colab or AI Platform Notebooks**, your environment already meets
all the requirements to run this notebook. You can skip this step.
**Otherwise**, make sure your environment meets this notebook's requirements.
You need the following:
* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3
The Google Cloud guide to [Setting up a Python development
environment](https://cloud.google.com/python/setup) and the [Jupyter
installation guide](https://jupyter.org/install) provide detailed instructions
for meeting these requirements. The following steps provide a condensed set of
instructions:
1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
2. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
3. [Install
virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
and create a virtual environment that uses Python 3.
4. Activate that environment and run `pip install jupyter` in a shell to install
Jupyter.
5. Run `jupyter notebook` in a shell to launch Jupyter.
6. Open this notebook in the Jupyter Notebook Dashboard.
### Set up your GCP project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a GCP project.](https://console.cloud.google.com/cloud-resource-manager)
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the AI Platform ("Cloud Machine Learning Engine") and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
# GCP project that all subsequent gcloud/gsutil commands will act on
PROJECT_ID = "<your-project-id>" #@param {type:"string"}
# point the Cloud SDK at that project
! gcloud config set project $PROJECT_ID
```
### Authenticate your GCP account
**If you are using AI Platform Notebooks**, your environment is already
authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
1. In the GCP Console, go to the [**Create service account key**
page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
2. From the **Service account** drop-down list, select **New service account**.
3. In the **Service account name** field, enter a name.
4. From the **Role** drop-down list, select
**Machine Learning Engine > AI Platform Admin** and
**Storage > Storage Object Admin**.
5. Click *Create*. A JSON file that contains your key downloads to your
local environment.
6. Enter the path to your service account key as the
`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
import sys
# (On AI Platform Notebooks the environment is already authenticated,
# so this cell is effectively a no-op there.)
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a training job using the Cloud SDK, you upload a Python package
containing your training code to a Cloud Storage bucket. AI Platform runs
the code from this package. In this tutorial, AI Platform also saves the
trained model that results from your job in the same bucket. You can then
create an AI Platform model version based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
You may also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Make sure to [choose a region where Cloud
AI Platform services are
available](https://cloud.google.com/ml-engine/docs/tensorflow/regions).
```
# Cloud Storage bucket used for the training package and the trained model;
# the name must be unique across all Cloud Storage buckets
BUCKET_NAME = "<your-bucket-name>" #@param {type:"string"}
# region used for training and prediction operations in the rest of the notebook
REGION = "us-central1" #@param {type:"string"}
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION gs://$BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al gs://$BUCKET_NAME
```
## Part 1. Quickstart for training in AI Platform
This section of the tutorial walks you through submitting a training job to Cloud
AI Platform. This job runs sample code that uses Keras to train a deep neural
network on the United States Census data. It outputs the trained model as a
[TensorFlow SavedModel
directory](https://www.tensorflow.org/guide/saved_model#save_and_restore_models)
in your Cloud Storage bucket.
### Get training code and dependencies
First, download the training code and change the notebook's working directory:
```
# Clone the repository of AI Platform samples
! git clone --depth 1 https://github.com/GoogleCloudPlatform/cloudml-samples
# Set the working directory to the sample code directory
%cd cloudml-samples/census/tf-keras
```
Notice that the training code is structured as a Python package in the
`trainer/` subdirectory:
```
# `ls` shows the working directory's contents. The `p` flag adds trailing
# slashes to subdirectory names. The `R` flag lists subdirectories recursively.
! ls -pR
```
Run the following cell to install Python dependencies needed to train the model locally. When you run the training job in AI Platform,
dependencies are preinstalled based on the [runtime
version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list)
you choose.
```
! pip install -r requirements.txt
```
### Train your model locally
Before training on AI Platform, train the job locally to verify the file
structure and packaging is correct.
For a complex or resource-intensive job, you
may want to train locally on a small sample of your dataset to verify your code.
Then you can run the job on AI Platform to train on the whole dataset.
This sample runs a relatively quick job on a small dataset, so the local
training and the AI Platform job run the same code on the same data.
Run the following cell to train a model locally:
```
# Explicitly tell `gcloud ai-platform local train` to use Python 3
! gcloud config set ml_engine/local_python $(which python3)
# This is similar to `python -m trainer.task --job-dir local-training-output`
# but it better replicates the AI Platform environment, especially for
# distributed training (not applicable here).
! gcloud ai-platform local train \
--package-path trainer \
--module-name trainer.task \
--job-dir local-training-output
```
### Train your model using AI Platform
Next, submit a training job to AI Platform. This runs the training module
in the cloud and exports the trained model to Cloud Storage.
First, give your training job a name and choose a directory within your Cloud
Storage bucket for saving intermediate and output files:
```
# name of the AI Platform training job
JOB_NAME = 'my_first_keras_job'
# Cloud Storage prefix where the job's intermediate and output files are written
JOB_DIR = 'gs://' + BUCKET_NAME + '/keras-job-dir'
```
Run the following command to package the `trainer/` directory, upload it to the
specified `--job-dir`, and instruct AI Platform to run the
`trainer.task` module from that package.
The `--stream-logs` flag lets you view training logs in the cell below. You can
also see logs and other job details in the GCP Console.
### Hyperparameter tuning
You can optionally perform hyperparameter tuning by using the included
`hptuning_config.yaml` configuration file. This file tells AI Platform to tune the batch size and learning rate for training over multiple trials to maximize accuracy.
In this example, the training code uses a [TensorBoard
callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard),
which [creates TensorFlow `Summary`
`Event`s](https://www.tensorflow.org/api_docs/python/tf/summary/FileWriter#add_summary)
during training. AI Platform uses these events to track the metric you want to
optimize. Learn more about [hyperparameter tuning in
AI Platform Training](https://cloud.google.com/ml-engine/docs/tensorflow/hyperparameter-tuning-overview).
```
! gcloud ai-platform jobs submit training $JOB_NAME \
--package-path trainer/ \
--module-name trainer.task \
--region $REGION \
--python-version 3.7 \
--runtime-version 1.15 \
--job-dir $JOB_DIR \
--stream-logs
```
## Part 2. Quickstart for online predictions in AI Platform
This section shows how to use AI Platform and your trained model from Part 1
to predict a person's income bracket from other Census information about them.
### Create model and version resources in AI Platform
To serve online predictions using the model you trained and exported in Part 1,
create a *model* resource in AI Platform and a *version* resource
within it. The version resource is what actually uses your trained model to
serve predictions. This structure lets you adjust and retrain your model many times and
organize all the versions together in AI Platform. Learn more about [models
and
versions](https://cloud.google.com/ai-platform/prediction/docs/projects-models-versions-jobs).
While you specify `--region $REGION` in gcloud commands, you will use regional endpoint. You can also specify `--region global` to use global endpoint. Please note that you must create versions using the same endpoint as the one you use to create the model. Learn more about available [regional endpoints](https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints).
First, name and create the model resource:
```
MODEL_NAME = "my_first_keras_model"
! gcloud ai-platform models create $MODEL_NAME \
--region $REGION
```
Next, create the model version. The training job from Part 1 exported a timestamped
[TensorFlow SavedModel
directory](https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory)
to your Cloud Storage bucket. AI Platform uses this directory to create a
model version. Learn more about [SavedModel and
AI Platform](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models).
You may be able to find the path to this directory in your training job's logs.
Look for a line like:
```
Model exported to: gs://<your-bucket-name>/keras-job-dir/keras_export/1545439782
```
Execute the following command to identify your SavedModel directory and use it to create a model version resource:
```
MODEL_VERSION = "v1"
# Get a list of directories in the `keras_export` parent directory
KERAS_EXPORT_DIRS = ! gsutil ls $JOB_DIR/keras_export/
# Update the directory as needed, in case you've trained
# multiple times
SAVED_MODEL_PATH = keras_export
# Create model version based on that SavedModel directory
! gcloud ai-platform versions create $MODEL_VERSION \
--region $REGION \
--model $MODEL_NAME \
--runtime-version 1.15 \
--python-version 3.7 \
--framework tensorflow \
--origin $SAVED_MODEL_PATH
```
### Prepare input for prediction
To receive valid and useful predictions, you must preprocess input for prediction in the same way that training data was preprocessed. In a production
system, you may want to create a preprocessing pipeline that can be used identically at training time and prediction time.
For this exercise, use the training package's data-loading code to select a random sample from the evaluation data. This data is in the form that was used to evaluate accuracy after each epoch of training, so it can be used to send test predictions without further preprocessing:
```
from trainer import util
# reuse the training package's data loading so the prediction input is
# preprocessed exactly like the training data was
_, _, eval_x, eval_y = util.load_data()
# take a random sample of 20 evaluation rows and keep their true labels
prediction_input = eval_x.sample(20)
prediction_targets = eval_y[prediction_input.index]
# display the sampled (already preprocessed) features
prediction_input
```
Notice that categorical fields, like `occupation`, have already been converted to integers (with the same mapping that was used for training). Numerical fields, like `age`, have been scaled to a
[z-score](https://developers.google.com/machine-learning/crash-course/representation/cleaning-data). Some fields have been dropped from the original
data. Compare the prediction input with the raw data for the same examples:
```
import pandas as pd
# download the raw evaluation CSV so it can be compared with the preprocessed input
_, eval_file_path = util.download(util.DATA_DIR)
# the file has no header row; '?' marks missing values in this dataset
raw_eval_data = pd.read_csv(eval_file_path,
names=util._CSV_COLUMNS,
na_values='?')
# show the raw rows corresponding to the sampled prediction input
raw_eval_data.iloc[prediction_input.index]
```
Export the prediction input to a newline-delimited JSON file:
```
import json
with open('prediction_input.json', 'w') as json_file:
for row in prediction_input.values.tolist():
json.dump(row, json_file)
json_file.write('\n')
! cat prediction_input.json
```
The `gcloud` command-line tool accepts newline-delimited JSON for online
prediction, and this particular Keras model expects a flat list of
numbers for each input example.
AI Platform requires a different format when you make online prediction requests to the REST API without using the `gcloud` tool. The way you structure
your model may also change how you must format data for prediction. Learn more
about [formatting data for online
prediction](https://cloud.google.com/ml-engine/docs/tensorflow/prediction-overview#prediction_input_data).
### Submit the online prediction request
Use `gcloud` to submit your online prediction request.
```
! gcloud ai-platform predict \
--region $REGION \
--model $MODEL_NAME \
--version $MODEL_VERSION \
--json-instances prediction_input.json
```
Since the model's last layer uses a [sigmoid function](https://developers.google.com/machine-learning/glossary/#sigmoid_function) for its activation, outputs between 0 and 0.5 represent negative predictions ("<=50K") and outputs between 0.5 and 1 represent positive ones (">50K").
Do the predicted income brackets match the actual ones? Run the following cell
to see the true labels.
```
prediction_targets
```
## Part 3. Developing the Keras model from scratch
At this point, you have trained a machine learning model on AI Platform, deployed the trained model as a version resource on AI Platform, and received online predictions from the deployment. The next section walks through recreating the Keras code used to train your model. It covers the following parts of developing a machine learning model for use with AI Platform:
* Downloading and preprocessing data
* Designing and training the model
* Visualizing training and exporting the trained model
While this section provides more detailed insight to the tasks completed in previous parts, to learn more about using `tf.keras`, read [TensorFlow's guide to Keras](https://www.tensorflow.org/tutorials/keras). To learn more about structuring code as a training package for AI Platform, read [Packaging a training application](https://cloud.google.com/ml-engine/docs/tensorflow/packaging-trainer) and reference the [complete training code](https://github.com/GoogleCloudPlatform/cloudml-samples/tree/master/census/tf-keras), which is structured as a Python package.
### Import libraries and define constants
First, import Python libraries required for training:
```
import os
from six.moves import urllib
import tempfile
import numpy as np
import pandas as pd
import tensorflow as tf
# Examine software versions
print(__import__('sys').version)
print(tf.__version__)
print(tf.keras.__version__)
```
Then, define some useful constants:
* Information for downloading training and evaluation data
* Information required for Pandas to interpret the data and convert categorical fields into numeric features
* Hyperparameters for training, such as learning rate and batch size
```
### For downloading data ###
# Storage directory
DATA_DIR = os.path.join(tempfile.gettempdir(), 'census_data')
# Download options.
DATA_URL = 'https://storage.googleapis.com/cloud-samples-data/ai-platform' \
'/census/data'
TRAINING_FILE = 'adult.data.csv'
EVAL_FILE = 'adult.test.csv'
TRAINING_URL = '%s/%s' % (DATA_URL, TRAINING_FILE)
EVAL_URL = '%s/%s' % (DATA_URL, EVAL_FILE)
### For interpreting data ###
# These are the features in the dataset.
# Dataset information: https://archive.ics.uci.edu/ml/datasets/census+income
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_CATEGORICAL_TYPES = {
'workclass': pd.api.types.CategoricalDtype(categories=[
'Federal-gov', 'Local-gov', 'Never-worked', 'Private', 'Self-emp-inc',
'Self-emp-not-inc', 'State-gov', 'Without-pay'
]),
'marital_status': pd.api.types.CategoricalDtype(categories=[
'Divorced', 'Married-AF-spouse', 'Married-civ-spouse',
'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed'
]),
'occupation': pd.api.types.CategoricalDtype([
'Adm-clerical', 'Armed-Forces', 'Craft-repair', 'Exec-managerial',
'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct',
'Other-service', 'Priv-house-serv', 'Prof-specialty', 'Protective-serv',
'Sales', 'Tech-support', 'Transport-moving'
]),
'relationship': pd.api.types.CategoricalDtype(categories=[
'Husband', 'Not-in-family', 'Other-relative', 'Own-child', 'Unmarried',
'Wife'
]),
'race': pd.api.types.CategoricalDtype(categories=[
'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other', 'White'
]),
'native_country': pd.api.types.CategoricalDtype(categories=[
'Cambodia', 'Canada', 'China', 'Columbia', 'Cuba', 'Dominican-Republic',
'Ecuador', 'El-Salvador', 'England', 'France', 'Germany', 'Greece',
'Guatemala', 'Haiti', 'Holand-Netherlands', 'Honduras', 'Hong', 'Hungary',
'India', 'Iran', 'Ireland', 'Italy', 'Jamaica', 'Japan', 'Laos', 'Mexico',
'Nicaragua', 'Outlying-US(Guam-USVI-etc)', 'Peru', 'Philippines', 'Poland',
'Portugal', 'Puerto-Rico', 'Scotland', 'South', 'Taiwan', 'Thailand',
'Trinadad&Tobago', 'United-States', 'Vietnam', 'Yugoslavia'
]),
'income_bracket': pd.api.types.CategoricalDtype(categories=[
'<=50K', '>50K'
])
}
# This is the label (target) we want to predict.
_LABEL_COLUMN = 'income_bracket'
### Hyperparameters for training ###
# This the training batch size
BATCH_SIZE = 128
# This is the number of epochs (passes over the full training data)
NUM_EPOCHS = 20
# Define learning rate.
LEARNING_RATE = .01
```
### Download and preprocess data
#### Download the data
Next, define functions to download training and evaluation data. These functions also fix minor irregularities in the data's formatting.
```
def _download_and_clean_file(filename, url):
    """Download `url` and rewrite it as a well-formed CSV at `filename`.

    The raw census files may use spaces after the comma delimiters
    (non-standard) and contain rows that are not well-formed examples
    (blank lines, comma-less lines, trailing periods). This strips those
    problems out.

    Args:
        filename: filename to save url to
        url: URL of resource to download
    """
    temp_file, _ = urllib.request.urlretrieve(url)
    with tf.io.gfile.GFile(temp_file, 'r') as raw:
        with tf.io.gfile.GFile(filename, 'w') as cleaned:
            for raw_line in raw:
                row = raw_line.strip().replace(', ', ',')
                # Skip blank lines and lines that carry no fields.
                if not row or ',' not in row:
                    continue
                # Some rows end with a stray period; drop it.
                if row.endswith('.'):
                    row = row[:-1]
                cleaned.write(row + '\n')
    tf.io.gfile.remove(temp_file)
def download(data_dir):
    """Download the census data into `data_dir` unless already present.

    Args:
        data_dir: directory where we will access/save the census data

    Returns:
        Tuple of (training_file_path, eval_file_path).
    """
    tf.io.gfile.makedirs(data_dir)
    paths = []
    for basename, url in ((TRAINING_FILE, TRAINING_URL),
                          (EVAL_FILE, EVAL_URL)):
        path = os.path.join(data_dir, basename)
        # Only fetch files that are not already on disk.
        if not tf.io.gfile.exists(path):
            _download_and_clean_file(path, url)
        paths.append(path)
    return tuple(paths)
```
Use those functions to download the data for training and verify that you have CSV files for training and evaluation:
```
training_file_path, eval_file_path = download(DATA_DIR)
# You should see 2 files: adult.data.csv and adult.test.csv
!ls -l $DATA_DIR
```
Next, load these files using Pandas and examine the data:
```
# This census data uses the value '?' for fields (column) that are missing data.
# We use na_values to find ? and set it to NaN values.
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html
train_df = pd.read_csv(training_file_path, names=_CSV_COLUMNS, na_values='?')
eval_df = pd.read_csv(eval_file_path, names=_CSV_COLUMNS, na_values='?')
# Here's what the data looks like before we preprocess the data.
train_df.head()
```
#### Preprocess the data
The first preprocessing step removes certain features from the data and
converts categorical features to numerical values for use with Keras.
Learn more about [feature engineering](https://developers.google.com/machine-learning/crash-course/representation/feature-engineering) and [bias in data](https://developers.google.com/machine-learning/crash-course/fairness/types-of-bias).
```
UNUSED_COLUMNS = ['fnlwgt', 'education', 'gender']
def preprocess(dataframe):
"""Converts categorical features to numeric. Removes unused columns.
Args:
dataframe: Pandas dataframe with raw data
Returns:
Dataframe with preprocessed data
"""
dataframe = dataframe.drop(columns=UNUSED_COLUMNS)
# Convert integer valued (numeric) columns to floating point
numeric_columns = dataframe.select_dtypes(['int64']).columns
dataframe[numeric_columns] = dataframe[numeric_columns].astype('float32')
# Convert categorical columns to numeric
cat_columns = dataframe.select_dtypes(['object']).columns
dataframe[cat_columns] = dataframe[cat_columns].apply(lambda x: x.astype(
_CATEGORICAL_TYPES[x.name]))
dataframe[cat_columns] = dataframe[cat_columns].apply(lambda x: x.cat.codes)
return dataframe
prepped_train_df = preprocess(train_df)
prepped_eval_df = preprocess(eval_df)
```
Run the following cell to see how preprocessing changed the data. Notice in particular that `income_bracket`, the label that you're training the model to predict, has changed from `<=50K` and `>50K` to `0` and `1`:
```
prepped_train_df.head()
```
Next, separate the data into features ("x") and labels ("y"), and reshape the label arrays into a format for use with `tf.data.Dataset` later:
```
# Split train and test data with labels.
# The pop() method will extract (copy) and remove the label column from the dataframe
train_x, train_y = prepped_train_df, prepped_train_df.pop(_LABEL_COLUMN)
eval_x, eval_y = prepped_eval_df, prepped_eval_df.pop(_LABEL_COLUMN)
# Reshape label columns for use with tf.data.Dataset
train_y = np.asarray(train_y).astype('float32').reshape((-1, 1))
eval_y = np.asarray(eval_y).astype('float32').reshape((-1, 1))
```
Scaling training data so each numerical feature column has a mean of 0 and a standard deviation of 1 [can improve your model](https://developers.google.com/machine-learning/crash-course/representation/cleaning-data).
In a production system, you may want to save the means and standard deviations from your training set and use them to perform an identical transformation on test data at prediction time. For convenience in this exercise, temporarily combine the training and evaluation data to scale all of them:
```
def standardize(dataframe):
    """Scale numerical (float32) columns to z-scores.

    The mean of each float32 column becomes 0 and its standard deviation
    becomes 1 (pandas default ddof=1). This can help the model converge
    during training. Categorical code columns (non-float32) are left
    untouched.

    Args:
        dataframe: Pandas dataframe

    Returns:
        Input dataframe with the numerical columns scaled to z-scores
    """
    # select_dtypes picks exactly the float32 feature columns, replacing
    # the original manual zip over dtype strings.
    for column in dataframe.select_dtypes('float32').columns:
        dataframe[column] -= dataframe[column].mean()
        dataframe[column] /= dataframe[column].std()
    return dataframe
# Join train_x and eval_x to normalize on overall means and standard
# deviations. Then separate them again.
all_x = pd.concat([train_x, eval_x], keys=['train', 'eval'])
all_x = standardize(all_x)
train_x, eval_x = all_x.xs('train'), all_x.xs('eval')
```
Finally, examine some of your fully preprocessed training data:
```
# Verify dataset features
# Note how only the numeric fields (not categorical) have been standardized
train_x.head()
```
### Design and train the model
#### Create training and validation datasets
Create an input function to convert features and labels into a
[`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets) for training or evaluation:
```
def input_fn(features, labels, shuffle, num_epochs, batch_size):
    """Build a tf.data.Dataset feeding `features`/`labels` to the model.

    Args:
        features: numpy array of features used for training or inference
        labels: numpy array of labels for each example, or None for
            inference-only input
        shuffle: boolean for whether to shuffle the data or not (set True
            for training, False for evaluation)
        num_epochs: number of epochs to provide the data for
        batch_size: batch size for training

    Returns:
        A tf.data.Dataset that can provide data to the Keras model for
        training or evaluation
    """
    inputs = features if labels is None else (features, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=len(features))
    # Repeat after shuffling, rather than before, to prevent separate
    # epochs from blending together.
    return dataset.repeat(num_epochs).batch(batch_size)
```
Next, create these training and evaluation datasets. Use the `NUM_EPOCHS`
and `BATCH_SIZE` hyperparameters defined previously to define how the training
dataset provides examples to the model during training. Set up the validation
dataset to provide all its examples in one batch, for a single validation step
at the end of each training epoch.
```
# Pass a numpy array by using DataFrame.values
training_dataset = input_fn(features=train_x.values,
labels=train_y,
shuffle=True,
num_epochs=NUM_EPOCHS,
batch_size=BATCH_SIZE)
num_eval_examples = eval_x.shape[0]
# Pass a numpy array by using DataFrame.values
validation_dataset = input_fn(features=eval_x.values,
labels=eval_y,
shuffle=False,
num_epochs=NUM_EPOCHS,
batch_size=num_eval_examples)
```
#### Design a Keras Model
Design your neural network using the [Keras Sequential API](https://www.tensorflow.org/guide/keras#sequential_model).
This deep neural network (DNN) has several hidden layers, and the last layer uses a sigmoid activation function to output a value between 0 and 1:
* The input layer has 100 units using the ReLU activation function.
* The first hidden layer has 75 units using the ReLU activation function.
* The second hidden layer has 50 units using the ReLU activation function.
* The third hidden layer has 25 units using the ReLU activation function.
* The output layer has 1 unit using a sigmoid activation function.
* The optimizer uses the binary cross-entropy loss function, which is appropriate for a binary classification problem like this one.
Feel free to change these layers to try to improve the model:
```
def create_keras_model(input_dim, learning_rate):
    """Creates Keras Model for Binary Classification.

    Architecture: four ReLU layers (100/75/50/25 units) followed by a
    single sigmoid output unit, optimized with RMSprop on binary
    cross-entropy.

    Args:
        input_dim: How many features the input has
        learning_rate: Learning rate for training

    Returns:
        The compiled Keras model (still needs to be trained)
    """
    Dense = tf.keras.layers.Dense
    model = tf.keras.Sequential(
        [
            Dense(100, activation=tf.nn.relu, kernel_initializer='uniform',
                  input_shape=(input_dim,)),
            Dense(75, activation=tf.nn.relu),
            Dense(50, activation=tf.nn.relu),
            Dense(25, activation=tf.nn.relu),
            Dense(1, activation=tf.nn.sigmoid)
        ])
    # Custom Optimizer:
    # https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/RMSprop
    # `learning_rate` is the supported keyword; the old `lr` alias is
    # deprecated and removed in newer Keras releases.
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
    # Compile Keras model
    model.compile(
        loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
```
Next, create the Keras model object and examine its structure:
```
num_train_examples, input_dim = train_x.shape
print('Number of features: {}'.format(input_dim))
print('Number of examples: {}'.format(num_train_examples))
keras_model = create_keras_model(
input_dim=input_dim,
learning_rate=LEARNING_RATE)
# Take a detailed look inside the model
keras_model.summary()
```
#### Train and evaluate the model
Define a learning rate decay to encourage model parameters to make smaller
changes as training goes on:
```
# Setup Learning Rate decay.
lr_decay_cb = tf.keras.callbacks.LearningRateScheduler(
lambda epoch: LEARNING_RATE + 0.02 * (0.5 ** (1 + epoch)),
verbose=True)
# Setup TensorBoard callback.
tensorboard_cb = tf.keras.callbacks.TensorBoard(
os.path.join(JOB_DIR, 'keras_tensorboard'),
histogram_freq=1)
```
Finally, train the model. Provide the appropriate `steps_per_epoch` for the
model to train on the entire training dataset (with `BATCH_SIZE` examples per step) during each epoch. And instruct the model to calculate validation
accuracy with one big validation batch at the end of each epoch.
```
history = keras_model.fit(training_dataset,
epochs=NUM_EPOCHS,
steps_per_epoch=int(num_train_examples/BATCH_SIZE),
validation_data=validation_dataset,
validation_steps=1,
callbacks=[lr_decay_cb, tensorboard_cb],
verbose=1)
```
### Visualize training and export the trained model
#### Visualize training
Import `matplotlib` to visualize how the model learned over the training period.
```
! pip install matplotlib
from matplotlib import pyplot as plt
%matplotlib inline
```
Plot the model's loss (binary cross-entropy) and accuracy, as measured at the
end of each training epoch:
```
# Visualize History for Loss.
plt.title('Keras model loss')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='upper right')
plt.show()
# Visualize History for Accuracy.
plt.title('Keras model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['training', 'validation'], loc='lower right')
plt.show()
```
Over time, loss decreases and accuracy increases. But do they converge to a
stable level? Are there big differences between the training and validation
metrics (a sign of overfitting)?
Learn about [how to improve your machine learning
model](https://developers.google.com/machine-learning/crash-course/). Then, feel
free to adjust hyperparameters or the model architecture and train again.
#### Export the model for serving
AI Platform requires a SavedModel when you [create a model version
resource](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models#create_a_model_version).
Since not all optimizers can be exported to the SavedModel format, you may see
warnings during the export process. As long as you successfully export a serving
graph, AI Platform can use the SavedModel to serve predictions.
```
# Export the model to a local SavedModel directory
export_path = tf.keras.experimental.export_saved_model(keras_model, 'keras_export')
print("Model exported to: ", export_path)
```
You may export a SavedModel directory to your local filesystem or to Cloud
Storage, as long as you have the necessary permissions. In your current
environment, you granted access to Cloud Storage by authenticating your GCP account and setting the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
AI Platform training jobs can also export directly to Cloud Storage, because
AI Platform service accounts [have access to Cloud Storage buckets in their own
project](https://cloud.google.com/ml-engine/docs/tensorflow/working-with-cloud-storage).
Try exporting directly to Cloud Storage:
```
# Export the model to a SavedModel directory in Cloud Storage
export_path = tf.keras.experimental.export_saved_model(keras_model, JOB_DIR + '/keras_export')
print("Model exported to: ", export_path)
```
You can now deploy this model to AI Platform and serve predictions by
following the steps from Part 2.
## Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Alternatively, you can clean up individual resources by running the following
commands:
```
# Delete model version resource
! gcloud ai-platform versions delete $MODEL_VERSION --region $REGION --quiet --model $MODEL_NAME
# Delete model resource
! gcloud ai-platform models delete $MODEL_NAME --region $REGION --quiet
# Delete Cloud Storage objects that were created
! gsutil -m rm -r $JOB_DIR
# If the training job is still running, cancel it
! gcloud ai-platform jobs cancel $JOB_NAME --quiet --verbosity critical
```
If your Cloud Storage bucket doesn't contain any other objects and you would like to delete it, run `gsutil rm -r gs://$BUCKET_NAME`.
## What's next?
* View the [complete training
code](https://github.com/GoogleCloudPlatform/cloudml-samples/tree/master/census/tf-keras) used in this guide, which structures the code to accept custom
hyperparameters as command-line flags.
* Read about [packaging
code](https://cloud.google.com/ml-engine/docs/tensorflow/packaging-trainer) for an AI Platform training job.
* Read about [deploying a
model](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models) to serve predictions.
| github_jupyter |
```
# Read the book's SAS source for the Wasa moped data straight off the
# website; the data block is inlined in the SAS file.
con <- url("http://www2.math.su.se/~esbj/GLMbook/moppe.sas")
data <- readLines(con, n = 200L, warn = FALSE, encoding = "unknown")
close(con)
# The data rows sit between the SAS "cards;" statement and the closing ";".
data.start <- grep("^cards;", data) + 1L
data.end <- grep("^;", data[data.start:999L]) + data.start - 2L
# Parse the raw lines; the ninth column ("cell") is discarded via
# colClasses = "NULL".
table.1.2 <- read.table(text = data[data.start:data.end],
header = FALSE,
sep = "",
quote = "",
col.names = c("premiekl", "moptva", "zon", "dur",
"medskad", "antskad", "riskpre", "helpre", "cell"),
na.strings = NULL,
colClasses = c(rep("factor", 3), "numeric",
rep("integer", 4), "NULL"),
comment.char = "")
rm(con, data, data.start, data.end)
# Attach provenance metadata as a comment attribute on the table.
comment(table.1.2) <-
c("Title: Partial casco moped insurance from Wasa insurance, 1994--1999",
"Source: http://www2.math.su.se/~esbj/GLMbook/moppe.sas",
"Copyright: http://www2.math.su.se/~esbj/GLMbook/")
# Claim frequency = number of claims per policy-year of exposure.
table.1.2$skadfre = with(table.1.2, antskad / dur)
# Document each (Swedish-named) column with its code book.
comment(table.1.2$premiekl) <-
c("Name: Class",
"Code: 1=Weight over 60kg and more than 2 gears",
"Code: 2=Other")
comment(table.1.2$moptva) <-
c("Name: Age",
"Code: 1=At most 1 year",
"Code: 2=2 years or more")
comment(table.1.2$zon) <-
c("Name: Zone",
"Code: 1=Central and semi-central parts of Sweden's three largest cities",
"Code: 2=suburbs and middle-sized towns",
"Code: 3=Lesser towns, except those in 5 or 7",
"Code: 4=Small towns and countryside, except 5--7",
"Code: 5=Northern towns",
"Code: 6=Northern countryside",
"Code: 7=Gotland (Sweden's largest island)")
comment(table.1.2$dur) <-
c("Name: Duration",
"Unit: year")
comment(table.1.2$medskad) <-
c("Name: Claim severity",
"Unit: SEK")
comment(table.1.2$antskad) <- "Name: No. claims"
comment(table.1.2$riskpre) <-
c("Name: Pure premium",
"Unit: SEK")
comment(table.1.2$helpre) <-
c("Name: Actual premium",
"Note: The premium for one year according to the tariff in force 1999",
"Unit: SEK")
comment(table.1.2$skadfre) <-
c("Name: Claim frequency",
"Unit: /year")
# Persist and display the assembled table.
save(table.1.2, file = "table.1.2.RData")
print(table.1.2)
install.packages(c("data.table", "foreach", "ggplot2"), dependencies = TRUE, repos = "http://cran.us.r-project.org")
# Reload the moped table if the previous cell was not run in this session.
if (!exists("table.1.2"))
load("table.1.2.RData")
library("foreach")
# Skeleton of the tariff table (book Table 2.7): one row per level of
# each rating factor.
table27 <-
data.frame(rating.factor =
c(rep("Vehicle class", nlevels(table.1.2$premiekl)),
rep("Vehicle age", nlevels(table.1.2$moptva)),
rep("Zone", nlevels(table.1.2$zon))),
class =
c(levels(table.1.2$premiekl),
levels(table.1.2$moptva),
levels(table.1.2$zon)),
stringsAsFactors = FALSE)
# Aggregate exposure and claim counts per level, and set treatment
# contrasts so the level with the largest exposure is the base level.
new.cols <-
foreach (rating.factor = c("premiekl", "moptva", "zon"),
.combine = rbind) %do%
{
nclaims <- tapply(table.1.2$antskad, table.1.2[[rating.factor]], sum)
sums <- tapply(table.1.2$dur, table.1.2[[rating.factor]], sum)
n.levels <- nlevels(table.1.2[[rating.factor]])
contrasts(table.1.2[[rating.factor]]) <-
contr.treatment(n.levels)[rank(-sums, ties.method = "first"), ]
data.frame(duration = sums, n.claims = nclaims)
}
table27 <- cbind(table27, new.cols)
rm(new.cols)
# Claim frequency model: Poisson GLM with log(duration) offset.
model.frequency <-
glm(antskad ~ premiekl + moptva + zon + offset(log(dur)),
data = table.1.2, family = poisson)
# Convert coefficients to multiplicative relativities against the base.
rels <- coef( model.frequency )
rels <- exp( rels[1] + rels[-1] ) / exp( rels[1] )
# Re-insert a relativity of 1 at each factor's base level; the rank of
# -duration maps relativities back to the original level order.
table27$rels.frequency <-
c(c(1, rels[1])[rank(-table27$duration[1:2], ties.method = "first")],
c(1, rels[2])[rank(-table27$duration[3:4], ties.method = "first")],
c(1, rels[3:8])[rank(-table27$duration[5:11], ties.method = "first")])
# Claim severity model: Gamma GLM with log link, weighted by claim
# counts, fitted on cells with at least one claim.
model.severity <-
glm(medskad ~ premiekl + moptva + zon,
data = table.1.2[table.1.2$medskad > 0, ],
family = Gamma("log"), weights = antskad)
rels <- coef( model.severity )
rels <- exp( rels[1] + rels[-1] ) / exp( rels[1] )
table27$rels.severity <-
c(c(1, rels[1])[rank(-table27$duration[1:2], ties.method = "first")],
c(1, rels[2])[rank(-table27$duration[3:4], ties.method = "first")],
c(1, rels[3:8])[rank(-table27$duration[5:11], ties.method = "first")])
# Pure premium relativity = frequency relativity * severity relativity.
table27$rels.pure.premium <- with(table27, rels.frequency * rels.severity)
print(table27, digits = 2)
```
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# Remotive - Post daily jobs on slack
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Remotive/Remotive_Post_daily_jobs_on_slack.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
**Tags:** #remotive #jobs #slack #gsheet #naas_drivers #automation #opendata #text
**Author:** [Sanjeet Attili](https://www.linkedin.com/in/sanjeet-attili-760bab190/)
## Input
### Import libraries
```
import pandas as pd
from bs4 import BeautifulSoup
import requests
from datetime import datetime
import time
from naas_drivers import gsheet, slack
import naas
```
### Setup slack channel configuration
```
SLACK_TOKEN = "xoxb-1481042297777-3085654341191-xxxxxxxxxxxxxxxxxxxxxxxxx"
SLACK_CHANNEL = "05_work"
```
### Setup sheet log data
```
spreadsheet_id = "1EBefhkbmqaXMZLRCiafabf6xxxxxxxxxxxxxxxxxxx"
sheet_name = "SLACK_CHANNEL_POSTS"
```
### Setup Remotive
#### Get categories from Remotive
```
def get_remotejob_categories():
    """Fetch the list of Remotive job categories.

    Returns:
        pd.DataFrame with one row per category (including the `slug`
        column used as a filter elsewhere in this notebook), or the
        HTTPError object if the request failed.
    """
    # No values are interpolated, so a plain string replaces the
    # original placeholder-less f-string.
    req_url = "https://remotive.io/api/remote-jobs/categories"
    res = requests.get(req_url)
    try:
        res.raise_for_status()
    except requests.HTTPError as e:
        # Notebook-friendly behavior: return (not raise) the error so
        # the cell output shows what went wrong.
        return e
    res_json = res.json()
    # Get categories
    jobs = res_json.get('jobs')
    return pd.DataFrame(jobs)
df_categories = get_remotejob_categories()
df_categories
```
#### Enter your parameters
```
categories = ['data'] # Pick the list of categories in columns "slug"
date_from = - 10 # Choose date difference in days from now => must be negative
```
### Set the Scheduler
```
naas.scheduler.add(recurrence="0 9 * * *")
# # naas.scheduler.delete() # Uncomment this line to delete your scheduler if needed
```
## Model
### Get the sheet log of jobs
```
df_jobs_log = gsheet.connect(spreadsheet_id).get(sheet_name=sheet_name)
df_jobs_log
```
### Get all jobs posted after timestamp_date
All jobs posted after the cutoff date will be fetched.<br>
In summary, the `date_from` parameter (a negative number of days) determines how far back in time jobs are fetched.
```
REMOTIVE_DATETIME = "%Y-%m-%dT%H:%M:%S"
NAAS_DATETIME = "%Y-%m-%d %H:%M:%S"
def get_remotive_jobs_since(jobs, date):
    """Keep only the jobs published after `date`.

    Args:
        jobs: list of job dicts from the Remotive API
        date: unix-timestamp cutoff

    Returns:
        List of dicts with URL, TITLE, COMPANY and PUBLICATION_DATE keys.
    """
    selected = []
    for job in jobs:
        published = datetime.strptime(
            job['publication_date'], REMOTIVE_DATETIME).timestamp()
        if published <= date:
            continue
        selected.append({
            'URL': job['url'],
            'TITLE': job['title'],
            'COMPANY': job['company_name'],
            'PUBLICATION_DATE': datetime.fromtimestamp(published).strftime(NAAS_DATETIME),
        })
    return selected
def get_category_jobs_since(category, date, limit):
    """Fetch all jobs in `category` published after `date`.

    The Remotive API has no "since" filter, so this requests `limit`
    jobs and recursively grows the limit (by 5) until the oldest job
    returned is older than the cutoff, or the category is exhausted.

    Args:
        category: Remotive category slug (e.g. "data")
        date: unix-timestamp cutoff
        limit: number of jobs to request on this attempt

    Returns:
        List of job dicts (see get_remotive_jobs_since), possibly empty.
    """
    url = f"https://remotive.io/api/remote-jobs?category={category}&limit={limit}"
    res = requests.get(url)
    # Parse the response once instead of re-calling res.json() on every
    # access as the original did.
    jobs = res.json()['jobs']
    if not jobs:
        return []
    oldest = datetime.strptime(
        jobs[-1]['publication_date'], REMOTIVE_DATETIME).timestamp()
    if len(jobs) < limit or date > oldest:
        # "catgory" typo in the progress message fixed.
        print(f"Jobs from category {category} fetched ✅")
        return get_remotive_jobs_since(jobs, date)
    return get_category_jobs_since(category, date, limit + 5)
def get_jobs_since(categories: list,
                   date_from: int):
    """Fetch all jobs in `categories` posted in the last `-date_from` days.

    Args:
        categories: list of Remotive category slugs
        date_from: day offset from now; must be negative
            (e.g. -10 means "the last 10 days")

    Returns:
        pd.DataFrame with one row per job.

    Raises:
        ValueError: if date_from is not negative. (The original code
            *returned* the error message, silently handing a string to
            callers expecting a DataFrame.)
    """
    if date_from >= 0:
        raise ValueError("'date_from' must be negative. Please update your parameter.")
    # Convert the day offset to a unix-timestamp cutoff.
    search_jobs_from = date_from * 24 * 60 * 60  # days in seconds
    timestamp_date = time.time() + search_jobs_from
    jobs = []
    for category in categories:
        jobs += get_category_jobs_since(category, timestamp_date, 5)
    print(f'- All job since {datetime.fromtimestamp(timestamp_date)} have been fetched -')
    return pd.DataFrame(jobs)
df_jobs = get_jobs_since(categories, date_from=date_from)
df_jobs
```
### Remove duplicate jobs
```
def remove_duplicates(df1, df2):
    """Drop rows of `df2` whose URL already appears in `df1`.

    Args:
        df1: dataframe of already-logged jobs (needs a URL column)
        df2: dataframe of freshly fetched jobs

    Returns:
        The rows of `df2` not yet logged, sorted by PUBLICATION_DATE.
    """
    # URLs already present in the log.
    seen_urls = df1.URL.unique()
    fresh = df2[~df2.URL.isin(seen_urls)]
    return fresh.sort_values(by="PUBLICATION_DATE")
df_new_jobs = remove_duplicates(df_jobs_log, df_jobs)
df_new_jobs
```
## Output
### Add new jobs on the sheet log
```
gsheet.connect(spreadsheet_id).send(sheet_name=sheet_name,
data=df_new_jobs,
append=True)
```
### Send all jobs link to the slack channel
```
if len(df_new_jobs) > 0:
for _, row in df_new_jobs.iterrows():
url = row.URL
slack.connect(SLACK_TOKEN).send(SLACK_CHANNEL, f"<{url}>")
else:
print("Nothing to published in Slack !")
```
| github_jupyter |
AMUSE tutorial on multiple code in a single bridge
====================
A cascade of bridged codes to address the problem of running multiple planetary systems in, for example, a star cluster. This is just an example of how to initialize such a cascaded bridge without any stellar evolution, background potentials. The forces for one planetary system on the planets in the other systems are ignored to save computer time. This gives rise to some energy errors, and inconsistencies (for example when one star tries to capture planets from another system. The latter will not happen here.
This can be addressed by introducing some logic that checks which stars are near which planets.
```
import numpy
from amuse.units import (units, constants)
from amuse.lab import Particles
from amuse.units import nbody_system
from matplotlib import pyplot
## source https://en.wikipedia.org/wiki/TRAPPIST-1
trappist= {"b": {"m": 1.374 | units.MEarth,
"a": 0.01154 | units.au,
"e": 0.00622,
"i": 89.56},
"c": {"m": 1.308 | units.MEarth,
"a": 0.01580 | units.au,
"e": 0.00654,
"i": 89.70},
"d": {"m": 0.388 | units.MEarth,
"a": 0.02227 | units.au,
"e": 0.00837,
"i": 89.89},
"e": {"m": 0.692 | units.MEarth,
"a": 0.02925 | units.au,
"e": 0.00510,
"i": 89.736},
"f": {"m": 1.039 | units.MEarth,
"a": 0.03849 | units.au,
"e": 0.01007,
"i": 89.719},
"g": {"m": 1.321 | units.MEarth,
"a": 0.04683 | units.au,
"e": 0.00208,
"i": 89.721},
"h": {"m": 0.326 | units.MEarth,
"a": 0.06189 | units.au,
"e": 0.00567,
"i": 89.796}
}
def trappist_system():
    """Build the TRAPPIST-1 star plus its seven planets as AMUSE particles.

    Orbital elements come from the module-level `trappist` dict; each
    planet gets a randomly drawn true anomaly, so every call produces a
    different orbital-phase configuration.

    Returns:
        The star followed by the seven planets (via the `|` particle-set
        union), with planet positions/velocities relative to the star.
    """
    from amuse.ext.orbital_elements import new_binary_from_orbital_elements
    from numpy.random import uniform
    # Host star with literature mass, placed at the origin at rest.
    star = Particles(1)
    setattr(star, "name", "")
    setattr(star, "type", "")
    star[0].mass = 0.898 | units.MSun
    star[0].position = (0,0,0) | units.au
    star[0].velocity = (0,0,0) | units.kms
    star[0].name = "trappist"
    star[0].type = "star"
    # One particle per planet in the `trappist` dict.
    bodies = Particles(len(trappist))
    setattr(bodies, "name", "")
    setattr(bodies, "type", "")
    for bi, planet in zip(bodies, trappist):
        # Random orbital phase (degrees) for each planet.
        true_anomaly = uniform(0, 360)
        # new_binary_from_orbital_elements returns a two-particle set:
        # index 0 is the star, index 1 the planet.
        b = new_binary_from_orbital_elements(star.mass,
                                             trappist[planet]['m'],
                                             trappist[planet]["a"],
                                             trappist[planet]["e"],
                                             true_anomaly = true_anomaly,
                                             inclination = trappist[planet]["i"],
                                             G = constants.G)
        bi.name = planet
        bi.type = "planet"
        bi.mass = b[1].mass
        # Store coordinates relative to the host star.
        bi.position = b[1].position - b[0].position
        bi.velocity = b[1].velocity - b[0].velocity
    return star | bodies
# --- Set up three TRAPPIST-1 systems inside a small Plummer configuration ---
from amuse.community.ph4.interface import ph4
from amuse.community.hermite.interface import Hermite
from amuse.ic.plummer import new_plummer_model
import numpy.random
# Fix the seed so the random true anomalies and Plummer draws are reproducible.
numpy.random.seed(1624973942)
converter=nbody_system.nbody_to_si(1 | units.MSun, 0.1|units.parsec)
t1 = trappist_system()
t2 = trappist_system()
t3 = trappist_system()
# A three-point Plummer model supplies the centre-of-mass position/velocity
# for each planetary system.
p = new_plummer_model(3, convert_nbody=converter)
t1.position += p[0].position
t1.velocity += p[0].velocity
t2.position += p[1].position
t2.velocity += p[1].velocity
t3.position += p[2].position
t3.velocity += p[2].velocity
# Rescale the converter to the mass and size of a single planetary system.
converter=nbody_system.nbody_to_si(t1.mass.sum(), 0.1|units.au)
bodies = Particles(0)
# One gravity code per system; `bodies` holds a merged local copy of all three.
gravity1 = ph4(converter)
t = gravity1.particles.add_particles(t1)
bodies.add_particles(t1)
gravity2 = ph4(converter)
t = gravity2.particles.add_particles(t2)
bodies.add_particles(t2)
gravity3 = Hermite(converter)
t = gravity3.particles.add_particles(t3)
bodies.add_particles(t3)
# Channels copy updated positions/velocities from the codes back into `bodies`.
channel_from_g1 = gravity1.particles.new_channel_to(bodies)
channel_from_g2 = gravity2.particles.new_channel_to(bodies)
channel_from_g3 = gravity3.particles.new_channel_to(bodies)
# Quick look at the initial configuration (x-z plane, relative to body 0).
from amuse.plot import scatter
from matplotlib import pyplot
scatter(bodies.x-bodies[0].x, bodies.z-bodies[0].z)
#pyplot.xlim(-0.1, 0.1)
#pyplot.ylim(-0.1, 0.1)
pyplot.show()
def plot(bodies):
    """Scatter-plot star and planet x-z positions relative to the first body, in au."""
    from amuse.plot import scatter
    from matplotlib import pyplot
    origin = bodies[0]
    stars = bodies[bodies.type=='star']
    planets = bodies-stars
    # Stars as large red markers, planets as small blue ones.
    for group, colour, size in ((stars, 'r', 100), (planets, 'b', 10)):
        pyplot.scatter((group.x - origin.x).value_in(units.au),
                       (group.z - origin.z).value_in(units.au),
                       c=colour, s=size)
    pyplot.xlim(-0.1, 0.1)
    pyplot.ylim(-0.1, 0.1)
    pyplot.show()
plot(bodies)
# Couple the three N-body codes: each system feels the gravity of the other
# two through the bridge, while its internal dynamics stays with its own code.
from amuse.couple import bridge
gravity = bridge.Bridge()
gravity.add_system(gravity1, (gravity2,gravity3))
gravity.add_system(gravity2, (gravity1,gravity3))
gravity.add_system(gravity3, (gravity1,gravity2))
from amuse.lab import zero
# Track total energy so the integration error can be monitored.
Etot_init = gravity.kinetic_energy + gravity.potential_energy
Etot_prev = Etot_init
gravity.timestep = 100.0| units.yr  # bridge kick interval (half the diagnostic step dt)
time = zero
dt = 200.0|units.yr      # diagnostics/plotting cadence
t_end = 1000.0| units.yr
while time < t_end:
    time += dt
    gravity.evolve_model(time)
    # (removed: an unused `Etot_prev_se` energy query was computed here every
    # iteration and never read — a wasted, expensive round-trip to the codes)
    # Pull updated particle data from each code back into the local set.
    channel_from_g1.copy()
    channel_from_g2.copy()
    channel_from_g3.copy()
    plot(bodies)
    print(bodies[1].position.in_(units.au))
    # Energy diagnostics: dE is the accumulated drift, ddE the per-step change.
    Ekin = gravity.kinetic_energy
    Epot = gravity.potential_energy
    Etot = Ekin + Epot
    print("T=", time.in_(units.yr), end=' ')
    print("E= ", Etot/Etot_init, "Q= ", Ekin/Epot, end=' ')
    print("dE=", (Etot_init-Etot)/Etot, "ddE=", (Etot_prev-Etot)/Etot)
    Etot_prev = Etot
gravity.stop()
```
| github_jupyter |
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex SDK: AutoML training tabular binary classification model for online prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_tabular_binary_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_tabular_binary_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex SDK to create tabular binary classification models and do online prediction using Google Cloud's [AutoML](https://cloud.google.com/vertex-ai/docs/start/automl-users).
### Dataset
The dataset used for this tutorial is the [Bank Marketing](gs://cloud-ml-tables-data/bank-marketing.csv). This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
### Objective
In this tutorial, you create an AutoML tabular binary classification model and deploy for online prediction from a Python script using the Vertex SDK. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.
The steps performed include:
- Create a Vertex `Dataset` resource.
- Train the model.
- View the model evaluation.
- Deploy the `Model` resource to a serving `Endpoint` resource.
- Make a prediction.
- Undeploy the `Model`.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex SDK.
```
import sys
import os
# Google Cloud Notebook environments expose this metadata file; there, install
# with --user to avoid needing root. Elsewhere install normally.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = '--user'
else:
    USER_FLAG = ''
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
```
# Skip the restart while running under automated testing.
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs so the newly installed
    # packages are importable. do_shutdown(True) requests a restart.
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
PROJECT_ID = "[your-project-id]" #@param {type:"string"}
# If the placeholder was left unchanged, fall back to the project configured
# in the local gcloud CLI.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations)
```
REGION = 'us-central1' #@param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
```
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        # The empty string below is a placeholder; replace with your key path.
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
# Cloud Storage staging bucket; names must be globally unique across all GCP projects.
BUCKET_NAME = "gs://[your-bucket-name]" #@param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    # Fall back to an auto-generated name.
    # NOTE(review): there is no separator between PROJECT_ID and "aip-", so this
    # yields e.g. "gs://myprojectaip-<timestamp>" — confirm this is intended.
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
```
## Initialize Vertex SDK
Initialize the Vertex SDK for your project and corresponding bucket.
```
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
```
# Tutorial
Now you are ready to start creating your own AutoML tabular binary classification model.
## Create a Dataset Resource
First, you create a tabular Dataset resource for the Bank Marketing dataset.
### Data preparation
The Vertex `Dataset` resource for tabular has a couple of requirements for your tabular data.
- Must be in a CSV file or a BigQuery query.
#### CSV
For tabular binary classification, the CSV file has a few requirements:
- The first row must be the heading -- note how this is different from Vision, Video and Language where the requirement is no heading.
- All but one column are features.
- One column is the label, which you will specify when you subsequently create the training pipeline.
#### Location of Cloud Storage training data.
Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
```
# Public Cloud Storage location of the Bank Marketing CSV used for training.
IMPORT_FILE = 'gs://cloud-ml-tables-data/bank-marketing.csv'
```
#### Quick peek at your data
You will use a version of the Bank Marketing dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.
You also need to know, for training, the heading name of the label column, which is saved as `label_column`. For this dataset, it is the last column in the CSV file.
```
# Count the rows (examples) in the CSV index file.
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
# The label column is the last column of the heading row.
heading = ! gsutil cat $IMPORT_FILE | head -n1
# `heading` is an IPython SList; str() renders it like "['col1,...,label']",
# so the last comma-separated field ends with "']" and split("'") strips the
# trailing quote/bracket, leaving just the label name.
label_column = str(heading).split(',')[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
    raise Exception("label column missing")
```
### Create the Dataset
Next, create the `Dataset` resource using the `create()` method for the `TabularDataset` class, which takes the following parameters:
- `display_name`: The human readable name for the `Dataset` resource.
- `gcs_source`: A list of one or more dataset index file to import the data items into the `Dataset` resource.
This operation may take several minutes.
```
# Create the Vertex tabular Dataset resource from the CSV in Cloud Storage.
# This call can take several minutes.
dataset = aip.TabularDataset.create(
    display_name="Bank Marketing" + "_" + TIMESTAMP,
    gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
```
## Train the model
Now train an AutoML tabular binary classification model using your Vertex `Dataset` resource. To train the model, do the following steps:
1. Create a Vertex training pipeline for the `Dataset` resource.
2. Execute the pipeline to start the training.
### Create and run training pipeline
To train an AutoML tabular binary classification model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
#### Create training pipeline
An AutoML training pipeline is created with the `AutoMLTabularTrainingJob` class, with the following parameters:
- `display_name`: The human readable name for the `TrainingJob` resource.
- `optimization_prediction_type`: The type task to train the model for.
 - `classification`: A tabular classification model.
- `regression`: A tabular regression model.
- `forecasting`: A tabular forecasting model.
- `column_transformations`: (Optional): Transformations to apply to the input columns
- `optimization_objective`: The optimization objective to minimize or maximize.
- `minimize-log-loss`
```
# Define (but do not yet run) the AutoML tabular training pipeline.
dag = aip.AutoMLTabularTrainingJob(
    display_name="bank_" + TIMESTAMP,
    optimization_prediction_type="classification",
    optimization_objective="minimize-log-loss"
)
```
#### Run the training pipeline
Next, you run the DAG to start the training job by invoking the method `run()`, with the following parameters:
- `dataset`: The `Dataset` resource to train the model.
- `model_display_name`: The human readable name for the trained model.
- `target_column`: The name of the column to train as the label.
- `training_fraction_split`: The percentage of the dataset to use for training.
- `validation_fraction_split`: The percentage of the dataset to use for validation.
- `test_fraction_split`: The percentage of the dataset to use for test (holdout data).
- `budget_milli_node_hours`: (optional) Maximum training time specified in unit of millihours (1000 = hour).
- `disable_early_stopping`: If `True`, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
The `run` method when completed returns the `Model` resource.
The execution of the training pipeline will take up to 20 minutes.
```
# Run the training pipeline (can take up to ~20 minutes); returns the Model.
model = dag.run(
    dataset=dataset,
    target_column=label_column,
    model_display_name="bank_" + TIMESTAMP,
    # 60/20/20 train/validation/test split.
    training_fraction_split=0.6,
    validation_fraction_split=0.2,
    test_fraction_split=0.2,
    budget_milli_node_hours=1000,
    disable_early_stopping=False
)
```
## Deploy the model
Next, deploy your `Model` resource to an `Endpoint` resource for online prediction. To deploy the `Model` resource, you invoke the `deploy()` method. This call will create an `Endpoint` resource automatically.
The method returns the created `Endpoint` resource.
The `deploy()` method takes the following arguments:
- `machine_type`: The type of compute machine.
```
# Deploy the model; this implicitly creates and returns an Endpoint resource.
endpoint = model.deploy(machine_type="n1-standard-4")
```
## Make an online prediction request
Now do an online prediction to your deployed model.
### Make test item
You will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
```
# Synthetic test instance; all values are strings, matching the CSV schema.
# NOTE(review): "managment" and "teritary" look misspelled relative to the Bank
# Marketing dataset's categories ("management", "tertiary") — verify against
# the dataset vocabulary before relying on this prediction.
INSTANCE = {"Age": '58', "Job": "managment", "MaritalStatus": "married", "Education": "teritary", "Default": "no",
            "Balance": '2143', "Housing": "yes", "Loan": "no", "Contact": "unknown", "Day": '5', "Month": "may",
            "Duration": '261', "Campaign": '1', "PDays": '-1', "Previous": "0", "POutcome": "unknown"}
```
### Make the prediction
Now that your `Model` resource is deployed to an `Endpoint` resource, one can do online predictions by sending prediction requests to the `Endpoint` resource.
#### Request
The format of each instance is:
{[feature_list] }
Since the `predict()` method can take multiple items (instances), send your single test item as a list of one test item.
#### Response
The response from the `predict()` call is a Python dictionary with the following entries:
- `ids`: The internal assigned unique identifiers for each prediction request.
- TODO
- `deployed_model_id`: The Vertex identifier for the deployed `Model` resource which did the predictions.
```
# predict() accepts a list of instances; send the single test item as a list of one.
instances_list = [INSTANCE]
prediction = endpoint.predict(instances_list)
print(prediction)
```
## Undeploy the model
When you are done doing predictions, you undeploy the `Model` resource from the `Endpoint` resource. This deprovisions all compute resources and ends billing for the deployed model.
```
# Undeploy every model from the endpoint to stop billing for serving compute.
endpoint.undeploy_all()
```
# Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex dataset object
try:
if delete_dataset and 'dataset' in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if delete_model and 'model' in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if delete_endpoint and 'model' in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if delete_batchjob and 'model' in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
if delete_bucket and 'BUCKET_NAME' in globals():
! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
# Data Visualization with Python and Jupyter
In this module of the course, we will use some of the libraries available with Python and Jupyter to examine our data set. In order to better understand the data, we can use visualizations such as charts, plots, and graphs. We'll use some common tools such as [`matplotlib`](https://matplotlib.org/users/index.html) and [`seaborn`](https://seaborn.pydata.org/index.html) and gather some statistical insights into our data.
We'll continue to use the [`insurance.csv`](https://www.kaggle.com/noordeen/insurance-premium-prediction/download) file from your project assets, so if you have not already [`downloaded this file`](https://www.kaggle.com/noordeen/insurance-premium-prediction/download) to your local machine, and uploaded it to your project, do that now.
## Table of Contents
1. [Using the Jupyter notebook](#jupyter)<br>
2. [Load the data](#data)<br>
3. [Visualize Data](#visualize)<br>
4. [Understand Data](#understand)<br>
<a id="jupyter"></a>
## 1. Using the Jupyter notebook
### Jupyter cells
When you are editing a cell in Jupyter notebook, you need to re-run the cell by pressing **`<Shift> + <Enter>`**. This will allow changes you made to be available to other cells.
Use **`<Enter>`** to make new lines inside a cell you are editing.
#### Code cells
Re-running will execute any statements you have written. To edit an existing code cell, click on it.
#### Markdown cells
Re-running will render the markdown text. To edit an existing markdown cell, double-click on it.
<hr>
### Common Jupyter operations
Near the top of the Jupyter notebook page, Jupyter provides a row of menu options (`File`, `Edit`, `View`, `Insert`, ...) and a row of tool bar icons (disk, plus sign, scissors, 2 files, clipboard and file, up arrow, ...).
#### Inserting and removing cells
- Use the "plus sign" icon to insert a cell below the currently selected cell
- Use "Insert" -> "Insert Cell Above" from the menu to insert above
#### Clear the output of all cells
- Use "Kernel" -> "Restart" from the menu to restart the kernel
- click on "clear all outputs & restart" to have all the output cleared
#### Save your notebook file locally
- Clear the output of all cells
- Use "File" -> "Download as" -> "IPython Notebook (.ipynb)" to download a notebook file representing your session
<hr>
<a id="data"></a>
## 2.0 Load the data
A lot of data is **structured data**, which is data that is organized and formatted so it is easily readable, for example a table with variables as columns and records as rows, or key-value pairs in a noSQL database. As long as the data is formatted consistently and has multiple records with numbers, text and dates, you can probably read the data with [Pandas](https://pandas.pydata.org/pandas-docs/stable/index.html), an open-source Python package providing high-performance data manipulation and analysis.
### 2.1 Load our data as a pandas data frame
**<font color='red'><< FOLLOW THE INSTRUCTIONS BELOW TO LOAD THE DATASET >></font>**
* Highlight the cell below by clicking it.
* Click the `10/01` "Find data" icon in the upper right of the notebook.
* Add the locally uploaded file `insurance.csv` by choosing the `Files` tab. Then choose the `insurance.csv`. Click `Insert to code` and choose `Insert Pandas DataFrame`.
* The code to bring the data into the notebook environment and create a Pandas DataFrame will be added to the cell below.
* Run the cell
```
# Place cursor below and insert the Pandas DataFrame for the Insurance Expense data
```
### 2.2 Update the variable for our Pandas dataframe
We'll use the Pandas naming convention df for our DataFrame. Make sure that the cell below uses the name for the dataframe used above. For the locally uploaded file it should look like df_data_1 or df_data_2 or df_data_x.
**<font color='red'><< UPDATE THE VARIABLE ASSIGNMENT TO THE VARIABLE GENERATED ABOVE. >></font>**
```
# Replace df_data_1 with the variable name generated by the "Insert to code"
# step above (e.g. df_data_1, df_data_2, ...), so the rest of the notebook
# can refer to the DataFrame simply as `df`.
df = df_data_1
```
<a id="visualize"></a>
## 3.0 Visualize Data
Pandas uses [`Matplotlib`](https://matplotlib.org/users/index.html) as the default for visualisations.
In addition, we'll use [`Numpy`](https://numpy.org), which is "The fundamental package for scientific computing with Python".
The conventions when using Jupyter notebooks is to import numpy as `np` and to import matplotlib.pyplot as `plt`. You can call these variables whatever you want, but you will often see them done this way.
Import the packages and also add the magic line starting with `%` to output the charts within the notebook. This is what is known as a [`magic command`](https://ipython.readthedocs.io/en/stable/interactive/magics.html).
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
%matplotlib inline
```
### 3.1 Seaborn
Seaborn is a Python data visualization library based on matplotlib. It is an easy to use visualisation package that works well with Pandas DataFrames.
Below are a few examples using Seaborn.
Refer to this [documentation](https://seaborn.pydata.org/index.html) for information on lots of plots you can create.
```
import seaborn as sns
```
### 3.2 Statistical description
We can use the Pandas method [`describe()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.describe.html) to get some statistics that will later be seen in our visualizations. This will include numeric data, but exclude the categorical fields.
```
# Summary statistics (count/mean/std/quartiles) for the numeric columns only.
df.describe()
```
### Question 1: Is there relationship between BMI and insurance expenses?
We'll explore the data by asking a series of questions (hypothesis). The use of plots can help us to find relationships and correlations.
[`Body Mass Index`](https://www.nhlbi.nih.gov/health/educational/lose_wt/BMI/bmicalc.htm) (BMI) is a measure of body fat based on height and weight that applies to adult men and women. It is often correlated with health outcomes, so let's use a [`Seaborn jointplot`](http://seaborn.pydata.org/generated/seaborn.jointplot.html) with a scatterplot to see if that holds for our data.
```
# Scatter jointplot of expenses vs. BMI, with marginal histograms.
sns.jointplot(x=df["expenses"], y=df["bmi"], kind="scatter")
plt.show()
```
#### Answer:
It does not appear that there is a good correlation between BMI and the expenses for these patients. We see from the histogram on the right that BMI is normally distributed, and from the histogram on top we see that Expenses are clustered around the lower amounts. It does not look like BMI would be a good predictor of the expenses.
### Question 2: Is there relationship between gender and insurance expenses?
Our next hypothesis might be that there is a correlation between gender and expenses. We can use the [`Seaborn boxplot`](https://seaborn.pydata.org/generated/seaborn.boxplot.html). A boxplot uses quartiles to show how the data is distributed, and will give us a good comparison between the 2 categories represented by `gender`. The horizontal line through our boxes is the median value. The area above the median line is the 3rd quartile, representing the values of the 50th-75th percentiles, and the area below the median line is the 2nd quartile, representing the values of the 25th-50th percentiles. The rest of the data is collapse into lines called "whiskers" and outliers are plotted as single points.
```
# Boxplot of expenses split by gender (quartiles, median, whiskers, outliers).
plt.figure(figsize = (5, 5))
sns.boxplot(x = 'sex', y = 'expenses', data = df)
```
#### Answer:
On average, claims from males and females are about the same, and both have approximately the same median (the value in the middle of the distribution). The 3rd quartile is "fatter" for the males, meaning there is a broader distribution of values, and it skews to a higher amount. The 4th quartile also skews higher for the males, so this category contains more of the higher expenses.
### Question 3: Is there relationship between region and claim amount?
Perhaps there is a correlation between the various regions and the insurance expenses. We can once again use a series of boxplots to see the differences between the regions.
```
# Boxplot of expenses split by region.
plt.figure(figsize = (10, 5))
sns.boxplot(x = 'region', y = 'expenses', data = df)
```
#### Answer:
In this case we see that the median values across regions are nearly the same. There is some variation for the distribution of expense values, and the southeast region has more of the higher values in the 3rd and 4th quartile. The differences aren't particularly large, however, and it is unlikely that region could be a good predictor of expenses.
### Question: Is there relationships between claim amount between smokers and non-smokers?
Given the overwhelming evidence that smoking causes mortality (death) and morbidity (disease), we might guess that there is a relationship between insurance claims and smoking.
Let's use a boxplot to examine this.
```
# Boxplot of expenses split by smoker status.
plt.figure(figsize = (5, 5))
sns.boxplot(x = 'smoker', y = 'expenses', data = df)
```
#### Answer:
We can see that the mean, and indeed the entire interquartile range from 25% to 75% is much higher in expense for the smokers than for the non-smokers. It looks like whether or not an individual is a smoker could be a good predictor of insurance expenses.
### Question: is the smoker group well represented?
We'll want to make sure that we have a pretty good sample size for both groups.
```
# make the plot a little bigger, then count samples per smoker category to
# check that both groups are well represented.
countplt, ax = plt.subplots(figsize = (10,7))
ax = sns.countplot(x='smoker', data=df)
```
#### Answer:
Yes, it looks like smokers are a large enough group to be statistically significant.
### Question: Is there relationship between claim amount and age?
It seems reasonable to assume that there might be different insurance costs for different age groups. For example, older adults tend to require more health care.
Since this is continuous data, let's use a scatter plot to investigate.
```
# Scatter jointplot of expenses vs. age, with marginal histograms.
sns.jointplot(x=df['expenses'], y=df['age'], kind='scatter')
plt.show()
```
#### Answer:
Yes, it does look like Claim amounts increase with age. Furthermore, there are interesting bands around the expenses for `$1,200`, up to `$3,000`, and above `$3,000`.
<a id="understand"></a>
## 4.0 Understand data
Now that we have had a look at the data, let's bring some of this information together.
In order to look at the relationship between multiple variables, we can use the [`Seaborn pairplot()`](https://seaborn.pydata.org/generated/seaborn.pairplot.html) method. This will plot each of the variables of the data set on both the x and y axes, in every possible combination. From this we can quickly see patterns that indicate the relationship between the variables.
We'll use the `hue` to color one of the features in the plot to compare it to the other 2 variables.
### 4.1 Impact of Smoking
See which variable correlate with smoking. `Red` indicates a smoker.
```
# Pairwise scatter plots of the numeric features, coloured by smoker status.
claim_pplot=df[['age', 'bmi', 'children', 'smoker', 'expenses']]
# NOTE: head() here has no effect in a script context — it is only displayed
# when it is the last expression of a notebook cell.
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "smoker" , markers=["o", "s"], palette="Set1")
plt.show()
```
#### Analysis
We can see some interesting things from these plots. Whereas older people tend to have more expenses, we can see from `age` vs. `expenses` that smoking is a more dominant feature. The same holds for `BMI` vs `expenses`.
### 4.2 Impact of Gender
What is the correlation between the features and gender. `Red` is female, `Blue` is male.
```
# Pairwise scatter plots of the numeric features, coloured by gender.
claim_pplot=df[['age', 'bmi', 'children', 'sex', 'expenses']]
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "sex" , markers=["o", "s"], palette="Set1")
plt.show()
```
#### Analysis:
Gender has very little impact on the expenses.
#### REGION IMPACT
```
# Pairwise scatter plots of the numeric features, coloured by region
# (four marker shapes, one per region).
claim_pplot=df[['age', 'bmi', 'children', 'region', 'expenses']]
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "region" , markers=["o", "s","x","+"], palette="Set1")
plt.show()
```
#### Analysis:
Region does have some impact on the expenses, which can be seen in the `age` vs. `expenses` chart where the `northeast` region appears in the lowest band more commonly, followed by the `northwest` region, and the `southeast` region is clearly higher and more prevalent in the highest band.
### Show correlations
We can quantify the correlations between features of the data set using the [`Pandas corr()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html) method. This will output a table with a numerical value for the correlation coefficient.
```
# Pearson correlation matrix of the selected columns.
# NOTE(review): behavior for the non-numeric columns (sex, smoker, region)
# depends on the pandas version — older versions silently drop them, pandas
# >= 2.0 raises unless numeric_only is used; verify against the environment.
df[['age', 'sex','bmi', 'children', 'smoker', 'region', 'expenses']].corr(method='pearson')
```
#### Analysis:
We can see from the numerical correlation coefficient that there is little relationship amongst the numerical features.
## Summary:
From our visual analysis of the data, we see that the best predictor of insurance claim expenses is whether or not the individual is a smoker.
| github_jupyter |
# Sampler statistics
When checking for convergence or when debugging a badly behaving
sampler, it is often helpful to take a closer look at what the
sampler is doing. For this purpose some samplers export
statistics for each generated sample.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pymc3 as pm
%matplotlib inline
```
As a minimal example we sample from a standard normal distribution:
```
model = pm.Model()
with model:
mu1 = pm.Normal("mu1", mu=0, sigma=1, shape=10)
with model:
step = pm.NUTS()
trace = pm.sample(2000, tune=1000, init=None, step=step, cores=2)
```
NUTS provides the following statistics:
```
trace.stat_names
```
- `mean_tree_accept`: The mean acceptance probability for the tree that generated this sample. The mean of these values across all samples but the burn-in should be approximately `target_accept` (the default for this is 0.8).
- `diverging`: Whether the trajectory for this sample diverged. If there are many diverging samples, this usually indicates that a region of the posterior has high curvature. Reparametrization can often help, but you can also try to increase `target_accept` to something like 0.9 or 0.95.
- `energy`: The energy at the point in phase-space where the sample was accepted. This can be used to identify posteriors with problematically long tails. See below for an example.
- `energy_error`: The difference in energy between the start and the end of the trajectory. For a perfect integrator this would always be zero.
- `max_energy_error`: The maximum difference in energy along the whole trajectory.
- `depth`: The depth of the tree that was used to generate this sample
- `tree_size`: The number of leaves of the sampling tree, when the sample was accepted. This is usually a bit less than $2 ^ \text{depth}$. If the tree size is large, the sampler is using a lot of leapfrog steps to find the next sample. This can for example happen if there are strong correlations in the posterior, if the posterior has long tails, if there are regions of high curvature ("funnels"), or if the variance estimates in the mass matrix are inaccurate. Reparametrisation of the model or estimating the posterior variances from past samples might help.
- `tune`: This is `True`, if step size adaptation was turned on when this sample was generated.
- `step_size`: The step size used for this sample.
- `step_size_bar`: The current best known step-size. After the tuning samples, the step size is set to this value. This should converge during tuning.
- `model_logp`: The model log-likelihood for this sample.
If the name of the statistic does not clash with the name of one of the variables, we can use indexing to get the values. The values for the chains will be concatenated.
We can see that the step sizes converged after the 1000 tuning samples for both chains to about the same value. The first 2000 values are from chain 1, the second 2000 from chain 2.
```
plt.plot(trace['step_size_bar'])
```
The `get_sampler_stats` method provides more control over which values should be returned, and it also works if the name of the statistic is the same as the name of one of the variables. We can use the `chains` option, to control values from which chain should be returned, or we can set `combine=False` to get the values for the individual chains:
```
sizes1, sizes2 = trace.get_sampler_stats('depth', combine=False)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)
ax1.plot(sizes1)
ax2.plot(sizes2)
accept = trace.get_sampler_stats('mean_tree_accept', burn=1000)
sb.distplot(accept, kde=False)
accept.mean()
```
Find the index of all diverging transitions:
```
trace['diverging'].nonzero()
```
It is often useful to compare the overall distribution of the
energy levels with the change of energy between successive samples.
Ideally, they should be very similar:
```
energy = trace['energy']
energy_diff = np.diff(energy)
sb.distplot(energy - energy.mean(), label='energy')
sb.distplot(energy_diff, label='energy diff')
plt.legend()
```
If the overall distribution of energy levels has longer tails, the efficiency of the sampler will deteriorate quickly.
## Multiple samplers
If multiple samplers are used for the same model (e.g. for continuous and discrete variables), the exported values are merged or stacked along a new axis.
Note that for the `model_logp` sampler statistic, only the last column (i.e. `trace.get_sampler_stat('model_logp')[-1]`) will be the overall model logp.
```
model = pm.Model()
with model:
mu1 = pm.Bernoulli("mu1", p=0.8)
mu2 = pm.Normal("mu2", mu=0, sigma=1, shape=10)
with model:
step1 = pm.BinaryMetropolis([mu1])
step2 = pm.Metropolis([mu2])
trace = pm.sample(10000, init=None, step=[step1, step2], cores=2, tune=1000)
trace.stat_names
```
Both samplers export `accept`, so we get one acceptance probability for each sampler:
```
trace.get_sampler_stats('accept')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/moh2236945/Natural-language-processing/blob/master/Multichannel_CNN_Model_for_Text_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
```
The model can be expanded by using multiple parallel convolutional neural networks that read the source document using different kernel sizes. This, in effect, creates a multichannel convolutional neural network for text that reads text with different n-gram sizes (groups of words).
Movie Review Dataset
Data Preparation
In this section, we will look at 3 things:
Separation of data into training and test sets.
Loading and cleaning the data to remove punctuation and numbers.
Prepare all reviews and save to file.
```
from string import punctuation
from os import listdir
from nltk.corpus import stopwords
from pickle import dump
# load doc into memory
def load_doc(filename):
    """Return the full text contents of *filename* as a single string.

    Params
    ======
        filename (str): path of the text file to read
    """
    # context manager guarantees the handle is closed even if read() raises;
    # the original closed the file manually and leaked it on error
    with open(filename, 'r') as file:
        return file.read()
# turn a doc into clean tokens
def clean_doc(doc):
    """Clean a raw review document and return it as one whitespace-joined string.

    Pipeline: split on whitespace, strip punctuation from each token, keep
    alphabetic words only, drop English stop words, drop 1-character tokens.
    """
    strip_punct = str.maketrans('', '', punctuation)
    stop_words = set(stopwords.words('english'))
    kept = [
        word
        for word in (raw.translate(strip_punct) for raw in doc.split())
        if word.isalpha() and word not in stop_words and len(word) > 1
    ]
    return ' '.join(kept)
# load all docs in a directory
def process_docs(directory, is_trian):
    """Load and clean every review document in *directory*.

    NOTE(review): the parameter name 'is_trian' is a typo of 'is_train'; it is
    kept unchanged so existing keyword calls keep working.

    Params
    ======
        directory (str): folder containing one review per text file
        is_trian (bool): True -> keep only training files (names not starting
            with 'cv9'); False -> keep only test files (names starting 'cv9')

    Returns a list with one cleaned, whitespace-joined document per kept file.
    """
    documents = list()
    # walk through all files in the folder
    for filename in listdir(directory):
        # skip any reviews in the test set
        if is_trian and filename.startswith('cv9'):
            continue
        if not is_trian and not filename.startswith('cv9'):
            continue
        # create the full path of the file to open
        path = directory + '/' + filename
        # load the doc
        doc = load_doc(path)
        # clean doc
        tokens = clean_doc(doc)
        # add to list
        documents.append(tokens)
    return documents
# save a dataset to file
def save_dataset(dataset, filename):
    """Pickle *dataset* to *filename* and report the path.

    The original passed an unclosed file object to dump(); the context manager
    guarantees the handle is flushed and closed.
    """
    with open(filename, 'wb') as out:
        dump(dataset, out)
    print('Saved: %s' % filename)
# load all training reviews
negative_docs = process_docs('txt_sentoken/neg', True)
positive_docs = process_docs('txt_sentoken/pos', True)
trainX = negative_docs + positive_docs
trainy = [0 for _ in range(900)] + [1 for _ in range(900)]
save_dataset([trainX,trainy], 'train.pkl')
# load all test reviews
negative_docs = process_docs('txt_sentoken/neg', False)
positive_docs = process_docs('txt_sentoken/pos', False)
testX = negative_docs + positive_docs
testY = [0 for _ in range(100)] + [1 for _ in range(100)]
save_dataset([testX,testY], 'test.pkl')
from nltk.corpus import stopwords
import string
# load doc into memory
def load_doc(filename):
    """Read *filename* and return its entire contents as one string."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents
# turn a doc into clean tokens
def clean_doc(doc):
    """Tokenise *doc* and return the cleaned tokens as a list of strings.

    Unlike the earlier cell's clean_doc, this variant returns the token list
    rather than a joined string.
    """
    strip_punct = str.maketrans('', '', string.punctuation)
    stop_words = set(stopwords.words('english'))
    cleaned = []
    for raw in doc.split():
        word = raw.translate(strip_punct)
        if not word.isalpha():
            continue
        if word in stop_words:
            continue
        if len(word) <= 1:
            continue
        cleaned.append(word)
    return cleaned
# load the document
filename = 'txt_sentoken/pos/cv000_29590.txt'
text = load_doc(filename)
tokens = clean_doc(text)
print(tokens)
# load all docs in a directory
def process_docs(directory, is_trian):
    """Load and clean every review document in *directory*.

    NOTE(review): 'is_trian' is a typo of 'is_train', kept for compatibility.
    This cell re-defines process_docs; here clean_doc returns a token list, so
    the result is a list of token lists (one per kept file).

    Params
    ======
        directory (str): folder containing one review per text file
        is_trian (bool): True -> keep training files (not starting 'cv9');
            False -> keep test files (starting 'cv9')
    """
    documents = list()
    # walk through all files in the folder
    for filename in listdir(directory):
        # skip any reviews in the test set
        if is_trian and filename.startswith('cv9'):
            continue
        if not is_trian and not filename.startswith('cv9'):
            continue
        # create the full path of the file to open
        path = directory + '/' + filename
        # load the doc
        doc = load_doc(path)
        # clean doc
        tokens = clean_doc(doc)
        # add to list
        documents.append(tokens)
    return documents
negative_docs = process_docs('txt_sentoken/neg', True)
trainy = [0 for _ in range(900)] + [1 for _ in range(900)]
testY = [0 for _ in range(100)] + [1 for _ in range(100)]
def save_dataset(dataset, filename):
    """Pickle *dataset* to *filename* and report the path.

    Fixes the file-handle leak of `dump(dataset, open(filename, 'wb'))` by
    using a context manager so the handle is flushed and closed.
    """
    with open(filename, 'wb') as out:
        dump(dataset, out)
    print('Saved: %s' % filename)
# load all test reviews
negative_docs = process_docs('txt_sentoken/neg', False)
positive_docs = process_docs('txt_sentoken/pos', False)
testX = negative_docs + positive_docs
testY = [0 for _ in range(100)] + [1 for _ in range(100)]
save_dataset([testX,testY], 'test.pkl')
```
develop a multichannel convolutional neural network for the sentiment analysis prediction problem.
This section is divided into 3 part
```
# load a clean dataset
def load_dataset(filename):
    """Unpickle and return the dataset stored at *filename*.

    The original left the file object unclosed; the context manager closes it
    as soon as the object has been read.
    """
    with open(filename, 'rb') as f:
        return load(f)
trainLines, trainLabels = load_dataset('train.pkl')
# fit a tokenizer
def create_tokenizer(lines):
    """Fit a Keras Tokenizer on the given text lines and return it."""
    fitted = Tokenizer()
    fitted.fit_on_texts(lines)
    return fitted
# calculate the maximum document length
def max_length(lines):
    """Return the word count (whitespace-split) of the longest document.

    Returns 0 for an empty collection instead of raising ValueError; behaviour
    on non-empty input is unchanged.
    """
    return max((len(line.split()) for line in lines), default=0)
# calculate vocabulary size
vocab_size = len(tokenizer.word_index) + 1
# calculate vocabulary size
vocab_size = len(tokenizer.word_index) + 1
# encode a list of lines
def encode_text(tokenizer, lines, length):
    """Integer-encode *lines* with *tokenizer* and post-pad every sequence to *length*."""
    sequences = tokenizer.texts_to_sequences(lines)
    return pad_sequences(sequences, maxlen=length, padding='post')
# encode a list of lines
def encode_text(tokenizer, lines, length):
    """Integer-encode *lines* with *tokenizer*, post-padding each sequence to *length*.

    NOTE(review): this cell re-defines an identical encode_text; the duplicate
    is harmless but redundant.
    """
    # integer encode
    encoded = tokenizer.texts_to_sequences(lines)
    # pad encoded sequences
    padded = pad_sequences(encoded, maxlen=length, padding='post')
    return padded
# define the model
def define_model(length, vocab_size):
    """Build and compile the 3-channel CNN for binary text classification.

    Each channel embeds the same padded input sequence and convolves it with a
    different kernel size (4, 6, 8) — i.e. different n-gram widths; the
    flattened channel outputs are concatenated and fed through a small dense
    head with a sigmoid output.

    Params
    ======
        length (int): padded document length (tokens per input)
        vocab_size (int): tokenizer vocabulary size (embedding input dimension)

    Returns the compiled (untrained) Keras Model; also prints a summary and
    writes a plot to 'multichannel.png' as a side effect.
    """
    # channel 1: 4-gram reader
    inputs1 = Input(shape=(length,))
    embedding1 = Embedding(vocab_size, 100)(inputs1)
    conv1 = Conv1D(filters=32, kernel_size=4, activation='relu')(embedding1)
    drop1 = Dropout(0.5)(conv1)
    pool1 = MaxPooling1D(pool_size=2)(drop1)
    flat1 = Flatten()(pool1)
    # channel 2: 6-gram reader
    inputs2 = Input(shape=(length,))
    embedding2 = Embedding(vocab_size, 100)(inputs2)
    conv2 = Conv1D(filters=32, kernel_size=6, activation='relu')(embedding2)
    drop2 = Dropout(0.5)(conv2)
    pool2 = MaxPooling1D(pool_size=2)(drop2)
    flat2 = Flatten()(pool2)
    # channel 3: 8-gram reader
    inputs3 = Input(shape=(length,))
    embedding3 = Embedding(vocab_size, 100)(inputs3)
    conv3 = Conv1D(filters=32, kernel_size=8, activation='relu')(embedding3)
    drop3 = Dropout(0.5)(conv3)
    pool3 = MaxPooling1D(pool_size=2)(drop3)
    flat3 = Flatten()(pool3)
    # merge the three channel representations
    merged = concatenate([flat1, flat2, flat3])
    # interpretation head
    dense1 = Dense(10, activation='relu')(merged)
    outputs = Dense(1, activation='sigmoid')(dense1)
    # the same padded input is fed to all three channels at fit/predict time
    model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
    # compile
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summarize
    print(model.summary())
    plot_model(model, show_shapes=True, to_file='multichannel.png')
    return model
# load training dataset
trainLines, trainLabels = load_dataset('train.pkl')
# create tokenizer
tokenizer = create_tokenizer(trainLines)
# calculate max document length
length = max_length(trainLines)
# calculate vocabulary size
vocab_size = len(tokenizer.word_index) + 1
print('Max document length: %d' % length)
print('Vocabulary size: %d' % vocab_size)
# encode data
trainX = encode_text(tokenizer, trainLines, length)
print(trainX.shape)
# define model
model = define_model(length, vocab_size)
# fit model
model.fit([trainX,trainX,trainX], array(trainLabels), epochs=10, batch_size=16)
# save the model
model.save('model.h5')
#Evaluation
# load datasets
trainLines, trainLabels = load_dataset('train.pkl')
testLines, testLabels = load_dataset('test.pkl')
# create tokenizer
tokenizer = create_tokenizer(trainLines)
# calculate max document length
length = max_length(trainLines)
# calculate vocabulary size
vocab_size = len(tokenizer.word_index) + 1
print('Max document length: %d' % length)
print('Vocabulary size: %d' % vocab_size)
# encode data
trainX = encode_text(tokenizer, trainLines, length)
testX = encode_text(tokenizer, testLines, length)
print(trainX.shape, testX.shape)
```
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# NASA - Sea level
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/NASA/NASA_Sea_level.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
**Tags:** #nasa #naas #opendata #analytics #plotly
**Author:** [Colyn TIDMAN](https://www.linkedin.com/in/colyntidman/), [Dylan PICHON](https://www.linkedin.com/in/dylan-pichon/)
Sea level rise is caused primarily by two factors related to global warming: the added water from melting ice sheets and glaciers and the expansion of seawater as it warms. The first graph tracks the change in sea level since 1993 as observed by satellites.
The second graph, derived from coastal tide gauge and satellite data, shows how much sea level changed from about 1900 to 2018. Items with pluses (+) are factors that cause global mean sea level to increase, while minuses (-) are variables that cause sea levels to decrease. These items are displayed at the time they were affecting sea level.
The data shown are the latest available, with a four- to five-month lag needed for processing.
* You now need to create an Earthdata account to access NASA's sea level data. Register for free by clicking on 'Get data : http'. Once logged in you will access the data.
Website : https://climate.nasa.gov/vital-signs/sea-level/
Data source: Satellite sea level observations.
Credit: NASA's Goddard Space Flight Center
## Input
### Import libraries
```
import pandas
import plotly.graph_objects as go
```
### Path of the source
Data source : nasa_sea_levels.txt downloaded earlier
```
uri_nasa_sea_level = "nasa-sea-level-data.txt"
```
## Model
### Read the csv and create the table
```
df = pandas.read_csv(uri_nasa_sea_level, engine="python", comment='HDR',delim_whitespace=True, names=["A","B","Year + Fraction","D","E","F","G", "H","I","J","K","Smoothed GMSL (mm)",])
df.head(10)
```
Now lets only get the information we want and convert year + fraction to date
```
# Keep only the decimal-year column and the smoothed global mean sea level,
# then derive a real timestamp and a zero-based value for plotting.
new_df = pandas.DataFrame(df, columns=['Year + Fraction', 'Smoothed GMSL (mm)'])
dates = []
values = []
ref = 0
for i, row in new_df.iterrows():
    # date formatting: convert a decimal year (e.g. 1993.0115) into a Timestamp
    date_split = str(row['Year + Fraction']).split('.')
    year = date_split[0]
    fraction = '0.' + date_split[1]
    float_fraction = float(fraction)
    date = year + "-1-1"
    # NOTE(review): assumes a 365-day year — ignores leap years, so dates can
    # drift by up to ~1 day; acceptable for this visualisation
    date_delta = 365 * float_fraction
    value = pandas.to_datetime(date) + pandas.to_timedelta(date_delta, unit='D')
    dates.append(value)
    # value formatting:
    # to stay in line with the graph visible on NASA's website, we need to have 0 as our first value
    if i == 0:
        ref = row['Smoothed GMSL (mm)']
    val = row['Smoothed GMSL (mm)'] - ref
    values.append(val)
new_df['Date'] = dates
new_df['Value'] = values
new_df.head()
```
## Output
### Land-Ocean Temperature Index - Visualization
```
fig = go.Figure(layout_title="<b>Sea Level variation since 1993 (mm)</b>")
fig.add_trace(go.Scatter(
x = new_df["Date"],
y = new_df["Value"],
name="Delta",
))
fig.update_layout(
autosize=False,
width=1300,
height=700,
plot_bgcolor='rgb(250,250,250)',
)
fig.add_annotation(y=6, x='2020-1-1',
text="Data source: Satellite sea level observations.<br> Credit: NASA's Goddard Space Flight Center",
showarrow=False,
)
fig.update_yaxes(title_text="Sea Height Variation (mm)")
fig.update_xaxes(title_text="Year", tickangle=60)
fig.add_hline(y=0.0)
fig.update_layout(title_x=0.5)
fig.show()
```
| github_jupyter |
# Introduction
In a prior notebook, documents were partitioned by assigning them to the domain with the highest Dice similarity of their term and structure occurrences. The occurrences of terms and structures in each domain is what we refer to as the domain "archetype." Here, we'll assess whether the observed similarity between documents and the archetype is greater than expected by chance. This would indicate that information in the framework generalizes well to individual documents.
# Load the data
```
import pandas as pd
import numpy as np
import sys
sys.path.append("..")
import utilities
from style import style
framework = "data-driven_k09"
version = 190325 # Document-term matrix version
suffix = "" # Suffix for term lists
clf = "_lr" # Classifier used to generate the framework
n_iter = 1000 # Iterations for null distribution
dx = [0.38, 0.38, 0.37, 0.39, 0.37, 0.38, 0.32, 0.34, 0.37] # Nudges for plotted means
ds = 0.11 # Nudges for plotted stars
alpha = 0.001 # Significance level for statistical comparisons
```
## Brain activation coordinates
```
act_bin = utilities.load_coordinates()
print("Document N={}, Structure N={}".format(
act_bin.shape[0], act_bin.shape[1]))
```
## Document-term matrix
```
dtm_bin = utilities.load_doc_term_matrix(version=version, binarize=True)
print("Document N={}, Term N={}".format(
dtm_bin.shape[0], dtm_bin.shape[1]))
```
## Domain archetypes
```
from collections import OrderedDict
lists, circuits = utilities.load_framework(framework, suffix=suffix, clf=clf)
words = sorted(list(set(lists["TOKEN"])))
structures = sorted(list(set(act_bin.columns)))
domains = list(OrderedDict.fromkeys(lists["DOMAIN"]))
archetypes = pd.DataFrame(0.0, index=words+structures, columns=domains)
for dom in domains:
for word in lists.loc[lists["DOMAIN"] == dom, "TOKEN"]:
archetypes.loc[word, dom] = 1.0
for struct in structures:
archetypes.loc[struct, dom] = circuits.loc[struct, dom]
archetypes[archetypes > 0.0] = 1.0
print("Term & Structure N={}, Domain N={}".format(
archetypes.shape[0], archetypes.shape[1]))
```
## Document structure-term vectors
```
pmids = dtm_bin.index.intersection(act_bin.index)
len(pmids)
dtm_words = dtm_bin.loc[pmids, words]
act_structs = act_bin.loc[pmids, structures]
docs = dtm_words.copy()
docs[structures] = act_structs.copy()
docs.head()
```
## Document splits
```
splits = {}
splits["discovery"] = [int(pmid.strip()) for pmid in open("../data/splits/train.txt")]
splits["replication"] = [int(pmid.strip()) for pmid in open("../data/splits/validation.txt")]
splits["replication"] += [int(pmid.strip()) for pmid in open("../data/splits/test.txt")]
for split, pmids in splits.items():
print("{:12s} N={}".format(split.title(), len(pmids)))
```
## Document assignments
```
doc2dom_df = pd.read_csv("../partition/data/doc2dom_{}{}.csv".format(framework, clf),
header=None, index_col=0)
doc2dom = {int(pmid): str(dom.values[0]) for pmid, dom in doc2dom_df.iterrows()}
dom2docs = {dom: {split: [] for split in ["discovery", "replication"]} for dom in domains}
for doc, dom in doc2dom.items():
for split, split_pmids in splits.items():
if doc in splits[split]:
dom2docs[dom][split].append(doc)
```
# Compute similarity to archetype
```
from scipy.spatial.distance import cdist
```
## Observed values
```
pmid_list, split_list, dom_list, obs_list = [], [], [], []
for split, split_pmids in splits.items():
split_list += [split] * len(split_pmids)
for dom in domains:
dom_pmids = dom2docs[dom][split]
dom_vecs = docs.loc[dom_pmids].values
dom_arche = archetypes[dom].values.reshape(1, archetypes.shape[0])
dom_sims = 1.0 - cdist(dom_vecs, dom_arche, metric="dice")
pmid_list += dom_pmids
dom_list += [dom] * len(dom_sims)
obs_list += list(dom_sims[:,0])
df_obs = pd.DataFrame({"PMID": pmid_list, "SPLIT": split_list,
"DOMAIN": dom_list, "OBSERVED": obs_list})
df_obs.to_csv("data/arche_obs_{}{}.csv".format(framework, clf))
df_obs.head()
```
## Null distributions
```
import os
df_null = {}
for split, split_pmids in splits.items():
print("Processing {} split (N={} documents)".format(split, len(split_pmids)))
file_null = "data/arche_null_{}{}_{}_{}iter.csv".format(framework, clf, split, n_iter)
if not os.path.isfile(file_null):
df_null[split] = np.zeros((len(domains), n_iter))
for n in range(n_iter):
null = np.random.choice(range(len(docs.columns)),
size=len(docs.columns), replace=False)
for i, dom in enumerate(domains):
dom_pmids = dom2docs[dom][split]
dom_vecs = docs.loc[dom_pmids].values
dom_arche = archetypes.values[null,i].reshape(1, archetypes.shape[0])
df_null[split][i,n] = 1.0 - np.mean(cdist(dom_vecs, dom_arche, metric="dice"))
if n % int(n_iter / 10.0) == 0:
print("----- Processed {} iterations".format(n))
df_null[split] = pd.DataFrame(df_null[split], index=domains, columns=range(n_iter))
df_null[split].to_csv(file_null)
print("")
else:
df_null[split] = pd.read_csv(file_null, index_col=0, header=0)
```
## Interleave splits to facilitate plotting
```
df_null_interleaved = pd.DataFrame()
null_idx = []
for dom in domains:
for split in ["discovery", "replication"]:
df_null_interleaved = df_null_interleaved.append(df_null[split].loc[dom])
null_idx.append(dom + "_" + split)
df_null_interleaved.index = null_idx
df_null_interleaved.head()
```
## Bootstrap distributions
```
df_boot = {}
for split, split_pmids in splits.items():
print("Processing {} split (N={} documents)".format(split, len(split_pmids)))
file_boot = "data/arche_boot_{}{}_{}_{}iter.csv".format(framework, clf, split, n_iter)
if not os.path.isfile(file_boot):
df_boot[split] = np.zeros((len(domains), n_iter))
for n in range(n_iter):
boot = np.random.choice(range(len(docs.columns)),
size=len(docs.columns), replace=True)
for i, dom in enumerate(domains):
dom_pmids = dom2docs[dom][split]
dom_vecs = docs.loc[dom_pmids].values[:,boot]
dom_arche = archetypes.values[boot,i].reshape(1, archetypes.shape[0])
df_boot[split][i,n] = 1.0 - np.mean(cdist(dom_vecs, dom_arche, metric="dice"))
if n % int(n_iter / 10.0) == 0:
print("----- Processed {} iterations".format(n))
df_boot[split] = pd.DataFrame(df_boot[split], index=domains, columns=range(n_iter))
df_boot[split].to_csv(file_boot)
print("")
else:
df_boot[split] = pd.read_csv(file_boot, index_col=0, header=0)
```
# Perform significance testing
```
from statsmodels.stats import multitest
df_stat = {}
for split, split_pmids in splits.items():
df_stat[split] = pd.DataFrame(index=domains)
pval = []
for dom in domains:
dom_obs = df_obs.loc[(df_obs["SPLIT"] == split) & (df_obs["DOMAIN"] == dom), "OBSERVED"].mean()
df_stat[split].loc[dom, "OBSERVED"] = dom_obs
dom_null = df_null[split].loc[dom].values
p = np.sum(dom_null >= dom_obs) / float(n_iter)
pval.append(p)
df_stat[split].loc[dom, "P"] = p
df_stat[split]["FDR"] = multitest.multipletests(pval, method="fdr_bh")[1]
for dom in domains:
if df_stat[split].loc[dom, "FDR"] < alpha:
df_stat[split].loc[dom, "STARS"] = "*"
else:
df_stat[split].loc[dom, "STARS"] = ""
df_stat[split] = df_stat[split].loc[domains, ["OBSERVED", "P", "FDR", "STARS"]]
df_stat[split].to_csv("data/arche_mean_{}{}_{}.csv".format(framework, clf, split))
print("-" * 65 + "\n" + split.upper() + "\n" + "-" * 65)
print(df_stat[split])
print("")
```
# Plot results
```
%matplotlib inline
palette = style.palettes["data-driven"] + style.palettes["dsm"]
utilities.plot_split_violins(framework, domains, df_obs, df_null_interleaved, df_stat,
palette, metric="arche", dx=dx, figsize=(3.5, 2.1),
ylim=[-0.25,1], yticks=[-0.25,0,0.25,0.5,0.75,1],
interval=0.999, alphas=[0], suffix=clf)
```
| github_jupyter |
# Udacity. Deep Reingorcement Learning : Collaboration and Competition
### Markus Buchholz
```
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name='./Tennis_Linux/Tennis.x86_64')
```
## BRAIN
```
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```
## MODEL
```
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Return the symmetric uniform-init bounds (-lim, lim) for *layer*.

    lim = 1/sqrt(fan) where fan is the first dimension of the layer's weight
    matrix. NOTE(review): for nn.Linear, weight is (out_features, in_features),
    so size()[0] is out_features rather than true fan-in (size()[1]); kept
    as-is to preserve the original initialisation behaviour.
    """
    fan = layer.weight.data.size()[0]
    bound = 1. / np.sqrt(fan)
    return (-bound, bound)
class Actor(nn.Module):
    """Actor (Policy) Model: deterministic policy mapping states to actions in [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc1_units=200, fc2_units=150):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Hidden layers: uniform in hidden_init bounds; output layer: small uniform."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # torch.tanh replaces F.tanh, which is deprecated (and removed in
        # recent PyTorch releases); numerically identical
        return torch.tanh(self.fc3(x))
class Critic(nn.Module):
    """Critic (Value) Model: maps (state, action) pairs to scalar Q-values."""

    def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        # actions are injected after the first layer, so fc2's input width is
        # fcs1_units + action_size
        self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Hidden layers: uniform in hidden_init bounds; output layer: small uniform."""
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xs = F.relu(self.fcs1(state))
        # concatenate the action with the first hidden representation
        x = torch.cat((xs, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
```
## AGENT AND CRITIC
```
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-3 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
eps_start= 1.0
eps_end = 0.01
eps_decay = 1e-6
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DDPG agent: actor-critic with target networks, OU exploration noise,
    and a shared replay buffer. Relies on module-level hyperparameters
    (BUFFER_SIZE, BATCH_SIZE, GAMMA, TAU, LR_ACTOR, LR_CRITIC, WEIGHT_DECAY)
    and the module-level `device`."""

    def __init__(self, state_size, action_size, num_agents, random_seed):
        """Initialize an Agent.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            num_agents (int): number of parallel agents sharing this instance
            random_seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random_seed
        # Actor Network (w/ Target Network)
        self.actor_local = Actor(self.state_size, self.action_size, self.seed).to(device)
        self.actor_target = Actor(self.state_size, self.action_size, self.seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
        # Critic Network (w/ Target Network)
        self.critic_local = Critic(self.state_size, self.action_size, self.seed).to(device)
        self.critic_target = Critic(self.state_size, self.action_size, self.seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
        # Noise process (one noise vector per parallel agent)
        self.noise = OUNoise((num_agents,action_size), random_seed)
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)

    def step(self, state, action, reward, next_state, done):
        """Save experience in replay memory, and use random sample from buffer to learn."""
        # Save experience / reward
        self.memory.add(state, action, reward, next_state, done)
        # Learn, if enough samples are available in memory
        if len(self.memory) > BATCH_SIZE:
            experiences = self.memory.sample()
            self.learn(experiences, GAMMA)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().to(device)
        # eval mode + no_grad: pure inference forward pass, no training side effects
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            # Ornstein-Uhlenbeck noise for exploration
            action += self.noise.sample()
        # keep actions inside the tanh output range of the actor
        return np.clip(action, -1, 1)

    def reset(self):
        """Reset the exploration noise process (call at the start of each episode)."""
        self.noise.reset()

    def learn(self, experiences, gamma):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i); (1 - dones) zeroes the
        # bootstrap term at episode boundaries
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss: maximize expected Q by minimizing its negative
        actions_pred = self.actor_local(states)
        actor_loss = -self.critic_local(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic_local, self.critic_target, TAU)
        self.soft_update(self.actor_local, self.actor_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
    """Ornstein-Uhlenbeck process: temporally correlated exploration noise
    that reverts toward a long-run mean ``mu``."""

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
        """Initialize parameters and noise process."""
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.seed = random.seed(seed)
        self.size = size
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to the mean (mu)."""
        self.state = copy.copy(self.mu)

    def sample(self):
        """Advance the process one step and return the new noise state."""
        mean_reversion = self.theta * (self.mu - self.state)
        random_shock = self.sigma * np.random.standard_normal(self.size)
        self.state = self.state + (mean_reversion + random_shock)
        return self.state
class ReplayBuffer:
    """Fixed-size cyclic store of experience tuples with uniform random sampling."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
        """
        self.action_size = action_size
        # deque drops the oldest experience automatically once full
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Randomly sample a batch of experiences and stack each field into a
        float tensor on the module-level `device`."""
        drawn = [e for e in random.sample(self.memory, k=self.batch_size) if e is not None]

        states = torch.from_numpy(np.vstack([e.state for e in drawn])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in drawn])).float().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in drawn])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in drawn])).float().to(device)
        # booleans become 0/1 floats so they can scale the bootstrap term
        dones = torch.from_numpy(np.vstack([e.done for e in drawn]).astype(np.uint8)).float().to(device)

        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
```
## EXAMINE THE STATE AND ACTION SPACES
```
# Examine the state and action spaces of the Unity ML-Agents environment.
# NOTE(review): `env`, `brain_name` and `brain` are defined in an earlier
# cell of this notebook (not visible here).
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
```
## TRAIN DDPG AGENTS
```
num_agents = len(env_info.agents)
agent = Agent(state_size=state_size, action_size=action_size, num_agents=num_agents, random_seed=0)

def ddpg(n_episodes=3000):
    """Train the shared DDPG agent in the multi-agent environment.

    Params
    ======
        n_episodes (int): maximum number of training episodes

    Returns
    =======
        list of float: per-episode max score over agents
    """
    total_scores_deque = deque(maxlen=100)  # rolling window for the 100-episode average
    total_scores = []
    for i_episode in range(1, n_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        scores = np.zeros(num_agents)
        agent.reset()  # reset the exploration-noise process
        while True:
            actions = agent.act(states)
            env_info = env.step(actions)[brain_name]
            next_states = env_info.vector_observations   # get the next state
            rewards = env_info.rewards                   # get the reward
            dones = env_info.local_done                  # see if episode has finished
            # store every agent's transition (the agent learns inside step())
            for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
                agent.step(state, action, reward, next_state, done)
            scores += rewards                            # update the score
            states = next_states                         # roll over the state
            if np.any(dones):                            # exit loop if episode finished
                break
        # the environment is scored on the best agent's episode return
        max_score = np.max(scores)
        total_scores_deque.append(max_score)
        total_scores.append(max_score)
        total_average_score = np.mean(total_scores_deque)
        if i_episode % 10 == 0:  # print every 10 episodes
            print('\rEpisode {}\tTotal Average Score: {:.2f}'.format(i_episode, total_average_score))
        # solved once the 100-episode rolling average of max scores reaches 0.5
        if total_average_score >= 0.5 and i_episode >= 100:
            print('Problem Solved after {} episodes. Total Average score: {:.2f}'.format(i_episode, total_average_score))
            torch.save(agent.actor_local.state_dict(), 'checkpoint_MA_actor.pth')
            torch.save(agent.critic_local.state_dict(), 'checkpoint_MA_critic.pth')
            break
    return total_scores

scores = ddpg()
```
## PRINT TRAIN STATISTICS
```
%matplotlib inline
import matplotlib.pyplot as plt
#def __init__(self, state_size, action_size, random_seed):
#agent = Agent(state_size=state_size, action_size=action_size, random_seed=0)
#scores = ddpg(agent)
# plot the scores
# Horizontal reference line at 0.5 — the "solved" threshold used during training.
average_score = 0.5
success = [average_score] * len(scores)
fig = plt.figure(figsize=(30,20))
ax = fig.add_subplot(111)
# Two series: per-episode score and the constant 0.5 target line.
# NOTE(review): the legend labels the target line 'average' — it is the
# solve threshold, not a rolling average; consider relabeling.
plt.plot(np.arange(len(scores)), scores,np.arange(len(scores)), success)
plt.ylabel('Score', fontsize = 33)
plt.xlabel('Episode #', fontsize = 33)
plt.title('Train DDPG in Multi Agent Environment', fontsize = 33)
plt.gca().legend(('actual score','average'), fontsize = 23)
plt.show()
%matplotlib inline
import pandas as pd
import numpy as np
# Box plot of the score distribution over all training episodes.
df = pd.DataFrame(scores, columns=['score'])
df.plot.box(figsize =(30,20), fontsize =33)
```
## WATCH THE AGENTS IN ACTION
```
# Load the saved weights into the PyTorch models.
# The training loop above saves to 'checkpoint_MA_actor.pth' /
# 'checkpoint_MA_critic.pth'; load those same files so the watched agent
# actually uses the trained weights (the originals pointed at
# 'checkpoint_actor.pth' / 'checkpoint_critic.pth', which training never writes).
agent.actor_local.load_state_dict(torch.load('checkpoint_MA_actor.pth', map_location='cpu'))
agent.critic_local.load_state_dict(torch.load('checkpoint_MA_critic.pth', map_location='cpu'))

for i in range(100):                                    # play the game for 100 episodes
    env_info = env.reset(train_mode=False)[brain_name]  # reset the environment
    states = env_info.vector_observations               # get the current state (for each agent)
    scores = np.zeros(num_agents)                       # initialize the score (for each agent)
    while True:
        actions = agent.act(states)                     # select actions from loaded model agent
        actions = np.clip(actions, -1, 1)               # all actions between -1 and 1
        env_info = env.step(actions)[brain_name]        # send all actions to the environment
        next_states = env_info.vector_observations      # get next state (for each agent)
        rewards = env_info.rewards                      # get reward (for each agent)
        dones = env_info.local_done                     # see if episode finished
        scores += env_info.rewards                      # update the score (for each agent)
        states = next_states                            # roll over states to next time step
        if np.any(dones):                               # exit loop if episode finished
            break
    print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/HowEarthEngineWorks/ClientVsServer.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/HowEarthEngineWorks/ClientVsServer.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=HowEarthEngineWorks/ClientVsServer.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/HowEarthEngineWorks/ClientVsServer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for this first time or if you are getting an authentication error.
```
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
## Overfitting Exercise
In this exercise, we'll build a model that, as you'll see, dramatically overfits the training data. This will allow you to see what overfitting can "look like" in practice.
```
import os
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
```
For this exercise, we'll use gradient boosted trees. In order to implement this model, we'll use the XGBoost package.
```
! pip install xgboost
import xgboost as xgb
```
Here, we define a few helper functions.
```
def nrow(df):
    """Return the number of rows in a DataFrame (R-style helper)."""
    return len(df.index)

def ncol(df):
    """Return the number of columns in a DataFrame (R-style helper)."""
    return len(df.columns)

def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in l for item in sublist]

def c(*args):
    """Combine multiple arrays/lists into a single flat list (R-style c())."""
    return flatten(list(args))
```
In this exercise, we're going to try to predict the returns of the S&P 500 ETF. This may be a futile endeavor, since many experts consider the S&P 500 to be essentially unpredictable, but it will serve well for the purpose of this exercise. The following cell loads the data.
```
df = pd.read_csv("SPYZ.csv")
```
As you can see, the data file has four columns, `Date`, `Close`, `Volume` and `Return`.
```
# Preview the loaded data and record the number of rows for the loops below.
df.head()
n = nrow(df)
```
Next, we'll form our predictors/features. In the cells below, we create four types of features. We also use a parameter, `K`, to set the number of each type of feature to build. With a `K` of 25, 100 features will be created. This should already seem like a lot of features, and alert you to the potential that the model will be overfit.
```
# `predictors` accumulates the column names of every feature built below.
predictors = []
# we'll create a new DataFrame to hold the data that we'll use to train the model
# we'll create it from the `Return` column in the original DataFrame, but rename that column `y`
model_df = pd.DataFrame(data = df['Return']).rename(columns = {"Return" : "y"})
# IMPORTANT: this sets how many of each of the following four predictors to create
# (4 predictor families x K lags = 4*K features; 100 with K=25)
K = 25
```
Now, you write the code to create the four types of predictors.
```
# Build 4*K lagged features per row: lagged return, squared return,
# log volume, and return*log-volume, for lags L = 1..K.
# NOTE(review): the loop bodies below were flattened when this notebook was
# exported; the original cell is indented under the two `for` loops.
for L in range(1,K+1):
# this predictor is just the return L days ago, where L goes from 1 to K
# these predictors will be named `R1`, `R2`, etc.
pR = "".join(["R",str(L)])
predictors.append(pR)
for i in range(K+1,n):
# TODO: fill in the code to assign the return from L days before to the ith row of this predictor in `model_df`
model_df.loc[i, pR] = df.loc[i-L,'Return']
# this predictor is the return L days ago, squared, where L goes from 1 to K
# these predictors will be named `Rsq1`, `Rsq2`, etc.
pR2 = "".join(["Rsq",str(L)])
predictors.append(pR2)
for i in range(K+1,n):
# TODO: fill in the code to assign the squared return from L days before to the ith row of this predictor
# in `model_df`
model_df.loc[i, pR2] = (df.loc[i-L,'Return']) ** 2
# this predictor is the log volume L days ago, where L goes from 1 to K
# these predictors will be named `V1`, `V2`, etc.
pV = "".join(["V",str(L)])
predictors.append(pV)
for i in range(K+1,n):
# TODO: fill in the code to assign the log of the volume from L days before to the ith row of this predictor
# in `model_df`
# Add 1 to the volume before taking the log
model_df.loc[i, pV] = math.log(1.0 + df.loc[i-L,'Volume'])
# this predictor is the product of the return and the log volume from L days ago, where L goes from 1 to K
# these predictors will be named `RV1`, `RV2`, etc.
# (reuses the already-filled pR and pV columns rather than recomputing the lags)
pRV = "".join(["RV",str(L)])
predictors.append(pRV)
for i in range(K+1,n):
# TODO: fill in the code to assign the product of the return and the log volume from L days before to the
# ith row of this predictor in `model_df`
model_df.loc[i, pRV] = model_df.loc[i, pR] * model_df.loc[i, pV]
```
Let's take a look at the predictors we've created.
```
model_df.iloc[100:105,:]
```
Next, we create a DataFrame that holds the recent volatility of the ETF's returns, as measured by the standard deviation of a sliding window of the past 20 days' returns.
```
# Rolling 20-day volatility of returns, computed row by row.
vol_df = pd.DataFrame(data = df[['Return']])
for i in range(K+1,n):
# TODO: create the code to assign the standard deviation of the return from the time period starting
# 20 days before day i, up to the day before day i, to the ith row of `vol_df`
# NOTE(review): .loc[(i-20):(i-1)] is label-based and inclusive at both ends
# (20 values), and np.std uses ddof=0 (population std), unlike pandas'
# default ddof=1 — confirm both are intended.
vol_df.loc[i, 'vol'] = np.std(vol_df.loc[(i-20):(i-1),'Return'])
```
Let's take a quick look at the result.
```
vol_df.iloc[100:105,:]
```
Now that we have our data, we can start thinking about training a model.
```
# for training, we'll use all the data except for the first K days, for which the predictors' values are NaNs
# NOTE(review): the predictor loops above fill rows starting at K+1, so row K
# (kept by iloc[K:n]) still has NaN predictors — possible off-by-one;
# XGBoost tolerates NaNs, so this does not crash, but confirm intent.
model = model_df.iloc[K:n,:]
```
In the cell below, first split the data into train and test sets, and then split off the targets from the predictors.
```
# Split data into train and test sets
train_size = 2.0/3.0
breakpoint = round(nrow(model) * train_size)

# Use positional (iloc) indexing for BOTH splits. The original code sliced
# the training set with iloc but the test set with label-based .loc; since
# `model`'s index starts at K (not 0), label `breakpoint` is not row
# `breakpoint`, so the two sets overlapped. Starting the training slice at 0
# (not 1) also keeps the first row instead of silently dropping it.
training_data = model.iloc[:breakpoint, :]
test_data = model.iloc[breakpoint:, :]

# Split training data and test data into targets (Y) and predictors (X).
# Column 0 is the target `y`; all remaining columns are predictors.
X_train = training_data.iloc[:, 1:ncol(training_data)]
Y_train = training_data.iloc[:, 0]
X_test = test_data.iloc[:, 1:ncol(test_data)]
Y_test = test_data.iloc[:, 0]
```
Great, now that we have our data, let's train the model.
```
# DMatrix is an internal data structure used by XGBoost, optimized for both memory efficiency
# and training speed.
dtrain = xgb.DMatrix(X_train, Y_train)
# Train the XGBoost model.
# max_depth=20 is deliberately extreme for this exercise: very deep trees
# memorize the training data, which makes the overfitting easy to observe.
# NOTE(review): the 'silent' parameter was removed in newer XGBoost releases
# (replaced by 'verbosity') — confirm the installed version accepts it.
param = { 'max_depth':20, 'silent':1 }
num_round = 20
xgModel = xgb.train(param, dtrain, num_round)
```
Now let's predict the returns for the S&P 500 ETF in both the train and test periods. If the model is successful, what should the train and test accuracies look like? What would be a key sign that the model has overfit the training data?
Todo: Before you run the next cell, write down what you expect to see if the model is overfit.
```
# Make predictions on BOTH splits so train vs. test error can be compared below.
preds_train = xgModel.predict(xgb.DMatrix(X_train))
preds_test = xgModel.predict(xgb.DMatrix(X_test))
```
Let's quickly look at the mean squared error of the predictions on the training and testing sets.
```
# TODO: Calculate the mean squared error on the training set
# MSE = mean of squared residuals; a large test/train gap signals overfitting.
msetrain = sum((preds_train-Y_train)**2)/len(preds_train)
msetrain
# TODO: Calculate the mean squared error on the test set
msetest = sum((preds_test-Y_test)**2)/len(preds_test)
msetest
```
Looks like the mean squared error on the test set is an order of magnitude greater than on the training set. Not a good sign. Now let's do some quick calculations to gauge how this would translate into performance.
```
# combine prediction arrays into a single list
predictions = c(preds_train, preds_test)
responses = c(Y_train, Y_test)
# as a holding size, we'll take predicted return divided by return variance
# this is mean-variance optimization with a single asset
# NOTE(review): `predictions` is a plain list while `vols` keeps its original
# integer index (labels K..n-1, inclusive via .loc) — the division relies on
# positional alignment; confirm both have length n-K.
vols = vol_df.loc[K:n,'vol']
position_size = predictions / vols ** 2
# TODO: Calculate pnl. Pnl in each time period is holding * realized return.
performance = position_size * responses
# plot simulated performance
# Cumulative PnL; the red vertical line marks the train/test boundary.
plt.plot(np.cumsum(performance))
plt.ylabel('Simulated Performance')
plt.axvline(x=breakpoint, c = 'r')
plt.show()
```
Our simulated returns accumulate throughout the training period, but they are absolutely flat in the testing period. The model has no predictive power whatsoever in the out-of-sample period.
Can you think of a few reasons our simulation of performance is unrealistic?
```
# TODO: Answer the above question.
```
1. We left out any accounting of trading costs. If we had included trading costs, the performance in the out-of-sample period would have trended downward!
2. We didn't account for any time for trading. It's most conservative to assume that we would make trades on the day following our calculation of position size to take, and realize returns the day after that, such that there's a two-day delay between holding size calculation and realized return.
If you need a little assistance, check out the [solution](overfitting_exercise_solution.ipynb).
| github_jupyter |
# JAGS example in PyMC3
This notebook attempts to solve the same problem that has been solved manually in [w02-04b-mcmc-demo-continuous.ipynb](http://localhost:8888/notebooks/w02-04b-mcmc-demo-continuous.ipynb), but using PyMC3 instead of JAGS as demonstrated in the course video.
## Problem Definition
Data is for personnel change from last year to this year for 10 companies. Model is defined as follows:
$$y_i | \mu \overset{iid}{\sim} N(\mu, 1)$$
$$\mu \sim t(0, 1, 1)$$
where y<sub>i</sub> represents personnel change for company i, and the distribution of y<sub>i</sub> given $\mu$ is a Normal distribution with mean $\mu$ and variance 1. Prior distribution of $\mu$ is a t distribution with location 0, scale parameter 1, and degrees of freedom 1 (also known as Cauchy's distribution).
Model is not conjugate, thus the posterior is not a standard form that we can conveniently sample. To get posterior samples, we will need to set up a Markov chain whose stationary distribution is the posterior distribution we want.
Main difference with manually solved example is that we don't need to compute the analytical form of the posterior for our simulation.
## PyMC3 Solution
The PyMC3 solution follows the same 4-step process as JAGS:
* Specify model -- this is the first 2 lines in the `with model` block in cell 3.
* Setup model -- this is the observed attribute in y_obs where real values of y are plugged in.
* Run MCMC sampler -- the block under the `run MCMC sampler` command. The call to `update` and `coda.sample` is merged into a single `pm.sample` call with separate `n_iter` and `n_tune` variables. The `step` attribute is set to Metropolis-Hastings as that is the preferred sampler in the course, PyMC3 default is the NUTS sampler.
* Post-processing -- whatever we do with `trace["mu"]` after the sampling is done.
```
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# Observed data: year-over-year personnel change for 10 companies.
y = np.array([1.2, 1.4, -0.5, 0.3, 0.9, 2.3, 1.0, 0.1, 1.3, 1.9])
n_iter = 1000
n_tune = 500
with pm.Model() as model:
# model specification, and setup (set observed=y)
# prior: mu ~ t(0, 1, 1) (Cauchy); likelihood: y_i | mu ~ N(mu, 1)
mu = pm.StudentT("mu", nu=1, mu=0, sigma=1)
y_obs = pm.Normal("y_obs", mu=mu, sigma=1, observed=y)
# run MCMC sampler
step = pm.Metropolis() # PyMC3 default is NUTS, course uses Metropolis-Hastings
trace = pm.sample(n_iter, tune=n_tune, step=step)
# post-processing
# NOTE(review): pm.sample runs multiple chains by default, so len(mu_sims)
# is presumably n_iter * n_chains, not n_iter — confirm.
mu_sims = trace["mu"]
print("mu_sims :", mu_sims)
print("len(mu_sims): {:d}".format(len(mu_sims)))
_ = pm.traceplot(trace)
_ = pm.traceplot(trace, combined=True)
pm.summary(trace)
```
## Reference
* [Markov Chain Monte Carlo for Bayesian Inference - the Metropolis Algorithm](https://www.quantstart.com/articles/Markov-Chain-Monte-Carlo-for-Bayesian-Inference-The-Metropolis-Algorithm/)
| github_jupyter |
# Importing libraries
```
import sys, os, re, csv, subprocess, operator
import pandas as pd
from urllib.request import urlopen
import urllib.request
from bs4 import BeautifulSoup
```
# Configure repository and directories
```
userhome = os.path.expanduser('~')
# Read the project identity file: line 0 = project name, line 1 = bug key.
# A context manager closes the file handle deterministically (the original
# opened the file and never closed it).
with open(userhome + r"/DifferentDiffAlgorithms/SZZ/code_document/project_identity.txt", "r") as txt_file:
    pid = txt_file.read().split('\n')
project = pid[0]
bugidentifier = pid[1]
# Derived working directories for the selected project.
repository = userhome + r'/DifferentDiffAlgorithms/SZZ/datasource/' + project + '/'
analyze_dir = userhome + r'/DifferentDiffAlgorithms/SZZ/projects_analyses/' + project + '/'
print ("Project name = %s" % project)
print ("Project key = %s" % bugidentifier)
```
# Load textfile contains bug-ids
```
# Load the candidate bug ids produced by step 01, one id per line.
# A context manager closes the file handle (the original left it open).
with open(analyze_dir + "01_bug_ids_extraction/candidate_bug_ids.txt", "r") as txtfile:
    bug_links = txtfile.read().split('\n')
print ("Found " + str(len(bug_links)) + " bug_ids")
```
# Finding affected versions by bug ids
```
# Scrape each bug's "Affects Version/s" field from Apache JIRA.
# Failures (network, missing span) are collected in error_links for a retry pass.
# NOTE(review): the bare `except:` is deliberate best-effort scraping, but it
# also hides parse errors — consider narrowing to (urllib.error.URLError, AttributeError).
error_links = []
affected_version = []
for a,b in enumerate(bug_links):
link = "https://issues.apache.org/jira/browse/" + b
sys.stdout.write("\r%i " %(a+1) + "Extracting: " + b)
sys.stdout.flush()
try:
page = urllib.request.urlopen(link)
soup = BeautifulSoup(page, 'html.parser')
# Normalize the version string: strip newlines/spaces, turn " M" milestone
# markers into "-M", and drop trailing ".x" wildcards before splitting.
aff_version = soup.find('span', attrs={'id':'versions-val'}).text.replace("\n",'').replace(" M",'-M').replace(" ",'').replace(".x",'.').split(",")
aff_version = sorted(aff_version)
aff_version.insert(0,b)
affected_version.append(aff_version)
except:
error_links.append(b)
print("\nExtraction has been completed.")
print (error_links)
#Repeat the process if there are still some affected versions by bug_ids haven't been captured due to network problems
errorlinks = []
if error_links != []:
for c,d in enumerate(error_links):
link = "https://issues.apache.org/jira/browse/" + d
sys.stdout.write("\r%i " %(c+1) + "Extracting: " + d)
sys.stdout.flush()
try:
page = urllib.request.urlopen(link)
soup = BeautifulSoup(page, 'html.parser')
types = soup.find('span', attrs={'id':'versions-val'}).text.replace("\n",'').replace(" M",'-M').replace(" ",'').replace(".x",'.').split(",")
types = sorted(types)
types.insert(0, d)
affected_version.append(types)
except:
errorlinks.append(d)
print ("\nExtraction is complete")
print (errorlinks)
affected_version.sort()
#Finding the earliest version affected by the bug ids
# Keep only [bug_id, earliest_version] (first two entries of each sorted row).
earliest_version = []
for num, affver in enumerate(affected_version):
earliest_version.append(affver[:2])
earliest_version.sort()
for early in earliest_version:
print (early)
```
# Defining the function for git command
```
def execute_command(cmd, work_dir):
    """Run a shell command in `work_dir` and return (stdout, stderr) as bytes.

    Params
    ======
        cmd (str): shell command line to execute
        work_dir (str): working directory for the subprocess

    communicate() already waits for the process to terminate, so no extra
    wait() is needed (the original had an unreachable `pipe.wait()` after
    the return statement).
    """
    pipe = subprocess.Popen(cmd, shell=True, cwd=work_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, error) = pipe.communicate()
    return out, error
```
# Finding the versions related with earliest version
```
# For each earliest affected version, find matching git tags in the repository.
# Rows become [bug_id, tag1, tag2, ...] or [bug_id, 'None'] when the version
# string contains no dot (i.e. is not a version number).
related_version = []
for n, item in enumerate(earliest_version):
if "." in item[1]:
git_cmd = 'git tag -l "*' + item[1] + '*"'
# NOTE(review): parsing str() of the (stdout, stderr) bytes tuple is fragile;
# decoding the bytes directly would be safer — confirm the expected output shape.
temp = str(execute_command(git_cmd, repository)).replace("b'",'').replace("(",'').replace(")",'').split("\\n")
del temp[len(temp)-1]
if temp == []:
# no tag matched; fall back to the raw version with the project prefix stripped
temp = [item[1].replace("Java-SCA-","")]
else:
temp = ['None']
temp.insert(0, item[0])
related_version.append(temp)
for xx in related_version:
print (xx)
```
# Finding the date release for affected version
```
# Resolve each related tag to its commit date via `git log -1 --format=%ai`.
# Rows become [bug_id, tag, date, time, tz].
date_release = []
for n, item in enumerate(related_version):
sys.stdout.write("\rFinding datetime for version {}: {}".format(n+1, item[0]))
sys.stdout.flush()
if item[1] != "None":
for m in range(1, len(item)):
git_cmd = "git log -1 --format=%ai " + item[m]
# NOTE(review): same fragile str(bytes-tuple) parsing as above — verify.
temp = str(execute_command(git_cmd, repository)).replace("b'",'').replace("(",'').replace(")",'').split("\\n")
del temp[len(temp)-1]
temp = temp[0].split(" ")
if temp[0] != "',":
temp.insert(0,item[0])
temp.insert(1,item[m])
date_release.append(temp)
date_release = sorted(date_release, key=operator.itemgetter(0, 2))
# NOTE(review): the triple-quoted string below is a disabled else-branch;
# entries with no matched tag are therefore dropped entirely — confirm intended.
"""else:
date_release.append(item)"""
date_release = sorted(date_release, key=operator.itemgetter(0), reverse=True)
print ("\nThe process is finish")
#save in CSV file
with open(analyze_dir + '04_affected_versions/affected_version.csv','w') as csvfile:
writers = csv.writer(csvfile)
writers.writerow(['bug_id','earliest_affected_version','date_release','time_release','tz'])
for item in date_release:
writers.writerow(item)
df = pd.read_csv(analyze_dir + '04_affected_versions/affected_version.csv')
df
# For each bug id keep its first row, then order by release date/time/version.
earliest_vers = df.groupby('bug_id', as_index=False).first()
earliest_vers = earliest_vers.sort_values(['date_release', 'time_release', 'earliest_affected_version'], ascending=True)
earliest_vers.to_csv(analyze_dir + '04_affected_versions/earliest_version.csv', index=False)
earliest_vers
```
# Joining 2 csv files: list of annotated files and earliest affected versions
```
# Join the annotated-files listing with each bug's earliest affected version
# (left join on bug_id), then persist the combined table.
colname = ['bug_id','bugfix_commitID','parent_id','filepath','diff_myers_file','diff_histogram_file','blame_myers_file','blame_histogram_file',
'#deletions_myers','#deletions_histogram']
filedata = pd.read_csv(analyze_dir + '03_annotate/01_annotated_files/listof_diff_n_annotated_files/diff_n_blame_combination_files.csv')
filedata = filedata[colname]
details = filedata.join(earliest_vers.set_index('bug_id')[['earliest_affected_version','date_release']], on='bug_id')
details.to_csv(analyze_dir + '04_affected_versions/affected_version_for_identified_files.csv', index=False)
print ("Affected version for identified files has been created")
```
| github_jupyter |
# Part 1: Getting Started with Sionna
This tutorial will guide you through Sionna, from its basic principles to the implementation of a point-to-point link with a 5G NR compliant code and a 3GPP channel model.
You will also learn how to write custom trainable layers by implementing a state of the art neural receiver, and how to train and evaluate end-to-end communication systems.
The tutorial is structured in four notebooks:
- **Part I: Getting started with Sionna**
- Part II: Differentiable Communication Systems
- Part III: Advanced Link-level Simulations
- Part IV: Toward Learned Receivers
The [official documentation](https://nvlabs.github.io/sionna) provides key material on how to use Sionna and how its components are implemented.
* [Imports & Basics](#Imports-&-Basics)
* [Sionna Data-flow and Design Paradigms](#Sionna-Data-flow-and-Design-Paradigms)
* [Hello, Sionna!](#Hello,-Sionna!)
* [Communication Systems as Keras Models](#Communication-Systems-as-Keras-Models)
* [Forward Error Correction](#Forward-Error-Correction-(FEC))
* [Eager vs. Graph Mode](#Eager-vs-Graph-Mode)
* [Exercise](#Exercise)
## Imports & Basics
```
# Import TensorFlow and NumPy
import tensorflow as tf
import numpy as np
# Import Sionna
try:
import sionna as sn
except ImportError as e:
# Install Sionna if package is not already installed
import os
os.system("pip install sionna")
import sionna as sn
# For plotting
%matplotlib inline
# also try %matplotlib widget
import matplotlib.pyplot as plt
# for performance measurements
import time
# For the implementation of the Keras models
from tensorflow.keras import Model
```
We can now access Sionna functions within the `sn` namespace.
**Hint**: In Jupyter notebooks, you can run bash commands with `!`.
```
!nvidia-smi
```
## Sionna Data-flow and Design Paradigms
Sionna inherently parallelizes simulations via *batching*, i.e., each element in the batch dimension is simulated independently.
This means the first tensor dimension is always used for *inter-frame* parallelization, similar to an outer *for-loop* in Matlab/NumPy simulations, except that the operations are executed in parallel.
To keep the dataflow efficient, Sionna follows a few simple design principles:
* Signal-processing components are implemented as an individual [Keras layer](https://keras.io/api/layers/).
* `tf.float32` is used as preferred datatype and `tf.complex64` for complex-valued datatypes, respectively.
This allows simpler re-use of components (e.g., the same scrambling layer can be used for binary inputs and LLR-values).
* `tf.float64`/`tf.complex128` are available when high precision is needed.
* Models can be developed in *eager mode* allowing simple (and fast) modification of system parameters.
* Number crunching simulations can be executed in the faster *graph mode* or even *XLA* acceleration (experimental) is available for most components.
* Whenever possible, components are automatically differentiable via [auto-grad](https://www.tensorflow.org/guide/autodiff) to simplify the deep learning design-flow.
* Code is structured into sub-packages for different tasks such as channel coding, mapping,... (see [API documentation](http://nvlabs.github.io/sionna/api/sionna.html) for details).
These paradigms simplify the re-useability and reliability of our components for a wide range of communications related applications.
## Hello, Sionna!
Let's start with a very simple simulation: Transmitting QAM symbols over an AWGN channel. We will implement the system shown in the figure below.

We will use upper case for naming simulation parameters that are used throughout this notebook
Every layer needs to be initialized once before it can be used.
**Tip**: Use the [API documentation](http://nvlabs.github.io/sionna/api/sionna.html) to find an overview of all existing components.
You can directly access the signature and the docstring within jupyter via `Shift+TAB`.
*Remark*: Most layers are defined to be complex-valued.
We first need to create a QAM constellation.
```
# Build and display a QPSK (4-QAM, 2 bits/symbol) constellation.
NUM_BITS_PER_SYMBOL = 2 # QPSK
constellation = sn.mapping.Constellation("qam", NUM_BITS_PER_SYMBOL)
constellation.show();
```
**Task:** Try to change the modulation order, e.g., to 16-QAM.
We then need to setup a mapper to map bits into constellation points. The mapper takes as parameter the constellation.
We also need to setup a corresponding demapper to compute log-likelihood ratios (LLRs) from received noisy samples.
```
mapper = sn.mapping.Mapper(constellation=constellation)
# The demapper uses the same constellation object as the mapper
demapper = sn.mapping.Demapper("app", constellation=constellation)
```
**Tip**: You can access the signature+docstring via `?` command and print the complete class definition via `??` operator.
Obviously, you can also access the source code via [https://github.com/nvlabs/sionna/](https://github.com/nvlabs/sionna/).
```
# print class definition of the Constellation class
sn.mapping.Mapper??
```
As can be seen, the `Mapper` class inherits from `Layer`, i.e., implements a Keras layer.
This allows to simply built complex systems by using the [Keras functional API](https://keras.io/guides/functional_api/) to stack layers.
Sionna provides as utility a binary source to sample uniform i.i.d. bits.
```
binary_source = sn.utils.BinarySource()
```
Finally, we need the AWGN channel.
```
awgn_channel = sn.channel.AWGN()
```
Sionna provides a utility function to compute the noise power spectral density ratio $N_0$ from the energy per bit to noise power spectral density ratio $E_b/N_0$ in dB and a variety of parameters such as the coderate and the number of bits per symbol.
```
# Convert Eb/N0 = 10 dB into the noise power spectral density N0.
no = sn.utils.ebnodb2no(ebno_db=10.0,
num_bits_per_symbol=NUM_BITS_PER_SYMBOL,
coderate=1.0) # Coderate set to 1 as we do uncoded transmission here
```
We now have all the components we need to transmit QAM symbols over an AWGN channel.
Sionna natively supports multi-dimensional tensors.
Most layers operate at the last dimension and can have arbitrary input shapes (preserved at output).
```
# End-to-end uncoded pipeline: bits -> QAM symbols -> AWGN channel -> LLRs.
BATCH_SIZE = 64 # How many examples are processed by Sionna in parallel
bits = binary_source([BATCH_SIZE,
1024]) # Blocklength
print("Shape of bits: ", bits.shape)
x = mapper(bits)
print("Shape of x: ", x.shape)
y = awgn_channel([x, no])
print("Shape of y: ", y.shape)
llr = demapper([y, no])
print("Shape of llr: ", llr.shape)
```
In *Eager* mode, we can directly access the values of each tensor. This simplifies debugging.
```
# Print a few transmitted bits/symbols and their noisy/demapped counterparts.
num_samples = 8 # how many samples shall be printed
num_symbols = int(num_samples/NUM_BITS_PER_SYMBOL)
print(f"First {num_samples} transmitted bits: {bits[0,:num_samples]}")
print(f"First {num_symbols} transmitted symbols: {np.round(x[0,:num_symbols], 2)}")
print(f"First {num_symbols} received symbols: {np.round(y[0,:num_symbols], 2)}")
print(f"First {num_samples} demapped llrs: {np.round(llr[0,:num_samples], 2)}")
```
Let's visualize the received noisy samples.
```
# Scatter plot of the noisy channel-output samples in the complex plane.
plt.figure(figsize=(8,8))
plt.axes().set_aspect(1)
plt.grid(True)
plt.title('Channel output')
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.scatter(tf.math.real(y), tf.math.imag(y))
plt.tight_layout()
```
**Task:** One can play with the SNR to visualize the impact on the received samples.
**Advanced Task:** Compare the LLR distribution for "app" demapping with "maxlog" demapping.
The [Bit-Interleaved Coded Modulation](https://nvlabs.github.io/sionna/examples/Bit_Interleaved_Coded_Modulation.html) example notebook can be helpful for this task.
## Communication Systems as Keras Models
It is typically more convenient to wrap a Sionna-based communication system into a [Keras models](https://keras.io/api/models/model/).
These models can be simply built by using the [Keras functional API](https://keras.io/guides/functional_api/) to stack layers.
The following cell implements the previous system as a Keras model.
The key functions that need to be defined are `__init__()`, which instantiates the required components, and `__call__()`, which performs the forward pass through the end-to-end system.
```
class UncodedSystemAWGN(Model): # Inherits from Keras Model
    def __init__(self, num_bits_per_symbol, block_length):
        """
        A Keras model of an uncoded transmission over the AWGN channel.

        Parameters
        ----------
        num_bits_per_symbol: int
            The number of bits per constellation symbol, e.g., 4 for QAM16.
        block_length: int
            The number of bits per transmitted message block (will be the codeword length later).

        Input
        -----
        batch_size: int
            The batch_size of the Monte-Carlo simulation.
        ebno_db: float
            The `Eb/No` value (=rate-adjusted SNR) in dB.

        Output
        ------
        (bits, llr):
            Tuple:
        bits: tf.float32
            A tensor of shape `[batch_size, block_length]` of 0s and 1s
            containing the transmitted information bits.
        llr: tf.float32
            A tensor of shape `[batch_size, block_length]` containing the
            received log-likelihood-ratio (LLR) values.
        """
        super().__init__() # Must call the Keras model initializer
        self.num_bits_per_symbol = num_bits_per_symbol
        self.block_length = block_length
        # Constellation/mapper/demapper share the same QAM constellation object.
        self.constellation = sn.mapping.Constellation("qam", self.num_bits_per_symbol)
        self.mapper = sn.mapping.Mapper(constellation=self.constellation)
        self.demapper = sn.mapping.Demapper("app", constellation=self.constellation)
        self.binary_source = sn.utils.BinarySource()
        self.awgn_channel = sn.channel.AWGN()
    # @tf.function # Enable graph execution to speed things up
    def __call__(self, batch_size, ebno_db):
        # no channel coding used; we set coderate=1.0
        no = sn.utils.ebnodb2no(ebno_db,
                                num_bits_per_symbol=self.num_bits_per_symbol,
                                coderate=1.0)
        bits = self.binary_source([batch_size, self.block_length]) # draw block_length random bits per example
        x = self.mapper(bits)
        y = self.awgn_channel([x, no])
        llr = self.demapper([y,no])
        return bits, llr
```
We need first to instantiate the model.
```
model_uncoded_awgn = UncodedSystemAWGN(num_bits_per_symbol=NUM_BITS_PER_SYMBOL, block_length=1024)
```
Sionna provides a utility to easily compute and plot the bit error rate (BER).
```
EBN0_DB_MIN = -3.0 # Minimum value of Eb/N0 [dB] for simulations
EBN0_DB_MAX = 5.0 # Maximum value of Eb/N0 [dB] for simulations
BATCH_SIZE = 2000 # How many examples are processed by Sionna in parallel
ber_plots = sn.utils.PlotBER("AWGN")
ber_plots.simulate(model_uncoded_awgn,
ebno_dbs=np.linspace(EBN0_DB_MIN, EBN0_DB_MAX, 20),
batch_size=BATCH_SIZE,
num_target_block_errors=100, # simulate until 100 block errors occured
legend="Uncoded",
soft_estimates=True,
max_mc_iter=100, # run 100 Monte-Carlo simulations (each with batch_size samples)
show_fig=True);
```
The `sn.utils.PlotBER` object stores the results and allows to add additional simulations to the previous curves.
*Remark*: In Sionna, a block error is defined to happen if for two tensors at least one position in the last dimension differs (i.e., at least one bit wrongly received per codeword).
The bit error rate is the total number of erroneous positions divided by the total number of transmitted bits.
## Forward Error Correction (FEC)
We now add channel coding to our transceiver to make it more robust against transmission errors. For this, we will use [5G compliant low-density parity-check (LDPC) codes and Polar codes](https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=3214).
You can find more detailed information in the notebooks [Bit-Interleaved Coded Modulation (BICM)](https://nvlabs.github.io/sionna/examples/Bit_Interleaved_Coded_Modulation.html) and [5G Channel Coding and Rate-Matching: Polar vs. LDPC Codes](https://nvlabs.github.io/sionna/examples/5G_Channel_Coding_Polar_vs_LDPC_Codes.html).
```
k = 12
n = 20
encoder = sn.fec.ldpc.LDPC5GEncoder(k, n)
decoder = sn.fec.ldpc.LDPC5GDecoder(encoder, hard_out=True)
```
Let us encode some random input bits.
```
BATCH_SIZE = 1 # one codeword in parallel
u = binary_source([BATCH_SIZE, k])
print("Input bits are: \n", u.numpy())
c = encoder(u)
print("Encoded bits are: \n", c.numpy())
```
One of the fundamental paradigms of Sionna is batch-processing.
Thus, the example above could be executed for arbitrary batch sizes to simulate `batch_size` codewords in parallel.
However, Sionna can do more - it supports *N*-dimensional input tensors and, thereby, allows the processing of multiple samples of multiple users and several antennas in a single command line.
Let's say we want to encode `batch_size` codewords of length `n` for each of the `num_users` connected to each of the `num_basestations`.
This means in total we transmit `batch_size` * `n` * `num_users` * `num_basestations` bits.
```
BATCH_SIZE = 10 # samples per scenario
num_basestations = 4
num_users = 5 # users per basestation
n = 1000 # codeword length per transmitted codeword
coderate = 0.5 # coderate
k = int(coderate * n) # number of info bits per codeword
# instantiate a new encoder for codewords of length n
encoder = sn.fec.ldpc.LDPC5GEncoder(k, n)
# the decoder must be linked to the encoder (to know the exact code parameters used for encoding)
decoder = sn.fec.ldpc.LDPC5GDecoder(encoder,
                                    hard_out=True, # binary output or provide soft-estimates
                                    return_infobits=True, # or also return (decoded) parity bits
                                    num_iter=20, # number of decoding iterations
                                    cn_type="boxplus-phi") # also try "minsum" decoding
# draw random bits to encode; note the 4-dimensional input tensor
u = binary_source([BATCH_SIZE, num_basestations, num_users, k])
print("Shape of u: ", u.shape)
# We can immediately encode u for all users, basestations and samples
# This all happens with a single line of code
c = encoder(u)
print("Shape of c: ", c.shape)
print("Total number of processed bits: ", np.prod(c.shape))
```
This works for arbitrary dimensions and allows a simple extension of the designed system to multi-user or multi-antenna scenarios.
Let us now replace the LDPC code by a Polar code. The API remains similar.
```
k = 64
n = 128
encoder = sn.fec.polar.Polar5GEncoder(k, n)
decoder = sn.fec.polar.Polar5GDecoder(encoder,
                                      dec_type="SCL") # you can also use "SC"
```
*Advanced Remark:* The 5G Polar encoder/decoder class directly applies rate-matching and the additional CRC concatenation.
This is all done internally and transparent to the user.
In case you want to access low-level features of the Polar codes, please use `sionna.fec.polar.PolarEncoder` and the desired decoder (`sionna.fec.polar.PolarSCDecoder`, `sionna.fec.polar.PolarSCLDecoder` or `sionna.fec.polar.PolarBPDecoder`).
Further details can be found in the tutorial notebook on [5G Channel Coding and Rate-Matching: Polar vs. LDPC Codes](https://nvlabs.github.io/sionna/examples/5G_Channel_Coding_Polar_vs_LDPC_Codes.html).

```
class CodedSystemAWGN(Model): # Inherits from Keras Model
    def __init__(self, num_bits_per_symbol, n, coderate):
        """
        A Keras model of a 5G LDPC-coded transmission over the AWGN channel.

        Parameters
        ----------
        num_bits_per_symbol: int
            The number of bits per constellation symbol, e.g., 4 for QAM16.
        n: int
            The codeword length.
        coderate: float
            The coderate; the number of information bits is k = int(n*coderate).
        """
        super().__init__() # Must call the Keras model initializer
        self.num_bits_per_symbol = num_bits_per_symbol
        self.n = n
        self.k = int(n*coderate)
        self.coderate = coderate
        self.constellation = sn.mapping.Constellation("qam", self.num_bits_per_symbol)
        self.mapper = sn.mapping.Mapper(constellation=self.constellation)
        self.demapper = sn.mapping.Demapper("app", constellation=self.constellation)
        self.binary_source = sn.utils.BinarySource()
        self.awgn_channel = sn.channel.AWGN()
        # decoder must be linked to the encoder to know the code parameters
        self.encoder = sn.fec.ldpc.LDPC5GEncoder(self.k, self.n)
        self.decoder = sn.fec.ldpc.LDPC5GDecoder(self.encoder, hard_out=True)
    #@tf.function # activate graph execution to speed things up
    def __call__(self, batch_size, ebno_db):
        """Simulate one batch; returns (bits, bits_hat) for BER evaluation."""
        no = sn.utils.ebnodb2no(ebno_db, num_bits_per_symbol=self.num_bits_per_symbol, coderate=self.coderate)
        bits = self.binary_source([batch_size, self.k]) # k information bits per codeword
        codewords = self.encoder(bits)
        x = self.mapper(codewords)
        y = self.awgn_channel([x, no])
        llr = self.demapper([y,no])
        bits_hat = self.decoder(llr) # hard-decision estimates of the information bits
        return bits, bits_hat
CODERATE = 0.5
BATCH_SIZE = 2000
model_coded_awgn = CodedSystemAWGN(num_bits_per_symbol=NUM_BITS_PER_SYMBOL,
n=2048,
coderate=CODERATE)
ber_plots.simulate(model_coded_awgn,
ebno_dbs=np.linspace(EBN0_DB_MIN, EBN0_DB_MAX, 15),
batch_size=BATCH_SIZE,
num_target_block_errors=500,
legend="Coded",
soft_estimates=False,
max_mc_iter=15,
show_fig=True,
forward_keyboard_interrupt=False);
```
As can be seen, the `PlotBER` class uses multiple stopping conditions and stops the simulation after no error occurred at a specific SNR point.
**Task**: Replace the coding scheme by a Polar encoder/decoder or a convolutional code with Viterbi decoding.
## Eager vs Graph Mode
So far, we have executed the example in *eager* mode.
This allows running TensorFlow ops as if the code were written in NumPy and simplifies development and debugging.
However, to unleash Sionna's full performance, we need to activate *graph* mode which can be enabled with the function decorator *@tf.function()*.
We refer to [TensorFlow Functions](https://www.tensorflow.org/guide/function) for further details.
```
@tf.function() # enables graph-mode of the following function
def run_graph(batch_size, ebno_db):
    """Run one forward pass of `model_coded_awgn` in TensorFlow graph mode.

    The Python `print` below only executes while the function is traced,
    i.e., whenever the input signature changes.
    """
    # all code inside this function will be executed in graph mode, also calls of other functions
    print(f"Tracing run_graph for values batch_size={batch_size} and ebno_db={ebno_db}.") # print whenever this function is traced
    return model_coded_awgn(batch_size, ebno_db)
batch_size = 10 # try also different batch sizes
ebno_db = 1.5
# run twice - how does the output change?
run_graph(batch_size, ebno_db)
```
In graph mode, Python code (i.e., *non-TensorFlow code*) is only executed whenever the function is *traced*.
This happens whenever the input signature changes.
As can be seen above, the print statement was executed, i.e., the graph was traced again.
To avoid this re-tracing for different inputs, we now input tensors.
You can see that the function is now traced once for input tensors of same dtype.
See [TensorFlow Rules of Tracing](https://www.tensorflow.org/guide/function#rules_of_tracing) for details.
**Task:** change the code above such that tensors are used as input and execute the code with different input values. Understand when re-tracing happens.
*Remark*: if the input to a function is a tensor its signature must change and not *just* its value. For example the input could have a different size or datatype.
For efficient code execution, we usually want to avoid re-tracing of the code if not required.
```
# You can print the cached signatures with
print(run_graph.pretty_printed_concrete_signatures())
```
We now compare the throughput of the different modes.
```
repetitions = 4 # average over multiple runs
batch_size = BATCH_SIZE # try also different batch sizes
ebno_db = 1.5
# --- eager mode ---
t_start = time.perf_counter()
for _ in range(repetitions):
bits, bits_hat = model_coded_awgn(tf.constant(batch_size, tf.int32),
tf.constant(ebno_db, tf. float32))
t_stop = time.perf_counter()
# throughput in bit/s
throughput_eager = np.size(bits.numpy())*repetitions / (t_stop - t_start) / 1e6
print(f"Throughput in Eager mode: {throughput_eager :.3f} Mbit/s")
# --- graph mode ---
# run once to trace graph (ignored for throughput)
run_graph(tf.constant(batch_size, tf.int32),
tf.constant(ebno_db, tf. float32))
t_start = time.perf_counter()
for _ in range(repetitions):
bits, bits_hat = run_graph(tf.constant(batch_size, tf.int32),
tf.constant(ebno_db, tf. float32))
t_stop = time.perf_counter()
# throughput in bit/s
throughput_graph = np.size(bits.numpy())*repetitions / (t_stop - t_start) / 1e6
print(f"Throughput in graph mode: {throughput_graph :.3f} Mbit/s")
```
Let's run the same simulation as above in graph mode.
```
ber_plots.simulate(run_graph,
ebno_dbs=np.linspace(EBN0_DB_MIN, EBN0_DB_MAX, 12),
batch_size=BATCH_SIZE,
num_target_block_errors=500,
legend="Coded (Graph mode)",
soft_estimates=True,
max_mc_iter=100,
show_fig=True,
forward_keyboard_interrupt=False);
```
**Task:** TensorFlow allows to *compile* graphs with [XLA](https://www.tensorflow.org/xla). Try to further accelerate the code with XLA (`@tf.function(jit_compile=True)`).
*Remark*: XLA is still an experimental feature and not all TensorFlow (and, thus, Sionna) functions support XLA.
**Task 2:** Check the GPU load with `!nvidia-smi`. Find the best tradeoff between batch-size and throughput for your specific GPU architecture.
## Exercise
Simulate the coded bit error rate (BER) for a Polar coded and 64-QAM modulation.
Assume a codeword length of n = 200 and coderate = 0.5.
**Hint**: For Polar codes, successive cancellation list decoding (SCL) gives the best BER performance.
However, successive cancellation (SC) decoding (without a list) is less complex.
```
n = 200
coderate = 0.5
# *You can implement your code here*
```
| github_jupyter |
# Gap Framework - Natural Language Processing
## Syntax Module
<b>[Github] (https://github.com/andrewferlitsch/gap)</b>
# Document Preparation for NLP with Gap (Session 2)
Let's dig deeper into the basics. We will be using the <b style='color: saddlebrown'>SYNTAX</b> module in the **Gap** framework.
## <span style='color: saddlebrown'>Words</span> Object
Let's directly use the <b style='color: saddlebrown'>Words</b> object to control how the text is NLP preprocessed. We will cover the following:
- Syntax Preprocessing
- Text Reduction (Stopwords)
- Name Entity Recognition
- Parts of Speech Tagging
- De-Identification
- Measurement Extraction
```
import os
os.chdir("../")
!cd
#!ls #on linux
# import the Words class
from gapml.syntax import Words
```
### Syntax Preprocessing
The <b style='color: saddlebrown'>SYNTAX</b> module supports various keyword parameters to configure how the text is NLP preprocessed. We will cover just a few in this code-along. When the text is preprocessed, an ordered sequential list of <b style='color: saddlebrown'>Word</b> objects are generated; each consisting a set of key/value pairs.
In *bare* mode, all the text and punctuation is preserved, and no tagging, parts of speech (POS), stemming, lemmatization, name entity recognition (NER) or stopword removal is performed.
#### Bare
Let's look at the preprocessing of a simple sentence in *bare* mode.
```
# Process this well-known typing phrase which contains all 26 letters of the alphabet
w = Words('The quick brown fox jumped over the lazy dog.', bare=True)
print(w.words)
```
As you can see, the *words* property displays a list, where each entry is an object consisting of a word and tag key value pair. I know you don't know what the integer values of the tags mean (see Vocabulary.py). In bare mode, all words are tagged as UNTAGGED (0) and punctuation as PUNCT (23).
Note how in bare mode, all words are kept, their capitalization, order and punctuation.
#### Stopwords and Stemming
Let's do some text reduction. In NLP, a lot of things add very little to the understanding of the text, such as common words like 'the', 'and', 'a', and punctuation. Removing these common words is called stopword removal. There are several lists for doing this, the most common being the *Porter* list.
Additionally, we can make it easier to match words if we lowercase all the words and remove word endings, such as plurals and 'ing'; this is called stemming. Let's give it a try with the same sentence.
Note how words like 'the', and 'over' have been removed, the punctuation has been removed, words have been lowercased and 'jumped' has been stemmed to its root word 'jump'.
```
# Stem words using the NLTK Porter stemmer
w = Words('The quick brown fox jumped over the lazy dog.', stem='porter')
print(w.words)
```
Stemmers sometimes reduce words into something that isn't the root. Like 'riding' could end up being 'rid', after cutting off 'ing'. Note above how the *NLTK Porter* stemmer changed 'lazy' into 'lazi'.
Different stemmers have different errors. This can be corrected using a lemmatization. Let's repeat the above but use the **Gap** stemmer which has a lemmatizer correction.
```
# Stem words using the Gap stemmer
w = Words('The quick brown fox jumped over the lazy dog.', stem='gap')
print(w.words)
```
#### Gender Recognition
The <b style='color: saddlebrown'>Words</b> object will also recognize gender specific words. We will preprocess four different ways of saying 'father'. In each case, the tag will be set to MALE (15) and each word will be replaced (reduced) with its common equivalent 'father'.
```
# Let's recognize various forms of father
w = Words("dad daddy father papa", gender=True)
w.words
```
Let's now try a variety of words indicating the gender FEMALE (16). Note how 'mom' and 'mother' got reduced to the common equivalent 'mother', and the slang 'auntie' and 'sis' got reduced to 'aunt' and 'sister', respectively.
```
w = Words("girl lady mother mom auntie sis", gender=True)
w.words
```
#### NER (Name Entity Recognition)
The <b style='color: saddlebrown'>SYNTAX</b> module will recognize a wide variety of proper names, places and identification, such as a person's name (11), a social security number (9), a title (33), and a geographic location.
```
# Let's look at a string with a name, social security number, and title.
w = Words("Patient: Jim Jones, SSN: 123-12-1234. Dr. Nancy Lou", stopwords=True)
# Let's print the word list. Note that jim and jones are tagged 11 (Proper Name), 123121234 is tagged 9 (SSN), and
# Dr is tagged 33 (Title)
w.words
```
Let's now try an address. Nice, in our example we recognized (tagged) a street number (27), street direction (28), street name (29), street type (30), a secondary address unit (36), a city (31), a state (32) and postal code (34).
Both US and Canadian street and postal addresses are recognized. Note how the state name "Oregon" got replaced with its ISO international standard code.
```
w = Words("124 NE Main Ave, Apt #6, Portland, OR 97221", address=True)
w.words
```
#### De-Identification
The <b style='color: saddlebrown'>SYNTAX</b> module supports de-identification of the text. One can remove names, dates of birth, gender, social security number, telephone numbers and addresses.
```
# Let's remove any names and SSN from our text
w = Words("Patient: Jim Jones, SSN: 123-12-1234", name=False, ssn=False)
w.words
```
#### Measurements
The <b style='color: saddlebrown'>SYNTAX</b> module supports extracting measurement units, such as height, weight, speed, volume and quantity (38). You can also configure to convert measurements (25) to Standard or Metric system. A wide variety of acronyms and formats are recognized. Note that numbers are tagged as 1.
```
# Let's do height using ' for foot and " for inches
w = Words("Height: 5'7\"", stopwords=True)
w.words
# Let's do height using the acronym ft and in.
w = Words("Height: 5 ft 7 in", stopwords=True)
w.words
# Let's do height using the acronym ft and in, with no space between the value and unit
w = Words("Height: 5ft 7in", stopwords=True)
w.words
# Let's do an example in Standard and convert to Metric system.
w = Words("Weight is 120lbs", stopwords=True, metric=True)
w.words
```
## THAT'S ALL FOR SESSION 2
Look forward to seeing everyone again on session 3 where we will do some data preparation for computer vision.
| github_jupyter |
Saturation curves for SM-omics and ST<br>
Input files are generated by counting number of unique molecules and number of annotated reads per annotated region after adjusting for sequencing depth, in downsampled fastq files (proportions 0.001, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1) processed using ST-pipeline.<br>
```
%matplotlib inline
import os
import numpy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import glob
import warnings
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
warnings.filterwarnings('ignore')
def condition(row):
    """Return the staining condition for a sample row.

    Parameters
    ----------
    row : mapping (e.g. a pandas DataFrame row)
        Must contain a 'Name' entry holding the sample identifier.

    Returns
    -------
    str or None
        One of 'HE', 'DAPI' or 'Nestin', or None for an unknown sample
        name (pandas then stores NaN in the resulting column).
    """
    # The samples are run in triplicate based on condition; map each
    # replicate name directly to its staining instead of chained
    # membership tests indexing into a separate list.
    name_to_condition = {
        '10015CN108fl_D1': 'Nestin',
        '10015CN108fl_D2': 'Nestin',
        '10015CN108flfl_E2': 'Nestin',
        '10015CN90_C2': 'DAPI',
        '10015CN90_D2': 'DAPI',
        '10015CN90_E2': 'DAPI',
        '10015CN108_C2': 'HE',
        '10015CN108_D2': 'HE',
        '10015CN108_E1': 'HE',
    }
    return name_to_condition.get(row['Name'])
# Load input files
path = '../../smomics_data'
stats_list = []
samples_list = ['10015CN108fl_D2',
'10015CN108flfl_E2',
'10015CN108fl_D1',
'10015CN90_C2',
'10015CN90_D2',
'10015CN90_E2',
'10015CN108_C2',
'10015CN108_D2',
'10015CN108_E1']
prop_list = [0.001, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1]
for filename in samples_list:
cond_file = pd.read_csv(os.path.join(path, filename + '_umi_after_seq_depth_in_spots_under_outside_tissue.txt'), sep = '\t')
print(cond_file)
cond_file.sort_values(by='Num reads', inplace=True)
cond_file['Prop_annot_reads'] = prop_list
cond_file['Condition'] = cond_file.apply(lambda row: condition(row), axis = 1)
cond_file['norm uniq mol inside'] = cond_file['UMI inside']
cond_file['norm uniq mol outside'] = cond_file['UMI outside']
stats_list.append(cond_file)
# Concat all files
cond_merge = pd.concat(stats_list)
#Plot
fig = plt.figure(figsize=(20, 10))
x="Prop_annot_reads"
y="norm uniq mol inside"
#y="Genes"
hue='Condition'
################ LINE PLOT
ax = sns.lineplot(x=x, y=y, data=cond_merge,hue=hue,
palette = ['mediumorchid', 'goldenrod', 'blue'], hue_order = ['HE', 'DAPI', 'Nestin'],ci=95)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('k')
ax.spines['left'].set_color('k')
# X and y label size
ax.set_xlabel("Proportion annotated reads", fontsize=15)
ax.set_ylabel("Number of unique molecules under tissue", fontsize=15)
# Set ticks size
ax.tick_params(axis='y', labelsize=15)
ax.tick_params(axis='x', labelsize=15)
# change background color
back_c = 'white'
ax.set_facecolor(back_c)
ax.grid(False)
# Thousand seprator on y axis
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
# LEGEND
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[0:], labels=['HE', 'DAPI', 'Nestin'],loc='upper left', ncol=2, fontsize=20)
fig.set_size_inches(20, 10)
# plt.savefig("../../figures/saturation_sm_stainings_saturation.pdf", transparent=True, bbox_inches = 'tight',
# pad_inches = 0, dpi=1200)
plt.show()
cond_file['Prop_annot_reads'] = 100*cond_file['Prop_annot_reads']
#cond_merge.to_csv('../../smomics_data/sm_stainings_unique_molecules_under_outside_tissue.csv')
```
| github_jupyter |
# Federated Learning Training Plan: Host Plan & Model
Here we load Plan and Model params created earlier in "Create Plan" notebook, host them to PyGrid,
and run sample syft.js app that executes them.
```
%load_ext autoreload
%autoreload 2
import websockets
import json
import base64
import requests
import torch
import syft as sy
from syft.grid.grid_client import GridClient
from syft.serde import protobuf
from syft_proto.execution.v1.plan_pb2 import Plan as PlanPB
from syft_proto.execution.v1.state_pb2 import State as StatePB
sy.make_hook(globals())
# force protobuf serialization for tensors
hook.local_worker.framework = None
async def sendWsMessage(data):
    """Send `data` as JSON over a websocket to PyGrid and return the decoded JSON reply.

    Opens a fresh connection to the module-level `gatewayWsUrl` for every
    call, sends one message and waits for exactly one response.
    """
    async with websockets.connect('ws://' + gatewayWsUrl) as websocket:
        await websocket.send(json.dumps(data))
        message = await websocket.recv()
        return json.loads(message)
def deserializeFromBin(worker, filename, pb):
    """Load a serialized protobuf message from `filename` and unbufferize it.

    Parameters
    ----------
    worker:
        The syft worker used for deserialization.
    filename: str
        Path to the binary protobuf file (e.g. "tp_full.pb").
    pb:
        An empty protobuf message instance of the expected type; it is
        filled in place from the file contents.

    Returns
    -------
    The deserialized syft object (e.g. a Plan or State).
    """
    with open(filename, "rb") as f:
        raw_bytes = f.read()  # renamed from `bin`, which shadowed the builtin
    pb.ParseFromString(raw_bytes)
    return protobuf.serde._unbufferize(worker, pb)
```
## Step 4a: Host in PyGrid
Here we load "ops list" Plan.
PyGrid should translate it to other types (e.g. torchscript) automatically.
```
# Load files with protobuf created in "Create Plan" notebook.
training_plan = deserializeFromBin(hook.local_worker, "tp_full.pb", PlanPB())
model_params_state = deserializeFromBin(hook.local_worker, "model_params.pb", StatePB())
```
Follow PyGrid README.md to build `openmined/grid-gateway` image from the latest `dev` branch
and spin up PyGrid using `docker-compose up --build`.
```
# Default gateway address when running locally
gatewayWsUrl = "127.0.0.1:5000"
grid = GridClient(id="test", address=gatewayWsUrl, secure=False)
grid.connect()
```
Define name, version, configs.
```
# These name/version you use in worker
name = "mnist"
version = "1.0.0"
client_config = {
"name": name,
"version": version,
"batch_size": 64,
"lr": 0.01,
"max_updates": 100 # custom syft.js option that limits number of training loops per worker
}
server_config = {
"min_workers": 3, # temporarily this plays role "min # of worker's diffs" for triggering cycle end event
"max_workers": 3,
"pool_selection": "random",
"num_cycles": 5,
"do_not_reuse_workers_until_cycle": 4,
"cycle_length": 28800,
"minimum_upload_speed": 0,
"minimum_download_speed": 0
}
```
Shoot!
If everything's good, success is returned.
If the name/version already exists in PyGrid, change them above or cleanup PyGrid db by re-creating docker containers (e.g. `docker-compose up --force-recreate`).
```
response = grid.host_federated_training(
model=model_params_state,
client_plans={'training_plan': training_plan},
client_protocols={},
server_averaging_plan=None,
client_config=client_config,
server_config=server_config
)
print("Host response:", response)
```
Let's double-check that data is loaded by requesting a cycle.
(Request is made directly, will be methods on grid client in the future)
```
auth_request = {
"type": "federated/authenticate",
"data": {}
}
auth_response = await sendWsMessage(auth_request)
print('Auth response: ', json.dumps(auth_response, indent=2))
cycle_request = {
"type": "federated/cycle-request",
"data": {
"worker_id": auth_response['data']['worker_id'],
"model": name,
"version": version,
"ping": 1,
"download": 10000,
"upload": 10000,
}
}
cycle_response = await sendWsMessage(cycle_request)
print('Cycle response:', json.dumps(cycle_response, indent=2))
worker_id = auth_response['data']['worker_id']
request_key = cycle_response['data']['request_key']
model_id = cycle_response['data']['model_id']
training_plan_id = cycle_response['data']['plans']['training_plan']
```
Let's download model and plan (both versions) and check they are actually workable.
```
# Model
req = requests.get(f"http://{gatewayWsUrl}/federated/get-model?worker_id={worker_id}&request_key={request_key}&model_id={model_id}")
model_data = req.content
pb = StatePB()
pb.ParseFromString(req.content)
model_params_downloaded = protobuf.serde._unbufferize(hook.local_worker, pb)
print(model_params_downloaded)
# Plan "list of ops"
req = requests.get(f"http://{gatewayWsUrl}/federated/get-plan?worker_id={worker_id}&request_key={request_key}&plan_id={training_plan_id}&receive_operations_as=list")
pb = PlanPB()
pb.ParseFromString(req.content)
plan_ops = protobuf.serde._unbufferize(hook.local_worker, pb)
print(plan_ops.role.actions)
print(plan_ops.torchscript)
# Plan "torchscript"
req = requests.get(f"http://{gatewayWsUrl}/federated/get-plan?worker_id={worker_id}&request_key={request_key}&plan_id={training_plan_id}&receive_operations_as=torchscript")
pb = PlanPB()
pb.ParseFromString(req.content)
plan_ts = protobuf.serde._unbufferize(hook.local_worker, pb)
print(plan_ts.role.actions)
print(plan_ts.torchscript.code)
```
## Step 5a: Train
Start and open "with-grid" example in syft.js project (http://localhost:8080 by default),
enter model name and version and start FL training.
## Step 6a: Submit diff
This emulates submitting worker's diff (created earlier in Execute Plan notebook) to PyGrid.
After several diffs submitted, PyGrid will end the cycle and create new model checkpoint and cycle.
(Request is made directly, will be methods on grid client in the future)
```
with open("diff.pb", "rb") as f:
diff = f.read()
report_request = {
"type": "federated/report",
"data": {
"worker_id": auth_response['data']['worker_id'],
"request_key": cycle_response['data']['request_key'],
"diff": base64.b64encode(diff).decode("utf-8")
}
}
report_response = await sendWsMessage(report_request)
print('Report response:', json.dumps(report_response, indent=2))
```
| github_jupyter |
## Prerequisites
This notebook contains examples which are expected *to be run with exactly 4 MPI processes*; not because they wouldn't work otherwise, but simply because it's what their description assumes. For this, you need to:
* Install an MPI distribution on your system, such as OpenMPI, MPICH, or Intel MPI (if not already available).
* Install some optional dependencies, including `mpi4py` and `ipyparallel`; from the root Devito directory, run
```
pip install -r requirements-optional.txt
```
* Create an `ipyparallel` MPI profile, by running our simple setup script. From the root directory, run
```
./scripts/create_ipyparallel_mpi_profile.sh
```
## Launch and connect to an ipyparallel cluster
We're finally ready to launch an ipyparallel cluster. Open a new terminal and run the following command
```
ipcluster start --profile=mpi -n 4
```
Once the engines have started successfully, we can connect to the cluster
```
import ipyparallel as ipp
c = ipp.Client(profile='mpi')
```
In this tutorial, to run commands in parallel over the engines, we will use the %px line magic.
```
%%px --group-outputs=engine
from mpi4py import MPI
print(f"Hi, I'm rank %d." % MPI.COMM_WORLD.rank)
```
## Overview of MPI in Devito
Distributed-memory parallelism via MPI is designed so that users can "think sequentially" as much as possible. The few things requested of the user are:
* Like any other MPI program, run with `mpirun -np X python ...`
* Some pre- and/or post-processing may be rank-specific (e.g., we may want to plot on a given MPI rank only), even though this might be hidden away in the next Devito releases, when newer support APIs will be provided.
* Parallel I/O (if and when necessary) to populate the MPI-distributed datasets in input to a Devito Operator. If a shared file system is available, there are a few simple alternatives to pick from, such as NumPy’s memory-mapped arrays.
To enable MPI, users have two options. Either export the environment variable `DEVITO_MPI=1` or, programmatically:
```
%%px --group-outputs=engine
from devito import configuration
configuration['mpi'] = True
%%px --block --group-outputs=engine
# Keep generated code as simple as possible
configuration['openmp'] = False
# Fix platform so that this notebook can be tested by py.test --nbval
configuration['platform'] = 'knl7210'
```
An `Operator` will then generate MPI code, including sends/receives for halo exchanges. Below, we introduce a running example through which we explain how domain decomposition as well as data access (read/write) and distribution work. Performance optimizations are discussed [in a later section](#Performance-optimizations).
Let's start by creating a `TimeFunction`.
```
%%px --group-outputs=engine
from devito import Grid, TimeFunction, Eq, Operator
grid = Grid(shape=(4, 4))
u = TimeFunction(name="u", grid=grid, space_order=2, time_order=0)
```
Domain decomposition is performed when creating a `Grid`. Users may supply their own domain decomposition, but this is not shown in this notebook. Devito exploits the MPI Cartesian topology abstraction to logically split the `Grid` over the available MPI processes. Since `u` is defined over a decomposed `Grid`, its data get distributed too.
```
%%px --group-outputs=engine
u.data
```
Globally, `u` consists of 4x4 points -- this is what users "see". But locally, as shown above, each rank has got a 2x2 subdomain. The key point is: **for the user, the fact that `u.data` is distributed is completely abstracted away -- the perception is that of indexing into a classic NumPy array, regardless of whether MPI is enabled or not**. All sort of NumPy indexing schemes (basic, slicing, etc.) are supported. For example, we can write into a slice-generated view of our data.
```
%%px --group-outputs=engine
u.data[0, 1:-1, 1:-1] = 1.
%%px --group-outputs=engine
u.data
```
The only limitation, currently, is that a data access cannot require a direct data exchange among two or more processes (e.g., the assignment `u.data[0, 0] = u.data[3, 3]` will raise an exception unless both entries belong to the same MPI rank).
We can finally write out a trivial `Operator` to try running something.
```
%%px --group-outputs=engine
#NBVAL_IGNORE_OUTPUT
op = Operator(Eq(u.forward, u + 1))
summary = op.apply(time_M=0)
```
And we can now check again the (distributed) content of our `u.data`
```
%%px --group-outputs=engine
u.data
```
Everything as expected. We could also peek at the generated code, because we may be curious to see what sort of MPI calls Devito has generated...
```
%%px --targets 0
print(op)
```
Hang on. There's nothing MPI-specific here! At least apart from the header file `#include "mpi.h"`. What's going on? Well, it's simple. Devito was smart enough to realize that this trivial `Operator` doesn't even need any sort of halo exchange -- the `Eq` implements a pure "map computation" (i.e., fully parallel), so it can just let each MPI process do its job without ever synchronizing with halo exchanges. We might want to try again with a proper stencil `Eq`.
```
%%px --targets 0
op = Operator(Eq(u.forward, u.dx + 1))
print(op)
```
Uh-oh -- now the generated code looks more complicated than before, though it still is pretty much human-readable. We can spot the following routines:
* `haloupdate0` performs a blocking halo exchange, relying on three additional functions, `gather0`, `sendrecv0`, and `scatter0`;
* `gather0` copies the (generally non-contiguous) boundary data into a contiguous buffer;
* `sendrecv0` takes the buffered data and sends it to one or more neighboring processes; then it waits until all data from the neighboring processes is received;
* `scatter0` copies the received data into the proper array locations.
This is the simplest halo exchange scheme available in Devito. There are a few, and some of them apply aggressive optimizations, [as shown later on](#Performance-optimizations).
Before looking at other scenarios and performance optimizations, there is one last thing it is worth discussing -- the `data_with_halo` view.
```
%%px --group-outputs=engine
u.data_with_halo
```
This is again a global data view. The shown *with_halo* is the "true" halo surrounding the physical domain, **not** the halo used for the MPI halo exchanges (often referred to as "ghost region"). So it gets trivial for a user to initialize the "true" halo region (which is typically read by a stencil `Eq` when an `Operator` iterates in proximity of the domain boundary).
```
%%px --group-outputs=engine
u.data_with_halo[:] = 1.
%%px --group-outputs=engine
u.data_with_halo
```
## MPI and SparseFunction
A `SparseFunction` represents a sparse set of points which are generically unaligned with the `Grid`. A sparse point could be anywhere within a grid, and is therefore attached some coordinates. Given a sparse point, Devito looks at its coordinates and, based on the domain decomposition, **logically** assigns it to a given MPI process; this is purely logical ownership, as in Python-land, before running an Operator, the sparse point physically lives on the MPI rank which created it. Within `op.apply`, right before jumping to C-land, the sparse points are scattered to their logical owners; upon returning to Python-land, the sparse points are gathered back to their original location.
In the following example, we attempt injection of four sparse points into the neighboring grid points via linear interpolation.
```
%%px --group-outputs=engine
from devito import Function, SparseFunction
grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
x, y = grid.dimensions
f = Function(name='f', grid=grid)
coords = [(0.5, 0.5), (1.5, 2.5), (1.5, 1.5), (2.5, 1.5)]
sf = SparseFunction(name='sf', grid=grid, npoint=len(coords), coordinates=coords)
```
Let:
* O be a grid point
* x be a halo point
* A, B, C, D be the sparse points
We show the global view, that is what the user "sees".
```
O --- O --- O --- O
| A | | |
O --- O --- O --- O
| | C | B |
O --- O --- O --- O
| | D | |
O --- O --- O --- O
```
And now the local view, that is what the MPI ranks own when jumping to C-land.
```
Rank 0 Rank 1
O --- O --- x x --- O --- O
| A | | | | |
O --- O --- x x --- O --- O
| | C | | C | B |
x --- x --- x x --- x --- x
Rank 2 Rank 3
x --- x --- x x --- x --- x
| | C | | C | B |
O --- O --- x x --- O --- O
| | D | | D | |
O --- O --- x x --- O --- O
```
We observe that the sparse points along the boundary of two or more MPI ranks are _duplicated_ and thus redundantly computed over multiple processes. However, the contributions from these points to the neighboring halo points are naturally ditched, so the final result of the interpolation is as expected. Let's convince ourselves that this is the case. We assign a value of $5$ to each sparse point. Since we are using linear interpolation and all points are placed at the exact center of a grid quadrant, we expect that the contribution of each sparse point to a neighboring grid point will be $5 * 0.25 = 1.25$. Based on the global view above, we eventually expect `f` to look like as follows:
```
1.25 --- 1.25 --- 0.00 --- 0.00
| | | |
1.25 --- 2.50 --- 2.50 --- 1.25
| | | |
0.00 --- 2.50 --- 3.75 --- 1.25
| | | |
0.00 --- 1.25 --- 1.25 --- 0.00
```
Let's check this out.
```
%%px
#NBVAL_IGNORE_OUTPUT
sf.data[:] = 5.
op = Operator(sf.inject(field=f, expr=sf))
summary = op.apply()
%%px --group-outputs=engine
f.data
```
## Performance optimizations
The Devito compiler applies several optimizations before generating code.
* Redundant halo exchanges are identified and removed. A halo exchange is redundant if a prior halo exchange carries out the same `Function` update and the data is not “dirty” yet.
* Computation/communication overlap, with explicit prodding of the asynchronous progress engine to make sure that non-blocking communications execute in background during the compute part.
* Halo exchanges could also be reshuffled to maximize the extension of the computation/communication overlap region.
To run with all these optimizations enabled, instead of `DEVITO_MPI=1`, users should set `DEVITO_MPI=full`, or, equivalently
```
%%px --group-outputs=engine
configuration['mpi'] = 'full'
```
We could now peek at the generated code to see that things now look differently.
```
%%px --group-outputs=engine
op = Operator(Eq(u.forward, u.dx + 1))
# Uncomment below to show code (it's quite verbose)
# print(op)
```
The body of the time-stepping loop has changed, as it now implements a classic computation/communication overlap scheme:
* `haloupdate0` triggers non-blocking communications;
* `compute0` executes the core domain region, that is the sub-region which doesn't require reading from halo data to be computed;
* `halowait0` waits for and terminates the non-blocking communications;
* `remainder0`, which internally calls `compute0`, computes the boundary region requiring the now up-to-date halo data.
| github_jupyter |
```
import os
import jieba
import re
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
```
## 读数据
```
df=pd.read_csv('./data/biquge_2500.csv',encoding='UTF-8-sig')
df.head()
```
## 获得id和标题2id的字典
```
df['id']=np.array([df['link'][i][-9:-5] for i in range(len(df['link']))])
#df.drop(columns=['link'],inplace=True)
df.head()
t2id={}
id2t={}
for i in range(len(df)):
t2id[df.loc[i]['title']]=df.loc[i]['id']
id2t[df.loc[i]['id']]=df.loc[i]['title']
t2id['重生之极品赘婿']
df.set_index('id',inplace=True)
df.head()
```
## 描述信息分词,去停用词
```
def get_stopwords_list():
    """Load the stop-word list (one word per line) from ./stopwords.txt.

    Returns
    -------
    list[str]
        Each line of the file, stripped of surrounding whitespace.
    """
    # 'with' guarantees the file handle is closed; the original opened the
    # file inline and never closed it (resource leak).
    with open('./stopwords.txt', encoding='UTF-8-sig') as f:
        return [line.strip() for line in f]
stopwords_list=get_stopwords_list()
stopwords_list[:5]
def remove_digits(input_str):
    """Return *input_str* with every ASCII digit and '.' character removed.

    A token consisting only of digits/periods therefore maps to the empty
    string, which callers use to detect purely numeric tokens.
    """
    drop_chars = u'0123456789.'
    # Single C-level pass instead of a regex substitution.
    return input_str.translate(str.maketrans('', '', drop_chars))
def move_stopwords(sentence_list, stopwords_list):
    """Drop stop words, purely-numeric tokens, and tab characters.

    Parameters
    ----------
    sentence_list : iterable of tokens (e.g. jieba segmentation output).
    stopwords_list : collection of tokens to discard.

    Returns
    -------
    str
        The surviving tokens joined with single spaces.
    """
    kept = []
    for token in sentence_list:
        if token in stopwords_list:
            continue
        # remove_digits() returns '' for tokens made only of digits/periods.
        if not remove_digits(token):
            continue
        if token == '\t':
            continue
        kept.append(token)
    return ' '.join(kept)
#df['split_title']=df['title'].map(lambda x:move_stopwords(list(jieba.cut(x)),stopwords_list))
df['discription']=df['discription'].astype('str')
df.head()
df['discription']=df['discription'].map(lambda x:move_stopwords(list(jieba.cut(x.replace(u'\xa0', u''))),stopwords_list))
df.head()
df['discription'][0]
```
## 向量化&计算余弦相似度
```
# Bag-of-words vectorization of the segmented description column
# (column name 'discription' is the dataset's own spelling).
count = CountVectorizer()
count_matrix = count.fit_transform(df['discription'])
# Pairwise cosine similarity between every pair of novels (N x N matrix).
cosine_sim = cosine_similarity(count_matrix, count_matrix)
cosine_sim
# Positional lookup: row position -> novel id (the DataFrame index).
indices = pd.Series(df.index)
indices[:5]
```
## 选择10部值得推荐的小说
```
def recommendations(title, cosine_sim = cosine_sim):
    """Return the ids of the 10 novels most similar to *title*.

    Parameters
    ----------
    title : value looked up in the module-level ``indices`` Series
        (a novel id, since ``df`` is indexed by id).
    cosine_sim : 2-D similarity matrix; defaults to the module-level one
        computed from the description bag-of-words.

    Returns
    -------
    list
        Index values (novel ids) of the ten nearest neighbours.
    """
    # Row position of the requested title.
    idx = indices[indices == title].index[0]
    # Similarity of every novel against the requested one, best first.
    score_series = pd.Series(cosine_sim[idx]).sort_values(ascending=False)
    # Skip position 0 (the item itself) and take the next 10 neighbours.
    # The original sliced iloc[1:10], which returned only 9 items despite
    # the "10 most similar" intent.
    top_10_indexes = list(score_series.iloc[1:11].index)
    # Hoist the index materialization out of the loop (the original rebuilt
    # list(df.index) on every iteration).
    id_list = list(df.index)
    return [id_list[i] for i in top_10_indexes]
rec_list=recommendations('7260')
print('《'+id2t['7260']+"》"+'的相关推荐小说是:')
print()
for v in rec_list:
print(id2t[v])
df.head()
df['rec']=df.index.map(lambda x:' '.join([id2t[y] for y in recommendations(x)]))
df.head()
df.drop(columns=['discription'],inplace=True)
```
# 保存数据
```
df.to_csv("./results/biquge2500_results.csv",index=False, sep=',',encoding='utf-8')
```
| github_jupyter |
```
# Imports
import numpy as np
import torch
from phimal_utilities.data import Dataset
from phimal_utilities.data.burgers import BurgersDelta
from phimal_utilities.analysis import load_tensorboard
from DeePyMoD_SBL.deepymod_torch.library_functions import library_1D_in
from DeePyMoD_SBL.deepymod_torch.DeepMod import DeepModDynamic
from DeePyMoD_SBL.deepymod_torch.training import train_dynamic
from sklearn.linear_model import LassoLarsIC, LassoCV
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'svg'
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
np.random.seed(42)
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
v = 0.1
A = 1.0
# Making grid
x = np.linspace(-3, 4, 100)
t = np.linspace(0.5, 5.0, 50)
x_grid, t_grid = np.meshgrid(x, t, indexing='ij')
dataset = Dataset(BurgersDelta, v=v, A=A)
X_train, y_train, rand_idx = dataset.create_dataset(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1), n_samples=2000, noise=0.05, random=True, return_idx=True)
# Making data
estimator = LassoLarsIC(fit_intercept=False)
#estimator = LassoCV()
config = {'n_in': 2, 'hidden_dims': [30, 30, 30, 30, 30], 'n_out': 1, 'library_function':library_1D_in, 'library_args':{'poly_order':2, 'diff_order': 3}, 'sparsity_estimator': estimator}
model = DeepModDynamic(**config)
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.999), amsgrad=True)
train_dynamic(model, X_train, y_train, optimizer, 5000, log_dir='runs/testing/')
df_part1 = load_tensorboard('runs/testing/')
plt.semilogy(df_part1.index, df_part1['MSE_0'])
plt.semilogy(df_part1.index, df_part1['Regression_0'])
df_part1.keys()
plt.plot(df_part1.index, df_part1['L1_0'])
plt.ylim([1.3, 6])
coeff_keys = [key for key in df_part1.keys() if key[:5]=='coeff']
scaled_coeff_keys = [key for key in df_part1.keys() if key[:6]=='scaled']
coeff_keys
plt.plot(df_part1[coeff_keys])
plt.ylim([-2, 2])
plt.plot(df_part1[scaled_coeff_keys])
plt.ylim([-2, 2])
active_map = df_part1[coeff_keys][::100]
active_map[active_map!=0.0] = 1.0
sns.heatmap(active_map)
df_part1['L1_0'].idxmin()
model.constraints.sparsity_mask
model.constraints.coeff_vector
model.sparsity_estimator.coef_
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit_predict(np.abs(model.sparsity_estimator.coef_[:, None]))
train_dynamic(model, X_train, y_train, optimizer, 1000, loss_func_args={'patience': 50, 'initial_idx': 200}, log_dir='runs/part_3/')
model.constraints.sparsity_mask
model.constraints.coeff_vector
df_part2 = load_tensorboard('runs/part_3/')
#plt.semilogy(df_part2.index, df_part2['MSE_0'])
plt.semilogy(df_part2.index, df_part2['Regression_0'])
plt.plot(df_part2.index, df_part2['L1_0'])
```
| github_jupyter |
```
"""
A randomly connected network learning a sequence
This example contains a reservoir network of 500 neurons.
400 neurons are excitatory and 100 neurons are inhibitory.
The weights are initialized randomly, based on a log-normal distribution.
The network activity is stimulated with three different inputs (A, B, C).
The inputs are given in a row (A -> B -> C -> A -> ...)
The experiment is defined in 'pelenet/experiments/sequence.py' file.
A log file, parameters, and plot figures are stored in the 'log' folder for every run of the simulation.
NOTE: The main README file contains some more information about the structure of pelenet
"""
# Load pelenet modules
from pelenet.utils import Utils
from pelenet.experiments.sequence import SequenceExperiment
# Official modules
import numpy as np
import matplotlib.pyplot as plt
# Overwrite default parameters (pelenet/parameters/ and pelenet/experiments/sequence.py)
parameters = {
# Experiment
'seed': 1, # Random seed
'trials': 10, # Number of trials
'stepsPerTrial': 60, # Number of simulation steps for every trial
# Neurons
'refractoryDelay': 2, # Refactory period
'voltageTau': 100, # Voltage time constant
'currentTau': 5, # Current time constant
'thresholdMant': 1200, # Spiking threshold for membrane potential
# Network
'reservoirExSize': 400, # Number of excitatory neurons
'reservoirConnPerNeuron': 35, # Number of connections per neuron
'isLearningRule': True, # Apply a learning rule
'learningRule': '2^-2*x1*y0 - 2^-2*y1*x0 + 2^-4*x1*y1*y0 - 2^-3*y0*w*w', # Defines the learning rule
# Input
'inputIsSequence': True, # Activates sequence input
'inputSequenceSize': 3, # Number of input clusters in sequence
'inputSteps': 20, # Number of steps the trace input should drive the network
'inputGenSpikeProb': 0.8, # Probability of spike for the generator
'inputNumTargetNeurons': 40, # Number of neurons activated by the input
# Probes
'isExSpikeProbe': True, # Probe excitatory spikes
'isInSpikeProbe': True, # Probe inhibitory spikes
'isWeightProbe': True # Probe weight matrix at the end of the simulation
}
# Initilizes the experiment, also initializes the log
# Creating a new object results in a new log entry in the 'log' folder
# The name is optional, it is extended to the folder in the log directory
exp = SequenceExperiment(name='random-network-sequence-learning', parameters=parameters)
# Instantiate the utils singleton
utils = Utils.instance()
# Build the network, in this function the weight matrix, inputs, probes, etc. are defined and created
exp.build()
# Run the network simulation, afterwards the probes are postprocessed to nice arrays
exp.run()
# Weight matrix before learning (randomly initialized)
exp.net.plot.initialExWeightMatrix()
# Plot distribution of weights
exp.net.plot.initialExWeightDistribution(figsize=(12,3))
# Plot spike trains of the excitatory (red) and inhibitory (blue) neurons
exp.net.plot.reservoirSpikeTrain(figsize=(12,6), to=600)
# Weight matrix after learning
exp.net.plot.trainedExWeightMatrix()
# Sorted weight matrix after learning
supportMask = utils.getSupportWeightsMask(exp.net.trainedWeightsExex)
exp.net.plot.weightsSortedBySupport(supportMask)
```
| github_jupyter |
# Transformação de Fontes
Jupyter Notebook desenvolvido por [Gustavo S.S.](https://github.com/GSimas)
**Transformação de fontes é o processo de substituir uma fonte de tensão
vs em série com um resistor R por uma fonte de corrente is em paralelo com
um resistor R, ou vice-versa.**
Assim como na transformação estrela-triângulo, uma
transformação de fontes não afeta a parte remanescente do circuito.

Portanto, a transformação de fontes requer que
\begin{align}
{\Large v_s = i_sR}
\\
\\{\Large i_s = \frac{v_s}{R}}
\end{align}
A transformação de fontes também se aplica a fontes dependentes, desde
que tratemos adequadamente a variável dependente.

**Exemplo 4.6**
Use transformação de fontes para determinar vo no circuito da Figura 4.17.

```
print("Exemplo 4.6")
#trasforma fonte 1 (corrente -> tensao)
#vs1 = is*R = 12V
#Req em serie entre 4 e 2
#Req1 = 4 + 2 = 6
#transforma fonte 2 (tensao -> corrente)
#is2 = 12/3 = 4A
#transforma fonte 1 (tensao -> corrente)
#is1 = 12/6 = 2A
#Req paralelo entre 6 e 3
#Req2 = 6*3/(6 + 3) = 2
#fonte resultante
#ir = is2 - is1 = 4 - 2 = 2A
#transforma fonte 2 (corrente -> tensao)
#vs2 = Req2*ir = 2 * 2 = 4V
#divisor tensao
#v0 = vs2*8/(8 + Req2)
v0 = 4*8/(8 + 2)
print("Tensao v0",v0,"V")
```
**Problema Prático 4.6**
Determine io no circuito da Figura 4.19 usando transformação de fontes.

```
print("Problema Prático 4.6")
#Req serie 4 e 1 = 5
#Req paralelo 6 e 3 = 2
#transforma fonte 1 (corrente -> tensao)
#vs1 = R*is1 = 5*2 = 10V
#soma fonte 1 e 2 = 5 + 10 = 15V
#transforma fonte soma (tensao -> corrente)
#is = 15/2 = 7,5A
#Req paralelo 5 e 2 = 10/7
#soma fonte corrente = 7,5 + 3 = 10,5 A
#divisor corrente
i0 = 10.5*(10/7)/((10/7) + 7)
print("Corrente i0:",i0,"A")
```
**Exemplo 4.7**
Determine vx na Figura 4.20 usando transformação de fontes.

```
print("Exemplo 4.7")
#transforma fonte 1 (tensao -> corrente)
#is1 = 6/2 = 3 A
#transforma fonte dep. (corrente -> tensao)
#vs_dep = 0.25Vx * 4 = Vx
#soma fonte dep. e fonte 2 = 18 + Vx
#Req paralelo 2 e 2 = 1
#transforma fontes soma (tensao -> corrente)
#is_soma = 18/4 + Vx/4
#soma fontes = 18/4 + Vx/4 + 3 = 30/4 + Vx/4 = (30 + Vx)/4
#transforma fontes soma (corrente -> tensao)
#fonte resultante = ((30 + Vx)/4)*4 = 30 + Vx
#LKT
#(30 + Vx) - 4*ix - Vx = 0
#ix = (30 + Vx)/5 = 6 + Vx/5
#30 - 24 - 4Vx/5 = 0
vx = 6*5/4
print("Tensão Vx",vx,"V")
```
**Problema Prático 4.7**
Use transformação de fontes para determinar ix no circuito exposto na Figura 4.22.

```
print("Problema Prático 4.7")
#transforma fonte dep. (tensao -> corrente)
#is_dep = 2ix/5
#soma fontes = 0.024 - 2ix
#divisor corrente
#ix = (24m - 2ix)*5/(5 + 10)
#ix = (0.12 - 10ix)/15
#ix + 2ix/3 = 0.008
#5ix/3 = 0.008
ix = 0.008*3/5
print("Corrente ix:",ix,"A")
```
# Teorema de Thèvenin
**O teorema de Thévenin afirma que um circuito linear de dois terminais
pode ser substituído por um circuito equivalente formado por uma fonte
de tensão VTh em série com um resistor RTh, onde VTh é a tensão de circuito
aberto nos terminais e RTh, a resistência de entrada ou equivalente nos
terminais quando as fontes independentes forem desativadas.**
O teorema de Thévenin é muito importante na análise de circuitos, porque
ajuda a simplificar um circuito, e um circuito grande pode ser substituído por
uma única fonte de tensão independente e um único resistor.

Para tanto, suponha
que os dois circuitos da Figura 4.23 sejam equivalentes – dois circuitos são
ditos equivalentes se tiverem a mesma relação tensão-corrente em seus terminais. Se
os terminais a-b forem tornados um circuito aberto (eliminando-se a carga),
nenhuma corrente fluirá e, portanto, a tensão nos terminais a-b da Figura 4.23a
terá de ser igual à fonte de tensão VTh da Figura 4.23b, já que os dois circuitos
são equivalentes. Logo:
\begin{align}
{\Large V_{Th} = v_{oc}}
\end{align}
A resistência de entrada (ou
resistência equivalente) do circuito inativo nos terminais a-b da Figura 4.23a
deve ser igual a RTh da Figura 4.23b, pois os dois circuitos são equivalentes.
Portanto, RTh é a resistência de entrada nos terminais quando as fontes
independentes forem desligadas. Logo:
\begin{align}
{\Large R_{Th} = R_{oc}}
\end{align}

- **Caso 1:** Se a rede não tiver fontes dependentes, **desligamos todas as fontes independentes**. RTh é a resistência de entrada da rede, olhando-se entre os terminais a e b.
- **Caso 2:** Se a rede tiver fontes dependentes, **desligamos todas as fontes independentes**. As fontes dependentes não devem ser desligadas, pois elas são controladas por variáveis de circuito. Aplicamos uma tensão vo aos terminais a e b, e determinamos a corrente resultante io. Então, RTh = vo/io. De forma alternativa, poderíamos inserir uma fonte de corrente io nos terminais a e b, como na Figura 4.25b, e encontrar a tensão entre os terminais vo. Chegamos novamente a RTh = vo/io. Qualquer um dos dois métodos leva ao mesmo resultado. Em ambos os métodos, podemos supor qualquer valor de vo e io. Poderíamos usar, por exemplo, vo = 1 V ou io = 1 A, ou até mesmo valores não especificados de vo ou io.

Muitas vezes, pode ocorrer de RTh assumir um valor negativo; nesse caso,
a resistência negativa (v = –iR) implica o fato de o circuito estar **fornecendo
energia.**
**Exemplo 4.8**
Determine o circuito equivalente de Thévenin do circuito mostrado na Figura 4.27,
à esquerda dos terminais a-b. Em seguida, determine a corrente através de RL = 6 Ω,
16 Ω e 36 Ω.

```
print("Exemplo 4.8")
#Req1 = 4*12/(4 + 12) = 48/16 = 3
#Rth = 3 + 1 = 4
#transforma fonte 1 (tensao -> corrente)
#is1 = 32/4 = 8 A
#soma fontes = 8 + 2 = 10 A
#ix = 10*4/(4 + 12) = 40/16 = 5/2
#Vab = 12*(5/2) = 30 = Vth
Vth = 30
Rth = 4
Rl = 6
Il = Vth/(Rl + Rth)
print("Para RL = 6, Corrente:",Il,"A")
Rl = 16
Il = Vth/(Rl + Rth)
print("Para RL = 6, Corrente:",Il,"A")
Rl = 36
Il = Vth/(Rl + Rth)
print("Para RL = 6, Corrente:",Il,"A")
```
**Problema Prático 4.8**
Usando o teorema de Thévenin, determine o circuito equivalente à esquerda dos terminais do circuito da Figura 4.30. Em seguida, determine I.

```
print("Problema Prático 4.8")
#Req1 = 6 + 6 = 12
#Rth = Req1*4/(Req1 + 4) = 48/16 = 3
Rth = 3
#Superposicao Vsource
#Vab1 = Vs*4/(4 + 6 + 6) = 12*4/16 = 3V
#Superposicao Csource
#Iab = Is*6/(4 + 6 + 6) = 2*6/16 = 3/4
#Vab2 = Iab*4 = 3V
#Vth = Vab1 + Vab2
Vth = 6
I = Vth/(Rth + 1)
print("Tensao Vth:",Vth,"V")
print("Resistencia Rth:",Rth)
print("Corrente I:",I,"A")
```
**Exemplo 4.9**
Determine o equivalente de Thévenin do circuito da Figura 4.31.

```
print("Exemplo 4.9")
import numpy as np
#Descobrir Rth - desliga fontes indep., nao se alteram fontes dep.
#Aplicar tensao vo arbitraria entre terminais a b
#vo = 1 V
#Analise de malhas
#-2Vx + 2(i1 - i2) = 0
#Vx = i1 - i2
#Vx = -4i2
#i1 + 3i2 = 0
#-Vx + 2(i2 - i1) + 6(i2 - i3) = 0
#2i2 - 2i1 + 6i2 - 6i3 = Vx
#-3i1 + 9i2 - 6i3 = 0
#-i1 + 3i2 - 2i3 = 0
#Vo + 6(i3 - i2) + 2i3 = 0
#6i3 - 6i2 + 2i3 = -1
#-6i2 + 8i3 = -1
coef = np.matrix("1 3 0;-1 3 -2;0 -6 8")
res = np.matrix("0;0;-1")
I = np.linalg.inv(coef)*res
#i3 = -i0
io = -I[2]
#Rth = Vo/io
Rth = 1/io
print("Resistencia Rth:",float(Rth))
#Descobrir Vth
#Analise de tensao em terminais a b
#Analise de Malhas
#i1 = 5 A
#-2Vx + 2(i2 - i3) = 0
#Vx = i2 - i3
#Vx = 4(5 - i3) = 20 - 4i3
#i2 + 3i3 = 20
#4(i3 - 5) + 2(i3 - i2) + 6i3 = 0
#4i3 +2i3 - 2i2 + 6i3 = 20
#-2i2 + 12i3 = 20
#-i2 + 6i3 = 10
coef = np.matrix("1 3;-1 6")
res = np.matrix("20;10")
I = np.linalg.inv(coef)*res
Vth = 6*I[1]
print("Tensão Vth:",float(Vth),"V")
```
**Problema Prático 4.9**
Determine o equivalente de Thévenin do circuito da Figura 4.34 à esquerda dos terminais.

```
print("Problema Prático 4.9")
#Descobrir Rth
#Vo = 1V
#Analise Nodal
#i1 - Ix/2 = 0
#v1/5 - Ix/2 = 0
#Ix = (v1 - 1)/3
#v1/5 - (v1 - 1)/6 = 0
#v1/5 - v1/6 = -1/6
#v1/30 = -1/6
#v1 = -5
#Ix = (v1 - 1)/3 = -6/3 = -2 A
#i2 = 1/4 A
#io = -Ix + i2 = 9/4 A
#Rth = 1/(9/4) = 4/9
Rth = 4/9
print("Resistencia Rth:",Rth)
#Descobrir Vth
#Analise de Malhas
#-6 + 5i1 + 3Ix + 4Ix = 0
#5i1 + 7Ix = 6
#3Ix/2 + i1 = Ix
#Ix/2 + i1 = 0
#2i1 + Ix = 0
coef = np.matrix("5 7;2 1")
res = np.matrix("6;0")
I = np.linalg.inv(coef)*res
Ix = float(I[1])
Vth = 4*Ix
print("Tensão Vth:",Vth,"V")
```
**Exemplo 4.10**
Determine o equivalente de Thévenin do circuito da Figura 4.35a nos terminais a-b.

```
print("Exemplo 4.10")
#vab = -vo = -1 V
#i1 = 1/4 A
#ix = 1/2 A
#i0 = 2ix - ix - i1 = 1 - 1/2 - 1/4 = 1/4 A
#Rth = -1/(1/4) = -4
Rth = -4
print("Resistencia Rth:", Rth)
print("Tensao Vth:",0,"V")
```
**Problema Prático 4.10**
Obtenha o equivalente de Thévenin do circuito da Figura 4.36.

```
print("Problema Prático 4.10")
#iab = 1 A
#-vx + 10i1 + 4vx + 15(i1 - iab) = 0
#3vx + 25i1 - 15iab = 0
#vx = -5i1
#-15i1 + 25i1 = 15
#10i1 = 15
#i1 = 1,5 A = 3/2 A
#vx = -5i1 = -7,5 V = -15/2 V
#vdep = 4*vx = -30V
#vab = vo = 15(i1 - iab) = 15/2 = 7,5V
#Rth = vo/(-iab) = -7,5
Rth = -7.5
print("Tensao Vth:",0,"V")
print("Resistencia Rth",Rth)
```
| github_jupyter |
# Computingthe mean of a bunch of images:
```
# computing statistics:
import torch
from torchvision import transforms, datasets
import numpy as np
import time
unlab_ddset = datasets.ImageFolder('./surrogate_dataset/unlab_dataset_055/train_set/',
transform = transforms.Compose([transforms.ToTensor()]))
unlab_loader = torch.utils.data.DataLoader(unlab_ddset,
batch_size = 20,
shuffle = True,
) # iterating over the DataLoader gives the tuple (input, target)
def compute_mean(loader):
    """Compute per-channel (RGB) mean and std over all images in *loader*.

    Per-channel statistics are averaged over the images of each batch, then
    averaged again over the number of batches.

    Parameters
    ----------
    loader : iterable yielding (images, targets) pairs, where ``images`` is
        a batch of 3-channel image tensors.

    Returns
    -------
    (mean, std) : tuple of two 3-element lists (one value per channel).
    """
    mean = [0.0, 0.0, 0.0]
    std = [0.0, 0.0, 0.0]
    n_batches = 0
    # Iterate the loader that was passed in -- the original closed over the
    # global `unlab_loader`, silently ignoring its own argument.
    for images, _targets in loader:
        batch_mean = [0.0, 0.0, 0.0]
        batch_std = [0.0, 0.0, 0.0]
        for t in images:
            for c in range(3):
                batch_mean[c] += t[c].mean()
                batch_std[c] += t[c].std()
        # Divide by the true batch size. The original divided by the last
        # enumerate index (len - 1): an off-by-one that also raised
        # ZeroDivisionError for a batch of size 1.
        n_images = len(images)
        for c in range(3):
            mean[c] += batch_mean[c] / n_images
            std[c] += batch_std[c] / n_images
        n_batches += 1
    # Divide by the number of batches. The original divided by the last
    # batch index: another off-by-one, and a ZeroDivisionError when the
    # loader held a single batch.
    return ([x / n_batches for x in mean], [x / n_batches for x in std])
# Time the statistics pass and report the per-channel results.
st = time.time()
mean, std = compute_mean(unlab_loader)
end = time.time()
# Python 3 print() calls -- the original Python 2 `print x` statements are a
# SyntaxError under Python 3, which the rest of this notebook targets.
print('Time to compute the statistics: ' + str(end - st))
print("Mean of xxx random images transformed 100 each:")
print(mean)
print(std)
# computing statistics:
import torch
from torchvision import transforms, datasets
import numpy as np
import time
unlab_ddset = datasets.ImageFolder('./surrogate_dataset/unlab_dataset007/data/',
transform = transforms.Compose([transforms.ToTensor()]))
unlab_loader = torch.utils.data.DataLoader(unlab_ddset,
batch_size = 20,
shuffle = True,
) # iterating over the DataLoader gives the tuple (input, target)
def compute_mean(loader):
    """Compute the per-channel (RGB) mean over all images in *loader*.

    Per-channel means are averaged over the images of each batch, then
    averaged again over the number of batches.

    Parameters
    ----------
    loader : iterable yielding (images, targets) pairs, where ``images`` is
        a batch of 3-channel image tensors.

    Returns
    -------
    list
        Three per-channel mean values.
    """
    mean = [0.0, 0.0, 0.0]
    n_batches = 0
    # Iterate the loader that was passed in -- the original closed over the
    # global `unlab_loader`, silently ignoring its own argument.
    for images, _targets in loader:
        batch_mean = [0.0, 0.0, 0.0]
        for t in images:
            for c in range(3):
                batch_mean[c] += t[c].mean()
        # Divide by the true batch size. The original divided by the last
        # enumerate index (len - 1): an off-by-one that also raised
        # ZeroDivisionError for a batch of size 1.
        n_images = len(images)
        for c in range(3):
            mean[c] += batch_mean[c] / n_images
        n_batches += 1
    # Divide by the number of batches, not the last batch index (original
    # was off by one and crashed on a single-batch loader).
    return [x / n_batches for x in mean]
# Time the statistics pass and report the per-channel means.
st = time.time()
mean = compute_mean(unlab_loader)
end = time.time()
# Python 3 print() calls -- the original Python 2 `print x` statements are a
# SyntaxError under Python 3, which the rest of this notebook targets.
print('Time to compute the statistics: ' + str(end - st))
print("Mean of xxx random images transformed 100 each:")
print(mean)
```
# Checking how the normalization affects the images:
```
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from matplotlib import pyplot as plt
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import time
import numpy as np
from PIL import Image
experiment = '002_6'
path = '../saving_model/alexNet' + str(experiment) + '.pth.tar'
#print path
normalize = transforms.Normalize(mean = [0.6128879173491645, 0.6060359745417173, 0.5640660479324938],
std=[1, 1, 1])
batch_size = 100
unlab_ddset = datasets.ImageFolder('./surrogate_dataset/unlab_train/',
transform = transforms.Compose([transforms.ToTensor()]))
unlab_loader = torch.utils.data.DataLoader(unlab_ddset,
batch_size = batch_size,
shuffle = True,
)
for i, data in enumerate(unlab_loader):
break
# data loaded with the pytorch loader and no normalization
type(data[0]), type(data[1]), data[1][5], data[0][5].max(), data[0][5].min(), data[0][5].mean()
experiment = '002_6'
path = '../saving_model/alexNet' + str(experiment) + '.pth.tar'
#print path
normalize = transforms.Normalize(mean = [0.6128879173491645, 0.6060359745417173, 0.5640660479324938],
std=[1, 1, 1])
batch_size = 100
unlab_ddset = datasets.ImageFolder('./surrogate_dataset/unlab_train/',
transform = transforms.Compose([transforms.ToTensor(), normalize]))
unlab_loader = torch.utils.data.DataLoader(unlab_ddset,
batch_size = batch_size,
shuffle = True,
)
for i, data in enumerate(unlab_loader):
break
# data loaded with the pytorch loader and normalization like follows:
# (mean = [0.6128879173491645, 0.6060359745417173, 0.5640660479324938], std=[1, 1, 1])
type(data[0]), type(data[1]), data[1][5], data[0][5].max(), data[0][5].min(), data[0][5].mean()
```
| github_jupyter |
### Bouns: Difference of proportions
Another simple way to calculate distinctive words in two texts is to calculate the words with the highest and lowest difference of proportions. In theory frequent words like 'the' and 'of' will have a small difference. In practice this doesn't happen.
To demonstrate this we will run a difference of proportion calculation on *Pride and Prejudice* and *A Garland for Girls*.
To get the text in shape for scikit-learn we need to create a list object with each novel as an element in a list. We'll use the append function to do this.
```
import pandas
from sklearn.feature_extraction.text import CountVectorizer
text_list = []
#open and read the novels, save them as variables
austen_string = open('../Data/Austen_PrideAndPrejudice.txt', encoding='utf-8').read()
alcott_string = open('../Data/Alcott_GarlandForGirls.txt', encoding='utf-8').read()
#append each novel to the list
text_list.append(austen_string)
text_list.append(alcott_string)
print(text_list[0][:100])
```
Create a DTM from these two novels, force it into a pandas DF, and inspect the output:
```
countvec = CountVectorizer()
novels_df = pandas.DataFrame(countvec.fit_transform(text_list).toarray(), columns=countvec.get_feature_names())
novels_df
```
Notice the number of rows and columns.
Question: What does this mean?
Next, we need to get a word frequency count for each novel, which we can do by summing across the entire row. Note how the syntax is different here compared to when we summed one column across all rows.
```
novels_df['word_count'] = novels_df.sum(axis=1)
novels_df
```
Next we divide each frequency cell by the word count. This syntax gets a bit tricky, so let's walk through it.
```
novels_df = novels_df.iloc[:,:].div(novels_df.word_count, axis=0)
novels_df
```
Finally, we subtract one row from another, and add the output as a third row.
```
novels_df.loc[2] = novels_df.loc[0] - novels_df.loc[1]
novels_df
```
We can sort based of the values of this row
```
novels_df.loc[2].sort_values(ascending=False)
```
Stop words are still in there. Why?
We can, of course, manually remove stop words. This does successfully identify distinctive content words.
We can do this in the CountVectorizer step, by setting the correct option.
```
#change stop_words option to 'english
countvec_sw = CountVectorizer(stop_words="english")
#same as code above
novels_df_sw = pandas.DataFrame(countvec_sw.fit_transform(text_list).toarray(), columns=countvec_sw.get_feature_names())
novels_df_sw['word_count'] = novels_df_sw.sum(axis=1)
novels_df_sw = novels_df_sw.iloc[:,0:].div(novels_df_sw.word_count, axis=0)
novels_df_sw.loc[2] = novels_df_sw.loc[0] - novels_df_sw.loc[1]
novels_df_sw.loc[2].sort_values(axis=0, ascending=False)
```
We can also do this by setting the max_df option (maximum document frequency) to either an absolute value, or a decimal between 0 and 1. An absolute value indicate that if the word occurs in more documents than the stated value, that word **will not** be included in the DTM. A decimal value will do the same, but proportion of documents.
Question: In the case of this corpus, what does setting the max_df value to 1 do? What output do you expect?
```
# Change the max_df option to 1: any word appearing in more than one
# document is excluded from the DTM.
countvec_freq = CountVectorizer(max_df=1)
# Same as the code above. Newer scikit-learn (>= 1.0) renamed
# get_feature_names() to get_feature_names_out() (the old name was
# removed in 1.2); use whichever this installation provides.
_get_names = getattr(countvec_freq, 'get_feature_names_out', None) or countvec_freq.get_feature_names
novels_df_freq = pandas.DataFrame(countvec_freq.fit_transform(text_list).toarray(), columns=_get_names())
novels_df_freq['word_count'] = novels_df_freq.sum(axis=1)
# Normalize counts to proportions (word_count itself becomes 1.0)
novels_df_freq = novels_df_freq.div(novels_df_freq.word_count, axis=0)
novels_df_freq.loc[2] = novels_df_freq.loc[0] - novels_df_freq.loc[1]
novels_df_freq.loc[2].sort_values(axis=0, ascending=False)
```
Question: What would happen if we set the max_df to 2, in this case?
Question: What might we do for the music reviews dataset?
### Exercise:
Use the difference of proportions calculation to compare two genres, or two artists, in the music reviews dataset. There are many ways you can do this. Think through the problem in steps.
| github_jupyter |
# Experimental design and pattern estimation
This week's lab will be about the basics of pattern analysis of (f)MRI data. We assume that you've worked through the two Nilearn tutorials already.
Functional MRI data are most often stored as 4D data, with 3 spatial dimensions ($X$, $Y$, and $Z$) and 1 temporal dimension ($T$). But most pattern analyses assume that data are formatted in 2D: trials ($N$) by patterns (often a subset of $X$, $Y$, and $Z$). Where did the time dimension ($T$) go? And how do we "extract" the patterns of the $N$ trials? In this lab, we'll take a look at various methods to estimate patterns from fMRI time series. Because these methods often depend on your experimental design (and your research question, of course), the first part of this lab will discuss some experimental design considerations. After this more theoretical part, we'll dive into how to estimate patterns from fMRI data.
**What you'll learn**: At the end of this tutorial, you ...
* Understand the most important experimental design factors for pattern analyses;
* Understand and are able to implement different pattern estimation techniques
**Estimated time needed to complete**: 8-12 hours
```
# We need to limit the amount of threads numpy can use, otherwise
# it tends to hog all the CPUs available when using Nilearn.
# NOTE: these environment variables must be set *before* numpy is
# imported, because the BLAS backends read them at import time.
import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
import numpy as np
```
## Experimental design
Before you can do any fancy machine learning or representational similarity analysis (or any other pattern analysis), there are several decisions you need to make and steps to take in terms of study design, (pre)processing, and structuring your data. Roughly, there are three steps to take:
1. Design your study in a way that's appropriate to answer your question through a pattern analysis; this, of course, needs to be done *before* data acquisition!
2. Estimate/extract your patterns from the (functional) MRI data;
3. Structure and preprocess your data appropriately for pattern analyses;
While we won't go into all the design factors that make for an *efficient* pattern analysis (see [this article](http://www.sciencedirect.com/science/article/pii/S105381191400768X) for a good review), we will now discuss/demonstrate some design considerations and how they impact the rest of the MVPA pipeline.
### Within-subject vs. between-subject analyses
As always, your experimental design depends on your specific research question. If, for example, you're trying to predict schizophrenia patients from healthy controls based on structural MRI, your experimental design is going to be different than when you, for example, are comparing fMRI activity patterns in the amygdala between trials targeted to induce different emotions. Crucially, with *design* we mean the factors that you as a researcher control: e.g., which schizophrenia patients and healthy control to scan in the former example and which emotion trials to present at what time. These two examples indicate that experimental design considerations are quite different when you are trying to model a factor that varies *between subjects* (the schizophrenia vs. healthy control example) versus a factor that varies *within subjects* (the emotion trials example).
<div class='alert alert-warning'>
<b>ToDo/ToThink</b> (1.5 points): before continuing, let's practice a bit. For the three articles below, determine whether they used a within-subject or between-subject design.<br>
<ol>
<li><a href="https://www.nature.com/articles/nn1444">https://www.nature.com/articles/nn1444</a> (machine learning based)</li>
<li><a href="http://www.jneurosci.org/content/33/47/18597.short">http://www.jneurosci.org/content/33/47/18597.short</a> (RSA based)</li>
<li><a href="https://www.sciencedirect.com/science/article/pii/S1053811913000074">https://www.sciencedirect.com/science/article/pii/S1053811913000074</a> (machine learning based)</li>
</ol>
Assign either 'within' or 'between' to the variables corresponding to the studies above (i.e., <tt>study_1</tt>, <tt>study_2</tt>, <tt>study_3</tt>).
</div>
```
''' Implement the ToDo here. '''
# Exercise scaffold: replace each empty string with 'within' or
# 'between', then delete the NotImplementedError below.
study_1 = '' # fill in 'within' or 'between'
study_2 = '' # fill in 'within' or 'between'
study_3 = '' # fill in 'within' or 'between'
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Sanity checks: every answer must be filled in and be a valid option.
for this_study in [study_1, study_2, study_3]:
    if not this_study: # if empty string
        raise ValueError("You haven't filled in anything!")
    else:
        if this_study not in ['within', 'between']:
            raise ValueError("Fill in either 'within' or 'between'!")

print("Your answer will be graded by hidden tests.")
```
Note that, while we think it is a useful way to think about different types of studies, it is possible to use "hybrid" designs and analyses. For example, you could compare patterns from a particular condition (within-subject) across different participants (between-subject). This is, to our knowledge, not very common though, so we won't discuss it here.
<div class='alert alert-info'>
<b>ToThink</b> (1 point)<br>
Suppose a researcher wants to implement a decoding analysis in which he/she aims to predict schizophrenia (vs. healthy control) from gray-matter density patterns in the orbitofrontal cortex. Is this an example of a within-subject or between-subject pattern analysis? Can it be either one? Why (not)?
</div>
YOUR ANSWER HERE
That said, let's talk about something that is not only important for univariate MRI analyses, but also for pattern-based multivariate MRI analyses: confounds.
### Confounds
For most task-based MRI analyses, we try to relate features from our experiment (stimuli, responses, participant characteristics; let's call these $\mathbf{S}$) to brain features (this is not restricted to "activity patterns"; let's call these $\mathbf{R}$\*). Ideally, we have designed our experiment that any association between our experimental factor of interest ($\mathbf{S}$) and brain data ($\mathbf{R}$) can *only* be due to our experimental factor, not something else.
If another factor besides our experimental factor of interest can explain this association, this "other factor" may be a *confound* (let's call this $\mathbf{C}$). If we care to conclude anything about our experimental factor of interest and its relation to our brain data, we should try to minimize any confounding factors in our design.
---
\* Note that the notation for experimental variables ($\mathbf{S}$) and brain features ($\mathbf{R}$) is different from what we used in the previous course, in which we used $\mathbf{X}$ for experimental variables and $\mathbf{y}$ for brain signals. We did this to conform to the convention to use $\mathbf{X}$ for the set of independent variables and $\mathbf{y}$ for dependent variables. In some pattern analyses (such as RSA), however, this independent/dependent variable distintion does not really apply, so that's why we'll stick to the more generic $\mathbf{R}$ (for brain features) and $\mathbf{S}$ (for experimental features) terms.
<div class='alert alert-success'>
<b>Note</b>: In some situations, you may only be interested in maximizing your explanatory/predictive power; in that case, you could argue that confounds are not a problem. The article by <a href="https://www.sciencedirect.com/science/article/pii/S1053811917306523"> Hebart & Baker (2018)</a> provides an excellent overview of this issue.
</div>
Statistically speaking, you should design your experiment in such a way that there are no associations (correlations) between $\mathbf{S}$ and $\mathbf{C}$, such that any association between $\mathbf{S}$ and $\mathbf{R}$ can *only* be due to $\mathbf{S}$. Note that this is not trivial, because this presumes that you (1) know which factors might confound your study and (2) if you know these factors, that they are measured properly ([Westfall & Yarkoni, 2016](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0152719)).
Minimizing confounds in between-subject studies is notably harder than in within-subject designs, especially when dealing with clinical populations that are hard to acquire, because it is simply easier to experimentally control within-subject factors (especially when they are stimulus- rather than response-based). There are ways to deal with confounds post-hoc, but ideally you prevent confounds in the first place. For an overview of confounds in (multivariate/decoding) neuroimaging analyses and a proposed post-hoc correction method, see [this article](https://www.sciencedirect.com/science/article/pii/S1053811918319463) (apologies for the shameless self-promotion) and [this follow-up article](https://www.biorxiv.org/content/10.1101/2020.08.17.255034v1.abstract).
In sum, as with *any* (neuroimaging) analysis, a good experimental design is one that minimizes the possibilities of confounds, i.e., associations between factors that are not of interest ($\mathbf{C}$) and experimental factors that *are* of interest ($\mathbf{S}$).
<div class='alert alert-info'>
<b>ToThink</b> (0 points): Suppose that you are interested in the neural correlates of ADHD. You want to compare multivariate resting-state fMRI networks between ADHD patients and healthy controls. What is the experimental factor ($\mathbf{S}$)? And can you think of a factor that, when unaccounted for, presents a major confound ($\mathbf{C}$) in this study/analysis?
</div>
<div class='alert alert-info'>
<b>ToThink</b> (1 point): Suppose that you're interested in the neural representation of "cognitive effort". You think of an experimental design in which you show participants either easy arithmetic problems, which involve only single-digit addition/subtraction (e.g., $2+5-4$) or hard(er) arithmetic problems, which involve two-digit addition/subtraction and multiplication (e.g., $12\times4-2\times11$), for which they have to respond whether the solution is odd (press left) or even (press right) as fast as possible. You then compare patterns during the between easy and hard trials. What is the experimental factor of interest ($\mathbf{S}$) here? And what are <em>possible</em> confounds ($\mathbf{C}$) in this design? Name at least two. (Note: this is a separate hypothetical experimental from the previous ToThink.)
</div>
YOUR ANSWER HERE
### What makes up a "pattern"?
So far, we talked a lot about "patterns", but what do we mean with that term? There are different options with regard to *what you choose as your unit of measurement* that makes up your pattern. The far majority of pattern analyses in functional MRI use patterns of *activity estimates*, i.e., the same unit of measurement — relative (de)activation — as is common in standard mass-univariate analyses. For example, decoding object category (e.g., images of faces vs. images of houses) from fMRI activity patterns in inferotemporal cortex is an example of a pattern analysis that uses *activity estimates* as its unit of measurement.
However, you are definitely not limited to using *activity estimates* for your patterns. For example, you could apply pattern analyses to structural data (e.g., patterns of voxelwise gray-matter volume values, like in [voxel-based morphometry](https://en.wikipedia.org/wiki/Voxel-based_morphometry)) or to functional connectivity data (e.g., patterns of time series correlations between voxels, or even topological properties of brain networks). (In fact, the connectivity examples from the Nilearn tutorial represents a way to estimate these connectivity features, which can be used in pattern analyses.) In short, pattern analyses can be applied to patterns composed of *any* type of measurement or metric!
Now, let's get a little more technical. Usually, as mentioned in the beginning, pattern analyses represent the data as a 2D array of brain patterns. Let's call this $\mathbf{R}$. The rows of $\mathbf{R}$ represent different instances of patterns (sometimes called "samples" or "observations") and the columns represent different brain features (e.g., voxels; sometimes simply called "features"). Note that we thus lose all spatial information by "flattening" our patterns into 1D rows!
Let's call the number of samples $N$ and the number of brain features $K$. We can thus represent $\mathbf{R}$ as a $N\times K$ matrix (2D array):
\begin{align}
\mathbf{R} =
\begin{bmatrix}
R_{1,1} & R_{1,2} & R_{1,3} & \dots & R_{1,K}\\
R_{2,1} & R_{2,2} & R_{2,3} & \dots & R_{2,K}\\
R_{3,1} & R_{3,2} & R_{3,3} & \dots & R_{3,K}\\
\vdots & \vdots & \vdots & \ddots & \vdots\\
R_{N,1} & R_{N,2} & R_{N,3} & \dots & R_{N,K}\\
\end{bmatrix}
\end{align}
As discussed before, the values themselves (e.g., $R_{1,1}$, $R_{1,2}$, $R_{3,6}$) represent whatever you chose for your patterns (fMRI activity, connectivity estimates, VBM, etc.). What is represented by the rows (samples/observations) of $\mathbf{R}$ depends on your study design: in between-subject studies, these are usually participants, while in within-subject studies, these samples represent trials (or averages of trials or sometimes runs). The columns of $\mathbf{R}$ represent the different (brain) features in your pattern; for example, these may be different voxels (or sensors/magnetometers in EEG/MEG), vertices (when working with cortical surfaces), edges in functional brain networks, etc. etc.
Let's make it a little bit more concrete. We'll make up some random data below that represents a typical data array in pattern analyses:
```
import numpy as np

# Simulate a typical pattern-analysis data array:
# N samples (e.g., trials) by K brain features (e.g., voxels),
# drawn from a standard normal distribution.
N, K = 100, 250
R = np.random.normal(loc=0, scale=1, size=(N, K))
R
```
Let's visualize this:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(12, 4))
plt.imshow(R, aspect='auto')
plt.xlabel('Brain features', fontsize=15)
plt.ylabel('Samples', fontsize=15)
plt.title(r'$\mathbf{R}_{N\times K}$', fontsize=20)
cbar = plt.colorbar()
cbar.set_label('Feature value', fontsize=13, rotation=270, labelpad=10)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b> (1 point): Extract the pattern of the 42nd trial and store it in a variable called <tt>trial42</tt>. Then, extract the values of the 187th brain feature across all trials and store it in a variable called <tt>feat187</tt>. Lastly, extract the feature value of the 60th trial and the 221st feature and store it in a variable called <tt>t60_f221</tt>. Remember: Python uses zero-based indexing (first value in an array is indexed by 0)!
</div>
```
''' Implement the ToDo here.'''
# Exercise scaffold: index into R to create trial42, feat187 and
# t60_f221 (mind the zero-based indexing!), then delete the
# NotImplementedError below.
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Automated check (requires the course-specific `niedu` package).
from niedu.tests.nipa.week_1 import test_R_indexing
test_R_indexing(R, trial42, feat187, t60_f221)
```
Alright, to practice a little bit more. We included whole-brain VBM data for 20 subjects in the `vbm/` subfolder:
```
import os

# List the VBM files shipped in the vbm/ subfolder, alphabetically sorted
vbm_dir_contents = os.listdir('vbm')
sorted(vbm_dir_contents)
```
The VBM data represents spatially normalized (to MNI152, 2mm), whole-brain voxelwise gray matter volume estimates (read more about VBM [here](https://en.wikipedia.org/wiki/Voxel-based_morphometry)).
Let's inspect the data from a single subject:
```
import os
import nibabel as nib
from nilearn import plotting

# Load subject 1's VBM map: voxelwise gray-matter volume estimates,
# spatially normalized to MNI152 (2 mm) space.
sub_01_vbm_path = os.path.join('vbm', 'sub-01.nii.gz')
sub_01_vbm = nib.load(sub_01_vbm_path)
print("Shape of Nifti file: ", sub_01_vbm.shape)

# Let's plot it as well
plotting.plot_anat(sub_01_vbm)
plt.show()
```
As you can see, the VBM data is a 3D array of shape 91 ($X$) $\times$ 109 ($Y$) $\times$ 91 ($Z$) (representing voxels). These are the spatial dimensions associated with the standard MNI152 (2 mm) template provided by FSL. As VBM is structural (not functional!) data, there is no time dimension ($T$).
Now, suppose that we want to do a pattern analysis on the data of all 20 subjects. We should then create a 2D array of shape 20 (subjects) $\times\ K$ (number of voxels, i.e., $91 \times 109 \times 91$). To do so, we need to create a loop over all files, load them in, "flatten" the data, and ultimately stack them into a 2D array.
Before you'll implement this as part of the next ToDo, we will show you a neat Python function called `glob`, which allows you to simply find files using "[wildcards](https://en.wikipedia.org/wiki/Wildcard_character)":
```
from glob import glob
```
It works as follows:
```
list_of_files = glob('path/with/subdirectories/*/*.nii.gz')
```
Importantly, the string you pass to `glob` can contain one or more wildcard characters (such as `?` or `*`). Also, *the returned list is not sorted*! Let's try to get all our VBM subject data into a list using this function:
```
# Build an OS-independent "search string" with os.path.join
# (works on Linux/Mac as well as Windows), then glob for matches.
pattern = os.path.join('vbm', 'sub-*.nii.gz')
vbm_files = glob(pattern)
# Equivalent one-liner: vbm_files = glob(os.path.join('vbm', 'sub-*.nii.gz'))
# Show what came back (note: glob does NOT sort its results)
print(vbm_files)
```
As you can see, *the list is not alphabetically sorted*, so let's fix that with the `sorted` function:
```
# Sort alphabetically so sub-01 comes first (glob's order is arbitrary)
vbm_files = sorted(vbm_files)
print(vbm_files)

# Note that we could have done that with a single statement
# vbm_files = sorted(glob(os.path.join('vbm', 'sub-*.nii.gz')))
# But also remember: shorter code is not always better!
```
<div class='alert alert-warning'>
<b>ToDo</b> (2 points): Create a 2D array with the vertically stacked subject-specific (flattened) VBM patterns, in which the first subject should be the first row. You may want to pre-allocate this array before starting your loop (using, e.g., <tt>np.zeros</tt>). Also, the <tt>enumerate</tt> function may be useful when writing your loop. Try to google how to flatten an N-dimensional array into a single vector. Store the final 2D array in a variable named <tt>R_vbm</tt>.
</div>
```
''' Implement the ToDo here. '''
# Exercise scaffold: loop over vbm_files, load each Nifti file, flatten
# its 3D data to 1D, and stack the rows into a 2D array named R_vbm
# (subjects x voxels). Then delete the NotImplementedError below.
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Automated check (requires the course-specific `niedu` package).
from niedu.tests.nipa.week_1 import test_R_vbm_loop
test_R_vbm_loop(R_vbm)
```
<div class='alert alert-success'>
<b>Tip</b>: While it is a good exercise to load in the data yourself, you can also easily load in and concatenate a set of Nifti files using Nilearn's <a href="https://nilearn.github.io/modules/generated/nilearn.image.concat_imgs.html">concat_imgs</a> function (which returns a 4D <tt>Nifti1Image</tt>, with the different patterns as the fourth dimension). You'd still have to reorganize this data into a 2D array, though.
</div>
```
# Run this cell after you're done with the ToDo.
# This IPython magic removes all numpy arrays from memory,
# clearing up RAM for the next sections.
%reset -f array
```
### Patterns as "points in space"
Before we continue with the topic of pattern estimation, there is one idea that we'd like to introduce: thinking of patterns as points (i.e., coordinates) in space. Thinking of patterns this way is helpful to understanding both machine learning based analyses and representational similarity analysis. While for some, this idea might sound trivial, we believe it's worth going over anyway. Now, let's make this idea more concrete.
Suppose we have estimated fMRI activity patterns for 20 trials (rows of $\mathbf{R}$). Now, we will also assume that those patterns consist of only two features (e.g., voxels; columns of $\mathbf{R}$), because this will make visualizing patterns as points in space easier than when we choose a larger number of features.
Alright, let's simulate and visualize the data (as a 2D array):
```
# Simulate N trial patterns with K features each, drawn from a
# standard multivariate normal (zero mean, identity covariance).
K = 2 # features (voxels)
N = 20 # samples (trials)
R = np.random.multivariate_normal(np.zeros(K), np.eye(K), size=N)
print("Shape of R:", R.shape)

# Show the 2D array as a heatmap (one row per trial)
fig, ax = plt.subplots(figsize=(2, 10))
heat = ax.imshow(R)
colorbar = fig.colorbar(heat, pad=0.1)
colorbar.set_label('Feature value', fontsize=13, rotation=270, labelpad=15)
ax.set_title(r"$\mathbf{R}$", fontsize=20)
ax.set_xlabel('Voxels', fontsize=15)
ax.set_ylabel('Trials', fontsize=15)
ax.set_xticks(np.arange(K))
ax.set_yticks(np.arange(N))
plt.show()
```
Now, we mentioned that each pattern (row of $\mathbf{R}$, i.e., $\mathbf{R}_{i}$) can be interpreted as a point in 2D space. With space, here, we mean a space where each feature (e.g., voxel; column of $\mathbf{R}$, i.e., $\mathbf{R}_{j}$) represents a separate axis. In our simulated data, we have two features (e.g., voxel 1 and voxel 2), so our space will have two axes:
```
# Draw an empty two-dimensional "feature space": each axis is one voxel
plt.figure(figsize=(5, 5))
plt.title("A two-dimensional space", fontsize=15)
plt.grid()
plt.xlabel('Activity voxel 1', fontsize=13)
plt.ylabel('Activity voxel 2', fontsize=13)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()
```
Within this space, each of our patterns (samples) represents a point. The values of each pattern represent the *coordinates* of its location in this space. For example, the coordinates of the first pattern are:
```
print(R[0, :])
```
As such, we can plot this pattern as a point in space:
```
# Plot the first pattern (row 0 of R) as a single point in voxel space
plt.figure(figsize=(5, 5))
plt.title("A two-dimensional space", fontsize=15)
plt.grid()
# scatter() is used here, but plt.plot(R[0, 0], R[0, 1], marker='o')
# would have worked just as well
x0, y0 = R[0, 0], R[0, 1]
plt.scatter(x0, y0, marker='o', s=75)
plt.axhline(0, c='k')
plt.axvline(0, c='k')
plt.xlabel('Activity voxel 1', fontsize=13)
plt.ylabel('Activity voxel 2', fontsize=13)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()
```
If we do this for all patterns, we get an ordinary scatter plot of the data:
```
# Plot *all* N patterns at once: each row of R becomes one point,
# giving an ordinary scatter plot in voxel space.
plt.figure(figsize=(5, 5))
plt.title("A two-dimensional space", fontsize=15)
plt.grid()
plt.scatter(R[:, 0], R[:, 1], marker='o', s=75)
plt.axhline(0, c='k')
plt.axvline(0, c='k')
plt.xlabel('Activity voxel 1', fontsize=13)
plt.ylabel('Activity voxel 2', fontsize=13)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()
```
It is important to realize that both perspectives — as a 2D array and as a set of points in $K$-dimensional space — represents the same data! Practically, pattern analysis algorithms usually expect the data as a 2D array, but (in our experience) the operations and mechanisms implemented by those algorithms are easiest to explain and to understand from the "points in space" perspective.
You might think, "but how does this work for data with more than two features?" Well, the idea of patterns as points in space remains the same: each feature represents a new dimension (or "axis"). For three features, this means that a pattern represents a point in 3D (X, Y, Z) space; for four features, a pattern represents a point in 4D space (like a point moving in 3D space) ... but what about a pattern with 14 features? Or 500? Actually, this is impossible to visualize or even make sense of mentally. As the famous artificial intelligence researcher Geoffrey Hinton put it:
> "To deal with ... a 14 dimensional space, visualize a 3D space and say 'fourteen' very loudly. Everyone does it." (Geoffrey Hinton)
The important thing to understand, though, is that most operations, computations, and algorithms that deal with patterns do not care about whether your data is 2D (two features) or 14D (fourteen features) — we just have to trust the mathematicians that whatever we do on 2D data will generalize to $K$-dimensional data :-)
That said, people still try to visualize >2D data using *dimensionality reduction* techniques. These techniques try to project data to a lower-dimensional space. For example, you can transform a dataset with 500 features (i.e., a 500-dimensional dataset) to a 2D dimensional dataset using techniques such as principal component analysis (PCA), Multidimensional Scaling (MDS), and t-SNE. For example, PCA tries to a subset of uncorrelated lower-dimensional features (e.g., 2) from linear combinations of high-dimensional features (e.g., 4) that still represent as much variance of the high-dimensional components as possible. We'll show you an example below using an implementation of PCA from the machine learning library [scikit-learn](https://scikit-learn.org/stable/), which we'll use extensively in next week's lab:
```
from sklearn.decomposition import PCA

# Simulate a 4-dimensional dataset: 100 samples, 4 features
# (variable names R4D/R2D/pca are reused by the optional ToDo below)
R4D = np.random.normal(0, 1, size=(100, 4))
print("Shape R4D:", R4D.shape)

# Set up a PCA model that projects the data onto 2 components
pca = PCA(n_components=2)
# Fit the model and project the 4D data down to 2D in one go
R2D = pca.fit_transform(R4D)
print("Shape R2D:", R2D.shape)

# Scatter plot of the two PCA components
plt.figure(figsize=(5, 5))
plt.scatter(R2D[:, 0], R2D[:, 1], marker='o', s=75)
plt.axhline(0, c='k')
plt.axvline(0, c='k')
plt.grid()
plt.xlabel('PCA component 1', fontsize=13)
plt.ylabel('PCA component 2', fontsize=13)
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b> (optional): As discussed, PCA is a specific dimensionality reduction technique that uses linear combinations of features to project the data to a lower-dimensional space with fewer "components". Linear combinations are simply weighted sums of high-dimensional features. In a 4D space that is projected to 2D, PCA component 1 might be computed as $\mathbf{R}_{j=1}\theta_{1}+\mathbf{R}_{j=2}\theta_{2}+\mathbf{R}_{j=3}\theta_{3}+\mathbf{R}_{j=4}\theta_{4}$, where $R_{j=1}$ represents the 1st feature of $\mathbf{R}$ and $\theta_{1}$ represents the <em>weight</em> for the 1st feature.
The weights of the fitted PCA model can be accessed by, confusingly, <tt>pca.components_</tt> (shape: $K_{lower} \times K_{higher}$. Using these weights, can you recompute the lower-dimensional features from the higher-dimensional features yourself? Try to plot it like the figure above and check whether it matches.
</div>
```
''' Implement the (optional) ToDo here. '''
# Optional exercise: recompute R2D from R4D using the fitted weights in
# pca.components_ (shape: n_components x n_features), then plot and
# compare against the figure above.
# YOUR CODE HERE
raise NotImplementedError()
```
Note that dimensionality reduction is often used for visualization, but it can also be used as a preprocessing step in pattern analyses. We'll take a look at this in more detail next week.
Alright, back to the topic of pattern extraction/estimation. You saw that preparing VBM data for (between-subject) pattern analyses is actually quite straightforward, but unfortunately, preparing functional MRI data for pattern analysis is a little more complicated. The reason is that we are dealing with time series in which different trials ($N$) are "embedded". The next section discusses different methods to "extract" (estimate) these trial-wise patterns.
## Estimating patterns
As we mentioned before, we should prepare our data as an $N$ (samples) $\times$ $K$ (features) array. With fMRI data, our data is formatted as a $X \times Y \times Z \times T$ array; we can flatten the $X$, $Y$, and $Z$ dimensions, but we still have to find a way to "extract" patterns for our $N$ trials from the time series (i.e., the $T$ dimension).
### Important side note: single trials vs. (runwise) average trials
In this section, we often assume that our "samples" refer to different *trials*, i.e., single instances of a stimulus or response (or another experimentally-related factor). This is, however, not the only option. Sometimes, researchers choose to treat multiple repetitions of a trial as a single sample or multiple trials within a condition as a single sample. For example, suppose you design a simple passive-viewing experiment with images belonging to one of three conditions: faces, houses, and chairs. Each condition has ten exemplars (face1, face2, ..., face10, house1, house2, ..., house10, chair1, chair2, ... , chair10) and each exemplar/item is repeated six times. So, in total there are 3 (conditions) $\times$ 10 (exemplars) $\times$ 6 (repetitions) = 180 trials. Because you don't want to bore the participant to death, you split the 180 trials into two runs (90 each).
Now, there are different ways to define your samples. One is to treat every single trial as a sample (so you'll have a 180 samples). Another way is to treat each exemplar as a sample. If you do so, you'll have to "pool" the pattern estimates across all 6 repetitions (so you'll have $10 \times 3 = 30$ samples). And yet another way is to treat each condition as a sample, so you'll have to pool the pattern estimates across all 6 repetitions and 10 exemplars per condition (so you'll end up with only 3 samples). Lastly, with respect to the latter two approaches, you may choose to only average repetitions and/or exemplars *within* runs. So, for two runs, you end up with either $10 \times 3 \times 2 = 60$ samples (when averaging across repetitions only) or $3 \times 2 = 6$ samples (when averaging across examplars and repetitions).
Whether you should perform your pattern analysis on the trial, examplar, or condition level, and whether you should estimate these patterns across runs or within runs, depends on your research question and analysis technique. For example, if you want to decode exemplars from each other, you obviously should not average across exemplars. Also, some experiments may not have different exemplars per condition (or do not have categorical conditions at all). With respect to the importance of analysis technique: when applying machine learning analyses to fMRI data, people often prefer to split their trials across many (short) runs and — if using a categorical design — prefer to estimate a single pattern per run. This is because samples across runs are not temporally autocorrelated, which is an important assumption in machine learning based analyses. Lastly, for any pattern analysis, averaging across different trials will increase the signal-to-noise ratio (SNR) for any sample (because you average out noise), but will decrease the statistical power of the analysis (because you have fewer samples).
Long story short: whatever you treat as a sample — single trials, (runwise) exemplars or (runwise) conditions — depends on your design, research question, and analysis technique. In the rest of the tutorial, we will usually refer to samples as "trials", as this scenario is easiest to simulate and visualize, but remember that this term may equally well refer to (runwise) exemplar-average or condition-average patterns.
---
To make the issue of estimating patterns from time series a little more concrete, let's simulate some signals. We'll assume that we have a very simple experiment with two conditions (A, B) with ten trials each (interleaved, i.e., ABABAB...AB), a trial duration of 1 second, spaced evenly within a single run of 200 seconds (with a TR of 2 seconds, so 100 timepoints). Note that you are not necessarily limited to discrete categorical designs for all pattern analyses! While for machine learning-based methods (topic of week 2) it is common to have a design with a single categorical feature of interest (or some times a single continuous one), representional similarity analyses (topic of week 3) are often applied to data with more "rich" designs (i.e., designs that include many, often continuously varying, factors of interest). Also, using twenty trials is probably way too few for any pattern analysis, but it'll make the examples (and visualizations) in this section easier to understand.
Alright, let's get to it.
```
TR = 2     # repetition time in seconds
N = 20     # total trials (2 conditions x 10 trials each)
T = 200    # run duration in seconds
t_pad = 10 # baseline period at the start and end of the run

# Space the N trial onsets evenly within [t_pad, T - t_pad)
onsets = np.linspace(t_pad, T - t_pad, N, endpoint=False)
durations = np.ones(onsets.size)
# Interleave the two conditions: A, B, A, B, ..., A, B
conditions = ['A', 'B'] * (N // 2)

print("Onsets:", onsets, end='\n\n')
print("Conditions:", conditions)
```
We'll use the `simulate_signal` function used in the introductory course to simulate the data. This function is like a GLM in reverse: it assumes that a signal ($R$) is generated as a linear combination between (HRF-convolved) experimental features ($\mathbf{S}$) weighted by some parameters ( $\beta$ ) plus some additive noise ($\epsilon$), and simulates the signal accordingly (you can check out the function by running `simulate_signal??` in a new code cell).
Because we simulate the signal, we can use "ground-truth" activation parameters ( $\beta$ ). In this simulation, we'll determine that the signal responds more strongly to trials of condition A ($\beta = 0.8$) than trials of condition B ($\beta = 0.2$) in *even* voxels (voxel 0, 2, etc.) and vice versa for *odd* voxels (voxel 1, 3, etc.):
```
# "Ground-truth" activation amplitudes (betas) for [condition A, condition B]:
# even-numbered voxels respond more strongly to A (0.8) than B (0.2) ...
params_even = np.array([0.8, 0.2])
# ... and odd-numbered voxels show the reverse pattern
params_odd = 1 - params_even
```
<div class='alert alert-info'>
<b>ToThink</b> (0 points): Given these simulation parameters, how do you think that the corresponding $N\times K$ pattern array ($\mathbf{R}$) would roughly look like visually (assuming an efficient pattern estimation method)?
</div>
Alright, We simulate some data for, let's say, four voxels ($K = 4$). (Again, you'll usually perform pattern analyses on many more voxels.)
```
from niedu.utils.nii import simulate_signal

K = 4  # number of simulated voxels

# Simulate one noisy time series per voxel. Even-indexed voxels get the
# "even" ground-truth amplitudes, odd-indexed voxels the mirrored ones.
ts = []
for vox in range(K):
    # Google "Python modulo" to figure out
    # what the line below does!
    even_voxel = (vox % 2) == 0
    sig, _ = simulate_signal(
        onsets,
        conditions,
        duration=T,
        plot=False,
        std_noise=0.25,
        params_canon=params_even if even_voxel else params_odd
    )
    # Keep each signal as a column vector so they can be stacked later
    ts.append(sig[:, np.newaxis])

# ts = timeseries; stack the K column vectors into a single T x K array
ts = np.hstack(ts)
print("Shape of simulated signals: ", ts.shape)
```
And let's plot these voxels. We'll show the trial onsets as arrows (red = condition A, orange = condition B):
```
import seaborn as sns

# Plot the four simulated voxel time series vertically: activation on the
# x-axis, time (in volumes) on the y-axis, trial onsets drawn as arrows
# (red = condition A, orange = condition B).
fig, axes = plt.subplots(ncols=K, sharex=True, sharey=True, figsize=(10, 12))
t = np.arange(ts.shape[0])
for i, ax in enumerate(axes.flatten()):
    # Plot signal
    ax.plot(ts[:, i], t, marker='o', ms=4, c='tab:blue')
    # Plot trial onsets (as arrows); onsets are in seconds, so divide by
    # TR to convert to a volume index on the y-axis
    for ii, to in enumerate(onsets):
        color = 'tab:red' if ii % 2 == 0 else 'tab:orange'
        ax.arrow(-1.5, to / TR, dy=0, dx=0.5, color=color, head_width=0.75, head_length=0.25)
    ax.set_xlim(-1.5, 2)
    ax.set_ylim(0, ts.shape[0])
    # NOTE(review): `grid(b=...)` was renamed `visible=` in Matplotlib >= 3.5 —
    # confirm against the Matplotlib version pinned for this course
    ax.grid(b=True)
    ax.set_title(f'Voxel {i+1}', fontsize=15)
    ax.invert_yaxis()  # time runs downward, like the design-matrix plots
    if i == 0:
        ax.set_ylabel("Time (volumes)", fontsize=20)

# Common axis labels
fig.text(0.425, -.03, "Activation (A.U.)", fontsize=20)
fig.tight_layout()
sns.despine()
plt.show()
```
<div class='alert alert-success'>
<b>Tip</b>: Matplotlib is a very flexible plotting package, but arguably at the expense of how fast you can implement something. <a href="https://seaborn.pydata.org/">Seaborn</a> is a great package (build on top of Matplotlib) that offers some neat functionality that makes your life easier when plotting in Python. For example, we used the <tt>despine</tt> function to remove the top and right spines to make our plot a little nicer. In this course, we'll mostly use Matplotlib, but we just wanted to make you aware of this awesome package.
</div>
Alright, now we can start discussing methods for pattern estimation! Unfortunately, as pattern analyses are relatively new, there is no consensus yet about the "best" method for pattern estimation. In fact, there exist many different methods, which we can roughly divide into two types:
1. Timepoint-based methods (for lack of a better name) and
2. GLM-based methods
We'll discuss both of them, but spend a little more time on the latter set of methods as they are more complicated (and are more popular).
### Timepoint-based methods
Timepoint-based methods "extract" patterns by simply using a single timepoint (e.g., 6 seconds after stimulus presentation) or (an average of) multiple timepoints (e.g., 4, 6, and 8 seconds after stimulus presentation).
Below, we visualize how a single-timepoint method would look (assuming that we'd want to extract the timepoint 6 seconds after stimulus presentation, i.e., around the assumed peak of the BOLD response). The stars represent the values that we would extract (red when condition A, orange when condition B). Note, we only plot the first 50 volumes.
```
# Visualize the single-timepoint extraction method: for each trial, the
# (interpolated) signal value 6 seconds post-onset is marked with a star
# (red = condition A, orange = condition B).
fig, axes = plt.subplots(ncols=4, sharex=True, sharey=True, figsize=(10, 12))
t_fmri = np.linspace(0, T, ts.shape[0], endpoint=False)
t = np.arange(ts.shape[0])
for i, ax in enumerate(axes.flatten()):
    # Plot signal
    ax.plot(ts[:, i], t, marker='o', ms=4, c='tab:blue')
    # Plot trial onsets (as arrows)
    for ii, to in enumerate(onsets):
        # Linearly interpolate the signal at onset + 6 s (~HRF peak)
        plus6 = np.interp(to+6, t_fmri, ts[:, i])
        color = 'tab:red' if ii % 2 == 0 else 'tab:orange'
        ax.arrow(-1.5, to / TR, dy=0, dx=0.5, color=color, head_width=0.75, head_length=0.25)
        # Mark the extracted value (x) at the corresponding volume index (y)
        ax.plot([plus6, plus6], [(to+6) / TR, (to+6) / TR], marker='*', ms=15, c=color)
    ax.set_xlim(-1.5, 2)
    # Only show the first half of the run (50 of the 100 volumes)
    ax.set_ylim(0, ts.shape[0] // 2)
    ax.grid(b=True)
    ax.set_title(f'Voxel {i+1}', fontsize=15)
    ax.invert_yaxis()
    if i == 0:
        ax.set_ylabel("Time (volumes)", fontsize=20)

# Common axis labels
fig.text(0.425, -.03, "Activation (A.U.)", fontsize=20)
fig.tight_layout()
sns.despine()
plt.show()
```
Now, extracting these timepoints 6 seconds after stimulus presentation is easy when this timepoint is a multiple of the scan's TR (here: 2 seconds). For example, to extract the value for the first trial (onset: 10 seconds), we simply take the 8th value in our timeseries, because $(10 + 6) / 2 = 8$. But what if our trial onset + 6 seconds is *not* a multiple of the TR, such as with trial 2 (onset: 19 seconds)? Well, we can interpolate this value! We will use the same function for this operation as we did for slice-timing correction (from the previous course): `interp1d` from the `scipy.interpolate` module.
To refresh your memory: this function takes the timepoints associated with the values (or "frame_times" in Nilearn lingo) and the values itself to generate a new object which we'll later use to do the actual (linear) interpolation. First, let's define the timepoints:
```
t_fmri = np.linspace(0, T, ts.shape[0], endpoint=False)
```
<div class='alert alert-warning'>
<b>ToDo</b> (1 point): The above timepoints assume that all data was acquired at the onset of the volume acquisition ($t=0$, $t=2$, etc.). Suppose that we actually slice-time corrected our data to the middle slice, i.e., the 18th slice (out of 36 slices) — create a new array (using <tt>np.linspace</tt> with timepoints that reflect these slice-time corrected acquisition onsets) and store it in a variable named <tt>t_fmri_middle_slice</tt>.
</div>
```
''' Implement your ToDo here. '''
# Student exercise placeholder: define `t_fmri_middle_slice` here
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Autograder check: verifies that t_fmri_middle_slice is correct
from niedu.tests.nipa.week_1 import test_frame_times_stc
test_frame_times_stc(TR, T, ts.shape[0], t_fmri_middle_slice)
```
For now, let's assume that all data was actually acquired at the start of the volume ($t=0$, $t=2$, etc.). We can "initialize" our interpolator by giving it both the timepoints (`t_fmri`) and the data (`ts`). Note that `ts` is not a single time series, but a 2D array with time series for four voxels (across different columns). By specifying `axis=0`, we tell `interp1d` that the first axis represents the axis that we want to interpolate later:
```
from scipy.interpolate import interp1d
# Build a linear interpolator along the time axis (axis=0) for all K voxel
# time series at once; calling it with arbitrary timepoints returns the
# linearly interpolated signal values for every voxel.
interpolator = interp1d(t_fmri, ts, axis=0)
```
Now, we can give the `interpolator` object any set of timepoints and it will return the linearly interpolated values associated with these timepoints for all four voxels. Let's do this for our trial onsets plus six seconds:
```
# Extract, for every trial, the interpolated signal value 6 s post-onset.
# The result is our first N (trials) x K (voxels) pattern matrix R.
onsets_plus_6 = onsets + 6
R_plus6 = interpolator(onsets_plus_6)
print("Shape extracted pattern:", R_plus6.shape)

# Visualize the pattern matrix as a heatmap (trials x voxels)
fig, ax = plt.subplots(figsize=(2, 10))
mapp = ax.imshow(R_plus6)
cbar = fig.colorbar(mapp)
cbar.set_label('Feature value', fontsize=13, rotation=270, labelpad=15)
ax.set_yticks(np.arange(N))
ax.set_xticks(np.arange(K))
ax.set_title(r"$\mathbf{R}$", fontsize=20)
ax.set_xlabel('Voxels', fontsize=15)
ax.set_ylabel('Trials', fontsize=15)
plt.show()
```
Yay, we have extracted our first pattern! Does it look like what you expected given the known mean amplitude of the trials from the two conditions ($\beta_{\mathrm{A,even}} = 0.8, \beta_{\mathrm{B,even}} = 0.2$ and vice versa for odd voxels)?
<div class='alert alert-warning'>
<b>ToDo</b> (3 points): An alternative to the single-timepoint method is to extract, per trial, the <em>average</em> activity within a particular time window, for example 5-7 seconds post-stimulus. One way to do this is by perform interpolation in steps of (for example) 0.1 within the 5-7 post-stimulus time window (i.e., $5.0, 5.1, 5.2, \dots , 6.8, 6.9, 7.0$) and subsequently averaging these values, per trial, into a single activity estimate. Below, we defined these different steps (<tt>t_post_stimulus</tt>) for you already. Use the <tt>interpolator</tt> object to extract the timepoints for these different post-stimulus times relative to our onsets (<tt>onsets</tt> variable) from our data (<tt>ts</tt> variable). Store the extracted patterns in a new variable called <tt>R_av</tt>.
Note: this is a relatively difficult ToDo! Consider skipping it if it takes too long.
</div>
```
''' Implement your ToDo here. '''
# Post-stimulus sample times in seconds: 5.0, 5.1, ..., 7.0
t_post_stimulus = np.linspace(5, 7, 21, endpoint=True)
print(t_post_stimulus)
# Student exercise placeholder: compute R_av (window-averaged patterns) here
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Autograder check: verifies the extracted/averaged patterns in R_av
from niedu.tests.nipa.week_1 import test_average_extraction
test_average_extraction(onsets, ts, t_post_stimulus, interpolator, R_av)
```
These timepoint-based methods are relatively simple to implement and computationally efficient. Another variation that you might see in the literature is that extracted (averages of) timepoints are baseline-subtracted ($\mathbf{R}_{i} - \mathrm{baseline}_{i}$) or baseline-normalized ($\frac{\mathbf{R}_{i}}{\mathrm{baseline}_{i}}$), where the baseline is usually chosen to be at the stimulus onset or a small window before the stimulus onset. This technique is, as far as we know, not very popular, so we won't discuss it any further in this lab.
### GLM-based methods
One big disadvantage of timepoint-based methods is that it cannot disentangle activity due to different sources (such as trials that are close in time), which is a major problem for fast (event-related) designs. For example, if you present a trial at $t=10$ and another at $t=12$ and subsequently extract the pattern six seconds post-stimulus (at $t=18$ for the second trial), then the activity estimate for the second trial is definitely going to contain activity due to the first trial because of the sluggishness of the HRF.
As such, nowadays GLM-based pattern estimation techniques, which *can* disentangle the contribution of different sources, are more popular than timepoint-based methods. (Although, technically, you can use timepoint-based methods using the GLM with FIR-based designs, but that's beyond the scope of this course.) Again, there are multiple flavors of GLM-based pattern estimation, of which we'll discuss the two most popular ones.
#### Least-squares all (LSA)
The most straightforward GLM-based pattern estimation technique is to fit a single GLM with a design matrix that contains one or more regressors for each sample that you want to estimate (in addition to any confound regressors). The estimated parameters ($\hat{\beta}$) corresponding to our samples from this GLM — representing the relative (de)activation of each voxel for each trial — will then represent our patterns!
This technique is often referred to as "least-squares all" (LSA). Note that, as explained before, a sample can refer to either a single trial, a set of repetitions of a particular exemplar, or even a single condition. For now, we'll assume that samples refer to single trials. Often, each sample is modelled by a single (canonical) HRF-convolved regressor (but you could also use more than one regressor, e.g., using a basis set with temporal/dispersion derivatives or a FIR-based basis set), so we'll focus on this approach.
Let's go back to our simulated data. We have a single run containing 20 trials, so ultimately our design matrix should contain twenty columns: one for every trial. We can use the `make_first_level_design_matrix` function from Nilearn to create the design matrix. Importantly, we should make sure to give a separate and unique "trial_type" values for all our trials. If we don't do this (e.g., set trial type to the trial condition: "A" or "B"), then Nilearn won't create separate regressors for our trials.
```
import pandas as pd
from nilearn.glm.first_level import make_first_level_design_matrix
# We have to create a dataframe with onsets/durations/trial_types
# No need for modulation!
events_sim = pd.DataFrame(onsets, columns=['onset'])
events_sim.loc[:, 'duration'] = 1
events_sim.loc[:, 'trial_type'] = ['trial_' + str(i).zfill(2) for i in range(1, N+1)]
# lsa_dm = least squares all design matrix
lsa_dm = make_first_level_design_matrix(
frame_times=t_fmri, # we defined this earlier for interpolation!
events=events_sim,
hrf_model='glover',
drift_model=None # assume data is already high-pass filtered
)
# Check out the created design matrix
# Note that the index represents the frame times
lsa_dm
```
Note that the design matrix contains 21 regressors: 20 trialwise regressors and an intercept (the last column). Let's also plot it using Nilearn:
```
from nilearn.plotting import plot_design_matrix
plot_design_matrix(lsa_dm);
```
And, while we're at it, plot it as time series (rather than a heatmap):
```
# Plot each design-matrix column as a time series, offset horizontally by
# its column index i so the regressors don't overlap.
fig, ax = plt.subplots(figsize=(12, 12))
for i in range(lsa_dm.shape[1]):
    ax.plot(i + lsa_dm.iloc[:, i], np.arange(ts.shape[0]))

ax.set_title("LSA design matrix", fontsize=20)
ax.set_ylim(0, lsa_dm.shape[0]-1)
ax.set_xlabel('')
ax.set_xticks(np.arange(N+1))
# One tick per trial regressor, plus the intercept ('icept') at the end
ax.set_xticklabels(['trial ' + str(i+1) for i in range(N)] + ['icept'], rotation=-90)
ax.invert_yaxis()
ax.grid()
ax.set_ylabel("Time (volumes)", fontsize=15)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo/ToThink</b> (2 points): One "problem" with LSA-type design matrices, especially in fast event-related designs, is that they are not very statistically <em>efficient</em>, i.e., they lead to relatively high variance estimates of your parameters ($\hat{\beta}$), mainly due to relatively high predictor variance. Because we used a fixed inter-trial interval (here: 9 seconds), the correlations between "adjacent" trials are (approximately) the same. <br>
Compute the correlation between, for example, the predictors associated with trial 1 and trial 2, using the <tt>pearsonr</tt> function imported below, and store it in a variable named <tt>corr_t1t2</tt> (1 point). Then, try to think of a way to improve the efficiency of this particular LSA design and write it down in the cell below the test cell.
</div>
```
''' Implement your ToDO here. '''
# For more info about the `pearsonr` function, check
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
# Want a challenge? Try to compute the correlation from scratch!
from scipy.stats import pearsonr
# Student exercise placeholder: compute corr_t1t2 here
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the ToDo above. '''
# Autograder check: verifies the trial-1 / trial-2 predictor correlation
from niedu.tests.nipa.week_1 import test_t1t2_corr
test_t1t2_corr(lsa_dm, corr_t1t2)
```
YOUR ANSWER HERE
Alright, let's actually fit the model! When dealing with real fMRI data, we'd use Nilearn to fit our GLM, but for now, we'll just use our own implementation of an (OLS) GLM. Note that we can actually fit a *single* GLM for all voxels at the same time by using `ts` (a $T \times K$ matrix) as our dependent variable due to the magic of linear algebra. In other words, we can run $K$ OLS models at once!
```
# Let's use 'X', because it's shorter
X = lsa_dm.values

# Fit the GLM for all K voxels at once by solving the normal equations
# (X'X) beta = X'y. Because `ts` is a T x K matrix, the solution is an
# n_regressor x k_voxel matrix — K OLS models in one line!
# Note: np.linalg.solve is numerically more stable (and cheaper) than
# explicitly inverting X'X with np.linalg.inv; for a full-rank design
# the result is the same OLS estimate.
beta_hat_all = np.linalg.solve(X.T @ X, X.T @ ts)
print("Shape beta_hat_all:", beta_hat_all.shape)

# The beta for the intercept (last design-matrix column) is still in
# there — split it off from the trialwise betas.
beta_icept = beta_hat_all[-1, :]
beta_hat = beta_hat_all[:-1, :]
print("Shape beta_hat (intercept removed):", beta_hat.shape)
```
Alright, let's visualize the estimated parameters ($\hat{\beta}$). We'll do this by plotting the scaled regressors (i.e., $X_{j}\hat{\beta}_{j}$) on top of the original signal. Each differently colored line represents a different regressor (so a different trial):
```
# Plot, per voxel, the original signal with each trial's scaled regressor
# (X_j * beta_hat_j, shifted by the intercept contribution) overlaid in a
# different color.
fig, axes = plt.subplots(ncols=4, sharex=True, sharey=True, figsize=(10, 12))
t = np.arange(ts.shape[0])
for i, ax in enumerate(axes.flatten()):
    # Plot signal
    ax.plot(ts[:, i], t, marker='o', ms=4, lw=0.5, c='tab:blue')
    # Plot trial onsets (as arrows); red = condition A, orange = condition B
    for ii, to in enumerate(onsets):
        color = 'tab:red' if ii % 2 == 0 else 'tab:orange'
        ax.arrow(-1.5, to / TR, dy=0, dx=0.5, color=color, head_width=0.75, head_length=0.25)
    # Compute x*beta for icept only (intercept = last design-matrix column)
    scaled_icept = lsa_dm.iloc[:, -1].values * beta_icept[i]
    for ii in range(N):
        this_x = lsa_dm.iloc[:, ii].values
        # Compute x*beta for this particular trial (ii)
        xb = scaled_icept + this_x * beta_hat[ii, i]
        ax.plot(xb, t, lw=2)
    ax.set_xlim(-1.5, 2)
    # Only show the first half of the run for readability
    ax.set_ylim(0, ts.shape[0] // 2)
    ax.grid(b=True)
    ax.set_title(f'Voxel {i+1}', fontsize=15)
    ax.invert_yaxis()
    if i == 0:
        ax.set_ylabel("Time (volumes)", fontsize=20)

# Common axis labels
fig.text(0.425, -.03, "Activation (A.U.)", fontsize=20)
fig.tight_layout()
sns.despine()
plt.show()
```
Ultimately, though, the estimated GLM parameters are just another way to estimate our pattern array ($\mathbf{R}$) — this time, we just estimated it using a different method (GLM-based) than before (timepoint-based). Therefore, let's visualize this array as we did with the other methods:
```
# Visualize the GLM-estimated pattern matrix R (trialwise betas) as a
# trials x voxels heatmap, analogous to the timepoint-based R earlier.
fig, ax = plt.subplots(figsize=(2, 10))
mapp = ax.imshow(beta_hat)
cbar = fig.colorbar(mapp)
cbar.set_label(r'$\hat{\beta}$', fontsize=25, rotation=0, labelpad=10)
ax.set_yticks(np.arange(N))
ax.set_xticks(np.arange(K))
ax.set_title(r"$\mathbf{R}$", fontsize=20)
ax.set_xlabel('Voxels', fontsize=15)
ax.set_ylabel('Trials', fontsize=15)
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b> (optional, 0 points): It would be nice to visualize the patterns, but this is very hard because we have four dimensions (because we have four voxels)! <br><br>PCA to the rescue! Run PCA on the estimated patterns (<tt>beta_hat</tt>) and store the PCA-transformed array (shape: $20 \times 2$) in a variable named <tt>beta_hat_2d</tt>. Then, try to plot the first two components as a scatterplot. Make it even nicer by plotting the trials from condition A as red points and trials from condition B as orange points.
</div>
```
# Optional exercise placeholder: PCA-transform beta_hat to a 20 x 2 array
# (beta_hat_2d) and scatterplot the first two components
# YOUR CODE HERE
raise NotImplementedError()

# Autograder check for the optional PCA exercise
from niedu.tests.nipa.week_1 import test_pca_beta_hat
test_pca_beta_hat(beta_hat, beta_hat_2d)
```
#### Noise normalization
One often used preprocessing step for pattern analyses (using GLM-estimation methods) is to use "noise normalization" on the estimated patterns. There are two flavours: "univariate" and "multivariate" noise normalization. In univariate noise normalization, the estimated parameters ($\hat{\beta}$) are divided (normalized) by the standard deviation of the estimated parameters — which you might recognize as the formula for $t$-values (for a contrast against baseline)!
\begin{align}
t_{c\hat{\beta}} = \frac{c\hat{\beta}}{\sqrt{\hat{\sigma}^{2}c(X^{T}X)^{-1}c^{T}}}
\end{align}
where $\hat{\sigma}^{2}$ is the estimate of the error variance (sum of squared errors divided by the degrees of freedom) and $c(X^{T}X)^{-1}c^{T}$ is the "design variance". Sometimes people disregard the design variance and the degrees of freedom (DF) and instead only use the standard deviation of the noise:
\begin{align}
t_{c\hat{\beta}} \approx \frac{c\hat{\beta}}{\sqrt{\sum (y_{i} - X_{i}\hat{\beta})^{2}}}
\end{align}
<div class='alert alert-info'>
<b>ToThink</b> (1 point): When experiments use a fixed ISI (in the context of single-trial GLMs), the omission of the design variance in univariate noise normalization is warranted. Explain why.
</div>
YOUR ANSWER HERE
Either way, this univariate noise normalization is a way to "down-weigh" the uncertain (noisy) parameter estimates. Although this type of univariate noise normalization seems to lead to better results in both decoding and RSA analyses (e.g., [Misaki et al., 2010](https://www.ncbi.nlm.nih.gov/pubmed/20580933)), the jury is still out on this issue.
Multivariate noise normalization will be discussed in week 3 (RSA), so let's focus for now on the implementation of univariate noise normalization using the approximate method (which disregards design variance). To compute the standard deviation of the noise ($\sqrt{\sum (y_{i} - X_{i}\hat{\beta})^{2}}$), we first need to compute the noise, i.e., the unexplained variance ($y - X\hat{\beta}$) also known as the residuals:
```
# Residuals = the unexplained part of the signal, y - X @ beta, per voxel
# (X and beta_hat_all both still include the intercept term)
residuals = ts - X @ beta_hat_all
print("Shape residuals:", residuals.shape)
```
So, for each voxel ($K=4$), we have a timeseries ($T=100$) with unexplained variance ("noise"). Now, to get the standard deviation across all voxels, we can do the following:
```
# Noise standard deviation, computed over time (axis=0) -> one value per voxel
std_noise = np.std(residuals, axis=0)
print("Shape noise std:", std_noise.shape)
```
To do the actual normalization step, we simply divide the columns of the pattern matrix (`beta_hat`, which we estimated before) by the estimated noise standard deviation:
```
# unn = univariate noise normalization: divide each voxel's betas (columns
# of beta_hat) by that voxel's noise standard deviation.
# Note that we don't have to do this for each trial (row) separately
# due to Numpy broadcasting!
R_unn = beta_hat / std_noise
print("Shape R_unn:", R_unn.shape)
```
And let's visualize it:
```
# Visualize the noise-normalized pattern matrix (values now on a t-like scale)
fig, ax = plt.subplots(figsize=(2, 10))
mapp = ax.imshow(R_unn)
cbar = fig.colorbar(mapp)
cbar.set_label(r'$t$', fontsize=25, rotation=0, labelpad=10)
ax.set_yticks(np.arange(N))
ax.set_xticks(np.arange(K))
ax.set_title(r"$\mathbf{R}_{unn}$", fontsize=20)
ax.set_xlabel('Voxels', fontsize=15)
ax.set_ylabel('Trials', fontsize=15)
plt.show()
```
<div class='alert alert-info'>
<b>ToThink</b> (1 point): In fact, univariate noise normalization didn't really change the pattern matrix much. Why do you think this is the case for our simulation data? Hint: check out the parameters for the simulation.
</div>
YOUR ANSWER HERE
#### LSA on real data
Alright, enough with all that fake data — let's work with some real data! We'll use the face perception task data from the *NI-edu* dataset, which we briefly mentioned in the fMRI-introduction course.
In the face perception task, participants were presented with images of faces (from the publicly available [Face Research Lab London Set](https://figshare.com/articles/Face_Research_Lab_London_Set/5047666)). In total, frontal face images from 40 different people ("identities") were used, which were either without expression ("neutral") or were smiling. Each face image (from in total 80 faces, i.e., 40 identities $\times$ 2, neutral/smiling) was shown, per participant, 6 times across the 12 runs (3 times per session).
<div class='alert alert-info'>
<b>Mini ToThink</b> (0 points): Why do you think we show the same image multiple times?
</div>
Identities were counterbalanced in terms of biological sex (male vs. female) and ethnicity (Caucasian vs. East-Asian vs. Black). The Face Research Lab London Set also contains the age of the people in the stimulus dataset and (average) attractiveness ratings for all faces from an independent set of raters. In addition, we also had our own participants rate the faces on perceived attractiveness, dominance, and trustworthiness after each session (rating each face, on each dimension, four times in total for robustness). The stimuli were chosen such that we have many different attributes that we could use to model brain responses (e.g., identity, expression, ethnicity, age, average attractiveness, and subjective/personal perceived attractiveness/dominance/trustworthiness).
In this paradigm, stimuli were presented for 1.25 seconds and had a fixed interstimulus interval (ISI) of 3.75 seconds. While sub-optimal for univariate "detection-based" analyses, we used a fixed ISI — rather than jittered — to make sure it can also be used for "single-trial" multivariate analyses. Each run contained 40 stimulus presentations. To keep the participants attentive, a random selection of 5 stimuli (out of 40) were followed by a rating on either perceived attractiveness, dominance, or trustworthiness using a button-box with eight buttons (four per hand) lasting 2.5 seconds. After the rating, a regular ISI of 3.75 seconds followed. See the figure below for a visualization of the paradigm.

First, let's set up all the data that we need for our LSA model. Let's see where our data is located:
```
import os
# Root directory of the NI-edu dataset on this machine
data_dir = os.path.join(os.path.expanduser('~'), 'NI-edu-data')
print("Downloading Fmriprep data (+- 175MB) ...\n")
# Pull only the files needed for this lab (sub-03, ses-1, run-1 of the
# face task) from OpenNeuro via the AWS CLI; --no-sign-request means no
# AWS credentials are needed for this public bucket
!aws s3 sync --no-sign-request s3://openneuro.org/ds003477 {data_dir} --exclude "*" --include "sub-03/ses-1/func/*task-face*run-1*events.tsv"
!aws s3 sync --no-sign-request s3://openneuro.org/ds003477 {data_dir} --exclude "*" --include "derivatives/fmriprep/sub-03/ses-1/func/*task-face*run-1*space-T1w*bold.nii.gz"
!aws s3 sync --no-sign-request s3://openneuro.org/ds003477 {data_dir} --exclude "*" --include "derivatives/fmriprep/sub-03/ses-1/func/*task-face*run-1*space-T1w*mask.nii.gz"
!aws s3 sync --no-sign-request s3://openneuro.org/ds003477 {data_dir} --exclude "*" --include "derivatives/fmriprep/sub-03/ses-1/func/*task-face*run-1*confounds_timeseries.tsv"
print("\nDone!")
```
As you can see, it contains both "raw" (not-preprocessed) subject data (e.g., sub-03) and derivatives, which include Fmriprep-preprocessed data:
```
# Fmriprep derivatives directory for subject 03
fprep_sub03 = os.path.join(data_dir, 'derivatives', 'fmriprep', 'sub-03')
print("Contents derivatives/fmriprep/sub-03:", os.listdir(fprep_sub03))
```
There is preprocessed anatomical data and session-specific functional data:
```
# Session-1 functional derivatives for sub-03 (sorted for stable display)
fprep_sub03_ses1_func = os.path.join(fprep_sub03, 'ses-1', 'func')
contents = sorted(os.listdir(fprep_sub03_ses1_func))
print("Contents ses-1/func:", '\n'.join(contents))
```
That's a lot of data! Importantly, we will only use the "face" data ("task-face") in T1 space ("space-T1w"), meaning that this data has not been normalized to a common template (unlike the "space-MNI152NLin2009cAsym" data). Here, we'll only analyze the first run ("run-1") data. Let's define the functional data, the associated functional brain mask (a binary image indicating which voxels are brain and which are not), and the file with timepoint-by-timepoint confounds (such as motion parameters):
```
# Preprocessed BOLD data (T1w space) for run 1 of the face task
func = os.path.join(fprep_sub03_ses1_func, 'sub-03_ses-1_task-face_run-1_space-T1w_desc-preproc_bold.nii.gz')

# Notice this neat little trick: we use the string method "replace" to define
# the functional brain mask
func_mask = func.replace('desc-preproc_bold', 'desc-brain_mask')

# Timepoint-by-timepoint confounds (motion parameters etc.) from Fmriprep
confs = os.path.join(fprep_sub03_ses1_func, 'sub-03_ses-1_task-face_run-1_desc-confounds_timeseries.tsv')
confs_df = pd.read_csv(confs, sep='\t')
confs_df
```
Finally, we need the events-file with onsets, durations, and trial-types for this particular run:
```
# Events file (onsets/durations/trial types) for this run; displayed here
# without the 'rating' and 'response' events
events = os.path.join(data_dir, 'sub-03', 'ses-1', 'func', 'sub-03_ses-1_task-face_run-1_events.tsv')
events_df = pd.read_csv(events, sep='\t')
events_df.query("trial_type != 'rating' and trial_type != 'response'")
```
Now, it's up to you to use this data to fit an LSA model!
<div class='alert alert-warning'>
<b>ToDo</b> (2 points): in this first ToDo, you define your events and the confounds you want to include.<br>
1. Remove all columns except "onset", "duration", and "trial_type". You should end up with a DataFrame with 40 rows and 3 columns. You can check this with the <tt>.shape</tt> attribute of the DataFrame. (Note that, technically, you could model the reponse and rating-related events as well! For now, we'll exclude them.) Name this filtered DataFrame <tt>events_df_filt</tt>.
2. You also need to select specific columns from the confounds DataFrame, as we don't want to include <em>all</em> confounds! For now, include only the motion parameters (<tt>trans_x, trans_y, trans_z, rot_x, rot_y, rot_z</tt>). You should end up with a confounds DataFrame with 342 rows and 6 columns. Name this filtered DataFrame <tt>confs_df_filt</tt>.
</div>
```
''' Implement your ToDo here. '''
# Student exercise placeholder: define events_df_filt and confs_df_filt here
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Sanity checks on the filtered events and confounds dataframes
assert(events_df_filt.shape == (40, 3))
assert(events_df_filt.columns.tolist() == ['onset', 'duration', 'trial_type'])
assert(confs_df_filt.shape == (confs_df.shape[0], 6))
assert(all('trans' in col or 'rot' in col for col in confs_df_filt.columns))
print("Well done!")
```
<div class='alert alert-warning'>
<b>ToDo</b> (2 points): in this Todo, you'll fit your model! Define a <tt>FirstLevelModel</tt> object, name this <tt>flm_todo</tt> and make sure you do the following:<br>
1. Set the correct TR (this is 0.7)
2. Set the slice time reference to 0.5
3. Set the mask image to the one we defined before
4. Use a "glover" HRF
5. Use a "cosine" drift model with a cutoff of 0.01 Hz
6. Do not apply any smoothing
7. Set minimize_memory to true
8. Use an "ols" noise model
Then, fit your model using the functional data (<tt>func</tt>), filtered confounds, and filtered events we defined before.
</div>
```
''' Implement your ToDo here. '''
# Ignore the DeprecationWarning!
from nilearn.glm.first_level import FirstLevelModel
# Student exercise placeholder: define and fit flm_todo here
# YOUR CODE HERE
raise NotImplementedError()

""" Tests the above ToDo. """
# Autograder check: verifies the fitted FirstLevelModel (flm_todo)
from niedu.tests.nipa.week_1 import test_lsa_flm
test_lsa_flm(flm_todo, func_mask, func, events_df_filt, confs_df_filt)
```
<div class='alert alert-warning'>
<b>ToDo</b> (2 points): in this Todo, you'll run the single-trial contrasts ("against baseline"). To do so, write a for-loop in which you call the <tt>compute_contrast</tt> method every iteration with a new contrast definition for a new trial. Make sure to output the "betas" (by using <tt>output_type='effect_size'</tt>).
Note that the <tt>compute_contrast</tt> method returns the "unmasked" results (i.e., from all voxels). Make sure that, for each trial, you mask the results using the <tt>func_mask</tt> variable and the <tt>apply_mask</tt> function from Nilearn. Save these masked results (which should be patterns of 66298 voxels) for each trial. After the loop, stack all results in a 2D array with the different trials in different rows and the (flattened) voxels in columns. This array should be of shape 40 (trials) by 65643 (nr. of masked voxels). The variable name of this array should be <tt>R_todo</tt>.
</div>
```
''' Implement your ToDo here. '''
from nilearn.masking import apply_mask
# Student exercise placeholder: build the 40 x n_voxels array R_todo here
# YOUR CODE HERE
raise NotImplementedError()

''' Tests the above ToDo. '''
# Autograder check: verifies the stacked single-trial pattern array R_todo
from niedu.tests.nipa.week_1 import test_lsa_R
test_lsa_R(R_todo, events_df_filt, flm_todo, func_mask)
```
<div class='alert alert-success'>
<b>Disclaimer</b>: In this ToDo, we asked you <em>not</em> to spatially smooth the data. This is often recommended for pattern analyses, as they arguably use information that is encoded in finely distributed patterns. However, several studies have shown that smoothing may sometimes benefit pattern analyses (e.g., <a href="https://www.frontiersin.org/articles/10.3389/fneur.2017.00222/full">Hendriks et al., 2017</a>). In general, in line with the <a href="https://en.wikipedia.org/wiki/Matched_filter">matched filter theorem</a>, we recommend smoothing your data with a kernel equal to how finegrained you think your experimental feature is encoded in the brain patterns.
</div>
## Dealing with trial correlations
When working with single-trial experimental designs (such as the LSA designs discussed previously), one often occurring problem is correlation between trial predictors and their resulting estimates. Trial correlations in such designs occur when the inter-stimulus interval (ISI) is sufficiently short such that trial predictors overlap and thus correlate. This, in turn, leads to relatively unstable (high-variance) pattern estimates and, as we will see later in this section, trial patterns that correlate with each other (which is sometimes called [pattern drift](https://www.biorxiv.org/content/10.1101/032391v2)).
This is also the case in our data from the NI-edu dataset. In the "face" task, stimuli were presented for 1.25 seconds, followed by a 3.75 ISI, which causes a slightly positive correlation between a given trial ($i$) and the next trial ($i + 1$) and a slightly negative correlation between the trial after that ($i + 2$). We'll show this below by visualizing the correlation matrix of the design matrix:
```
# Load the LSA design matrix and keep only the 40 trial regressors
dm_todo = pd.read_csv('dm_todo.tsv', sep='\t').iloc[:, :40]

fig, ax = plt.subplots(figsize=(8, 8))
# Slightly exaggerate the structure by clipping the color limits to (-.3, .3)
mapp = ax.imshow(dm_todo.corr(), vmin=-0.3, vmax=0.3)

# Label both axes with the trial (regressor) names
trial_ticks = range(dm_todo.shape[1])
ax.set_xticks(trial_ticks)
ax.set_xticklabels(dm_todo.columns, rotation=90)
ax.set_yticks(trial_ticks)
ax.set_yticklabels(dm_todo.columns)

cbar = plt.colorbar(mapp, shrink=0.825)
cbar.ax.set_ylabel('Correlation', fontsize=15, rotation=-90)
plt.show()
```
<div class='alert alert-info'>
<b>ToThink</b> (1 point): Explain why trials (at index $i$) correlate slightly <em>negatively</em> with the second trial coming after it (at index $i + 2$). Hint: try to plot it!
</div>
YOUR ANSWER HERE
The trial-by-trial correlation structure in the design leads to a trial-by-trial correlation structure in the estimated patterns as well (as explained by [Soch et al., 2020](https://www.sciencedirect.com/science/article/pii/S1053811919310407)). We show this below by computing and visualizing the $N \times N$ correlation matrix of the patterns:
```
# Load in R_todo if you didn't manage to do the
# previous ToDo
R_todo = np.load('R_todo.npy')
# Compute the NxN (trial-by-trial) correlation matrix of the patterns
R_corr = np.corrcoef(R_todo)
fig, ax = plt.subplots(figsize=(8, 8))
mapp = ax.imshow(R_corr, vmin=-1, vmax=1)
# Some styling: label both axes with the trial names from the design matrix
ax.set_xticks(range(dm_todo.shape[1]))
ax.set_xticklabels(dm_todo.columns, rotation=90)
ax.set_yticks(range(dm_todo.shape[1]))
ax.set_yticklabels(dm_todo.columns)
cbar = plt.colorbar(mapp, shrink=0.825)
cbar.ax.set_ylabel('Correlation', fontsize=15, rotation=-90)
plt.show()
```
This correlation structure across trials poses a problem for representational similarity analysis (the topic of week 3) especially. Although this issue is still debated and far from solved, in this section we highlight two possible solutions to this problem: least-squares separate designs and temporal "uncorrelation".
### Least-squares separate (LSS)
The least-squares separate (LSS) design is a slight modification of the LSA design ([Mumford et al., 2014](https://www.sciencedirect.com/science/article/pii/S105381191400768X)). In LSS, you fit a separate model per trial. Each model contains one regressor for the trial that you want to estimate and, for each condition in your experimental design (in case of a categorical design), another regressor containing all other trials.
So, suppose you have a run with 30 trials across 3 conditions (A, B, and C); using an LSS approach, you'd fit 30 different models, each containing four regressors (one for the single trial, one for all (other) trials of condition A, one for all (other) trials of condition B, and one for all (other) trials of condition C). The apparent upside of this is that it strongly reduces the collinearity of trials close in time, which in turn makes the trial parameters more efficient to estimate.
<div class='alert alert-info'>
<b>ToThink</b> (1 point): Suppose my experiment contains 90 stimuli which all belong to their own condition (i.e., there are 90 conditions). Explain why LSS provides no improvement over LSA in this case.
</div>
YOUR ANSWER HERE
We'll show this for our example data. It's a bit complicated (and not necessarily the best/fastest/clearest way), but the comments will explain what it's doing. Essentially, what we're doing, for each trial, is to extract that regressor for a standard LSA design and, for each condition, create a single regressor by summing all single-trial regressors from that condition together.
```
# First, we'll make a standard LSA design matrix
lsa_dm = make_first_level_design_matrix(
    frame_times=t_fmri,  # we defined this earlier for interpolation!
    events=events_sim,
    hrf_model='glover',
    drift_model=None  # assume data is already high-pass filtered
)
# Then, we will loop across trials, making a single GLM per trial
lss_dms = []  # we'll store the design matrices here
# Do not include last column, the intercept, in the loop
for i, col in enumerate(lsa_dm.columns[:-1]):
    # Extract the single-trial predictor
    single_trial_reg = lsa_dm.loc[:, col]
    # Now, we need to create a predictor per condition
    # (one for A, one for B). We'll store these in "other_regs"
    other_regs = []
    # Loop across unique conditions ("A" and "B")
    for con in np.unique(conditions):
        # Which columns belong to the current condition?
        idx = con == np.array(conditions)
        # Make sure NOT to include the trial we're currently estimating!
        idx[i] = False
        # Also, exclude the intercept (last column)
        idx = np.append(idx, False)
        # Now, extract all N-1 regressors
        con_regs = lsa_dm.loc[:, idx]
        # And sum them together!
        # This creates a single predictor for the current
        # condition
        con_reg_all = con_regs.sum(axis=1)
        # Save for later
        other_regs.append(con_reg_all)
    # Concatenate the condition regressors (one for A, one for B)
    other_regs = pd.concat(other_regs, axis=1)
    # Concatenate the single-trial regressor and two condition regressors
    this_dm = pd.concat((single_trial_reg, other_regs), axis=1)
    # Add back an intercept!
    this_dm.loc[:, 'intercept'] = 1
    # Give it sensible column names
    this_dm.columns = ['trial_to_estimate'] + list(set(conditions)) + ['intercept']
    # Save for later
    lss_dms.append(this_dm)
print("We have created %i design matrices!" % len(lss_dms))
```
Alright, now let's check out the first five design matrices, which should estimate the first five trials and contain 4 regressors each (one for the single trial, two for the separate conditions, and one for the intercept):
```
# Show the LSS design matrices for the first five trials side by side
fig, axes = plt.subplots(ncols=5, figsize=(15, 10))
for trial_idx, panel in enumerate(axes.flatten()):
    plot_design_matrix(lss_dms[trial_idx], ax=panel)
    panel.set_title("Design for trial %i" % (trial_idx + 1), fontsize=20)
plt.tight_layout()
plt.show()
```
<div class='alert alert-warning'>
<b>ToDo</b> (optional; 1 bonus point): Can you implement an LSS approach to estimate our patterns on the real data? You can reuse the <tt>flm_todo</tt> you created earlier; the only thing you need to change each time is the design matrix. Because we have 40 trials, you need to fit 40 different models (which takes a while). Note that our experimental design does not necessarily have discrete categories, so your LSS design matrices should only have 3 columns: one for the trial to estimate, one for all other trials, and one for the intercept. After fitting each model, compute the trial-against-baseline contrast for the single trial and save the parameter ("beta") map. Then, after the loop, create the same pattern matrix as the previous ToDo, which should also have the same shape, but name it this time <tt>R_todo_lss</tt>. Note, this is a <em>very</em> hard ToDo, but a great way to test your programming skills :-)
</div>
```
''' Implement your ToDo here. Note that we already created the LSA design matrix for you. '''
func_img = nib.load(func)
n_vol = func_img.shape[-1]
lsa_dm = make_first_level_design_matrix(
frame_times=np.linspace(0, n_vol * 0.7, num=n_vol, endpoint=False),
events=events_df_filt,
drift_model=None
)
# YOUR CODE HERE
raise NotImplementedError()
''' Tests the above ToDo. '''
from niedu.tests.nipa.week_1 import test_lss
test_lss(R_todo_lss, func, flm_todo, lsa_dm, confs_df_filt)
```
<div class='alert alert-success'>
<b>Tip</b>: Programming your own pattern estimation pipeline allows you to be very flexible and is a great way to practice your programming skills, but if you want a more "pre-packaged" tool, I recommend the <a href="https://nibetaseries.readthedocs.io/en/stable/">nibetaseries</a> package. The package's name is derived from a specific analysis technique called "beta-series correlation", which is a type of analysis that allows for resting-state like connectivity analyses of task-based fMRI data (which we won't discuss in this course). For this technique, you need to estimate single-trial activity patterns — just like we need to do for pattern analyses! I've used this package to estimate patterns for pattern analysis and I highly recommend it!
</div>
### Temporal uncorrelation
Another method to deal with trial-by-trial correlations is the "uncorrelation" method by [Soch and colleagues (2020)](https://www.sciencedirect.com/science/article/pii/S1053811919310407). As opposed to the LSS method, the uncorrelation approach takes care of the correlation structure in the data in a post-hoc manner. It does so, in essence, by "removing" the correlations in the data that are due to the correlations in the design in a way that is similar to what prewhitening does in generalized least squares.
Formally, the "uncorrelated" patterns ($R_{\mathrm{unc}}$) are estimated by (matrix) multiplying the square root ($^{\frac{1}{2}}$) of covariance matrix of the LSA design matrix ($X^{T}X$) with the patterns ($R$):
\begin{align}
R_{\mathrm{unc}} = (X^{T}X)^{\frac{1}{2}}R
\end{align}
Here, $(X^{T}X)^{\frac{1}{2}}$ represents the "whitening" matrix which uncorrelates the patterns. Let's implement this in code. Note that we can use the `sqrtm` function from the `scipy.linalg` package to take the square root of a matrix:
```
from scipy.linalg import sqrtm

# Design matrix (trial regressors only)
X = dm_todo.to_numpy()

# BUG FIX: per the formula above, the whitening matrix is the matrix square
# root of the design covariance, (X'X)^(1/2). The original code multiplied by
# (X'X) itself, omitting the sqrtm step.
R_unc = sqrtm(X.T @ X) @ R_todo
```
This uncorrelation technique is something we'll see again in week 3 when we'll talk about multivariate noise normalization!
Alright, that was it for this lab! We have covered the basics of experimental design and pattern estimation techniques for fMRI data. Note that there are many other (more advanced) things related to pattern estimation that we haven't discussed, such as standardization of patterns, multivariate noise normalization, [hyperalignment](https://www.sciencedirect.com/science/article/pii/S0896627311007811), etc. etc. Some of these topics will be discussed in week 2 (decoding) or week 3 (RSA).
| github_jupyter |
# Policy compared to Covid-19 Case Rate
All the typical caveats apply...
- for example testing goes up through time... so case rate is skewed through time
## Bring in df and aggregate to index by date for all of UK
```
# Import libraries
import numpy as np
import pandas as pd

df = pd.read_csv('cases_analysis.csv')
df.drop(columns=['Unnamed: 0'], inplace=True)

# The aggregated per-area file above proved awkward to collapse into a
# UK-wide daily series, so we start from scratch with the raw UK cases file.
df = pd.read_csv('UK_cases.csv')
df.drop(columns=['Area name', 'Area code', 'Area type'], inplace=True)
df['Date'] = pd.to_datetime(df['Date'])
df = df.sort_values(by='Date')

# Derive the daily lab-confirmed case rate from the cumulative rate
df['Cumulative lab-confirmed cases rate'] = df['Cumulative lab-confirmed cases rate'].astype(float)
df['Daily_lab_confirmed_case_rate'] = df['Cumulative lab-confirmed cases rate'].diff()
df = df.dropna()

# Plot date against the daily lab-confirmed case rate
df = df.set_index('Date')
df['Daily_lab_confirmed_case_rate'].plot()

# Create the 7-day rolling average
# BUG FIX: the window was previously 5, which contradicted the column name
# and the surrounding comments; corrected to 7.
df['7_day_rolling_ave'] = df['Daily_lab_confirmed_case_rate'].rolling(window=7).mean()
df = df.dropna()
df['7_day_rolling_ave'] = df['7_day_rolling_ave'].astype(float)
df['7_day_rolling_ave'].plot()
# looks nice and smooth now

# Day-over-day relative change of the rolling average:
# negative means the case rate went down, positive means it went up
df['pctchange_in_case_rate'] = df['7_day_rolling_ave'].pct_change()
df = df.dropna()  # drop rows made NaN by diff/pct_change
df = df.drop(df.index[0])

# Next step: % change from each day's rolling average to the rolling
# average two weeks later
df1 = df[['7_day_rolling_ave']]
df['pctchange_in_case_rate'].plot()

# Add the date two weeks in the future for each row
df1 = df1.reset_index()
from datetime import timedelta
df1['Date_in_2weeks'] = df1['Date'] + timedelta(days=14)

# Self-join so each row also carries the rolling average observed two weeks
# later; .copy() avoids a SettingWithCopyWarning on the rename below
temp = df1[['Date', '7_day_rolling_ave']].copy()
temp.rename(columns={'7_day_rolling_ave': '7_day_rolling_ave_in_2weeks'}, inplace=True)
df1 = pd.merge(df1, temp, how='left', left_on='Date_in_2weeks', right_on='Date')
df1 = df1.dropna()

# % change in rolling-average case rate between each day and two weeks later;
# sign is flipped below so + means cases went up and - means cases went down
df1['pct_change_between_now_and_two_weeks'] = df1[['7_day_rolling_ave', '7_day_rolling_ave_in_2weeks']].apply(lambda row: (row.iloc[0]-row.iloc[1])/row.iloc[0]*100, axis=1)
df1['pct_change_between_now_and_two_weeks'] = df1['pct_change_between_now_and_two_weeks'] * -1

# Sanity check that the 2-week shift lined up correctly
df1[df1['Date_x'] == '2020-03-16']

# Merge the 2-week-ahead columns back onto the daily frame
df1 = df1[['Date_x', 'Date_in_2weeks', '7_day_rolling_ave_in_2weeks', 'pct_change_between_now_and_two_weeks']]
df = df.reset_index()
df = pd.merge(df, df1, how='left', left_on='Date', right_on='Date_x')
df = df.drop(columns=['Date_x'])

# Attach the policy data by date
policy = pd.read_csv('Policy_for_analysis.csv')
policy = policy.drop(columns=['Introduced by'])
policy['Date'] = pd.to_datetime(policy['Date'])
policy = policy.rename(columns={'Date': 'Date_1'})
df = pd.merge(df, policy, how='left', left_on='Date', right_on='Date_1')

# Variant keeping only one policy row per date (first occurrence)
policy2 = policy.drop_duplicates(['Date_1'])
df_left = pd.merge(df, policy2.drop_duplicates(), how='left', left_on='Date', right_on='Date_1')
df
df.isnull().sum()
# So: the last two weeks plus 118 days have no policies enacted.
# Note also that when two or more policies share a date, only the first is counted.
```
# What the XGBoost Model Showed
We'll use these F-values to determine the policies to examine, as they have the greatest correlation with predicting COVID cases.
1. Deemed significant by policy tracker
2. Social Distancing Measures
3. Testing, surveillance and contact tracing
4. Infection prevention and control
```
# List the policy categories present in the data
df.Category.unique()

# Subset the rows for each of the three categories of interest, so we can ask:
# with a policy of this type enacted, what is the subsequent percent change in
# the confirmed COVID case rate?
category_col = df['Category']
SDM = df[category_col == 'Social distancing measures']
TSC = df[category_col == 'Testing, surveillance and contact tracing']
IPC = df[category_col == 'Infection prevention and control']
```
### Deemed significant by tracker
### Social Distancing Measures
```
# Summarise the 2-week case-rate change around social-distancing policies
print('Number of policies', len(SDM))
outcome = SDM['pct_change_between_now_and_two_weeks']
print(outcome.describe())  # the mean still shows an average increase in case rate...
outcome.median()  # ...but the median suggests these policies precede a decrease
# (~23% reduction in case rate when a policy of this type is enacted)

# Ten policies with the largest subsequent drop in case rate
Temp = SDM[['Date', 'Policy', 'pct_change_between_now_and_two_weeks']].nsmallest(10, 'pct_change_between_now_and_two_weeks')
Policy_list = list(Temp['Policy'].values)
print(Policy_list)
print("key dates seem to be the 21st and 22nd as one group, 03/04 as another, and 13/14th")
# Dates cluster; unsupervised clustering could extract key policy dates
Temp
```
### Testing, surveillance and contact tracing
```
# Summarise testing, surveillance and contact-tracing policies
print('Number of policies', len(TSC))
outcome = TSC['pct_change_between_now_and_two_weeks']
print(outcome.describe())  # mean still shows an average increase (higher than SDM)...
outcome.median()  # ...while the median shows a decrease in case rate
# (~26% reduction in case rate when a policy of this type is enacted)

# Which were the actual policies? Note that policies from the day before or
# after could also reasonably be considered influential/relevant.
# Ten policies with the largest subsequent drop in case rate:
Temp = TSC[['Date', 'Policy', 'pct_change_between_now_and_two_weeks']].nsmallest(10, 'pct_change_between_now_and_two_weeks')
Policy_list = list(Temp['Policy'].values)
print(Policy_list)
print("key dates seem to be the 21st and 22nd as one group, 03/04 as another, and 13/14th")
# Dates cluster; unsupervised clustering could extract key policy dates
Temp
```
### Infection Prevention and Control
```
# Summarise infection prevention and control policies
print('Number of policies', len(IPC))
outcome = IPC['pct_change_between_now_and_two_weeks']
print(outcome.describe())  # mean still shows an average increase in case rate
outcome.median()  # median shows only a fairly minimal reduction (~8%)

# Ten policies with the largest subsequent drop in case rate
Temp = IPC[['Date', 'Policy', 'pct_change_between_now_and_two_weeks']].nsmallest(10, 'pct_change_between_now_and_two_weeks')
Policy_list = list(Temp['Policy'].values)
print(Policy_list)
print("key dates seem to be the 21st and 22nd as one group, 03/04 as another, and 13/14th")
# Dates cluster; unsupervised clustering could extract key policy dates
Temp
```
### What were the main dates in curbing the spread?
```
# Collapse to one row per date (the policy merge duplicated dates), then list
# the 30 days with the largest 2-week drop in case rate
df_single = df.drop_duplicates(['Date'])
df_single[['Date', 'pct_change_between_now_and_two_weeks']].nsmallest(30, 'pct_change_between_now_and_two_weeks')
```
Key Dates of Decrease
1. 2020-05-19th to 27th
2. 2020-05-13th to 19th
3. 2020-05-03rd to 5th
4. 2020-06-15th to 19th
5. 2020-04-26th to 30th
```
# Corresponding policies for the key decrease windows identified above
df_sig1 = df[(df['Date'] >= '2020-05-13') & (df['Date'] <= '2020-05-27')]
print(df_sig1.shape)
df_sig2 = df[(df['Date'] >= '2020-05-03') & (df['Date'] <= '2020-05-05')]
print(df_sig2.shape)
df_sig3 = df[(df['Date'] >= '2020-06-15') & (df['Date'] <= '2020-06-19')]
print(df_sig3.shape)
# NOTE(review): the key-dates list above says 2020-04-26 to 2020-04-30, but this
# window filters June — confirm which month was intended.
df_sig4 = df[(df['Date'] >= '2020-06-26') & (df['Date'] <= '2020-06-30')]
print(df_sig4.shape)

# FIX: DataFrame.append is deprecated (removed in pandas 2.0); pd.concat is
# the equivalent replacement and gives identical results here.
df_sig = pd.concat([df_sig1, df_sig2, df_sig3, df_sig4], ignore_index=True)

# Top-10 policies (largest 2-week case-rate drops) per category
SDM_main = SDM.nsmallest(10, ['pct_change_between_now_and_two_weeks'])
TSC_main = TSC.nsmallest(10, ['pct_change_between_now_and_two_weeks'])
IPC_main = IPC.nsmallest(10, ['pct_change_between_now_and_two_weeks'])
df_sig = pd.concat([df_sig, SDM_main, TSC_main, IPC_main], ignore_index=False)

# Deduplicate by policy name and show how many of each category remain
df_sig.drop_duplicates(['Policy'], inplace=True)
df_sig.Category.value_counts()
```
### Is There a sig difference between these event types and the norm?
```
# Next step: ANOVA comparing the three policy-event categories to the norm
# (i.e., the remaining values)
import researchpy as rp
import seaborn as sns

# BUG FIX: the original called sns.boxplot(x=boston_df['DIS']), but boston_df
# is undefined here (a leftover from a tutorial example). Plot the actual
# outcome variable of this analysis instead.
sns.boxplot(x=df['pct_change_between_now_and_two_weeks'])
```
## Significant Policy List
```
# Export the significant policies (date, policy, category, and the 2-week
# case-rate change) for downstream use
df_sig.to_csv('significant_policies.csv')
```
| github_jupyter |
<h1>Logistic Regression</h1>
Notebook Goals
* Learn how to create a logistic regression model using scikit-learn
<h2> What are some advantages of logistic regression?</h2>
How do you create a logistic regression model using Scikit-Learn? The first thing you need to know is that despite the name logistic regression containing the word regression, logistic regression is a model used for classification. Classification models can be used for tasks like classifying flower species or image recognition. All of this of course depends on the availability and quality of your data. Logistic Regression has some advantages including
* Model training and predictions are relatively fast
* No tuning is usually needed for logistic regression unless you want to regularize your model.
* Finally, it can perform well with a small number of observations.
<h2> Import Libraries</h2>
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from IPython.display import Video
from matplotlib.ticker import FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
```
## Load the Dataset
The Iris dataset is one of datasets scikit-learn comes with that do not require the downloading of any file from some external website. The code below loads the iris dataset.
```
# Load the two-class iris data from a local CSV and preview the first rows
df = pd.read_csv('data/modifiedIris2Classes.csv')
df.head()
```
<h2> Remove Missing or Impute Values </h2>
If you want to build models with your data, null values are (almost) never allowed. It is important to always see how many samples have missing values and for which columns.
```
# Look at the shape of the dataframe
df.shape
# Count missing values per column
# NOTE(review): the original comment claimed a missing value in the "Length"
# feature column — verify this against the actual CSV contents.
df.isnull().sum()
```
<h2> Train Test Split </h2>
```
# Split into train/test sets: petal length as the single feature, species as
# the target; random_state=0 makes the split reproducible
X_train, X_test, y_train, y_test = train_test_split(df[['petal length (cm)']], df['target'], random_state=0)
```
<h2> Standardize the Data</h2>
Logistic Regression is affected by scale so you need to scale the features in the data before using Logistic Regression. You can transform the data onto unit scale (mean = 0 and variance = 1) for better performance. Scikit-Learn's `StandardScaler` helps standardize the dataset’s features. Note you fit on the training set and transform on the training and test set.
```
# Standardize the feature: learn the mean/std from the training set only,
# then apply that same transform to both splits (avoids test-set leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
```
<h2>Logistic Regression</h2>
<b>Step 1:</b> Import the model you want to use
In sklearn, all machine learning models are implemented as Python classes
```
# Step 1 (the import) was already done earlier in the notebook, so it is
# commented out here to avoid a redundant re-import.
#from sklearn.linear_model import LogisticRegression
```
<b>Step 2:</b> Make an instance of the Model
This is a place where we can tune the hyperparameters of a model. Typically this is where you tune C which is related to regularization
```
# Step 2: instantiate the model with default hyperparameters
# (the effect of tuning C is explored later in the notebook)
clf = LogisticRegression()
```
<b>Step 3:</b> Training the model on the data, storing the information learned from the data
Model is learning the relationship between x (features sepal width, sepal height etc) and y (labels-which species of iris)
```
# Step 3: fit — learn the relationship between petal length and species label
clf.fit(X_train, y_train)
```
<b>Step 4:</b> Predict the labels of new data (new flowers)
Logistic regression also allows you to see prediction probabilities as well as a prediction. This is not like other algorithms like decision trees for classification which only give you a prediction not a probability.
```
# One observation's petal length after standardization
X_test[0].reshape(1,-1)
# Step 4: predicted class label and its class probabilities for that observation
print('prediction', clf.predict(X_test[0].reshape(1,-1))[0])
print('probability', clf.predict_proba(X_test[0].reshape(1,-1)))
```
If this is unclear, let's visualize how logistic regression makes predictions by looking at our test data!
```
# Build a small frame of test-set petal lengths, true labels, and the model's
# predicted probability of the virginica class (column 1 of predict_proba)
example_df = pd.DataFrame()
example_df.loc[:, 'petal length (cm)'] = X_test.reshape(-1)
example_df.loc[:, 'target'] = y_test.values
example_df['logistic_preds'] = pd.DataFrame(clf.predict_proba(X_test))[1]
example_df.head()
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10,7));
virginicaFilter = example_df['target'] == 1
versicolorFilter = example_df['target'] == 0
# Scatter each class separately so each gets its own color and legend entry
ax.scatter(example_df.loc[virginicaFilter, 'petal length (cm)'].values,
           example_df.loc[virginicaFilter, 'logistic_preds'].values,
           color = 'g',
           s = 60,
           label = 'virginica')
ax.scatter(example_df.loc[versicolorFilter, 'petal length (cm)'].values,
           example_df.loc[versicolorFilter, 'logistic_preds'].values,
           color = 'b',
           s = 60,
           label = 'versicolor')
# Horizontal line at p = 0.5: the default classification threshold
ax.axhline(y = .5, c = 'y')
ax.axhspan(.5, 1, alpha=0.05, color='green')
ax.axhspan(0, .4999, alpha=0.05, color='blue')
ax.text(0.5, .6, 'Classified as viginica', fontsize = 16)
ax.text(0.5, .4, 'Classified as versicolor', fontsize = 16)
ax.set_ylim(0,1)
ax.legend(loc = 'lower right', markerscale = 1.0, fontsize = 12)
ax.tick_params(labelsize = 18)
ax.set_xlabel('petal length (cm)', fontsize = 24)
ax.set_ylabel('probability of virginica', fontsize = 24)
ax.set_title('Logistic Regression Predictions', fontsize = 24)
fig.tight_layout()
```
<h2> Measuring Model Performance</h2>
While there are other ways of measuring model performance (precision, recall, F1 Score, ROC Curve, etc), let's keep this simple and use accuracy as our metric.
To do this are going to see how the model performs on new data (test set)
Accuracy is defined as:
(fraction of correct predictions): correct predictions / total number of data points
```
# Accuracy on the held-out test set: correct predictions / total predictions
score = clf.score(X_test, y_test)
print(score)
```
Accuracy is one metric, but it doesn't say give much insight into what was wrong. Let's look at a confusion matrix
```
# Confusion matrix of actual vs predicted labels on the test set
cm = metrics.confusion_matrix(y_test, clf.predict(X_test))
plt.figure(figsize=(9,9))
# NOTE: this heatmap can render with the first/last rows clipped (a known
# matplotlib/seaborn interaction) — the next cell applies a ylim fix
sns.heatmap(cm, annot=True,
            fmt=".0f",
            linewidths=.5,
            square = True,
            cmap = 'Blues');
plt.ylabel('Actual label', fontsize = 17);
plt.xlabel('Predicted label', fontsize = 17);
plt.title('Accuracy Score: {}'.format(score), size = 17);
plt.tick_params(labelsize= 15)
```
<h2>What went wrong with the confusion matrix? It looks bad!</h2>
```
# Same confusion-matrix heatmap as above, now with the y-limits expanded so
# the top and bottom rows are fully visible
cm = metrics.confusion_matrix(y_test, clf.predict(X_test))
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True,
            fmt=".0f",
            linewidths=.5,
            square = True,
            cmap = 'Blues');
plt.ylabel('Actual label', fontsize = 17);
plt.xlabel('Predicted label', fontsize = 17);
plt.title('Accuracy Score: {}'.format(score), size = 17);
plt.tick_params(labelsize= 15)
# You can comment out the next 4 lines if you like
b, t = plt.ylim() # discover the values for bottom and top
b += 0.5 # Add 0.5 to the bottom
t -= 0.5 # Subtract 0.5 from the top
plt.ylim(b, t) # update the ylim(bottom, top) values
```
Let's look at the same information in a table in a clearer way.
```
# ignore this code
# Build string annotations (TN/FP/FN/TP) for each cell of the 2x2 matrix,
# using the convention: row 0 = actual negative, row 1 = actual positive
modified_cm = []
for index,value in enumerate(cm):
    if index == 0:
        modified_cm.append(['TN = ' + str(value[0]), 'FP = ' + str(value[1])])
    if index == 1:
        modified_cm.append(['FN = ' + str(value[0]), 'TP = ' + str(value[1])])
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=np.array(modified_cm),
            fmt="",
            annot_kws={"size": 20},
            linewidths=.5,
            square = True,
            cmap = 'Blues',
            xticklabels = ['versicolor', 'viginica'],
            yticklabels = ['versicolor', 'viginica'],
            );
plt.ylabel('Actual label', fontsize = 17);
plt.xlabel('Predicted label', fontsize = 17);
plt.title('Accuracy Score: {:.3f}'.format(score), size = 17);
plt.tick_params(labelsize= 15)
# You can comment out the next 4 lines if you like
b, t = plt.ylim() # discover the values for bottom and top
b += 0.5 # Add 0.5 to the bottom
t -= 0.5 # Subtract 0.5 from the top
plt.ylim(b, t) # update the ylim(bottom, top) values
```
Accuracy here is limited by using only a single feature. One way to get a better score would be to include more features in the features matrix.
## Common questions
<h3>What would happen if you change the prediction threshold from .5 for picking a positive class</h3>
By default, and with respect to the underlying assumptions of logistic regression, we predict a positive class when the probability of the class is greater than .5 and predict a negative class otherwise.
If you changed the prediction threshold from .5 to .2, you would predict more true positives but fewer true negatives. You can see this clearly using <a href="http://mfviz.com/binary-predictions/">this visual by Michael Freeman.</a>
<h3>What is the effect of changing the hyperparameter C?</h3>
Looking at the effect of increasing C if you have `l1` regularization. Smaller values specify stronger regularization. The code below shows this for the Wisconsin breast cancer dataset in an effort to mimic Michael Freeman's visualization
See the following file to look at the effect of changing C
```
# Animation-generation cell: one saved frame per value of C.
#Video('imagesanimation/effectOfCLogisticRegression.mp4')
df = pd.read_csv('data/wisconsinBreastCancer.csv')
# Same code was earlier in notebook, but here for clarity
# The rest of the lines in this section is just code I used to make the animation above
col_names = ['worst_concave_points']
X = df[col_names].values.reshape(-1,1)
y = df['diagnosis']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    random_state = 0)
# Standardize Data
scaler = StandardScaler()
# Fit on training set only.
scaler.fit(X_train)
# Apply transform to both the training set and the test set.
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Sweep C over 25 log-spaced values from 1e-3 to 1e3.
for index,c in enumerate(np.linspace(-3, 3, num = 25)):
    c_value = 10**c
    c_value_str = "{0:0.3f}".format(c_value)
    # Keep in mind that there is l2 penalty by default like we have for ridge regression
    # NOTE(review): the plot title below says "(L1)" but this fit uses the
    # default l2 penalty — confirm which is intended.
    logreg = LogisticRegression(C = c_value)
    logreg.fit(X_train, y_train)
    example_df = pd.DataFrame()
    example_df.loc[:, 'worst_concave_points'] = X_train.reshape(-1)
    example_df.loc[:, 'diagnosis'] = y_train.values
    # Column 1 of predict_proba is P(positive class)
    example_df['logistic_preds'] = pd.DataFrame(logreg.predict_proba(X_train))[1]
    example_df = example_df.sort_values(['logistic_preds'])
    plt.scatter(example_df['worst_concave_points'], example_df['diagnosis'])
    plt.plot(example_df['worst_concave_points'], example_df['logistic_preds'].values, color='red')
    plt.ylabel('malignant (1) or benign (0)', fontsize = 13)
    plt.xlabel('worst_concave_points', fontsize = 13)
    plt.title("Logistic Regression (L1) C = " + str(c_value_str), fontsize = 15)
    plt.savefig('imagesanimation/' + 'initial' + str(index).zfill(4) + '.png', dpi = 100)
    # Clear the axes so the next frame starts from a blank plot.
    plt.cla()
```
<h3>What is the effect of regularization on accuracy?</h3>
You can look at the video imagesanimation2/logisticRegularizationEffectAccuracy.mp4
```
# Same code was earlier in notebook, but here for clarity
col_names = ['worst_concave_points']
X = df[col_names].values.reshape(-1,1)
y = df['diagnosis']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    random_state = 0)
# Standardize Data
scaler = StandardScaler()
# Fit on training set only.
scaler.fit(X_train)
# Apply transform to both the training set and the test set.
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# First pass: fit one L1 logistic regression per C value, collecting the
# fitted models, coefficients, test accuracies, and per-fit prediction frames.
model_list, coef_list, c_value_list, accuracy_list, example_df_list = [], [], [], [], []
for index,c in enumerate(np.linspace(-3, 3, num = 25)):
    c_value = 10**c
    c_value_str = "{0:0.3f}".format(c_value)
    # Keep in mind that there is l2 penalty by default like we have for ridge regression
    logreg = LogisticRegression(C = c_value,
                                penalty = 'l1',
                                solver = 'saga',
                                max_iter = 100000)
    logreg.fit(X_train, y_train)
    # Subplot (top)
    example_df = pd.DataFrame()
    example_df.loc[:, 'worst_concave_points'] = X_train.reshape(-1)
    example_df.loc[:, 'diagnosis'] = y_train.values
    example_df['logistic_preds'] = pd.DataFrame(logreg.predict_proba(X_train))[1]
    example_df = example_df.sort_values(['logistic_preds'])
    example_df_list.append(example_df)
    model_list.append(logreg)
    accuracy_list.append(logreg.score(X_test, y_test))
    coef_list.append(logreg.coef_[0])
    c_value_list.append(c_value)
# One row per C value; columns hold the learned coefficients.
temp_df = pd.DataFrame(coef_list, index = c_value_list, columns = col_names)
temp_df.loc[:, 'model'] = model_list
# Giving the index a name (it is not a column)
temp_df.index.name = 'C (Inverse of Regularization Strength)'
# Second pass: draw one two-panel frame per C value for the animation.
for index, (c_value,example_df) in enumerate(zip(c_value_list, example_df_list)):
    c_value_str = "{0:0.3f}".format(c_value)
    fig, axes = plt.subplots(nrows = 2,
                             ncols = 1,
                             figsize = (12, 7));
    # Just formatting, not relevant for this class
    fig.subplots_adjust(wspace=0.1, hspace = .55)
    """
    fig.suptitle("Logistic Regression (L1) C = " + str(c_value_str),
                 fontsize = 15,
                 y=.94)
    """
    # Code is just to make it so you have different colors in the "title"
    # https://stackoverflow.com/questions/9350171/figure-title-with-several-colors-in-matplotlib
    fig.text(0.45,
             0.92,
             "Logistic Regression (L1) C = ",
             ha="center",
             va="bottom",
             size=20,
             color="black")
    fig.text(0.68,
             0.92,
             str(c_value_str),
             ha="center",
             va="bottom",
             size=20,
             color="purple",)
    # Top panel: training points plus the fitted probability curve.
    axes[0].scatter(example_df['worst_concave_points'], example_df['diagnosis'])
    axes[0].plot(example_df['worst_concave_points'], example_df['logistic_preds'].values, color='red')
    axes[0].set_ylabel('malignant (1) or benign (0)', fontsize = 13)
    axes[0].set_xlabel('worst_concave_points', fontsize = 11)
    # Bottom panel: coefficient path (purple) with the current C highlighted
    # by an orange band.
    axes[1].plot(temp_df.index,
                 temp_df.loc[:, 'worst_concave_points'],
                 label = 'worst_concave_points',
                 color = 'purple');
    axes[1].axvspan(c_value - c_value/10,c_value + c_value/10, color='orange', alpha=0.3, zorder = 1);
    coefLimits = temp_df.min().min(), temp_df.max().max()
    accuracyLimits = min(accuracy_list), max(accuracy_list)
    axes[1].tick_params('y', colors='purple');
    axes[1].set_ylim(coefLimits)
    axes[1].set_yticks(np.linspace(coefLimits[0],coefLimits[1], 11))
    axes[1].set_xscale('log')
    axes[1].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    axes[1].set_ylabel('weights', color='purple', fontsize = 13)
    axes[1].set_xlabel('C', fontsize = 11)
    # Twin y-axis (green): test accuracy over the same C range.
    axesTwin=axes[1].twinx()
    axesTwin.plot(temp_df.index, accuracy_list, color = 'g')
    axesTwin.tick_params('y', colors='g');
    axesTwin.set_ylim(accuracyLimits)
    axesTwin.set_yticks(np.linspace(accuracyLimits[0],accuracyLimits[1], 11))
    axesTwin.set_ylabel('Accuracy', color='g', fontsize = 13);
    axes[1].grid();
    ###
    fig.savefig('imagesanimation2/' + 'initial' + str(index).zfill(4) + '.png', dpi = 100)
# If you are really curious, I can share how this works.
#!ffmpeg -framerate 1 -i 'initial%04d.png' -c:v libx264 -r 30 -pix_fmt yuv420p initial_002.mp4
```
| github_jupyter |
## <font color=black>sutils</font>
**Change default region**:
Use the `sutils.reset_profiles()` method and a prompt will appear with options and ask you to select a default region and AMI.
Use the `price_increase` argument to set the maximum bid for each instance. This number will multiply the lowest spot-instance cost such that, for a spot-instance with a base price of \$0.3 with a `price_increase=1.15`, the maximum bid for that instance type would be set at $0.345.
Sometimes your price will still be too low, in that case you will have to use `sutils.reset_profiles()` again with a higher price increase.
```
from spot_connect import sutils
# Snapshot the t2.micro profile before resetting, so the change is visible below.
first_profile_b4_change = sutils.load_profiles()['t2.micro']
# Use the reset_profiles command to change the default region and AMI.
sutils.reset_profiles(price_increase=1.15)
print('\nFirst profile before change')
print(first_profile_b4_change)
print('\nFirst profile after change')
print(sutils.load_profiles()['t2.micro'])
```
**Show all profiles**:
```
# Display every launch profile (instance type -> configuration dict).
sutils.load_profiles()
```
## spotted
**SpotInstance class**
The spot instance class is the main feature in `spotted`. Use the class to specify instance configurations and bid on, launch, and connect to spot-instances.
<font color=red>**Warning: the following examples will create live instances on your account. These examples are cheap but they are not free, make sure to terminate them at the end of this notebook.**</font>
```
from spot_connect import spotted
# WARNING: launches a live (billed) AWS spot instance named 'monitor'.
instance = spotted.SpotInstance('monitor', profile='t2.micro')
```
You should now be able to see a live instance in your console as shown in the image below (in the photo the instance name is "instance1"). The `spot-connect` module automatically creates a new security group for each instance with that instance's name. This is so you can identify the instance name quickly (red square below).
<img src='media/live_instance.png'>
Try connecting a live prompt to that instance. Open a command prompt, use `spot_connect instance1` and you will be connected to the instance you just created. This spot-instance is cheap but it is not free. Don't forget to terminate it when you're done.
## instance_manager
**InstanceManager class**:
The instance manager class lets you handle spot instances and access other module functionality directly.
```
from spot_connect import instance_manager
# The InstanceManager tracks and controls spot instances from the notebook.
im = instance_manager.InstanceManager()
```
**Launch/re-connect to instance:**
Launch instances directly using the `InstanceManager`. If you use the `launch_instance` command with the name of an instance that is already online it will simply reconnect to that instance as well as add it to `InstanceManager.instances`.
```
# Launches a new instance, or reconnects if 'monitor' is already online.
im.launch_instance('monitor', profile='t2.micro')
```
**You can now find this instance in the instance manager's list of instances**:
```
# List the instances currently tracked by this manager.
im.show_instances()
```
**Run commands**:
We can use the `run` command to execute commands on any instance from the notebook. Use the `cmd=True` option to submit a command.
```
# Execute a shell command on the remote instance (cmd=True submits it as a command).
im.instances['monitor'].run('pwd', cmd=True)
```
**Terminate an instance**:
```
# Terminate the instance so it stops incurring charges.
im.terminate('monitor')
```
| github_jupyter |
```
import os
import pandas as pd
# Two snapshots of the Fiqh corpus (April vs. June 2018) to compare.
filepath_old = '/media/sf_VBox_Shared/Arabic/Fiqh/2018-04-24-Fiqh/Fiqh'
filepath_new = '/media/sf_VBox_Shared/Arabic/Fiqh/2018-06-08-Fiqh/'
def get_metadata(filepath):
    """Collect #META# key/value pairs from every file in *filepath*.

    Each corpus file stores metadata as lines of the form
    ``#META# <num>.<FieldName> :: <value>``. The field name is reduced to
    its final dotted component (spaces become underscores, right-to-left
    marks are stripped).

    Args:
        filepath (str): directory containing the corpus files.

    Returns:
        dict: {filename: {field_name: value}} for every file in the directory.
    """
    collected = {}
    for fname in os.listdir(filepath):
        entries = {}
        with open(os.path.join(filepath, fname)) as handle:
            for raw in handle:
                # TODO: metadata is sometimes inconsistent (missing # before
                # META, and fields not separated by :: but single :)
                if not raw.startswith('#META#'):
                    continue
                parts = raw.split(u'::')
                if len(parts) != 2:
                    continue
                key, val = parts
                val = val.strip()
                key = key.strip()
                # drop the leading '#META#' token, keep the field name
                _, key = key.split(u' ', 1)
                key = key.replace(u' ', u'_')
                # remove left to right mark
                key = key.replace(u"\u200F", u'')
                key = key.split(u'.')[-1]
                entries[key] = val
        collected[fname] = entries
    return collected
metadata_old = get_metadata(filepath_old)
metadata_new = get_metadata(filepath_new)
# One row per file, one column per metadata field.
metadata_old_df = pd.DataFrame.from_dict(metadata_old, orient='index')
metadata_new_df = pd.DataFrame.from_dict(metadata_new, orient='index')
metadata_old_df.index.name = 'filename'
metadata_new_df.index.name = 'filename'
# BUG FIX: the original wrote `metadata_df.to_csv(os.path.join(filepath, ...))`
# using two undefined names (NameError). Save each snapshot's metadata next to
# its own corpus directory instead.
metadata_old_df.to_csv(os.path.join(filepath_old, 'metadata-from-files.csv'))
metadata_new_df.to_csv(os.path.join(filepath_new, 'metadata-from-files.csv'))
metadata_old_df.info()
metadata_new_df.info()
metadata_old_df.head()
metadata_new_df.head()
# For every column shared by both snapshots: how many values do they have in common?
for col in metadata_old_df.columns:
    if col in metadata_new_df.columns:
        print(col, len(set(metadata_old_df[col]).intersection(set(metadata_new_df[col]))))
metadata_old_df.LibURI.value_counts().head()
metadata_new_df.LibURI.value_counts().head()
metadata_old_df.SortField.value_counts().head(), metadata_new_df.SortField.value_counts().head()
metadata_old_df.groupby(['SortField', 'BookTITLE']).size().sort_values(ascending=False).head()
# Map (SortField, BookTITLE) pairs to old filenames so new files can be matched.
title_to_filename = metadata_old_df[['SortField', 'BookTITLE']].reset_index()
print(len(title_to_filename))
print(title_to_filename[['SortField', 'BookTITLE']].drop_duplicates().shape)
merged = metadata_new_df.reset_index().merge(title_to_filename, left_on=['SortField', 'BookTITLE'], right_on=['SortField', 'BookTITLE'], how='left', suffixes=('_new', '_old'))
merged['filename_old']
merged.filename_old.value_counts().head()
# New files that could not be matched to any old file:
merged[merged.filename_old.isnull()]
merged = merged.dropna(subset=['filename_old'])
merged.to_csv('/media/sf_VBox_Shared/Arabic/Fiqh/merged_metadata.csv', index=False)
```
| github_jupyter |
# USGS Historical Earthquake Events
I'm querying the US Geological Service Common Catalog (ComCat) through their API [here](https://github.com/usgs/libcomcat). It works with bounding boxes, not particular countries, so ran three different downloads for bounding boxes around the Lower 48 states, Alaska, and Hawaii. I'll combine them into a single dataframe, and then filter them by county.
```
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
```
# API Queries
```
## Query generator
def make_getcsv_query(name, lon, lat, start, end, minmag, maxmag):
    """Build a libcomcat `getcsv` command for a bounding-box download.

    Args:
        name (str): output csv filename.
        lon (tuple): (min, max) longitude of the bounding box.
        lat (tuple): (min, max) latitude of the bounding box.
        start, end (str): ISO dates limiting the event time range.
        minmag, maxmag: magnitude range.

    Returns:
        str: the command string. The embedded '-x' flag makes getcsv print
        only the record count; remove it to actually download the results.
    """
    return (f'getcsv {name} -b {lon[0]} {lon[1]} {lat[0]} {lat[1]} '
            f'-s {start} -e {end} -f csv -x -m {minmag} {maxmag}')

# Shared query parameters for all three bounding boxes.
start = '1996-01-01'
end = '2019-01-01'
minmag = 4
maxmag = 9.9
# Lower 48 USA
lon = (-125.0011,-66.9326)
lat = (24.9493, 49.5904)
name = 'lower48.csv'
# Remove '-x' to download results, leave it there to get record count.
print(make_getcsv_query(name, lon, lat, start, end, minmag, maxmag))
# Alaska
lon = (-179.1505,-129.9795)
lat = (51.2097, 71.4410)
name = 'alaska.csv'
print(make_getcsv_query(name, lon, lat, start, end, minmag, maxmag))
# Hawaii
lon = (-160.2471,-154.8066)
lat = (18.9117, 22.2356)
name = 'hawaii.csv'
print(make_getcsv_query(name, lon, lat, start, end, minmag, maxmag))
```
## Combinining datasets
```
# Load the three bounding-box downloads and stack them into one dataframe.
lower48 = pd.read_csv('../data_input/5_USGS_quakes/lower48.csv')
alaska = pd.read_csv('../data_input/5_USGS_quakes/alaska.csv')
hawaii = pd.read_csv('../data_input/5_USGS_quakes/hawaii.csv')
print(lower48.shape, alaska.shape, hawaii.shape)
# ignore_index so the combined frame gets a fresh 0..n-1 index.
quakes = pd.concat([lower48, alaska, hawaii], ignore_index=True)
print(quakes.shape)
quakes.head(2)
```
# Map earthquakes to counties
That is, assign the correct county to each earthquake.
```
# Import a shape file with all the counties in the US.
# Note how it doesn't include all the same territories as the
# quake contour map.
counties = geopandas.read_file('../data_input/1_USCounties/')
# Turn state codes from strings to integers
for col in ['STATE_FIPS', 'CNTY_FIPS', 'FIPS']:
    counties[col] = counties[col].astype(int)
print(counties.shape)
counties.head()
# Create geoDF of all the points
quakes_coords = geopandas.GeoDataFrame(
    quakes, geometry=geopandas.points_from_xy(quakes.longitude, quakes.latitude))
# BUG FIX: these two lines originally appeared *before* quakes_coords was
# created, which raises a NameError; they belong after the GeoDataFrame build.
print(quakes_coords.shape)
quakes_coords.head(2)
# Mark those points with their respective counties, keeping the point coordinates
quakes_county = geopandas.sjoin(quakes_coords, counties, how='left', op='within').dropna()
print(quakes_county.shape)
quakes_county.head(2)
# Make FIPS codes back into integers
quakes_county['FIPS'] = quakes_county['FIPS'].astype(int)
# Extract year as its own column
quakes_county['year'] = [t.year for t in pd.to_datetime(quakes_county['time'])]
# Trim unnecessary columns
quakes_county = quakes_county[['FIPS','year','magnitude','geometry']]
print(quakes_county.shape)
quakes_county.head(2)
```
# Plots
```
# These are all the earthquakes in the bounding boxes for the lower 48, Alaska, and Hawaii
fig, ax = plt.subplots(figsize=(20,20))
counties.plot(ax=ax, color='white', edgecolor='black');
quakes_coords.plot(ax=ax, marker='o')
plt.show()
#And the same, but trimmed to only show the earthquakes that happened within the county boundaries.
fig, ax = plt.subplots(figsize=(20,20))
counties.plot(ax=ax, color='white', edgecolor='black');
quakes_county.plot(ax=ax, marker='o')
plt.show()
# And just California
fig, ax = plt.subplots(figsize=(10,10))
counties.plot(ax=ax, color='white', edgecolor='black');
quakes_county.plot(ax=ax, marker='o')
# Zoom the axes to roughly California's bounding box.
ax.set_xlim(-125,-114)
ax.set_ylim(32,42.1)
plt.show()
# Write to shape file
quakes_county.to_file("../data_output/5__USGS_quakes/quakes1.geojson",
                      driver='GeoJSON')
```
# Map counties to earthquakes
That is, count how many occurred in each county and add it to the pre-existing list of natural disasters by county (the NOAA dataset).
```
# Load the NOAA disasters-by-county table we'll append earthquake counts to.
noaa = pd.read_csv('../data_output/5__NOAA/noaa_2.csv')
print(noaa.shape)
noaa.head()
# Organize our earthquake events into a table indexed by FIPS and year.
df1 = quakes_county.drop(columns='geometry').groupby(['FIPS','year']).count().unstack(fill_value=0)
print(df1.shape)
df1.head()
# Merge with the official county list so that all the counties are
# represented in the index even if no earthquakes happened there in
# a given year. Unstack and reset index
official_county_list = sorted(counties['FIPS'].tolist())
df2 = df1.reindex(official_county_list, fill_value=0)
print(df2.shape)
df2.head()
# Unstack and reset the index, so that we turn the indexes into columns
# and fill in all the missing values.
df3 = df2.unstack().reset_index()
# Cleanup columns and their names
df3 = df3.rename(columns={0:'earthquakes'}).drop(columns='level_0')
print(df3.shape)
df3.head()
# Now the data are organized by year and FIPS in the same order as they were
# in the NOAA dataset, so that they can be integrated seamlessly.
# NOTE(review): this relies on positional alignment rather than a keyed
# merge — confirm both frames are sorted identically before trusting it.
noaa['earthquakes'] = df3['earthquakes']
print(noaa.shape)
noaa.head()
# Here are the total events for the last 5 years.
noaa.groupby(['year']).sum().tail()
# Export!
# NOTE(review): output dir '5_USGS__quakes' differs from the '5__USGS_quakes'
# used for the geojson export above — confirm which spelling is intended.
noaa.to_csv('../data_output/5_USGS__quakes/noaa_plus_quakes.csv', index=False)
```
| github_jupyter |
```
import copy
from collections import deque
from rdkit.Chem.Draw import IPythonConsole
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from rdkit import Chem
from rdkit.Chem import RWMol
from enviroment.ChemEnv import ChemEnv
from enviroment.Utils import mol_to_graph_full
from Rewards.rewards import SizeReward, SingleReward, FinalRewardModule
from models import BaseLine
from rdkit import Chem
from rdkit.Chem import rdBase
from rdkit.Chem import Draw
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import SVG
import networkx as nx
from networkx.readwrite import cytoscape_data
import cyjupyter
from cyjupyter import Cytoscape
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import rdScaffoldNetwork
from urllib import parse
# TensorBoard writer, reward module, and the chemistry RL environment.
writer = SummaryWriter(f'./logs_test/_logs/tb')
reward_module = FinalRewardModule(writer, [SizeReward()])
# ChemEnv(54, ...) — presumably 54 matches the model's input dimension
# below (BaseLine(54, ...)); confirm the constant's meaning in ChemEnv.
env = ChemEnv(54,reward_module,mol_to_graph_full,writer)
class MolTree():
    """Tree of molecules produced by iterative expansion.

    Each node holds a molecule (`root_mol`) and an integer `idx` that
    identifies the node within the search tree.
    """
    def __init__(self, root_mol: 'Chem.RWMol', idx: int):
        """init fn

        Args:
            root_mol (Chem.RWMol): the molecule to use as the node
            idx (int): index assigned to this node
        """
        self.root_mol = root_mol
        self.idx = idx
        self.children = []

    def addChild(self, mol: 'Chem.RWMol', idx: int = 0):
        """add a child molecule

        BUG FIX: the original called MolTree(mol) without the required
        `idx` argument, which raised a TypeError. An optional `idx`
        (default 0) keeps the one-argument call working; pass an explicit
        index to keep node numbering unique across the tree.

        Args:
            mol (Chem.RWMol): molecule for the new child node
            idx (int): index to assign to the child
        """
        child = MolTree(mol, idx)
        self.children.append(child)

    def addChildren(self, mols: 'list[Chem.RWMol]', i: int):
        """adds children molecules, numbering them i, i+1, ...

        Args:
            mols (list[Chem.RWMol]): mols to add
            i (int): starting idx for node numbering

        Returns:
            int: total number of children of this node after adding
        """
        for j, mol in enumerate(mols):
            self.children.append(MolTree(mol, i + j))
        return len(self.children)
class Handler():
    """Handles the trained model: top-k inference and tree search over molecules."""
    def __init__(self, path: str, model: nn.Module, env: ChemEnv):
        """create handler instance

        Args:
            path (str): path to saved model
            model (nn.Module): model for params to be loaded into
            env (ChemEnv): Chem environment
        """
        self.model = model
        # NOTE(review): loading the saved weights is disabled, so the model
        # runs with its constructor-initialized parameters — confirm intent.
        # self.model.load(path)
        self.env = env

    def __get_n_best(self,mol: Chem.RWMol, n: int):
        """gets the top n most likely actions given mol

        Args:
            mol (Chem.RWMol): mol to set as state
            n (int): number of actions to return

        Returns:
            torch.Tensor: tensor containing the n highest-scoring action ids
        """
        # mol = Chem.RWMol(Chem.MolFromSmiles('CC-N'))
        self.env.assignMol(mol)
        obs = self.env.getObs()
        predictions = self.model(*obs)
        _, actions = torch.topk(predictions,n)
        return actions

    def __run_actions(self, mol: Chem.RWMol, actions: 'list[int]'):
        """calculates new mols updated by actions

        Args:
            mol (Chem.RWMol): starting structure
            actions (list[int]): actions to take

        Returns:
            list[Chem.RWMol]: newly generated molecules; only actions whose
            step reward was positive contribute a molecule
        """
        new_mols = []
        for action in torch.squeeze(actions):
            action_int = int(action)
            # Deep-copy so every action is applied to the same starting mol.
            mol_copy = copy.deepcopy(mol)
            self.env.assignMol(mol_copy)
            _,_,_,reward_dict = self.env.step(action_int)
            if reward_dict['step_reward'] > 0:
                new_mols.append(self.env.StateSpace)
        return new_mols

    def iterate(self, mol, n):
        """Expands the passed molecule by one step

        Args:
            mol (Chem.RWMol): base molecule to iterate on
            n (int): How many different one step iterations to make

        Returns:
            list[Chem.RWMol]: The mutated molecules
        """
        actions = self.__get_n_best(mol, n)
        mols = self.__run_actions(mol,actions)
        return mols

    def treeSearch(self,initial_mol: Chem.RWMol, width: int, size: int):
        """search chemical space around the initial molecule (breadth-first)

        Args:
            initial_mol (Chem.RWMol): starting molecule (tree root)
            width (int): how many branches to make at each step
            size (int): total number of node expansions to perform

        Returns:
            MolTree: root of the explored molecule tree
        """
        molTree = MolTree(initial_mol,0)
        # appendleft + pop gives FIFO order, i.e. breadth-first expansion
        queue = deque([molTree])
        i = 1  # next free node index
        while queue:
            if size <= 0:
                break
            mol_node = queue.pop()
            children = self.iterate(mol_node.root_mol, width)
            j = mol_node.addChildren(children,i)
            i = i+j
            for child in mol_node.children:
                print(Chem.MolToSmiles(child.root_mol))
                queue.appendleft(child)
            size -= 1
        return molTree
def inference():
    # TODO: placeholder — not implemented yet.
    pass
def smi2svg(mol):
    """Render an RDKit molecule as an SVG string (690x400 canvas).

    Args:
        mol: RDKit molecule to draw (mutated in place by kekulization
        and 2D coordinate generation).

    Returns:
        str: the SVG markup, with the 'svg:' namespace prefix stripped.
    """
    try:
        Chem.rdmolops.Kekulize(mol)
    except Exception:
        # FIX: was a bare `except:` (also traps KeyboardInterrupt/SystemExit).
        # Kekulization can fail for some aromatic systems; draw the molecule
        # in its aromatic form instead.
        pass
    drawer = rdMolDraw2D.MolDraw2DSVG(690, 400)
    AllChem.Compute2DCoords(mol)
    drawer.DrawMolecule(mol)
    drawer.FinishDrawing()
    svg = drawer.GetDrawingText().replace("svg:", "")
    return svg
# BUG FIX: `mol` was originally used on the line above its own definition,
# which raises a NameError; define the molecule first.
mol = Chem.MolFromSmiles("CC-N")
(smi2svg(mol))
drawer = rdMolDraw2D.MolDraw2DSVG(690, 400)
drawer.DrawMolecule(mol)
drawer.FinishDrawing()
SVG(drawer.GetDrawingText())
def GraphFromMolTree(mol: 'MolTree'):
    """Function for transforming a Molecule Tree to a nx Graph for use with cytoscape

    Nodes are keyed by each tree node's `idx` and carry the molecule's
    SMILES string in the 'mol' attribute; edges connect parent to child.

    Args:
        mol (MolTree): Tree to be converted

    Returns:
        nx.graph.Graph: converted graph
    """
    g = nx.graph.Graph()
    queue = deque([mol])
    while queue:
        # FIX: removed leftover debug prints ('s' / 'X') from the loop.
        mol_tree = queue.pop()
        # The root is only added here, while the graph is still empty;
        # every other node is added below as some parent's child.
        if g.number_of_nodes() == 0:
            g.add_node(mol_tree.idx, mol=Chem.MolToSmiles(mol_tree.root_mol))  # , img=smi2svg(mol), hac=mol.GetNumAtoms()
        for child in mol_tree.children:
            child_mol = child.root_mol
            g.add_node(child.idx, mol=Chem.MolToSmiles(child_mol))  # , img=smi2svg(mol)
            g.add_edge(mol_tree.idx, child.idx)
            queue.appendleft(child)
    return g
# Build the model, wrap it in a Handler, and explore around ethylamine (CC-N).
model = BaseLine(54,300,17)
handler = Handler('af',model,env)
mol = Chem.RWMol(Chem.MolFromSmiles('CC-N'))
# width=3 branches per node, 12 expansion steps in total
tree = handler.treeSearch(mol,3,12)
graph = GraphFromMolTree(tree)
cy_g = cytoscape_data(graph)
# Cytoscape style sheet: circle nodes labelled with their SMILES ('mol' data
# attribute), simple curved edges.
stobj=[
    {'style': [{'css': {
        'shape' : 'circle',
        'width':100,
        'height':100,
        # 'border-color': 'rgb(0,0,0)',
        # 'border-opacity': .5,
        # 'border-width': 0.0,
        # 'color': '#4579e8',
        'label': 'data(mol)',
        'font-size' : 40,
        'layout': {'name' : 'grid'}
        # 'background-fit':'contain'
        },
        'selector': 'node'},
        {'css': {
            'width': 10.0,
            "target-arrow-shape": "triangle",
            "line-color": "#9dbaea",
            "target-arrow-color": "#9dbaea",
            "curve-style": "bezier"
        },
        'selector': 'edge'}
    ],
    }]
# Render the search tree; node labels are SMILES strings.
cyobj=Cytoscape(data=cy_g, visual_style=stobj[0]['style'])
cyobj
Chem.MolFromSmiles('N1C2SC12')
cy_g
# Hand-built cytoscape JSON for a small example forest.
# BUG FIX: the node dicts originally used the *builtin* `id` function as the
# dict key ({ id: 0 }); cytoscape expects the string key 'id'.
cyg = {'data' : [],
       'directed': True,
       'multigraph': False,
       'elements': {
           'nodes': [
               { 'data': { 'id': 0 } },
               { 'data': { 'id': 1 } },
               { 'data': { 'id': 2 } },
               { 'data': { 'id': 3 } },
               { 'data': { 'id': 4 } },
               { 'data': { 'id': 5 } },
               { 'data': { 'id': 6 } },
               { 'data': { 'id': 7 } },
               { 'data': { 'id': 8 } },
               { 'data': { 'id': 9 } },
               { 'data': { 'id': 10 } },
               { 'data': { 'id': 11 } },
               { 'data': { 'id': 12 } },
               { 'data': { 'id': 13 } },
               { 'data': { 'id': 14 } },
               { 'data': { 'id': 15 } },
               { 'data': { 'id': 16 } }
           ],
           'edges': [
               { 'data': { 'source': 0, 'target': 1 } },
               { 'data': { 'source': 1, 'target': 2 } },
               { 'data': { 'source': 1, 'target': 3 } },
               { 'data': { 'source': 4, 'target': 5 } },
               { 'data': { 'source': 4, 'target': 6 } },
               { 'data': { 'source': 6, 'target': 7 } },
               { 'data': { 'source': 6, 'target': 8 } },
               { 'data': { 'source': 8, 'target': 9 } },
               { 'data': { 'source': 8, 'target': 10 } },
               { 'data': { 'source': 11, 'target': 12 } },
               { 'data': { 'source': 12, 'target': 13 } },
               { 'data': { 'source': 13, 'target': 14 } },
               { 'data': { 'source': 13, 'target': 15 } }
           ]
       }}
# Render the hand-built example graph with the same style sheet.
cyobj=Cytoscape(data=cyg, visual_style=stobj[0]['style'])#, layout_name='circle')
cyobj
```
| github_jupyter |
[Reinforcement Learning TF-Agents](https://colab.research.google.com/drive/1FXh1BQgMI5xE1yIV1CQ25TyRVcxvqlbH?usp=sharing)
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
# nice plot figures
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import matplotlib.animation as animation
# smooth animations
mpl.rc('animation', html='jshtml')
import PIL
import os
import gym
import tf_agents
from tf_agents.environments import suite_atari, suite_gym
from tf_agents.environments.atari_preprocessing import AtariPreprocessing
from tf_agents.environments.atari_wrappers import FrameStack4
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.q_network import QNetwork
from tf_agents.agents.dqn.dqn_agent import DqnAgent
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.metrics import tf_metrics
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.utils.common import function
# functions to plot animations on a per frame basis
def update_scene(num, frames, patch):
    """FuncAnimation callback: blit frame *num* onto the image patch."""
    patch.set_data(frames[num])
    return (patch,)
def plot_animation(frames, repeat=False, interval=40):
    """Return a matplotlib FuncAnimation that cycles through *frames*.

    The figure is closed before returning so only the animation (rendered
    as jshtml via the rc setting above) shows in the notebook.
    """
    figure = plt.figure()
    image_patch = plt.imshow(frames[0])
    plt.axis('off')
    movie = animation.FuncAnimation(
        figure,
        update_scene,
        fargs=(frames, image_patch),
        frames=len(frames),
        repeat=repeat,
        interval=interval,
    )
    plt.close()
    return movie
# save an agent's demo (after training)
# Global frame buffer plus the observer that appends a rendered RGB frame
# from the (global) tf_env on every trajectory step.
saved_frames = []
def save_frames(trajectory):
    global saved_frames
    saved_frames.append(tf_env.pyenv.envs[0].render(mode="rgb_array"))
def play_game_demo(tf_env, the_agent, obs_list, n_steps):
    """Run the agent's (greedy) policy for n_steps, recording screen frames.

    The save_frames observer appends each rendered frame to the global
    saved_frames list; extra observers can be passed via obs_list.
    """
    demo_driver = DynamicStepDriver(
        tf_env,
        the_agent.policy,
        observers=[save_frames] + obs_list,
        num_steps=n_steps,
    )
    demo_driver.run()
def save_animated_gif(frames): # saved_frames is passed in
    """Write the first 150 frames to images/rl/breakout.gif (30 ms/frame, looping)."""
    image_path = os.path.join("images", "rl", "breakout.gif")
    frame_images = [PIL.Image.fromarray(frame) for frame in frames[:150]]
    frame_images[0].save(image_path, format='GIF',
                         append_images=frame_images[1:],
                         save_all=True,
                         duration=30,
                         loop=0)
# %%html
# <img src="images/rl/breakout.gif" /> runs the gif in a jupyter/colab environment
# 8
# install this dependency for LunarLander
# pip install gym[box2d]
# Exercise: solve LunarLander-v2 with a policy-gradient agent.
test_env = gym.make("LunarLander-v2")
test_env # seems like there is a time limit
test_env.reset() # 8 values from each observation
```
From the source code, we can see that these each 8D observation (x, y, h, v, a, w, l, r) correspond to:
+ x,y: the coordinates of the spaceship. It starts at a random location near (0, 1.4) and must land near the target at (0, 0).
+ h,v: the horizontal and vertical speed of the spaceship. It starts with a small random speed.
+ a,w: the spaceship's angle and angular velocity.
+ l,r: whether the left or right leg touches the ground (1.0) or not (0.0).
```
print(test_env.observation_space) # Box(8,): 8-dimensional continuous observation
print(test_env.action_space, test_env.action_space.n) # 4 possible values
```
Looking at the https://gym.openai.com/envs/LunarLander-v2/, these actions are:
+ do nothing
+ fire left orientation engine
+ fire main engine
+ fire right orientation engine
```
# PG REINFORCE algorithm
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
n_inputs = test_env.observation_space.shape[0]
n_outputs = test_env.action_space.n
# Small MLP policy network: softmax distribution over the 4 discrete actions.
model = keras.models.Sequential([
    keras.layers.Dense(32, activation="relu", input_shape=[n_inputs]),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(n_outputs, activation="softmax")
])
# play multiple episodes, exploring the environment randomly and recording
# gradients and rewards
def play_one_step(env, obs, model, loss_fn):
    """Sample one action from the policy, step the env, and record gradients.

    Args:
        env: the gym environment.
        obs: current observation (1-D array of 8 floats for LunarLander).
        model: the softmax policy network.
        loss_fn: loss comparing the sampled action with the policy's probas.

    Returns:
        tuple: (new_obs, reward, done, grads) for the step taken.
    """
    with tf.GradientTape() as tape:
        probas = model(obs[np.newaxis])
        # log-probabilities; epsilon avoids log(0)
        logits = tf.math.log(probas + keras.backend.epsilon())
        action = tf.random.categorical(logits, num_samples=1)
        loss = tf.reduce_mean(loss_fn(action, probas))
    grads = tape.gradient(loss, model.trainable_variables)
    # BUG FIX: the original never stepped the environment, so obs/reward/done
    # were undefined at the return statement (NameError on first call).
    obs, reward, done, info = env.step(action[0, 0].numpy())
    return obs, reward, done, grads
def play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):
    """Play several episodes, returning per-step rewards and gradients.

    Returns:
        tuple: (all_rewards, all_grads), each a list with one sub-list
        per episode, holding that episode's per-step values.
    """
    all_grads, all_rewards = [], []
    for _ in range(n_episodes):
        episode_grads, episode_rewards = [], []
        obs = env.reset()
        for _ in range(n_max_steps):
            obs, reward, done, grads = play_one_step(env, obs, model, loss_fn)
            episode_rewards.append(reward)
            episode_grads.append(grads)
            if done:
                break
        all_grads.append(episode_grads)
        all_rewards.append(episode_rewards)
    return all_rewards, all_grads
# compute sum of future discounted rewards and standardize to differentiate
# good and bad decisions
def discount_rewards(discounted, discount_rate):
    """Return the discounted cumulative future reward at every step.

    Works backwards from the end: each step's value becomes
    reward + discount_rate * value_of_next_step.

    Args:
        discounted: sequence of per-step rewards (converted to an array).
        discount_rate: gamma in [0, 1].

    Returns:
        np.ndarray: discounted returns, same length as the input.
    """
    discounted = np.array(discounted)
    for step in range(len(discounted) - 2, -1, -1):
        discounted[step] += discounted[step + 1] * discount_rate
    # BUG FIX: the original returned the undefined name `discount` (NameError).
    return discounted
def discount_and_normalize_rewards(all_rewards, discount_rate):
    """Discount each episode's rewards, then standardize across all episodes.

    Returns one array per episode, each shifted/scaled by the global mean
    and standard deviation of all discounted returns.
    """
    discounted = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
    flat = np.concatenate(discounted)
    mean, std = flat.mean(), flat.std()
    return [(episode - mean) / std for episode in discounted]
# Training hyperparameters for REINFORCE.
n_iterations = 200
n_episodes_per_update = 16
n_max_steps = 1000
discount_rate = 0.99
env = gym.make("LunarLander-v2")
optimizer = keras.optimizers.Nadam(lr=0.005)
loss_fn = keras.losses.sparse_categorical_crossentropy
# the model outputs probabilities for each class so we use categorical_crossentropy
# and the action is just 1 value (not a 1 hot vector so we use sparse_categorical_crossentropy)
env.seed(42)
# this will take very long, so I'm not calling it for the sake of my computer's mental health
def train(n_iterations, env, n_episodes_per_update, n_max_steps, model, loss_fn, discount_rate):
    """REINFORCE training loop: play, discount, average gradients, apply.

    Uses the global `optimizer` defined above.

    Returns:
        list: mean undiscounted reward per iteration (for learning curves).
    """
    # BUG FIX: mean_rewards was appended to below but never defined,
    # which raised a NameError on the first iteration.
    mean_rewards = []
    for iteration in range(n_iterations):
        all_rewards, all_grads = play_multiple_episodes(env, n_episodes_per_update, n_max_steps, model, loss_fn)
        # for plotting the learning curve with undiscounted rewards
        # alternatively, just use a reduce_sum from tf and extract the numpy scalar value using .numpy()
        mean_reward = sum(map(sum, all_rewards)) / n_episodes_per_update
        print("\rIteration: {}/{}, mean reward: {:.1f} ".format(  # \r means that it will not return a new line, it will just replace the current line
            iteration + 1, n_iterations, mean_reward), end="")
        mean_rewards.append(mean_reward)
        all_discounted_rewards = discount_and_normalize_rewards(all_rewards, discount_rate)
        all_mean_grads = []
        for var_index in range(len(model.trainable_variables)):
            # Weight each step's gradient by its normalized discounted return,
            # then average over all steps of all episodes.
            mean_grads = tf.reduce_mean(
                [final_reward * all_grads[episode_index][step][var_index]
                 for episode_index, final_rewards in enumerate(all_discounted_rewards)
                 for step, final_reward in enumerate(final_rewards)], axis=0)
            all_mean_grads.append(mean_grads)
        optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))
    return mean_rewards
# 9 TF-Agents SpaceInvaders-v4
environment_name = "SpaceInvaders-v4"
# Standard DQN Atari preprocessing: frame skipping/grayscale + 4-frame stack.
env = suite_atari.load(
    environment_name,
    max_episode_steps=27000,
    gym_env_wrappers=[AtariPreprocessing, FrameStack4]
)
env
```
+ environment ✓
+ driver ✓
+ observer(s) ✓
+ replay buffer ✓
+ dataset ✓
+ agent with collect policy ✓
+ DQN ✓
+ training loop ✓
```
# environment officially built
tf_env = TFPyEnvironment(env)
# Q-network hyperparameters: conv stack -> 512-unit dense -> dropout.
dropout_params = [0.4]
fc_params = [512]
conv_params = [(32, (8, 8), 5),
               (64, (4, 4), 4),
               (64, (3, 3), 1),]
# Scale uint8 pixel observations to [0, 1] floats.
preprocessing_layer = keras.layers.Lambda(lambda obs: tf.cast(obs, np.float32) / 255.) # uint8 beforehand
dqn = QNetwork(
    tf_env.observation_spec(),
    tf_env.action_spec(),
    preprocessing_layers=preprocessing_layer,
    conv_layer_params=conv_params,
    fc_layer_params=fc_params,
    dropout_layer_params=dropout_params,
    activation_fn=keras.activations.relu,
)
# dqn agent with collect policy officially built
update_period = 4
train_step = tf.Variable(0)
# NOTE(review): despite its name, this is the epsilon *decay schedule*
# (1.0 -> 0.01 over 250k/4 steps) fed to the agent's epsilon_greedy arg below.
epsilon_greedy_policy = keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1.0,
    decay_steps=250000 // update_period,
    end_learning_rate=0.01,
)
dqn_agent = DqnAgent(
    tf_env.time_step_spec(),
    tf_env.action_spec(),
    q_network=dqn,
    optimizer=keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, centered=False),
    train_step_counter=train_step,
    gamma=0.99,
    # reduction="none" keeps the per-sample TD errors for the agent to reduce
    td_errors_loss_fn=keras.losses.Huber(reduction="none"),
    target_update_period=2000,
    epsilon_greedy=lambda: epsilon_greedy_policy(train_step)
)
dqn_agent.initialize()
# uniform replay buffer officially built
replay_buffer = TFUniformReplayBuffer(
    dqn_agent.collect_data_spec,
    batch_size = tf_env.batch_size,
    max_length=100000,
)
replay_buffer_observer = replay_buffer.add_batch
# observers + metrics officially built
training_metrics = [
    tf_metrics.AverageEpisodeLengthMetric(),
    tf_metrics.AverageReturnMetric(),
    tf_metrics.NumberOfEpisodes(),
    tf_metrics.EnvironmentSteps(),
]
class ShowProgress:
    """Driver observer that prints a progress counter every 100 steps."""

    def __init__(self, total):
        # Steps counted so far and the expected total (display only).
        self.counter = 0
        self.total = total

    def __call__(self, trajectory):
        # Boundary trajectories mark episode transitions; don't count them.
        if trajectory.is_boundary():
            return
        self.counter += 1
        if self.counter % 100 == 0:
            print("\r{}/{}".format(self.counter, self.total), end="")
# driver officially created
driver = DynamicStepDriver(
tf_env,
dqn_agent.collect_policy,
observers = training_metrics + [ShowProgress(2000)],
num_steps=update_period
)
random_policy = RandomTFPolicy(
tf_env.time_step_spec(),
tf_env.action_spec()
)
initial_driver = DynamicStepDriver(
tf_env,
random_policy,
observers = [replay_buffer.add_batch] + [ShowProgress(2000)],
num_steps=update_period
)
final_time_step, final_policy_state = initial_driver.run()
# dataset officially built
dataset = replay_buffer.as_dataset(
sample_batch_size=64,
num_steps=2,
num_parallel_calls=3,
).prefetch(3)
driver.run = function(driver.run)
dqn_agent.train = function(dqn_agent.train)
# I would train it, but my computer suffers from dementia
# training loop officially built
def training(n_iterations, agent, driver, tf_env, dataset):
    """Run the DQN collect/train loop.

    Alternates between collecting experience with the driver's collect
    policy and training the agent on one batch sampled from the dataset.

    Args:
        n_iterations: number of collect/train iterations to run.
        agent: TF-Agents agent exposing `collect_policy` and `train`.
        driver: driver whose `run(time_step, policy_state)` collects steps.
        tf_env: the (batched) TF environment, used only for its batch size.
        dataset: iterable yielding (trajectories, buffer_info) batches.

    Returns:
        The loss returned by the last `agent.train` call (None if
        `n_iterations` is 0).
    """
    time_step = None
    train_loss = None
    # BUG FIX: the initial state must be bound to `policy_state`; the
    # original assigned it to `initial_policy_state`, so the first
    # `driver.run` call raised NameError.
    policy_state = agent.collect_policy.get_initial_state(tf_env.batch_size)
    iterator = iter(dataset)
    for iteration in range(n_iterations):
        # Collect a few environment steps, then train on one sampled batch.
        time_step, policy_state = driver.run(time_step, policy_state)
        trajectories, buffer_info = next(iterator)
        train_loss = agent.train(trajectories)
    return train_loss
```
| github_jupyter |
# A quick introduction to Blackjax
BlackJAX is an MCMC sampling library based on [JAX](https://github.com/google/jax). BlackJAX provides well-tested and ready to use sampling algorithms. It is also explicitly designed to be modular: it is easy for advanced users to mix-and-match different metrics, integrators, trajectory integrations, etc.
In this notebook we provide a simple example based on basic Hamiltonian Monte Carlo and the NUTS algorithm to showcase the architecture and interfaces in the library.
```
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import blackjax
%load_ext watermark
%watermark -d -m -v -p jax,jaxlib,blackjax
jax.devices()
```
## The problem
We'll generate observations from a normal distribution of known `loc` and `scale` to see if we can recover the parameters in sampling. Let's take a decent-size dataset with 1,000 points:
```
loc, scale = 10, 20
observed = np.random.normal(loc, scale, size=1_000)
def logprob_fn(loc, scale, observed=observed):
    """Log-likelihood of i.i.d. univariate Normal observations.

    Sums the pointwise Normal log-densities of `observed` under the
    given `loc` and `scale`.
    """
    return jnp.sum(stats.norm.logpdf(observed, loc, scale))
logprob = lambda x: logprob_fn(**x)
```
## HMC
### Sampler parameters
```
inv_mass_matrix = np.array([0.5, 0.5])
num_integration_steps = 60
step_size = 1e-3
hmc = blackjax.hmc(logprob, step_size, inv_mass_matrix, num_integration_steps)
```
### Set the initial state
The initial state of the HMC algorithm requires not only an initial position, but also the potential energy and gradient of the potential energy at this position. BlackJAX provides a `new_state` function to initialize the state from an initial position.
```
initial_position = {"loc": 1.0, "scale": 2.0}
initial_state = hmc.init(initial_position)
initial_state
```
### Build the kernel and inference loop
The HMC kernel is easy to obtain:
```
%%time
hmc_kernel = jax.jit(hmc.step)
```
BlackJAX does not provide a default inference loop, but it easy to implement with JAX's `lax.scan`:
```
def inference_loop(rng_key, kernel, initial_state, num_samples):
    """Draw `num_samples` states by repeatedly applying an MCMC kernel.

    Returns the stacked chain of states produced via `jax.lax.scan`.
    """

    @jax.jit
    def step_fn(carry, key):
        # Advance the chain one transition; emit the new state as output.
        next_state, _ = kernel(key, carry)
        return next_state, next_state

    sample_keys = jax.random.split(rng_key, num_samples)
    _, chain = jax.lax.scan(step_fn, initial_state, sample_keys)
    return chain
```
### Inference
```
%%time
rng_key = jax.random.PRNGKey(0)
states = inference_loop(rng_key, hmc_kernel, initial_state, 10_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax.set_ylabel("scale")
```
## NUTS
NUTS is a *dynamic* algorithm: the number of integration steps is determined at runtime. We still need to specify a step size and a mass matrix:
```
inv_mass_matrix = np.array([0.5, 0.5])
step_size = 1e-3
nuts = blackjax.nuts(logprob, step_size, inv_mass_matrix)
initial_position = {"loc": 1.0, "scale": 2.0}
initial_state = nuts.init(initial_position)
initial_state
%%time
rng_key = jax.random.PRNGKey(0)
states = inference_loop(rng_key, nuts.step, initial_state, 4_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax1.set_ylabel("scale")
```
### Use Stan's window adaptation
Specifying the step size and inverse mass matrix is cumbersome. We can use Stan's window adaptation to get reasonable values for them so we have, in practice, no parameter to specify.
The adaptation algorithm takes a function that returns a transition kernel given a step size and an inverse mass matrix:
```
%%time
warmup = blackjax.window_adaptation(
blackjax.nuts,
logprob,
1000,
)
state, kernel, _ = warmup.run(
rng_key,
initial_position,
)
```
We can use the obtained parameters to define a new kernel. Note that we do not have to use the same kernel that was used for the adaptation:
```
%%time
states = inference_loop(rng_key, nuts.step, initial_state, 1_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax1.set_ylabel("scale")
```
## Sample multiple chains
We can easily sample multiple chains using JAX's `vmap` construct. See the [documentation](https://jax.readthedocs.io/en/latest/jax.html?highlight=vmap#jax.vmap) to understand how the mapping works.
```
num_chains = 4
initial_positions = {"loc": np.ones(num_chains), "scale": 2.0 * np.ones(num_chains)}
initial_states = jax.vmap(nuts.init, in_axes=(0))(initial_positions)
def inference_loop_multiple_chains(
    rng_key, kernel, initial_state, num_samples, num_chains
):
    """Sample several chains in parallel by vmapping the kernel.

    `initial_state` must be a pytree whose leading axis is `num_chains`;
    the returned states have shape `(num_samples, num_chains, ...)`.
    """

    def step_fn(chain_states, key):
        # One fresh key per chain; all chains advance in lockstep via vmap.
        per_chain_keys = jax.random.split(key, num_chains)
        next_states, _ = jax.vmap(kernel)(per_chain_keys, chain_states)
        return next_states, next_states

    scan_keys = jax.random.split(rng_key, num_samples)
    _, chains = jax.lax.scan(step_fn, initial_state, scan_keys)
    return chains
%%time
states = inference_loop_multiple_chains(
rng_key, nuts.step, initial_states, 2_000, num_chains
)
states.position["loc"].block_until_ready()
```
This scales very well to hundreds of chains on CPU, tens of thousands on GPU:
```
%%time
num_chains = 40
initial_positions = {"loc": np.ones(num_chains), "scale": 2.0 * np.ones(num_chains)}
initial_states = jax.vmap(nuts.init, in_axes=(0,))(initial_positions)
states = inference_loop_multiple_chains(
rng_key, nuts.step, initial_states, 1_000, num_chains
)
states.position["loc"].block_until_ready()
```
In this example the result is a dictionary and each entry has shape `(num_samples, num_chains)`. Here's how to access the samples of the second chain for `loc`:
| github_jupyter |
# Curso de introducción a Python: procesamiento y análisis de datos
La mejor forma de aprender a programar es haciendo algo útil, por lo que esta introducción a Python se centrará alrededor de una tarea común: el _análisis de datos_. En este taller práctico se hará un breve repaso a los conceptos básicos de programación con el fin de automatizar procesos cubriendo la sintaxis de Python (junto a NumPy y matplotlib). Para ello, seguiremos los materiales de [Software-Carpentry](https://software-carpentry.org/) ([ver apuntes](http://swcarpentry.github.io/python-novice-inflammation/)).
__Nuestra herramienta fundamental de trabajo es el Notebook de Jupyter__, podrás conocer más acerca de él en las siguientes clases. Durante el curso te familiarizarás con él y aprenderás a manejarlo (este documento ha sido generado a partir de un notebook).
En esta sesión inicial, veremos los pasos a seguir para que __instales Python y puedas empezar a aprender a tu ritmo.__
## Pasos a seguir:
### 1. Descarga de Python.
La instalación de Python, el Notebook y todos los paquetes que utilizaremos, por separado puede ser una tarea ardua y agotadora, pero no te preocupes: ¡alguien ha hecho ya el trabajo duro!
__[Anaconda](https://continuum.io/anaconda/) es una distribución de Python que recopila muchas de las bibliotecas necesarias en el ámbito de la computación científica__ y desde luego, todas las que necesitaremos en este curso. Además __incluye herramientas para programar en Python, como [Jupyter Notebook](http://jupyter.org/) o [Spyder](https://github.com/spyder-ide/spyder#spyder---the-scientific-python-development-environment)__ (un IDE al estilo de MATLAB).
Lo único que necesitas hacer es:
* Ir a la [página de descargas de Anaconda](http://continuum.io/downloads).
* Seleccionar tu sistema operativo (Windows, OSX, Linux).
* Descargar Anaconda (utilizaremos Python 3.X).
<img src="../images/download_anaconda.png" alt="Download" />
### 2. Instalación de Python.
Consulta las __[instrucciones de instalación](http://docs.continuum.io/anaconda/install.html)__ de Anaconda para tu sistema operativo. En el caso de Windows y OS X, te encontrarás con los típicos instaladores gráficos a los que ya estás acostumbrado. Si te encuentras en Linux, deberás ejectuar el script de instalación desde la consola de comandos, así que recuerda comprobar que tienes bash instalado y asignar permisos de ejecución al script.
__Importante: asegurate de instalar Anaconda sólo para tu usuario y sin permisos de administrador, no son necesarios y te pueden dar problemas más tarde si no tienes derechos de acceso siempre.__
¡Muy bien! Ya tienes instalado ¿pero dónde?
* En __Windows__, desde `Inicio > Anaconda` verás una serie de herramientas de las que ahora dispones ¡no tengas miedo de abrirlas!
* En __OS X__, podrás acceder a un launcher con las mismas herramientas desde la carpeta `anaconda` dentro de tu carpeta personal.
* En __Linux__, debido al gran número de combinaciones de distribuciones más escritorios no tendrás esos accesos directos gráficos (lo que no quiere decir que no puedas crearlos tú a posteriori) pero, como comprobarás, no hacen ninguna falta y no forman parte de nuestra forma de trabajar en el curso.
Ahora, vamos a __actualizar Anaconda__ para asegurarnos de que tenemos nuestra distribución de Python con todos sus paquetes al día para lo que abrimos una __ventana de comandos__ (símbolo de sistema en Windows o terminal en OS X) y ejecutamos los siguientes comandos de actualización (confirmando en el caso de tener que instalar paquetes nuevos):
```
conda update conda
conda update --all
```
Si experimentas cualquier clase de problema durante este proceso, [desinstala tu distribución de Anaconda](http://docs.continuum.io/anaconda/install.html) y vuelve a instalarla donde puedas asegurarte de tener una conexión a internet estable.
Por último, comprueba que Jupyter Notebook funciona correctamente. Escribe esto en una ventana de comandos y espera a que se abra el navegador.
```
jupyter notebook
```
Deberías ver [esta interfaz](https://try.jupyter.org/) (aunque sin archivos).
Ya tenemos nuestra distribución de Python con todos los paquetes que necesitemos (y prácticamente todos los que en un futuro podamos necesitar).
En caso de que tengas cualquier caso de duda durante el proceso, pregúntanos y recuerda que __¡los buscadores de internet son tus mejores amigos!__
_¡A trabajar!_
---
Clase en vídeo, parte del [Curso de Python para científicos e ingenieros](http://cacheme.org/curso-online-python-cientifico-ingenieros/) grabado en la Escuela Politécnica Superior de la Universidad de Alicante.
```
from IPython.display import YouTubeVideo
YouTubeVideo("x4xegDME5C0", width=560, height=315, list="PLGBbVX_WvN7as_DnOGcpkSsUyXB1G_wqb")
```
---
###### Este material es un resumen actualizado del magnífico [Curso de AeroPython](https://github.com/AeroPython/Curso_AeroPython) realizado por: Juan Luis Cano, Mabel Delgado y Álex Sáez
<br/>
##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">(*) El Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>.
| github_jupyter |
<a href="https://colab.research.google.com/github/roupenminassian/Freelance/blob/main/NLP%20(Logistic_Regression)%20for%20Twitter%20Event%20Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import nltk
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt
import random
nltk.download('stopwords')
import re
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
from google.colab import drive
drive.mount('/content/drive')
df = pd.read_csv('/content/drive/MyDrive/nlp-getting-started/train.csv')
df2 = pd.read_csv('/content/drive/MyDrive/nlp-getting-started/test.csv')
df2.head()
tweet_test = df2['text']
tweet = df['text']
label = df['target']
tweet2 = []
for i in tweet:
i = re.sub('#', '', i)
i = re.sub(r'https?:\/\/.*[\r\n]*', '', i)
tweet2.append(i)
label_final = []
for i in label:
label_final.append(i)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(tweet2)
stopwords_english = stopwords.words('english')
tweets_clean = []
for word in tweet2:
if (word not in stopwords_english and # remove stopwords
word not in string.punctuation): # remove punctuation
tweets_clean.append(word)
stemmer = PorterStemmer()
# Create an empty list to store the stems
tweets_final = []
for word in tweets_clean:
stem_word = stemmer.stem(word) # stemming word
tweets_final.append(stem_word)
def process_tweet(tweet):
    """Clean, tokenize and stem a raw tweet into a list of word stems.

    Strips stock tickers, leading "RT", URLs and '#' characters, then
    tokenizes, drops English stopwords and punctuation, and Porter-stems
    the remaining tokens.
    """
    porter = PorterStemmer()
    english_stops = stopwords.words('english')
    # Remove tickers, retweet marks, URLs and hash signs (keep the tag text).
    text = re.sub(r'\$\w*', '', tweet)
    text = re.sub(r'^RT[\s]+', '', text)
    text = re.sub(r'https?:\/\/.*[\r\n]*', '', text)
    text = re.sub(r'#', '', text)
    tweet_tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
    tokens = tweet_tokenizer.tokenize(text)
    # Keep only meaningful tokens, reduced to their stems.
    return [
        porter.stem(token)
        for token in tokens
        if token not in english_stops and token not in string.punctuation
    ]
def build_freqs(tweets, ys):
    """Count (word, label) pair frequencies across a corpus of tweets.

    Args:
        tweets: iterable of raw tweet strings.
        ys: array/list of 0/1 labels aligned with `tweets`.

    Returns:
        dict mapping (processed_word, label) -> occurrence count.
    """
    labels = np.squeeze(ys).tolist()
    freqs = {}
    for label, tweet in zip(labels, tweets):
        for token in process_tweet(tweet):
            key = (token, label)
            # dict.get replaces the explicit membership test of the original.
            freqs[key] = freqs.get(key, 0) + 1
    return freqs
freqs = build_freqs(tweets_final, label_final)
keys = ['earthquak', 'forest', 'flood', 'evacu', 'disast', 'accident', 'wildfir', '...', 'peopl']
data = []
for word in keys:
pos = 0
neg = 0
if (word, 1) in freqs:
pos = freqs[(word, 1)]
if (word, 0) in freqs:
neg = freqs[(word, 0)]
data.append([word, pos, neg])
data
fig, ax = plt.subplots(figsize = (8, 8))
x = np.log([x[1] + 1 for x in data])
y = np.log([x[2] + 1 for x in data])
ax.scatter(x, y)
plt.xlabel("Log Positive count")
plt.ylabel("Log Negative count")
for i in range(0, len(data)):
ax.annotate(data[i][0], (x[i], y[i]), fontsize=12)
ax.plot([0, 9], [0, 9], color = 'red')
plt.show()
print(freqs)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
train_x = vectorizer.fit_transform(tweets_final).toarray()
test_x = vectorizer.transform(tweet_test).toarray()
lr = 0.004
n_epochs = 12000
weights_vector = np.random.random(train_x.shape[1])
def cost_function(pred, truth):
    """Binary cross-entropy loss for one predicted probability `pred`."""
    return -(truth * np.log(pred) + (1 - truth) * np.log(1 - pred))
def sigmoid(x):
    """Elementwise logistic function mapping reals into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1 + neg_exp)
def lin_mul(x, weights):
    """Linear model output: the dot product of features and weights."""
    return np.asarray(x).dot(weights)
for ep in range(n_epochs):
avg_cost = 0
for i in range(len(train_x)):
data_point = train_x[i]
label = label_final[i]
pred_prob = sigmoid(lin_mul(weights_vector,data_point))
avg_cost += cost_function(pred_prob,label)
weights_vector = weights_vector - lr*(pred_prob - label)* data_point
if ep%100==0:
print ("Epoch {} has finished. Error is {}".format(ep+1,avg_cost/4.0))
preds = np.where(sigmoid(lin_mul(test_x,weights_vector))>.75,1,0)
print(preds)
id = pd.DataFrame(df2['id'])
preds = pd.DataFrame(preds, columns=['target'])
final = pd.concat([id, preds], axis=1)
final.to_csv('final_submission.csv',index=False)
```
| github_jupyter |
# NLP model creation and training
```
from fastai.gen_doc.nbdoc import *
from fastai.text import *
```
The main thing here is [`RNNLearner`](/text.learner.html#RNNLearner). There are also some utility functions to help create and update text models.
## Quickly get a learner
```
show_doc(language_model_learner)
```
`bptt` (for backpropagation through time) is the number of words we will store the gradient for, and use for the optimization step.
The model used is an [AWD-LSTM](https://arxiv.org/abs/1708.02182) that is built with embeddings of size `emb_sz`, a hidden size of `nh`, and `nl` layers (the `vocab_size` is inferred from the [`data`](/text.data.html#text.data)). All the dropouts are put to values that we found worked pretty well and you can control their strength by adjusting `drop_mult`. If <code>qrnn</code> is True, the model uses [QRNN cells](https://arxiv.org/abs/1611.01576) instead of LSTMs. The flag `tied_weights` control if we should use the same weights for the encoder and the decoder, the flag `bias` controls if the last linear layer (the decoder) has bias or not.
You can specify `pretrained_model` if you want to use the weights of a pretrained model. If you have your own set of weights and the corresponding dictionary, you can pass them in `pretrained_fnames`. This should be a list of the name of the weight file and the name of the corresponding dictionary. The dictionary is needed because the function will internally convert the embeddings of the pretrained models to match the dictionary of the [`data`](/text.data.html#text.data) passed (a word may have a different id for the pretrained model). Those two files should be in the models directory of `data.path`.
```
path = untar_data(URLs.IMDB_SAMPLE)
data = TextLMDataBunch.from_csv(path, 'texts.csv')
learn = language_model_learner(data, pretrained_model=URLs.WT103, drop_mult=0.5)
show_doc(text_classifier_learner)
```
`bptt` (for backpropagation through time) is the number of words we will store the gradient for, and use for the optimization step.
The model used is the encoder of an [AWD-LSTM](https://arxiv.org/abs/1708.02182) that is built with embeddings of size `emb_sz`, a hidden size of `nh`, and `nl` layers (the `vocab_size` is inferred from the [`data`](/text.data.html#text.data)). All the dropouts are put to values that we found worked pretty well and you can control their strength by adjusting `drop_mult`. If <code>qrnn</code> is True, the model uses [QRNN cells](https://arxiv.org/abs/1611.01576) instead of LSTMs.
The input texts are fed into that model by bunch of `bptt` and only the last `max_len` activations are considerated. This gives us the backbone of our model. The head then consists of:
- a layer that concatenates the final outputs of the RNN with the maximum and average of all the intermediate outputs (on the sequence length dimension),
- blocks of ([`nn.BatchNorm1d`](https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm1d), [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), [`nn.Linear`](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU)) layers.
The blocks are defined by the `lin_ftrs` and `drops` arguments. Specifically, the first block will have a number of inputs inferred from the backbone arch and the last one will have a number of outputs equal to data.c (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_ftrs` (of course a block has a number of inputs equal to the number of outputs of the previous block). The dropouts all have a the same value ps if you pass a float, or the corresponding values if you pass a list. Default is to have an intermediate hidden size of 50 (which makes two blocks model_activation -> 50 -> n_classes) with a dropout of 0.1.
```
jekyll_note("Using QRNN require to have cuda installed (same version as pytorhc is using).")
path = untar_data(URLs.IMDB_SAMPLE)
data = TextClasDataBunch.from_csv(path, 'texts.csv')
learn = text_classifier_learner(data, drop_mult=0.5)
show_doc(RNNLearner)
```
Handles the whole creation from <code>data</code> and a `model` with a text data using a certain `bptt`. The `split_func` is used to properly split the model in different groups for gradual unfreezing and differential learning rates. Gradient clipping of `clip` is optionally applied. `adjust`, `alpha` and `beta` are all passed to create an instance of [`RNNTrainer`](/callbacks.rnn.html#RNNTrainer). Can be used for a language model or an RNN classifier. It also handles the conversion of weights from a pretrained model as well as saving or loading the encoder.
```
show_doc(RNNLearner.get_preds)
```
If `ordered=True`, returns the predictions in the order of the dataset, otherwise they will be ordered by the sampler (from the longest text to the shortest). The other arguments are passed [`Learner.get_preds`](/basic_train.html#Learner.get_preds).
### Loading and saving
```
show_doc(RNNLearner.load_encoder)
show_doc(RNNLearner.save_encoder)
show_doc(RNNLearner.load_pretrained)
```
Opens the weights in the `wgts_fname` of `self.model_dir` and the dictionary in `itos_fname` then adapts the pretrained weights to the vocabulary of the <code>data</code>. The two files should be in the models directory of the `learner.path`.
## Utility functions
```
show_doc(lm_split)
show_doc(rnn_classifier_split)
show_doc(convert_weights)
```
Uses the dictionary `stoi_wgts` (mapping of word to id) of the weights to map them to a new dictionary `itos_new` (mapping id to word).
## Get predictions
```
show_doc(LanguageLearner, title_level=3)
show_doc(LanguageLearner.predict)
```
If `no_unk=True` the unknown token is never picked. Words are taken randomly with the distribution of probabilities returned by the model. If `min_p` is not `None`, that value is the minimum probability to be considered in the pool of words. Lowering `temperature` will make the texts less randomized.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(RNNLearner.get_preds)
show_doc(LanguageLearner.show_results)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
## KMEANS CLUSTERING
Project follows the CRISP-DM Process while analyzing their data.
PROBLEM :
PREDICT THE CLUSTER OF CUSTOMERS BASED ON ANNUAL INCOME AND SPENDING TO BRING VALUABLE INSIGHTS FOR THE MALL.
## Questions :
## 1.Which cluster has both spending good score and income?
## 2.On which cluster should company concentrate to increase sales?
## 3.which cluster has maximum probability to get into high spending score?
# IMPORTING THE DATASET AND LIBRARIES
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
# Importing the dataset
dataset = pd.read_csv(r'C:\Users\neeraj\OneDrive\Desktop\data challenge\Mall_Customers.csv')
X=dataset.iloc[:,:].values
```
## Explore the Dataset
```
dataset.head()
dataset.info()
dataset.isnull().sum()
```
## Check for categories in object variable(categorical variable)
```
dataset['Genre'].value_counts()
```
## Replace categories by one hot encoding
Here this method works fine as there are only 2 categories in object variable
```
labelencoder_X=LabelEncoder()
X[:,1]= labelencoder_X.fit_transform(X[:,1])
Data=pd.DataFrame(X)
```
## Now check for categorical values if any
```
Data.head()
```
## loading data (test and train)
```
x= dataset.iloc[:, [3,4]].values
Final=pd.DataFrame(x)
Final.head()
```
## USING ELBOW METHOD FOR OPTIMAL CLUSTERS
Here I have used a function which takes in the 'i' value and returns the graph between 'i' and WCSS (Sum of squares of distances within clusters)
```
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(x)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
```
## Training the model
```
kmeans = KMeans(n_clusters = 5, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(x)
print(y_kmeans)
```
## LETS VISUALISE OUR RESULT
```
# Visualising the clusters
plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(x[y_kmeans == 3, 0], x[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(x[y_kmeans == 4, 0], x[y_kmeans == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
```
## Our insights are :
## 1. Cluster 1 has an average income of 60k and has spending score of 50 on an average .
## 2. Cluster 2 has an average of 90k and has spending score of 18 on an average .
## 3.Cluster 3 has an average income of 30k and has spending score of 20 on an average .
## 4.Cluster 4 has an average income of 30k and has spending score of 80 on an average.
## 5.Cluster 5 has an average income of 85k and has spending score of 80 on an average.
## Deeper intution(Answering our questions)
## Customers Belonging to cluster 4 and 5 are having good spending score so are valuable for our mall .(Can give special cards , discounts etc)
## Customers Belonging to cluster 2 should have a high spending score henceforth the company should concentrate on these type of customers for increasing profits.
## Customers Belonging to cluster 1 should be given discounts to increase the spending score
| github_jupyter |
# HyperParameter Tuning
### `keras.wrappers.scikit_learn`
Example adapted from: [https://github.com/fchollet/keras/blob/master/examples/mnist_sklearn_wrapper.py]()
## Problem:
Builds simple CNN models on MNIST and uses sklearn's GridSearchCV to find best model
```
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.wrappers.scikit_learn import KerasClassifier
from keras import backend as K
from sklearn.model_selection import GridSearchCV
```
# Data Preparation
```
nb_classes = 10
# input image dimensions
img_rows, img_cols = 28, 28
# load training data and do basic data normalization
(X_train, y_train), (X_test, y_test) = mnist.load_data()
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
```
## Build Model
```
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    """Build and compile a small CNN: two conv layers plus a dense head.

    Args:
        dense_layer_sizes: list with one width per hidden dense layer.
        filters: number of convolutional filters in each conv layer.
        kernel_size: side length of the (square) convolution kernel.
        pool_size: side length of the max-pooling window.

    Returns:
        A compiled Keras `Sequential` classifier.
    """
    net = Sequential()
    # Convolutional feature extractor.
    net.add(Conv2D(filters, (kernel_size, kernel_size),
                   padding='valid', input_shape=input_shape))
    net.add(Activation('relu'))
    net.add(Conv2D(filters, (kernel_size, kernel_size)))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    net.add(Dropout(0.25))
    net.add(Flatten())
    # Fully connected head: one relu+dropout block per requested width.
    for width in dense_layer_sizes:
        net.add(Dense(width))
        net.add(Activation('relu'))
        net.add(Dropout(0.5))
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='adadelta',
                metrics=['accuracy'])
    return net
dense_size_candidates = [[32], [64], [32, 32], [64, 64]]
my_classifier = KerasClassifier(make_model, batch_size=32)
```
## GridSearch HyperParameters
```
validator = GridSearchCV(my_classifier,
param_grid={'dense_layer_sizes': dense_size_candidates,
# nb_epoch is avail for tuning even when not
# an argument to model building function
'epochs': [3, 6],
'filters': [8],
'kernel_size': [3],
'pool_size': [2]},
scoring='neg_log_loss',
n_jobs=1)
validator.fit(X_train, y_train)
print('The parameters of the best model are: ')
print(validator.best_params_)
# validator.best_estimator_ returns sklearn-wrapped version of best model.
# validator.best_estimator_.model returns the (unwrapped) keras model
best_model = validator.best_estimator_.model
metric_names = best_model.metrics_names
metric_values = best_model.evaluate(X_test, y_test)
for metric, value in zip(metric_names, metric_values):
print(metric, ': ', value)
```
---
# There's more:
The `GridSearchCV` model in scikit-learn performs a complete search, considering **all** the possible combinations of Hyper-parameters we want to optimise.
If we want to apply for an optmised and bounded search in the hyper-parameter space, I strongly suggest to take a look at:
* `Keras + hyperopt == hyperas`: [http://maxpumperla.github.io/hyperas/](http://maxpumperla.github.io/hyperas/)
| github_jupyter |
### Keras implementation of Brain CNN
```
import tensorflow as tf
import numpy as np
import sklearn.metrics
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint
from keras.callbacks import Callback
from keras.callbacks import EarlyStopping
# Input geometry and class count -- assumed by the data files loaded below.
IMG_WIDTH = 64
IMG_HEIGHT = 64
SNAPS = 8
SLICE = 5
CLASSES = 2
CHANNELS = 1
# Filter counts for the convolutional layers of the model built further down.
# NOTE(review): conv5_filter and conv6_filter are never used in this notebook.
conv1_filter = 4
conv2_filter = 8
conv3_filter = 16
conv4_filter = 32
conv5_filter = 4
conv6_filter = 4
# Load per-subject image volumes and one-hot labels for the chosen experiment;
# each _NNNNN prefix is a subject id.
experiment = '1snap3d'
path = '/work/aaung/datasets/' + experiment + '/'
_04847_img = np.load(path + '4847_' + experiment + '-image.npy')
_04799_img = np.load(path + '4799_' + experiment + '-image.npy')
_04820_img = np.load(path + '4820_' + experiment + '-image.npy')
_05675_img = np.load(path + '5675_' + experiment + '-image.npy')
_05680_img = np.load(path + '5680_' + experiment + '-image.npy')
_05710_img = np.load(path + '5710_' + experiment + '-image.npy')
_04847_lbl = np.load(path + '4847_' + experiment + '-label-onehot.npy')
_04799_lbl = np.load(path + '4799_' + experiment + '-label-onehot.npy')
_04820_lbl = np.load(path + '4820_' + experiment + '-label-onehot.npy')
_05675_lbl = np.load(path + '5675_' + experiment + '-label-onehot.npy')
_05680_lbl = np.load(path + '5680_' + experiment + '-label-onehot.npy')
_05710_lbl = np.load(path + '5710_' + experiment + '-label-onehot.npy')
```
### Leave one example out validation
```
# Per-subject split: the first n examples of every subject go to validation,
# the remainder to training.
# NOTE(review): `n` is only defined in the commented-out line below -- this
# cell relies on `n` being set earlier in the notebook; confirm before running.
# n = 24
train_img = np.vstack((_04847_img[n:,], _04799_img[n:,], _04820_img[n:,], _05675_img[n:,], _05680_img[n:,], _05710_img[n:,]))
train_lbl = np.vstack((_04847_lbl[n:,], _04799_lbl[n:,], _04820_lbl[n:,], _05675_lbl[n:,], _05680_lbl[n:,], _05710_lbl[n:,]))
val_img = np.vstack((_04847_img[:n,], _04799_img[:n,], _04820_img[:n,], _05675_img[:n,], _05680_img[:n,], _05710_img[:n,]))
val_lbl = np.vstack((_04847_lbl[:n,], _04799_lbl[:n,], _04820_lbl[:n,], _05675_lbl[:n,], _05680_lbl[:n,], _05710_lbl[:n,]))
# Cross Subject
# (alternative split: train on five subjects, validate on the held-out one)
# train_img = np.vstack((_05710_img, _04847_img, _04799_img, _05675_img, _05680_img))
# train_lbl = np.vstack((_05710_lbl, _04847_lbl, _04799_lbl, _05675_lbl, _05680_lbl))
# val_img = _04820_img
# val_lbl = _04820_lbl
# Spatial size of one slice (dims 2 and 3 of the 4D image arrays).
STRIP_HEIGHT = train_img.shape[2]
STRIP_WIDTH = train_img.shape[3]
# Python 2 print statements: this notebook targets Python 2.
print train_img.shape
print val_img.shape
print train_lbl.shape
print val_lbl.shape
np.random.seed(0)
# shuffle = np.random.permutation(database.shape[0])
# test = database[shuffle[0:100],:]
# val = database[shuffle[100:200],:]
# train = database[shuffle[200:],:]
# Keep a single slice (axis 1) and add a trailing channel axis for Keras.
xtrain = train_img[:,SLICE,:,:]
xtrain = np.reshape(xtrain, (xtrain.shape[0], xtrain.shape[1], xtrain.shape[2], 1))
ytrain = train_lbl
xval = val_img[:,SLICE,:,:]
xval = np.reshape(xval, (xval.shape[0], xval.shape[1], xval.shape[2], 1))
yval = val_lbl
print xtrain.shape
print ytrain.shape
print xval.shape
print yval.shape
%matplotlib inline
import matplotlib.pyplot as plt
# Sanity-check: display the first few training slices.
for i in range(5):
    plt.show(plt.imshow(xtrain[i,:,:,0]))
class EvaluateValidation(Callback):
    """Keras callback that scores the model on held-out data after every epoch."""

    def __init__(self, test_data):
        # (inputs, targets) pair evaluated at the end of each epoch
        self.test_data = test_data

    def on_epoch_end(self, epoch, logs={}):
        inputs, targets = self.test_data
        loss, acc = self.model.evaluate(inputs, targets, verbose=0)
        print('\nValidation loss: {}, acc: {}\n'.format(loss, acc))
### Model ###
# Stacked Conv->BN->ReLU blocks with growing kernel sizes, followed by a
# fully-connected classifier head. All layers share the same L2 penalty.
model = Sequential()
# NOTE(review): `mde` is never used below.
mde = 0
k_init = 'he_normal'
ridge = 0.0005
model.add(Convolution2D(conv1_filter, kernel_size=(3, 3), strides=(1, 1),
                        padding='same', data_format="channels_last", activation=None, use_bias=True,
                        kernel_regularizer=l2(ridge),
                        kernel_initializer=k_init, bias_initializer='zeros', input_shape=(STRIP_HEIGHT, STRIP_WIDTH, CHANNELS)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(conv2_filter, kernel_size=(5, 5), strides=(1, 1),
                        padding='same', data_format="channels_last", activation=None, use_bias=True,
                        kernel_regularizer=l2(ridge),
                        kernel_initializer=k_init, bias_initializer='zeros'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides=(2, 2), padding='valid'))
model.add(Convolution2D(conv3_filter, kernel_size=(7, 7), strides=(1, 1),
                        padding='same', data_format="channels_last", activation=None, use_bias=True,
                        kernel_regularizer=l2(ridge),
                        kernel_initializer=k_init, bias_initializer='zeros'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(conv4_filter, kernel_size=(9, 9), strides=(1, 1),
                        padding='same', data_format="channels_last", activation=None, use_bias=True,
                        kernel_regularizer=l2(ridge),
                        kernel_initializer=k_init, bias_initializer='zeros'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2, 2), strides=2, padding='valid'))
# Classifier head: 1024 -> 512 -> 256 -> CLASSES with softmax output.
model.add(Flatten())
model.add(Dense(1024, kernel_initializer=k_init,kernel_regularizer=l2(ridge)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(512, kernel_initializer=k_init,kernel_regularizer=l2(ridge)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(256, kernel_initializer=k_init,kernel_regularizer=l2(ridge)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(CLASSES, kernel_initializer=k_init,kernel_regularizer=l2(ridge)))
# model.add(BatchNormalization())
model.add(Activation('softmax'))
# Optimiser hyper-parameters.
Lr = 1e-4
dcy = 1e-5
m = 0.5
batch_sz = 25
epoch = 25
# sgd = SGD(lr=Lr, momentum=m, decay=dcy, nesterov=True)
adam = Adam(lr=Lr, decay=dcy)
model.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
model.summary()
print('learning rate: %f, decay: %f' %(Lr, dcy))
from keras.backend import get_session
# Explicitly initialise TF variables in the shared session before training.
get_session().run(tf.global_variables_initializer())
a = model.fit(xtrain, ytrain, batch_size = batch_sz, epochs= epoch, verbose = 1,
              callbacks=[EvaluateValidation((xval, yval))])
# Final evaluation + per-class metrics on the validation set (Python 2 prints).
loss_and_metrics = model.evaluate(xval, yval, batch_size=batch_sz)
print "Loss and accuracy: ", loss_and_metrics
y_pred_one_hot = model.predict(xval, batch_size=128)
# argmax converts one-hot / probability vectors to class indices
y_pred = np.argmax(y_pred_one_hot, axis=1)
y_true = np.argmax(yval, axis=1)
print "Test loss: {}".format(loss_and_metrics[0])
print "Test Acc: {} %".format(loss_and_metrics[1] * 100)
print "Precision", sklearn.metrics.precision_score(y_true, y_pred)
print "Recall", sklearn.metrics.recall_score(y_true, y_pred)
print "f1_score", sklearn.metrics.f1_score(y_true, y_pred)
print "confusion_matrix"
print sklearn.metrics.confusion_matrix(y_true, y_pred, labels=[0, 1])
# NOTE(review): ROC computed from hard labels (y_pred), not probabilities --
# this under-reports AUC; consider y_pred_one_hot[:, 1] instead.
fpr, tpr, tresholds = sklearn.metrics.roc_curve(y_true, y_pred)
ras = sklearn.metrics.auc(fpr, tpr)
roauc_score = sklearn.metrics.roc_auc_score(y_true, y_pred)
print ras
print roauc_score
print "{}".format(loss_and_metrics[0])
print "{}".format(loss_and_metrics[1] * 100)
print sklearn.metrics.precision_score(y_true, y_pred)
print sklearn.metrics.recall_score(y_true, y_pred)
print sklearn.metrics.f1_score(y_true, y_pred)
print sklearn.metrics.confusion_matrix(y_true, y_pred, labels=[0, 1])
# Compute ROC curve and ROC area for each class
n_classes = 2
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = sklearn.metrics.roc_curve(yval[:, i], y_pred_one_hot[:, i])
    roc_auc[i] = sklearn.metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = sklearn.metrics.roc_curve(yval.ravel(), y_pred_one_hot.ravel())
roc_auc["micro"] = sklearn.metrics.auc(fpr["micro"], tpr["micro"])
roauc_score = sklearn.metrics.roc_auc_score(y_true, y_pred)
print roauc_score
# Plot the micro-averaged ROC against the chance diagonal.
plt.figure()
lw = 2
plt.plot(fpr["micro"], tpr["micro"], color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc["micro"])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# plt.title('ROC classifying Faces of class {} (Pure class {} vs Mixed class {})'.format(CLASS, CLASS, CLASS))
plt.legend(loc="lower right")
plt.show()
print tpr["micro"]
print fpr["micro"]
```
| github_jupyter |
```
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import sys
sys.path.insert(1, '../oracle-polimi-contest-2019')
from evaluation_script import read_file
from collections import Counter
import similaripy as sim
from scipy import *
from scipy.sparse import *
import string
import unidecode
def create_name_letters_matrix(df):
    """Build a (record_id, name, a..z) matrix of per-letter counts.

    Names are lower-cased, stripped of punctuation and transliterated to
    plain ASCII before counting, so e.g. 'Jose' and 'Jose.' yield identical
    letter counts.

    Args:
        df: DataFrame with at least 'record_id' and 'name' columns.

    Returns:
        DataFrame with columns ['record_id', 'name', 'a', ..., 'z'].
    """
    letters = list(string.ascii_lowercase)
    # explicit copy: avoid chained-assignment warnings from mutating a slice
    df = df[['record_id', 'name']].copy()
    df['name'] = df['name'].astype(str).str.lower()
    df['name'] = df['name'].str.translate(str.maketrans('', '', string.punctuation))
    # transliterate accented characters to their ASCII equivalents
    df['name'] = [unidecode.unidecode(s) for s in df['name']]
    # create return matrix
    name_letters_matrix = pd.DataFrame(columns=['record_id', 'name'] + letters)
    name_letters_matrix.record_id = df['record_id'].copy()
    name_letters_matrix.name = df['name'].copy()
    # Vectorised per-letter counts: replaces the original O(rows * 26)
    # pure-Python loop with one C-level pass per letter.
    for l in tqdm(letters):
        name_letters_matrix[l] = name_letters_matrix.name.str.count(l).values
    return name_letters_matrix
def get_mcn_matrix_train(train):
    """Collapse the train set to one row per linked_id and build its letter matrix.

    For every linked_id the most frequent name among its records is kept; the
    deduplicated frame renames linked_id -> record_id so that
    create_name_letters_matrix can be reused unchanged.
    """
    # list of all names observed under each linked_id
    group = train[['name', 'linked_id']].groupby('linked_id').apply(lambda x: list(x['name']))
    link_mc_name = {}
    for (l, names) in tqdm(zip(group.keys(), group)):
        # most_common(1) -> [(name, count)]; keep just the name
        link_mc_name[l] = Counter(names).most_common(1)[0][0]
    most_common_name = pd.DataFrame.from_dict(link_mc_name, orient='index', columns=['most_common_name'])
    df_train_clean = pd.merge(train, most_common_name, how='left', left_on='linked_id', right_index=True)
    # one row per (linked_id, most_common_name); record-level columns dropped
    df_train_clean = df_train_clean.drop_duplicates(subset=['linked_id','most_common_name']).drop(['record_id', 'name'], axis=1)
    df_train_clean = df_train_clean.rename(columns={"linked_id":"record_id", "most_common_name":"name"})
    m_train = create_name_letters_matrix(df_train_clean)
    # positional index must be 0..n-1 so it can address similarity-matrix columns
    m_train = m_train.reset_index(drop=True)
    return m_train
def cosine_similarity(m_train, m_test, path='val_cosine', k=10):
    """Top-k cosine similarity between test rows and train rows.

    Drops the id/name columns, converts both letter-count matrices to sparse
    CSR, computes cosine similarity with similaripy keeping the k best train
    candidates per test row, and caches the result to '<path>.npz'.

    Returns the similarity matrix in CSR form.
    """
    m_train_csr = csr_matrix(m_train.drop(['record_id','name'], axis=1))
    m_test_csr = csr_matrix(m_test.drop(['record_id','name'], axis=1))
    # NOTE(review): m_train_csr is transposed here -- confirm the expected
    # orientation against similaripy's sim.cosine API.
    output = sim.cosine(m_test_csr, m_train_csr.T, k=k)
    save_npz(path + '.npz', output.tocsr())
    return output.tocsr()
def clean_cosine_output(output, df_test, m_train):
    """Flatten a sparse cosine-similarity matrix into a tidy DataFrame.

    Each stored (row, col, score) entry becomes one output row, ordered by
    query (matrix row) and, within a query, by decreasing score. Matrix row
    indices are mapped to test record ids and column indices to train
    record ids.

    Args:
        output: sparse (n_test, n_train) similarity matrix.
        df_test: test frame; row i of `output` corresponds to
            df_test.at[i, 'record_id'].
        m_train: train letter matrix; column j corresponds to
            m_train.at[j, 'record_id'].

    Returns:
        DataFrame with columns 'queried_record_id', 'predicted_record_id',
        'cosine_score'.
    """
    # Walk the stored entries via COO arrays instead of indexing the CSR
    # matrix element-by-element: each output[r, c] call in the original was
    # a per-element sparse lookup, which dominated the runtime.
    coo = output.tocoo()
    keep = coo.data != 0  # mirror .nonzero(): skip explicitly-stored zeros
    l = [[v, r, c] for v, r, c in zip(coo.data[keep], coo.row[keep], coo.col[keep])]
    # order by query row, then decreasing similarity; the stable sort keeps
    # the original storage order for exact score ties
    l.sort(key=lambda x: (x[1], -x[0]))
    rec_id = [df_test.at[r, 'record_id'] for (_, r, _) in tqdm(l)]
    lin_id = [m_train.at[c, 'record_id'] for (_, _, c) in tqdm(l)]
    scores = [v for (v, _, _) in l]
    df = pd.DataFrame()
    df['queried_record_id'] = rec_id
    df['predicted_record_id'] = lin_id
    df['cosine_score'] = scores
    return df
# Splitting Train in Train-Validation set
train = read_file("../dataset/original/train.csv")
train = train.drop(['modification', 'type'], axis=1)
train['name'] = train['name'].str.lower()
from sklearn.model_selection import train_test_split
target = train.linked_id
# Row-wise split: records of the same linked_id can land in both halves,
# which is intended here (validation queries need their entity in train).
X_train, X_val, y_train, y_val = train_test_split(train, target, test_size=0.33, random_state=42)
m_train = get_mcn_matrix_train(X_train)
m_train
m_test = create_name_letters_matrix(X_val)
cosine_output = cosine_similarity(m_train, m_test)
# positional index must match the similarity-matrix row numbers
X_val = X_val.reset_index(drop=True)
# Extract top10 from cosine similarity and create xgboost skeleton dataframe: validation set becomes xgboost train
xgb_train_df = clean_cosine_output(cosine_output, X_val, m_train)
xgb_train_df
```
## The same for the real test set
```
# Same pipeline on the real test set, fitting on the FULL train set this time.
test = read_file("../oracle-polimi-contest-2019/test_data.csv")
test = test.drop(['modification', 'type'], axis=1)
test['name'] = test['name'].str.lower()
m_train_full = get_mcn_matrix_train(train)
m_test_full = create_name_letters_matrix(test)
m_train_full.shape
m_test_full.shape
full_cosine_out = cosine_similarity(m_train_full, m_test_full, path='full_cosine_sim')
# NOTE(review): unlike X_val above, `test` is not reset_index()-ed here --
# confirm its index is already positional 0..n-1.
xgb_test_df = clean_cosine_output(full_cosine_out, test, m_train_full)
xgb_test_df
```
# Extract features
```
def adding_names(xgb_df, m_train, m_test):
    """Attach the candidate and query name strings to the skeleton frame.

    Bug fix: the original applied the first merge to an undefined global
    `df` instead of the `xgb_df` parameter, raising NameError (or silently
    using a stale global) when called.

    Args:
        xgb_df: frame with 'predicted_record_id' and 'queried_record_id'.
        m_train: matrix whose 'record_id'/'name' columns describe candidates.
        m_test: matrix whose 'record_id'/'name' columns describe queries.

    Returns:
        xgb_df with extra 'predicted_record_name' and 'queried_name' columns.
    """
    xgb_df = xgb_df.merge(m_train[['record_id', 'name']],
                          left_on='predicted_record_id',
                          right_on='record_id').drop('record_id', axis=1)
    xgb_df = xgb_df.rename(columns={'name': 'predicted_record_name'})
    xgb_df = xgb_df.merge(m_test[['record_id', 'name']],
                          left_on='queried_record_id',
                          right_on='record_id').rename(columns={'name': 'queried_name'})
    xgb_df = xgb_df.drop('record_id', axis=1)
    return xgb_df
def extract_target(predicted, linked):
    """Binary relevance target: 1 where the prediction matches the true id.

    Args:
        predicted: array of predicted record ids.
        linked: array of ground-truth linked ids (same length).

    Returns:
        Integer array of 0/1 flags.
    """
    # np.where builds the result directly; the original pre-allocated an
    # np.empty array that was immediately discarded.
    return np.where(predicted == linked, 1, 0)
def train_target(xgb_df_train, X_val):
    """Attach the binary ranking target to the train skeleton frame.

    Joins the ground-truth linked_id of each query onto its candidate rows,
    then marks candidates whose predicted_record_id equals that linked_id
    with target = 1 via extract_target.
    """
    xgb_df_train = xgb_df_train.merge(X_val[['record_id', 'linked_id']], left_on='queried_record_id', right_on='record_id')
    xgb_df_train = xgb_df_train.drop('record_id', axis=1)
    # cast so the equality test in extract_target compares like types
    xgb_df_train['linked_id'] = xgb_df_train['linked_id'].astype(int)
    xgb_df_train['target'] = extract_target(xgb_df_train.predicted_record_id.values, xgb_df_train.linked_id.values)
    # the ground-truth column must not leak into the feature frame
    return xgb_df_train.drop('linked_id', axis=1)
def extract_editdistance(queried_name, predicted_name):
    """Element-wise edit distance between two parallel name arrays.

    NOTE(review): relies on the `editdistance` package, which is not imported
    anywhere in this cell -- confirm it is imported earlier in the notebook.
    (Edit distance is symmetric, so the swapped argument order used at the
    call site below is harmless.)
    """
    res = np.empty(len(queried_name))
    for i in tqdm(range(len(queried_name))):
        res[i] = editdistance.eval(queried_name[i], predicted_name[i])
    return res
# Build the ranking target and the edit-distance feature on the train skeleton.
xgb_train_df = train_target(xgb_train_df, X_val)
xgb_train_df['editdistance'] = extract_editdistance(xgb_train_df.predicted_record_name.values, xgb_train_df.queried_name.values)
# TODO: finish this part -- add the same features to xgb_test_df as well
import xgboost as xgb
# candidate-group sizes per query, required by the pairwise ranker
group = xgb_train_df.groupby('queried_record_id').size().values
ranker = xgb.XGBRanker()
# NOTE(review): `df_xgb` is undefined here (probably meant xgb_train_df), and
# no 'nysiis_distance' column is created above -- confirm before running.
ranker.fit(df_xgb.drop(['queried_record_id', 'target', 'nysiis_distance'], axis=1), df_xgb['target'], group=group)
# Get predictions
# NOTE(review): xgb_test_df only has 'cosine_score' at this point; the
# 'score'/'editdistance' columns come from the TODO feature step above.
predictions = ranker.predict(xgb_test_df[['predicted_record_id', 'score', 'editdistance']])
xgb_test_df['predictions'] = predictions
df_predictions = xgb_test_df[['queried_record_id', 'predicted_record_id', 'predictions']]
```
# Extract Submission
```
# Pair each candidate id with its ranker score, then collect the pairs of
# every query into a single list per queried_record_id.
rec_pred = []
for (r,p) in zip(df_predictions.predicted_record_id, df_predictions.predictions):
    rec_pred.append((r, p))
rec_pred
df_predictions['rec_pred'] = rec_pred
group_queried = df_predictions[['queried_record_id', 'rec_pred']].groupby('queried_record_id').apply(lambda x: list(x['rec_pred']))
# groupby/apply yields a Series; rebuild a two-column frame from it
df_predictions = pd.DataFrame(group_queried).reset_index().rename(columns={0 : 'rec_pred'})
def reorder_preds(preds):
    """For each query, order the candidate record ids by decreasing score.

    Args:
        preds: iterable of lists of (record_id, score) tuples.

    Returns:
        List of lists of record ids, best-scoring first.
    """
    ordered = []
    for pairs in preds:
        ranked = sorted(pairs, key=lambda pair: pair[1], reverse=True)
        ordered.append([record_id for record_id, _ in ranked])
    return ordered
df_predictions['ordered_preds'] = reorder_preds(df_predictions.rec_pred.values)
df_predictions = df_predictions[['queried_record_id', 'ordered_preds']].rename(columns={'ordered_preds': 'predicted_record_id'})
# Serialise each prediction list as a space-separated string of ids.
new_col = []
for t in tqdm(df_predictions.predicted_record_id):
    new_col.append(' '.join([str(x) for x in t]))
new_col
# Adding missing values
# Queries that received no cosine candidates get a fixed fallback id.
missing_values = {'queried_record_id' : ['12026587-TST-MR', '13009531-TST-MR', '12091134-TST-M', '12091134-NV0-TST-CP'],
                  'predicted_record_id': [10111147, 10111147, 10111147, 10111147]}
missing_df = pd.DataFrame(missing_values)
missing_df
df_predictions.predicted_record_id = new_col
df_predictions = pd.concat([df_predictions, missing_df])
df_predictions.to_csv('xgb_sub2.csv', index=False)
```
| github_jupyter |
### Creating superposition states associated with discretized probability distributions
#### Prerequisites
Here are a few things you should be up to speed on before we start:
- [Python fundamentals](https://qiskit.org/textbook/ch-prerequisites/python-and-jupyter-notebooks.html)
- [Programming quantum computers using Qiskit](https://qiskit.org/textbook/ch-prerequisites/qiskit.html)
- [Single qubit gates](https://qiskit.org/textbook/ch-states/single-qubit-gates.html)
Additional resources can be found [here](https://github.com/QForestCommunity/launchpad/blob/master/README.md).
#### Dependencies
We also need a couple of Python packages to build our distribution encoder:
- [Qiskit](https://qiskit.org/)
- [Numpy](https://numpy.org/)
- [SciPy](https://www.scipy.org/)
- [Matplotlib](https://matplotlib.org/)
#### Contributors
[Sashwat Anagolum](https://github.com/SashwatAnagolum)
#### Qiskit Package Versions
```
import qiskit
qiskit.__qiskit_version__
```
#### Introduction
Given a probability distribution $p$, we want to create a quantum state $|\psi\rangle$ such that
$$|\psi\rangle = \sum_{i} \sqrt{p_i} |i\rangle$$
where $|i\rangle$ represents one of an orthonormal set of states.
While we don't know when (for what kinds of distributions) we can do this, we do know that if you can efficiently integrate over a distribution classically, then we can efficiently construct a quantum state associated with a discretized version of that distribution.
It may seem kind of trivial - we can integrate over the distribution classically, so why not just create the mixed state shown here?
$$\sum_i p_i |i\rangle \langle i |$$
If all we needed to do was sample from the distribution, we could use this state - but then if we were efficiently integrating the distribution classically, say using Monte Carlo methods, we might as well sample from the classical distribution as well.
The reason we avoid generating the distribution as a mixed quantum state is that we often need to perform further, uniquely quantum, processing on it after creation - in this case, we cannot use the mixed state approach.
#### Encoding the distribution
If we wanted to create a $N$ region discretization, we would need $n = log N$ qubits to represent the distribution. Let's look at a super simple case to start off: $N = 2$, so $n = 1$.
We have probabilities $p_{0}^{(1)}$ and $p_1^{(1)}$, of a random variable following the distribution lying in region $0$ and region $1$, respectively, with $p^{(i)}_{j}$ representing the probability of measuring a random variable in region $j$ if it follows the discretized distribution over $i$ qubits.
Since we only use one qubit, all we need to do is integrate over region $0$ to find the probability of a variable lying within it. Let's take a quick look at the Bloch sphere:

If a qubit is rotated about the y-axis by angle $\theta$, then the probability of measuring it as zero is given by $\cos (\frac{\theta}{2})^2$ - so we can figure out how much to rotate a qubit by if we're using it to encode a distribution:
$$ \theta = 2 * \cos^{-1} \left ( \sqrt{p_{0}^{(1)}}\right )$$
$$p_{0}^{(1)} = \int_{x^{(1)}_{0}}^{x_{1}^{(1)}}p(x) dx$$
Where $x^{(1)}_{0}$ and $x_{1}^{(1)}$ are the first and second region boundaries when 1 qubit is used. This leaves us with
$$|\psi \rangle = \sqrt{p_{0}^{(1)}} |0\rangle + \sqrt{p_{1}^{(1)}} |1\rangle$$
Awesome!
Now that we know how to do it for distributions with two regions, let's see if we can expand it to include more regions - i.e., can we convert a quantum state encoding a $N$ region discretization into one encoding a discretization with $2N$ regions?
To get started, let's avoid all the complicated integration stuff we'll need to do later by defining a function $f(i, n)$ such that
$$f(i, n) = \frac{\int_{x_{k}^{(n + 1)}}^{x_{k + 1}^{(n + 1)}} p(x) dx}{\int^{x_{i + 1}^{(n)}}_{x_{i}^{(n)}} p(x) dx}$$
Where $k = 2 * \left ( \frac{i}{2} - \frac{i \% 2}{2} \right )$. The equation above probably looks a little hopeless, but all it does is compute the conditional probability of a value lying in the left subregion of region $i$ (when we have $N$ regions), given that it lies in region $i$.
Why do we need this?
We're assuming that dividing the distribution into $N$ regions is just an intermediary step in the process of dividing it into the desired $2^{m}$ regions - so $x_{k}^{(n + 1)}$ refers to the same boundary that $x_{i}^{(n)}$ does.
Now that we've defined $f(i, n)$, all we need to do to figure out how much to rotate the $(n + 1)^{th}$ qubit is compute
$$\theta_{i}^{(n + 1)} = 2 * \cos^{-1} \left ( \sqrt{f(i, n)}\right )$$
Now all we need to do is rotate the $(n + 1)^{th}$ qubit by $\theta_{i}^{(n + 1)}$ conditioned on the state $|i\rangle$ represented using $n$ qubits:
$$\sqrt{p_{i}^{(n)}}|i\rangle \rightarrow \sqrt{p^{(n + 1)}_{k}}|k\rangle + \sqrt{p^{(n + 1)}_{k + 1}}|k+1\rangle$$
Since we showed that constructing a state for $n = 1$ was possible, and given a $2^n$ region discretization, we could convert into a distribution with $2^{(n + 1)}$ regions, we just inductively proved that we can construct a superposition state corresponding to a $2^n, n \in \mathbb{N}$ region discretized distribution - pretty cool!
Now that we've gotten the concepts down, let's move on to building our own quantum distribution encoder.
#### Required modules
```
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import Aer, execute, QuantumCircuit
from qiskit.circuit.library.standard_gates import RYGate
from qiskit.tools.visualization import circuit_drawer
from numpy import pi, e, sqrt, arccos, log2
from scipy.integrate import quad
%matplotlib inline
import matplotlib.pyplot as plt
```
Let's define a function representing our distribution, so that we can change super quickly whenever we want to. We'll start off with a super simple function, like $N(0, 2)$:
```
def distribution(x):
    """
    Evaluate the probability density encoded by this notebook at x.
    Currently a normal distribution N(0, 2) -- mess around with this function
    to see how the encoder works! The trailing constant renormalises the
    density so it integrates to 1 over the truncated region used below.
    """
    # Use these with normal distributions
    mean = 0
    std_dev = 2
    z = (x - mean) / std_dev
    gaussian = (e ** (-0.5 * z ** 2)) / (std_dev * sqrt(2 * pi))
    return gaussian / 0.99993665
```
The 0.99993665 is a normalisation factor used to make sure the sum of probabilities over the regions we've chosen adds up to 1.
Next, let's create everything else we need to compute $f(i, n)$:
```
def integrate(dist, lower, upper):
    """
    Integrate dist from lower to upper using scipy's quad routine (the
    original docstring mis-attributed quad to numpy). Parametrized
    distributions can be handled with this syntax instead:
    quad(integrand, lower, upper, args=(tupleOfArgsForIntegrand))
    Returns only the integral value; quad's error estimate is discarded.
    """
    return quad(dist, lower, upper)[0]
def computeRegionProbability(dist, regBounds, numRegions, j):
    """
    Given a distribution dist, a list of adjacent regions regBounds, the
    current level of discretization numRegions, a region number j, computes
    the probability that the value random variable following dist lies in
    region j given that it lies in the larger region made up of regions
    [(j // 2) * 2, ((j + 2) // 2) * 2]
    """
    totalRegions = len(regBounds) - 1
    # index of the left sub-region at the next (finer) discretization level
    k = 2 * j
    # Numerator: mass of sub-region k; denominator: mass of its parent region.
    # The (totalRegions // numRegions) stride maps level-relative region
    # numbers onto the finest-level boundary list regBounds.
    prob = integrate(dist, regBounds[(totalRegions // numRegions) * k],
        regBounds[(totalRegions // numRegions) * (k + 1)]) / integrate(
        dist, regBounds[(totalRegions // numRegions) * ((k // 2) * 2)],
        regBounds[(totalRegions // numRegions) * (((k + 2) // 2) * 2)])
    return prob
```
$computeRegionProbability$ gives us the value of $f(i, n)$. We're finally ready to start writing the quantum part of our program - let's start by creating the registers and circuit we need:
```
def encodeDist(dist, regBounds):
numQubits = int(log2(len(regBounds) - 1))
a = QuantumRegister(2 * numQubits - 2)
c = ClassicalRegister(numQubits)
qc = QuantumCircuit(a, c)
```
Now we can create the looping construct we need to be able to iteratively divide the distribution into $2^m$ regions, starting from $n = 1$ ($2$ regions), and dividing until $n = log N$ ($N$ regions). We need to loop over the different regions in the current discretization, and compute the value of $f(i, n)$ for each one:
```
for i in range(numQubits):
numRegions = int(2 ** (i + 1))
for j in range(numRegions // 2):
prob = computeRegionProbability(dist, regBounds, numRegions, j)
```
Now we need to apply the controlled rotations - but we also need to write in a special case for $n = 1$, because there are no qubits to condition the rotation on:
```
if not i:
qc.ry(2 * arccos(sqrt(prob)), a[2 * numQubits - 3])
```
Since we'll be using gates with an arbitrary number of control qubits, we use the ControlledGate:
```
else:
cGate = RYGate(2 * arccos(sqrt(prob))).control(i)
```
We know that we need to use the qubits indexed by $[0, 1, ..., i - 1]$ as control qubits, and the $n^{th}$ one as the target - but before we can apply the gate we need to perform a few bit flips to make sure that the $n^{th}$ qubit is rotated only when the control qubits are in the state $|i\rangle$. We can figure out which qubits to flip using this function:
```
def getFlipList(i, j, numQubits):
"""
Given the current level of desired level of discretization, the
current level of discretization i and a region number j,
returns the binary bit string associated with j in the form of
a list of bits to be flipped.
"""
binString = str(bin(j))[2:]
binString = ("0" * (numQubits - len(binString))) + binString
bitFlips = []
for k in range(numQubits - i, numQubits):
if binString[k] == '0':
bitFlips.append(3 * numQubits - 3 - k - i)
return bitFlips
```
Here the variable j represents the region number, which we convert to binary, and then flip qubits so that the resulting binary string is all ones. After finding out which qubits we need to flip, we can create a controlled gate and append it to the quantum circuit back in $encodeDist$:
```
for k in listOfFlips:
qc.x(a[k])
qubitsUsed = [a[k] for k in
range(2 * numQubits - 2 - i, 2 * numQubits - 2)]
qubitsUsed.append(a[2 * numQubits - 3 - i])
qc.append(cGate, qubitsUsed)
for k in listOfFlips:
qc.x(a[k])
```
All that's left is to return the quantum circuit:
```
return qc, a, c
```
Here's the entire function, so that we can run it in the notebook:
```
def encodeDist(dist, regBounds):
    """
    Discretize the distribution dist into multiple regions with boundaries
    given by regBounds, and store the associated quantum superposition
    state in a new quantum register reg. Please make sure the number of
    regions is a power of 2, i.e. len(regBounds) = (2 ** n) + 1.
    Additionally, the number of regions is limited to a maximum of
    2^(n // 2 + 1), where n is the number of qubits available in the backend
    being used - this is due to the requirement of (n - 2) ancilla qubits in
    order to perform (n - 1) control operations with minimal possible depth.
    Returns a new quantum circuit containing the instructions and registers
    needed to create the superposition state, along with the size of the
    quantum register.
    """
    numQubits = int(log2(len(regBounds) - 1))
    # data qubits plus ancillas for the multi-controlled rotations
    a = QuantumRegister(2 * numQubits - 2)
    c = ClassicalRegister(numQubits)
    qc = QuantumCircuit(a, c)
    # level i doubles the region count: 2 -> 4 -> ... -> 2**numQubits
    for i in range(numQubits):
        numRegions = int(2 ** (i + 1))
        for j in range(numRegions // 2):
            # conditional probability of the left sub-region of region j
            prob = computeRegionProbability(dist, regBounds, numRegions, j)
            if not i:
                # first qubit: plain Ry, nothing to condition on yet
                qc.ry(2 * arccos(sqrt(prob)), a[2 * numQubits - 3])
            else:
                # Ry controlled on the i previously prepared qubits
                cGate = RYGate(2 * arccos(sqrt(prob))).control(i)
                listOfFlips = getFlipList(i, j, numQubits)
                # X-flip so the controls are all |1> exactly for state |j>
                for k in listOfFlips:
                    qc.x(a[k])
                qubitsUsed = [a[k] for k in
                              range(2 * numQubits - 2 - i, 2 * numQubits - 2)]
                qubitsUsed.append(a[2 * numQubits - 3 - i])
                qc.append(cGate, qubitsUsed)
                # undo the flips to restore the encoding
                for k in listOfFlips:
                    qc.x(a[k])
    return qc, a, c
```
Finally, we can call our function, and compare the results with those from a classical computer - we also need a helper function that pads bit strings for us, so that we can plot the classical results on the same axis as the quantum ones:
```
def pad(x, numQubits):
    """
    Left-pad a Python binary literal (e.g. the output of bin()) with zeros
    so the returned bit string is exactly numQubits characters long.
    """
    bits = str(x)[2:]  # drop the '0b' prefix
    return bits.rjust(numQubits, '0')
# 32 unit-width regions on [-16, 16] -> 5 data qubits.
regBounds = [i for i in range(-16, 17)]
qc, a, c = encodeDist(distribution, regBounds)
# the register holds 2*numQubits - 2 qubits; recover numQubits from the circuit
numQubits = (qc.num_qubits + 2) // 2
# measure only the data qubits (the last numQubits of the register)
for i in range(numQubits - 2, 2 * numQubits - 2):
    qc.measure(a[i], c[i - (numQubits - 2)])
backend = Aer.get_backend('qasm_simulator')
shots = 100000
job = execute(qc, backend=backend, shots=shots)
results = job.result().get_counts()
resultsX = []
resultsY = []
# Fill zero counts for bitstrings the simulator never observed.
for i in [pad(bin(x), numQubits) for x in range(2 ** (numQubits))]:
    resultsX.append(i)
    if i in results.keys():
        resultsY.append(results[i])
    else:
        resultsY.append(0)
# Classical ground truth: expected shot count per region by direct integration.
truthDisc = [integrate(distribution, regBounds[i], regBounds[i + 1]) * shots for i in range(
    len(regBounds) - 1)]
plt.figure(figsize=[16, 9])
plt.plot(resultsX, resultsY)
plt.plot(resultsX, truthDisc, '--')
plt.legend(['quantum estimate', 'classical estimate'])
plt.show()
```
Let's take a look at the quantum circuit:
```
circuit_drawer(qc, output='mpl')
```
#### Things to do next
Looks like we're done - awesome!
Taking all the functions from this notebook and pasting them into a python file will give you a working copy of this program, provided you have all the dependencies installed - if you want a regular python file instead, you can get a copy [here](https://github.com/SashwatAnagolum/DoNew/blob/master/loadProbDist/loadProbDist.py).
A possible next step after getting the hang of encoding distributions is to figure out ways to process the quantum state further, leading to purely quantum transformed versions of the distribution.
Let me know if you figure out any other ways we can work with the quantum state we get using this circuit, or if you have any other questions - you can reach me at [sashwat.anagolum@gmail.com](mailto:sashwat.anagolum@gmail.com)
| github_jupyter |
```
# Install old version of scikit-learn, see https://github.com/SeldonIO/seldon-core/issues/2059
!pip install -UIv scikit-learn==0.20.3
!pip install azure-storage-file-datalake azure-identity azure-storage-blob pandas joblib
### ENTER YOUR DETAILS ###
# Azure AD / storage credentials -- fill these in before running the notebook.
storage_account_name = ""
client_id = ""
tenant_id = ""
client_secret = "" # client secret value of the service principal
connection_string = "" # blob storage connection string
# run `oc whoami --show-token` to get your token
# do not use quotes
%env OPENSHIFT_TOKEN=
%env STORAGE_ACCOUNT_NAME=
import pandas as pd
from sklearn.linear_model import LogisticRegression
from joblib import dump, load
from azure.identity import ClientSecretCredential
from azure.storage.filedatalake import DataLakeServiceClient
from azure.core._match_conditions import MatchConditions
from azure.storage.filedatalake._models import ContentSettings
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__
def initialize_storage_account_ad(storage_account_name, client_id, client_secret, tenant_id):
    """Create the module-level DataLakeServiceClient used by the download
    helper, authenticating via a service principal (client-credentials flow).

    Errors are printed rather than raised (notebook best-effort style), in
    which case `service_client` is left unset.
    """
    try:
        global service_client
        credential = ClientSecretCredential(tenant_id, client_id, client_secret)
        service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format(
            "https", storage_account_name), credential=credential)
    except Exception as e:
        print(e)
def download_file_from_directory(dataset):
    """Download `dataset` from the 'sample' directory of the 'mycontainer'
    file system into a local file of the same name.

    Relies on the module-level `service_client` created by
    initialize_storage_account_ad(). Errors are printed (best-effort),
    matching the notebook's style.
    """
    try:
        file_system_client = service_client.get_file_system_client(file_system="mycontainer")
        directory_client = file_system_client.get_directory_client("sample")
        file_client = directory_client.get_file_client(dataset)
        download = file_client.download_file()
        # Context manager guarantees the local handle is closed even if the
        # download fails mid-way (the original leaked the handle on error).
        with open(dataset, 'wb') as local_file:
            local_file.write(download.readall())
    except Exception as e:
        print(e)
# Initialize and download Iris dataset from Azure Data Lake
initialize_storage_account_ad(storage_account_name, client_id, client_secret, tenant_id)
download_file_from_directory("iris.data")
# Read training data set
train_df = pd.read_csv("iris.data", header=None, names=["sepal_length", "sepal_width", "petal_length", "petal_width", "class"])
# factorize maps the class strings to integer codes for training
y = pd.factorize(train_df["class"])[0]
train_df.pop("class")
X = train_df.values
# Train model
clf = LogisticRegression()
clf.fit(X,y)
# Test model
# (sanity check on the first two TRAINING rows, not a real held-out test)
print(X[0:2])
print(clf.predict(X[0:2]))
# Save model to local disk
dump(clf, 'model.joblib')
# Save model to Azure Blob Storage, where Seldon's SKLEARN_SERVER expects it.
local_file_name = "model.joblib"
upload_path = "sklearn/model.joblib"
try:
    blob_service_client = BlobServiceClient.from_connection_string(connection_string)
    blob_client = blob_service_client.get_blob_client(container="mycontainer", blob=upload_path)
    print("\nUploading to Azure Storage as blob:\n\t" + upload_path)
    # Upload the created file
    with open(local_file_name, "rb") as data:
        blob_client.upload_blob(data)
except Exception as ex:
    print('Exception:')
    print(ex)
%%bash
curl -O https://mirror.openshift.com/pub/openshift-v4/clients/oc/4.6/linux/oc.tar.gz
tar xzf oc.tar.gz
cp oc /opt/app-root/bin/
%%bash
# Test oc
oc login --server https://openshift.default.svc.cluster.local --insecure-skip-tls-verify --token=$OPENSHIFT_TOKEN
# Run model in Seldon
oc apply -n odh -f - <<EOF
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: example
spec:
name: iris
predictors:
- graph:
children: []
implementation: SKLEARN_SERVER
modelUri: https://$STORAGE_ACCOUNT_NAME.blob.core.windows.net/mycontainer/sklearn/model.joblib
name: classifier
name: default
replicas: 1
EOF
%%bash
# Test model in Seldon
MODEL_URL=example-default.odh.svc.cluster.local:8000
curl -X POST $MODEL_URL/api/v1.0/predictions \
-H 'Content-Type: application/json' \
-d '{ "data": { "ndarray": [[1,2,3,4]] } }'
```
| github_jupyter |
#python deep_dream.py path_to_your_base_image.jpg prefix_for_results
#python deep_dream.py img/mypic.jpg results/dream
#from __future__ import print_function
from tensorflow import keras
import numpy as np
import argparse
from keras.applications import inception_v3
from keras import backend as K
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.applications.inception_v3 import decode_predictions
from keras.models import Model, load_model
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
```
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
from tensorflow import keras
from keras.applications import inception_v3
from keras.applications.inception_v3 import decode_predictions
from keras.models import Model, load_model
import keras.backend as K
from keras.preprocessing.image import load_img, img_to_array
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from numpy.linalg import norm
import scipy
import pickle
from os import listdir
from os.path import isfile, join
import operator
from PIL import Image
from keras.preprocessing import image
import os
import math
import PIL.Image
from sklearn.metrics import pairwise
import matplotlib.pyplot as plt
from keras.applications.inception_v3 import preprocess_input
from sklearn import linear_model
from sklearn import metrics
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import pandas as pd
from scipy import stats
import tensorflow as tf
K.backend()
K.set_learning_phase(0)
model = inception_v3.InceptionV3(weights='imagenet',include_top=False)
dream = model.input
print('Model loaded.')
import os
import cav
working_dir = '/Users/tyler/Desktop/dissertation/programming/tcav_on_azure'
concept = 'horizontal'
cav_dict = {}
layer_names = ['mixed0','mixed1','mixed2','mixed3','mixed4','mixed5','mixed6','mixed7','mixed8','mixed9','mixed10']
#layer_names = ['mixed0']
for layer_name in layer_names:
subpath = concept + '-random500_0-' + layer_name
cav_path = 'cav_dir/' + subpath + '-linear-0.1.pkl'
path = os.path.join(working_dir, cav_path)
this_cav = cav.CAV.load_cav(path)
cav_dict[layer_name] = this_cav.cavs[0]
'''
concept = 'striped_sub_1'
layer_names = ['mixed0','mixed1','mixed2','mixed3','mixed4','mixed5','mixed6','mixed7','mixed8','mixed9','mixed10']
layer_names = ['mixed6']
for layer_name in layer_names:
subpath = concept + '-random500_0-' + layer_name
cav_path = 'cav_dir/' + subpath + '-linear-0.1.pkl'
path = os.path.join(working_dir, cav_path)
this_cav = cav.CAV.load_cav(path)
cav_dict[layer_name] = this_cav.cavs[0]
'''
concept_p = 'grassland_sub_3'
concept_n = 'N_0'
target_class = 'zebra'
split_seed = 1
#cav_dict = {}
replace_these = ['mixed7','mixed8','mixed9','mixed10']
for layer in replace_these:
acts_p,_ = get_acts_for_concept(concept_p,layer)
acts_n,_ = get_acts_for_concept(concept_n,layer)
#_,acts_class = get_acts_for_concept(target_class,layer)
x = np.concatenate((acts_p,acts_n))
y = np.concatenate((np.zeros(500),np.ones(500)))
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, stratify=y,random_state=split_seed)
x_train_p_list,x_train_n_list =[],[]
for idx,a in enumerate(x_train):
if y_train[idx] == 0:
x_train_p_list.append(a)
else:
x_train_n_list.append(a)
x_train_p, x_train_n = np.array(x_train_p_list),np.array(x_train_n_list)
mu_p = (x_train_p).mean(axis=0)
mu_n = (x_train_n).mean(axis=0)
cav_params = mu_p - mu_n
cav_dict[layer] = cav_params
#for layer in layer_names:
# if layer not in cav_dict:
# cav_dict[layer] = ''
step = 0.02 # Gradient ascent step size
num_octave = 4 # Number of scales at which to run gradient ascent
octave_scale = 1.4 # Size ratio between scales
iterations = 30 # Number of ascent steps per scale
max_loss = 100000000000
#result_prefix = '/home/tyler/Desktop/tcav_on_azure/results/test'
size_dict = {'mixed0': 313600,'mixed1': 352800,'mixed2': 352800,'mixed3': 221952,'mixed4': 221952,'mixed5': 221952,'mixed6': 221952,'mixed7': 221952,'mixed8': 81920,'mixed9': 131072,'mixed10': 131072}
settings = {
'features': {
#'mixed0': 0,#/313600,
#'mixed1': 1,#/352800,
#'mixed2': 0,#/352800,
#'mixed3': 0,#/221952,
#'mixed4': 0,#/221952,
#'mixed5': 0,#/221952,
#'mixed6': 0,#/221952,
'mixed7': 1,#/221952,
'mixed8': 1,#/81920,
'mixed9': 1,#/131072,
'mixed10': 1#/131072
},}
#cav_dict['mixed9'] = pickle.load(open('mu_great_dane_9','rb'))
#cav_dict['mixed8'] = pickle.load(open('mu_great_dane_8','rb'))
#cav_dict['mixed7'] = pickle.load(open('mu_great_dane_7','rb'))
#cav_dict['mixed6'] = pickle.load(open('mu_great_dane_6','rb'))
layer_dict = dict([(layer.name, layer) for layer in model.layers])
sess = K.get_session()
loss_2 = K.variable(0.)
for layer_name in settings['features']:
coeff = settings['features'][layer_name]
assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
coeff = settings['features'][layer_name]
acts = layer_dict[layer_name].output
flat_acts = K.flatten(acts)
len_of_acts = flat_acts.shape[0]
print(len_of_acts)
layer_cav = K.variable(cav_dict[layer_name].reshape(-1,1))
#layer_cav_slice = K.slice(layer_cav,0,flat_acts.shape[0])
n = layer_cav.shape[0]
print(n, layer_name)
n_tensor = K.constant(n.value/1000)
features_shape = tf.shape(flat_acts)
H = features_shape[0]
#W = features_shape[2]
#layer_cav_slice = K.reshape(layer_cav, shape=[H,1])
print(H)
layer_cav_slice = K.slice(layer_cav,(0,0),(H,1))
flat_acts_slice = K.reshape(flat_acts, shape=[1,H])
print('layer_cav shape is ' + str(layer_cav_slice.shape))
print('acts shape is ' + str(flat_acts_slice.shape))
#loss_2 += coeff * K.dot(K.reshape(acts,(1,n)),layer_cav)
#scaling = K.prod(K.cast(K.shape(acts), 'float32'))
loss_2 += coeff * K.dot(flat_acts_slice,layer_cav_slice) #/ scaling
#loss_2 += coeff * eu_distance(acts,layer_cav)
#loss_2 -= K.sum(K.abs(K.reshape(acts,(n,1))-layer_cav),axis=0,keepdims=False)
#loss_2 += cosine_distance((flat_acts,layer_cav))
#loss_2 += K.dot(K.reshape(acts,(1,n)),layer_cav) / n_tensor
#print(loss_2.shape)
#loss_2 += 1000 * K.sum(K.square(model.input)) / (3 * 299 * 299)
#loss_2 -= 1 * K.sum(K.abs(model.input))
#loss_2 = loss
grads_2 = K.gradients(loss_2, model.input)[0]
grads_2 /= K.maximum(K.mean(K.abs(grads_2)), K.epsilon())
outputs_2 = [loss_2, grads_2, acts]
fetch_loss_and_grads_2 = K.function([model.input], outputs_2)
def eval_loss_and_grads(x):
    """Run the compiled Keras function on *x* and return (loss, gradients).

    Relies on the module-level ``fetch_loss_and_grads_2`` K.function; its
    third output (the raw activations) is ignored here.
    """
    loss_value, grad_values = fetch_loss_and_grads_2([x])[:2]
    return loss_value, grad_values
def gradient_ascent(x, iterations, step, max_loss=None):
    """Iteratively move image tensor *x* up the loss gradient.

    Parameters
    ----------
    x : np.ndarray
        Batched image tensor, shape (1, H, W, 3).
    iterations : int
        Maximum number of ascent steps.
    step : float
        Gradient-ascent step size.
    max_loss : float, optional
        Stop early once the loss exceeds this value.

    Returns
    -------
    np.ndarray
        The updated image tensor.
    """
    # Bug fix: the original body recomputed a random jitter on every
    # iteration from the *globals* `img` and `jitter_setting` (not from the
    # local `x`), but never applied it -- the `x += jitter` / `x -= jitter`
    # lines were commented out. That dead per-iteration work has been removed.
    for i in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break  # runaway loss: stop before the image degenerates
        if i % 5 == 0:
            print('..Loss value at', i, ':', loss_value)
        x += step * grad_values  # gradients are mean-normalized upstream
    return x
```
## With Scaling
```
base_image_path = os.path.join(working_dir,'concepts/horse_sub_1/img252.jpg')
base_image_path = os.path.join(working_dir,'concepts/noise_white/img1.jpg')
#base_image_path = os.path.join(working_dir,'sky.jpg')
jitter_setting = .1
tf.logging.set_verbosity(0)
img_pic = image.load_img(base_image_path, target_size=(350, 350))
#img = image.img_to_array(img_pic)
img = preprocess_image(base_image_path)
img = resize_img(img,(299,299,3))
#img = np.expand_dims(img, axis=0) / 255
#jitter = .1*(np.random.random((img.shape[1], img.shape[2], 3)) - 0.5) * jitter_setting
#jitter = np.expand_dims(jitter, axis=0)
#img += jitter
if K.image_data_format() == 'channels_first':
original_shape = img.shape[2:]
else:
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
x,y = shape
if x < 400 and y < 400:
successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])
for shape in successive_shapes:
print('Processing image shape', shape)
img = resize_img(img, shape)
img = gradient_ascent(img,
iterations=iterations,
step=step,
max_loss=max_loss)
upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
same_size_original = resize_img(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = resize_img(original_img, shape)
img -= jitter
save_img(img, fname='results/tmp.png')
#img_path = 'concepts/striped_sub_1/striped_0004.jpg'
img_path = 'results/tmp.png'
show_img = image.load_img(img_path)
show_img
#decode_predictions(preds, top=3)
model = load_model('v3_model.h5')
#get_prediction(prep(img_path))
preds = sess.run(endpoints_v3['prediction'], {endpoints_v3['input']: prep(img_path)})
preds.shape
sess = K.get_session()
endpoints_v3 = dict(
input=model.inputs[0].name,
input_tensor=model.inputs[0],
logit=model.outputs[0].name,
prediction=model.outputs[0].name,
prediction_tensor=model.outputs[0],)
def get_prediction(img):
    """Preprocess *img*, run it through the TF session, and return the top-3 decoded ImageNet predictions."""
    batch = preprocess_input(img)
    feed = {endpoints_v3['input']: batch}
    preds = sess.run(endpoints_v3['prediction'], feed)
    return decode_predictions(preds, top=3)
def prep(path):
    """Load the image at *path* as a preprocessed (1, 299, 299, 3) InceptionV3 input batch."""
    pic = image.load_img(path, target_size=(299, 299))
    arr = image.img_to_array(pic)[np.newaxis, ...]
    return preprocess_input(arr)
#save_img(img, fname='results/zebra/0_1_2_3.png')
#show_img = image.load_img('results/striped_7_8_9.png', target_size=(299, 299))
#show_img
img_pic
#img_in = image.load_img(base_image_path, target_size=(299, 299))
# 1. run model
# 2. run bottlenecks_tensors
# 3. sess = K.get_session()
img = preprocess_image(base_image_path)
bottleneck_name = 'mixed9'
layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{model.input: img})
#layer_9_acts.shape
img.shape
successive_shapes
img = resize_img(img, shape)
img.shape
#img = preprocess_image(base_image_path)
bottleneck_name = 'mixed9'
layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{model.input: img})
layer_9_acts.shape
img.shape
img = gradient_ascent(img,
iterations=iterations,
step=step,
max_loss=max_loss)
x = img
eval_loss_and_grads(x)
layer_cav = K.constant(cav_dict[layer_name].reshape(-1,1))
n = layer_cav.shape[0]
print(n, layer_name)
#n_tensor = K.constant(n.value/1000)
coeff = settings['features'][layer_name]
acts = layer_dict[layer_name].output
#flat_acts = K.variable(K.reshape(acts,(1,n)))
#loss_2 += coeff * K.dot(K.reshape(acts,(1,n)),layer_cav) / n_tensor
acts
acts_sq = K.squeeze(acts,axis = 1)
acts_sq
flat_acts
layer_cav
K.slice(acts)
layer_cav
layer_dict['mixed9'].output
cav_dict['mixed9'].shape[0]
cav_dict['mixed9'].shape[0] / 2048
x / 288
model.layers.o
layer_cav.set_shape(acts.shape)
#image.img_to_array(img_pic)
## No scaling
tf.logging.set_verbosity(0)
base_image_path = 'concepts/striped_sub_1/striped_0004.jpg'
base_image_path = '/home/tyler/Desktop/tcav_on_azure/concepts/noise_white/img1.jpg'
img_pic = image.load_img(base_image_path, target_size=(299, 299))
img = image.img_to_array(img_pic)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
jitter = 2*(np.random.random((img.shape[1], img.shape[2], 3)) - 0.5) * .05
jitter = np.expand_dims(jitter, axis=0)
img += jitter
#original_img = np.copy(img)
img = gradient_ascent(img,iterations=iterations,step=step,max_loss=max_loss)
img -= jitter
img_name = 'placeholder'
save_img(img, fname='results/' + img_name + '.png')
#flat_act = np.reshape(np.asarray(acts).squeeze(), -1)
#flat_act_norm = keras.utils.normalize(flat_act)
#loss2 = euclidean_distance(vec_norm(layer_9_cav),flat_act_norm)
#loss_2 += K.sum(K.square(K.reshape(acts,(131072,)) - layer_9_cav_K))
#loss_2 += K.dot(K.reshape(acts,(1,131072)),K.transpose(layer_9_cav_K))
layer_name = 'mixed9'
layer_out = layer_dict[layer_name].output
layer_out
img_in = shrunk_original_img
img_in.shape
new_acts = fetch_loss_and_grads_2([img_in])[0]
new_acts
layer_9_acts[0][5][0]
new_acts[0][5][0]
```
## New Loss
```
def get_loss(this_img):
    """Distance between this image's normalized bottleneck activations and the CAV.

    Runs *this_img* through the session at the global ``bottleneck_name``
    layer, flattens the activations, and returns the Euclidean distance
    between the unit-normalized activation vector and the unit-normalized
    global ``layer_9_cav``.
    """
    layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],
                            {endpoints_v3['input']: this_img})
    flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
    # Bug fix: the original did `loss += ...` on an uninitialized local,
    # which raises UnboundLocalError at runtime; assign directly instead.
    loss = euclidean_distance(vec_norm(layer_9_cav), vec_norm(flat_act))
    return loss
get_loss(original_img)
original_img.shape
sess = K.get_session()
#my_graph = tf.get_default_graph()
#my_graph.get_collection()
sess
model.input
this_img = original_img
loss = K.variable(0.)
layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{model.input: this_img})
flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
loss += euclidean_distance(vec_norm(layer_9_cav),vec_norm(flat_act))
#K.clear_session()
layer_9_acts = layer_dict[layer_name].output
layer_9_acts
x.shape
sess.run(bottlenecks_tensors[bottleneck_name],
{self.ends['input']: examples})
#sess.run(bottlenecks_tensors[bottleneck_name],{model.input: img})
#layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{endpoints_v3['input']: img})
#flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
#layer_9_acts = sess.run(bottlenecks_tensors[bottleneck_name],{endpoints_v3['input']: x})
#flat_act = np.reshape(np.asarray(layer_9_acts).squeeze(), -1)
#euclidean_distance(vec_norm(layer_9_cav),vec_norm(flat_act))
```
## Static functions
```
def preprocess_image(image_path):
    """Open the picture at *image_path* and return it as a preprocessed InceptionV3 batch tensor."""
    arr = img_to_array(load_img(image_path))
    batch = arr[np.newaxis, ...]
    return inception_v3.preprocess_input(batch)
def deprocess_image(x):
    """Convert a preprocessed InceptionV3 tensor back into a displayable uint8 RGB array."""
    if K.image_data_format() == 'channels_first':
        # (1, 3, H, W) -> (H, W, 3)
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        # (1, H, W, 3) -> (H, W, 3)
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Undo inception preprocessing in place: [-1, 1] -> [0, 255]
    x /= 2.
    x += 0.5
    x *= 255.
    return np.clip(x, 0, 255).astype('uint8')
def resize_img(img, size):
    """Return a copy of batched image *img* zoomed to spatial size *size* (bilinear, order=1)."""
    img = np.copy(img)
    if K.image_data_format() == 'channels_first':
        h_scale = float(size[0]) / img.shape[2]
        w_scale = float(size[1]) / img.shape[3]
        factors = (1, 1, h_scale, w_scale)
    else:
        h_scale = float(size[0]) / img.shape[1]
        w_scale = float(size[1]) / img.shape[2]
        factors = (1, h_scale, w_scale, 1)
    return scipy.ndimage.zoom(img, factors, order=1)
def euclidean_distance(a, b):
    """Euclidean (L2) distance between arrays *a* and *b*."""
    diff = a - b
    return np.sqrt(np.sum(diff * diff))
def vec_norm(vec):
    """Return *vec* scaled to unit Euclidean length."""
    length = np.linalg.norm(vec)
    return vec / length
def get_bottleneck_tensors():
    """Map bottleneck-layer names to their output tensors for the default TF graph.

    Scans every op in the current graph and keeps the first output of each
    op whose type contains 'ConcatV2' -- the concat ops mark InceptionV3's
    mixed/bottleneck layers (use 'MaxPool' for e.g. VGG16 instead).
    """
    graph = tf.get_default_graph()
    return {
        op.name.split('/')[0]: op.outputs[0]
        for op in graph.get_operations()
        if 'ConcatV2' in op.type
    }
endpoints_v3 = dict(
input=model.inputs[0].name,
input_tensor=model.inputs[0],
logit=model.outputs[0].name,
prediction=model.outputs[0].name,
prediction_tensor=model.outputs[0],
)
bottlenecks_tensors = get_bottleneck_tensors()
bottleneck_name = 'mixed9'
def save_img(img, fname):
    """Deprocess tensor *img* and write it to *fname* as an image file.

    Uses Pillow (``Image`` is imported at the top of the file) instead of
    ``scipy.misc.imsave``, which was deprecated in SciPy 1.0 and removed
    in SciPy 1.2 -- the original call fails on any modern SciPy.
    """
    pil_img = deprocess_image(np.copy(img))
    Image.fromarray(pil_img).save(fname)
def eu_distance(A, B):
    """Row-wise sum of absolute differences between Keras tensors *A* and *B* (keepdims).

    NOTE: despite the name, this is an L1 (Manhattan) distance, not Euclidean.
    """
    diff = K.abs(A - B)
    return K.sum(diff, axis=1, keepdims=True)
#Process:
# Load the original image.
# Define a number of processing scales (i.e. image shapes), from smallest to largest.
# Resize the original image to the smallest scale.
# For every scale, starting with the smallest (i.e. current one):
# Run gradient ascent
# Upscale image to the next scale
# Reinject the detail that was lost at upscaling time
# Stop when we are back to the original size.
#To obtain the detail lost during upscaling, we simply take the original image, shrink it down, upscale it,
# and compare the result to the (resized) original image.
def prep2(filename):
    """Load *filename* as a 299x299 RGB float32 array scaled to [0, 1].

    Returns
    -------
    np.ndarray or None
        A (299, 299, 3) image array, or None if the decoded image is not
        3-channel.
    """
    shape = (299, 299)
    # Bug fix: the original passed a raw open() handle to PIL and never
    # closed it (resource leak); decode and resize inside a with-block so
    # the handle is always released.
    with open(filename, 'rb') as fh:
        pil_img = PIL.Image.open(fh).convert('RGB').resize(shape, PIL.Image.BILINEAR)
        img = np.float32(np.array(pil_img)) / 255.0
    if len(img.shape) == 3 and img.shape[2] == 3:
        return img
    # Non-RGB result (shouldn't happen after .convert('RGB')) -> signal caller.
    return None
this_img = np.expand_dims(prep2('concepts/random500_0/ILSVRC2012_val_00001172.JPEG'),axis=0)
def get_acts_for_concept(concept,layer):
    """Compute (or load cached) activations at *layer* for every image of *concept*.

    Returns a tuple (acts_ran, orig):
      acts_ran -- (n_images, flat_size) array of flattened activations
      orig     -- (n_images, H, W, C) array of raw activations

    Results are cached per (concept, layer) in a pickle under final_acts/;
    only images missing from the cache are run through the network.
    Depends on module-level globals: working_dir, size_dict, size_dict_orig.
    """
    concept_dir = os.path.join(working_dir,'concepts/'+concept)
    # Every file whose name contains 'jp' (jpg/jpeg), sorted for stable order.
    image_list = files_from_dir_ext(concept_dir,'jp')
    image_list.sort()
    act_path = os.path.join(working_dir,'final_acts/' + concept + '-' + layer + '.pkl')
    n = size_dict[layer]        # flattened activation length for this layer
    nn = size_dict_orig[layer]  # raw activation shape (batch, H, W, C)
    try:
        # NOTE(review): bare except treats ANY failure (corrupt pickle,
        # permission error) as a cache miss; the open() handle is also
        # never closed explicitly -- relies on CPython refcounting.
        this_dict = pickle.load(open(act_path, 'rb'))
    except:
        this_dict = {}
    #print(nn)
    acts_ran = np.zeros((len(image_list),n))
    orig = np.zeros((len(image_list),nn[1],nn[2],nn[3]))
    for idx,image_path in enumerate(image_list):
        if image_path not in this_dict:
            # Cache miss: run the image through the network at this layer.
            img = prep2(os.path.join(concept_dir,image_path))
            this_img = np.expand_dims(img, axis=0)
            acts_orig = get_acts_for_layer_new(layer,this_img)
            acts_ran[idx] = acts_orig.reshape(-1)
            orig[idx] = acts_orig
            this_dict[image_path] = (acts_orig.reshape(-1),acts_orig)
        else:
            # Cache hit: a (flat, raw) tuple is stored per image path.
            acts_ran[idx],orig[idx] = this_dict[image_path]
            #print('acts already exist')
    # Re-write the cache even when nothing changed; handle not closed here either.
    pickle.dump(this_dict,open(act_path, 'wb'))
    return acts_ran,orig
def files_from_dir_ext(a_dir, ext):
    """List regular files in *a_dir* whose lowercased name contains substring *ext*.

    Note: *ext* is matched anywhere in the name, not only as a suffix.
    """
    names = os.listdir(a_dir)
    regular_files = [n for n in names if os.path.isfile(os.path.join(a_dir, n))]
    return [n for n in regular_files if ext in n.lower()]
layer_dict = dict([(layer.name, layer) for layer in model.layers])
sess = K.get_session()
acts_mixed0_f = K.function([model.input],[layer_dict['mixed0'].output])
acts_mixed1_f = K.function([model.input],[layer_dict['mixed1'].output])
acts_mixed2_f = K.function([model.input],[layer_dict['mixed2'].output])
acts_mixed3_f = K.function([model.input],[layer_dict['mixed3'].output])
acts_mixed4_f = K.function([model.input],[layer_dict['mixed4'].output])
acts_mixed5_f = K.function([model.input],[layer_dict['mixed5'].output])
acts_mixed6_f = K.function([model.input],[layer_dict['mixed6'].output])
acts_mixed7_f = K.function([model.input],[layer_dict['mixed7'].output])
acts_mixed8_f = K.function([model.input],[layer_dict['mixed8'].output])
acts_mixed9_f = K.function([model.input],[layer_dict['mixed9'].output])
acts_mixed10_f = K.function([model.input],[layer_dict['mixed10'].output])
def get_acts_for_layer_new(layer_name, input_img):
    """Run *input_img* through the precompiled activation function for *layer_name*.

    Returns the layer's activation array, or None for an unknown layer name.
    The acts_mixedN_f functions are the module-level K.function objects
    built from layer_dict above.
    """
    dispatch = {
        'mixed0': acts_mixed0_f,
        'mixed1': acts_mixed1_f,
        'mixed2': acts_mixed2_f,
        'mixed3': acts_mixed3_f,
        'mixed4': acts_mixed4_f,
        'mixed5': acts_mixed5_f,
        'mixed6': acts_mixed6_f,
        'mixed7': acts_mixed7_f,
        'mixed8': acts_mixed8_f,
        'mixed9': acts_mixed9_f,
        'mixed10': acts_mixed10_f,
    }
    fn = dispatch.get(layer_name)
    if fn is None:
        return None
    return fn([input_img])[0]
bn_names = ['mixed0','mixed1','mixed2','mixed3','mixed4','mixed5','mixed6','mixed7','mixed8','mixed9','mixed10']
size_dict = {}
for bn in bn_names:
acts_orig = get_acts_for_layer_new(bn,this_img)
size_dict[bn] = acts_orig.reshape(-1).shape[0]
size_dict_orig = {}
for bn in bn_names:
acts_orig = get_acts_for_layer_new(bn,this_img)
size_dict_orig[bn] = acts_orig.shape
```
| github_jupyter |
# T81-558: Applications of Deep Neural Networks
**Module 14: Other Neural Network Techniques**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 14 Video Material
* Part 14.1: What is AutoML [[Video]](https://www.youtube.com/watch?v=TFUysIR5AB0&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_01_automl.ipynb)
* Part 14.2: Using Denoising AutoEncoders in Keras [[Video]](https://www.youtube.com/watch?v=4bTSu6_fucc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_02_auto_encode.ipynb)
* Part 14.3: Anomaly Detection in Keras [[Video]](https://www.youtube.com/watch?v=1ySn6h2A68I&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_03_anomaly.ipynb)
* **Part 14.4: Training an Intrusion Detection System with KDD99** [[Video]](https://www.youtube.com/watch?v=VgyKQ5MTDFc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_04_ids_kdd99.ipynb)
* Part 14.5: The Deep Learning Technologies I am Excited About [[Video]]() [[Notebook]](t81_558_class_14_05_new_tech.ipynb)
# Part 14.4: Training an Intrusion Detection System with KDD99
The [KDD-99 dataset](http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html) is very famous in the security field and almost a "hello world" of intrusion detection systems in machine learning.
# Read in Raw KDD-99 Dataset
```
import pandas as pd
from tensorflow.keras.utils import get_file
try:
path = get_file('kddcup.data_10_percent.gz', origin='http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz')
except:
print('Error downloading')
raise
print(path)
# This file is a CSV, just no CSV extension or headers
# Download from: http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
df = pd.read_csv(path, header=None)
print("Read {} rows.".format(len(df)))
# df = df.sample(frac=0.1, replace=False) # Uncomment this line to sample only 10% of the dataset
df.dropna(inplace=True,axis=1) # For now, just drop NA's (rows with missing values)
# The CSV file has no column heads, so add them
df.columns = [
'duration',
'protocol_type',
'service',
'flag',
'src_bytes',
'dst_bytes',
'land',
'wrong_fragment',
'urgent',
'hot',
'num_failed_logins',
'logged_in',
'num_compromised',
'root_shell',
'su_attempted',
'num_root',
'num_file_creations',
'num_shells',
'num_access_files',
'num_outbound_cmds',
'is_host_login',
'is_guest_login',
'count',
'srv_count',
'serror_rate',
'srv_serror_rate',
'rerror_rate',
'srv_rerror_rate',
'same_srv_rate',
'diff_srv_rate',
'srv_diff_host_rate',
'dst_host_count',
'dst_host_srv_count',
'dst_host_same_srv_rate',
'dst_host_diff_srv_rate',
'dst_host_same_src_port_rate',
'dst_host_srv_diff_host_rate',
'dst_host_serror_rate',
'dst_host_srv_serror_rate',
'dst_host_rerror_rate',
'dst_host_srv_rerror_rate',
'outcome'
]
# display 5 rows
df[0:5]
```
# Analyzing a Dataset
The following script can be used to give a high-level overview of how a dataset appears.
```
ENCODING = 'utf-8'
def expand_categories(values):
    """Summarize a pandas Series as '[value:percent%,...]' ordered by frequency."""
    counts = values.value_counts()
    total = float(len(values))
    parts = ["{}:{}%".format(v, round(100 * (counts[v] / total), 2))
             for v in counts.index]
    return "[{}]".format(",".join(parts))
def analyze(df):
    """Print a high-level profile of DataFrame *df*.

    For each column: if it has more than 100 distinct values, print the
    distinct count and its percentage of all rows; otherwise print the full
    category breakdown from expand_categories().
    """
    print()
    cols = df.columns.values
    total = float(len(df))
    print("{} rows".format(int(total)))
    for col in cols:
        uniques = df[col].unique()
        unique_count = len(uniques)
        if unique_count > 100:
            print("** {}:{} ({}%)".format(col, unique_count, int(((unique_count) / total) * 100)))
        else:
            # Bug fix: removed a stray duplicate expand_categories(df[col])
            # call whose result was discarded (it cost an extra value_counts
            # pass per low-cardinality column for nothing).
            print("** {}:{}".format(col, expand_categories(df[col])))
# Analyze KDD-99
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
analyze(df)
```
# Encode the feature vector
Encode every row in the database. This is not instant!
```
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Replace column *name* of *df* with its z-scores, in place.

    *mean* and *sd* default to the column's own statistics; pass them
    explicitly to apply training-set statistics to another data set.
    """
    mean = df[name].mean() if mean is None else mean
    sd = df[name].std() if sd is None else sd
    df[name] = (df[name] - mean) / sd
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
    """Replace categorical column *name* with one-hot columns '<name>-<value>', in place."""
    dummies = pd.get_dummies(df[name])
    for value in dummies.columns:
        df["{}-{}".format(name, value)] = dummies[value]
    df.drop(name, axis=1, inplace=True)
# Now encode the feature vector
encode_numeric_zscore(df, 'duration')
encode_text_dummy(df, 'protocol_type')
encode_text_dummy(df, 'service')
encode_text_dummy(df, 'flag')
encode_numeric_zscore(df, 'src_bytes')
encode_numeric_zscore(df, 'dst_bytes')
encode_text_dummy(df, 'land')
encode_numeric_zscore(df, 'wrong_fragment')
encode_numeric_zscore(df, 'urgent')
encode_numeric_zscore(df, 'hot')
encode_numeric_zscore(df, 'num_failed_logins')
encode_text_dummy(df, 'logged_in')
encode_numeric_zscore(df, 'num_compromised')
encode_numeric_zscore(df, 'root_shell')
encode_numeric_zscore(df, 'su_attempted')
encode_numeric_zscore(df, 'num_root')
encode_numeric_zscore(df, 'num_file_creations')
encode_numeric_zscore(df, 'num_shells')
encode_numeric_zscore(df, 'num_access_files')
encode_numeric_zscore(df, 'num_outbound_cmds')
encode_text_dummy(df, 'is_host_login')
encode_text_dummy(df, 'is_guest_login')
encode_numeric_zscore(df, 'count')
encode_numeric_zscore(df, 'srv_count')
encode_numeric_zscore(df, 'serror_rate')
encode_numeric_zscore(df, 'srv_serror_rate')
encode_numeric_zscore(df, 'rerror_rate')
encode_numeric_zscore(df, 'srv_rerror_rate')
encode_numeric_zscore(df, 'same_srv_rate')
encode_numeric_zscore(df, 'diff_srv_rate')
encode_numeric_zscore(df, 'srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_count')
encode_numeric_zscore(df, 'dst_host_srv_count')
encode_numeric_zscore(df, 'dst_host_same_srv_rate')
encode_numeric_zscore(df, 'dst_host_diff_srv_rate')
encode_numeric_zscore(df, 'dst_host_same_src_port_rate')
encode_numeric_zscore(df, 'dst_host_srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_serror_rate')
encode_numeric_zscore(df, 'dst_host_srv_serror_rate')
encode_numeric_zscore(df, 'dst_host_rerror_rate')
encode_numeric_zscore(df, 'dst_host_srv_rerror_rate')
# display 5 rows
df.dropna(inplace=True,axis=1)
df[0:5]
# This is the numeric feature vector, as it goes to the neural net
# Convert to numpy - Classification
x_columns = df.columns.drop('outcome')
x = df[x_columns].values
dummies = pd.get_dummies(df['outcome']) # Classification
outcomes = dummies.columns
num_classes = len(outcomes)
y = dummies.values
df.groupby('outcome')['outcome'].count()
```
# Train the Neural Network
```
import pandas as pd
import io
import requests
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
# Create a test/train split. 25% test
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Create neural net: three ReLU hidden layers, then a softmax output over
# the one-hot 'outcome' classes.
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
# NOTE(review): this Dense(1) bottlenecks the network to a single (linear)
# unit right before the softmax layer, which severely limits what the
# classifier can express -- it looks like a copy-paste leftover from a
# regression example. Confirm whether it is intentional.
model.add(Dense(1, kernel_initializer='normal'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Stop once validation loss fails to improve by 1e-3 for 5 epochs.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),
callbacks=[monitor],verbose=2,epochs=1000)
# Measure accuracy
pred = model.predict(x_test)
pred = np.argmax(pred,axis=1)
y_eval = np.argmax(y_test,axis=1)
score = metrics.accuracy_score(y_eval, pred)
print("Validation score: {}".format(score))
```
| github_jupyter |
# Implementing AdaBoost
When the trees in the forest are trees of depth 1 (also known as decision stumps) and we
perform boosting instead of bagging, the resulting algorithm is called AdaBoost.
AdaBoost adjusts the dataset at each iteration by performing the following actions:
- Selecting a decision stump
- Increasing the weighting of cases that the decision stump labeled incorrectly while reducing the weighting of correctly labeled cases
This iterative weight adjustment causes each new classifier in the ensemble to prioritize
training the incorrectly labeled cases. As a result, the model adjusts by targeting
highly-weighted data points.
Eventually, the stumps are combined to form a final classifier.
## Implementing AdaBoost in OpenCV
Although OpenCV provides a very efficient implementation of AdaBoost, it is hidden
under the Haar cascade classifier. Haar cascade classifiers are a very popular tool for face
detection, which we can illustrate through the example of the Lena image:
```
import cv2
img_bgr = cv2.imread('../data/lena.jpg', cv2.IMREAD_COLOR)
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
```
After loading the image in both color and grayscale, we load a pretrained Haar cascade:
```
filename = '../data/haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(filename)
```
The classifier will then detect faces present in the image using the following function call:
```
faces = face_cascade.detectMultiScale(img_gray, 1.1, 5)
```
Note that the algorithm operates only on grayscale images. That's why we saved two
pictures of Lena, one to which we can apply the classifier (`img_gray`), and one on which we
can draw the resulting bounding box (`img_bgr`):
```
color = (255, 0, 0)
thickness = 2
for (x, y, w, h) in faces:
cv2.rectangle(img_bgr, (x, y), (x + w, y + h),
color, thickness)
```
Then we can plot the image using the following code:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(10, 6))
plt.imshow(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB));
```
Obviously, this picture contains only a single face. However, the preceding code will work
even on images where multiple faces could be detected. Try it out!
## Implementing AdaBoost in scikit-learn
In scikit-learn, AdaBoost is just another ensemble estimator. We can create an ensemble
from 50 decision stumps as follows:
```
from sklearn.ensemble import AdaBoostClassifier
ada = AdaBoostClassifier(n_estimators=50,
random_state=456)
```
We can load the breast cancer set once more and split it 75-25:
```
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X = cancer.data
y = cancer.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=456
)
```
Then fit and score AdaBoost using the familiar procedure:
```
ada.fit(X_train, y_train)
ada.score(X_test, y_test)
```
The result is remarkable, 96.5% accuracy!
We might want to compare this result to a random forest. However, to be fair, we should
make the trees in the forest all decision stumps. Then we will know the difference between
bagging and boosting:
```
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=50,
max_depth=1,
random_state=456)
forest.fit(X_train, y_train)
forest.score(X_test, y_test)
```
Of course, if we let the trees be as deep as needed, we might get a better score:
```
forest = RandomForestClassifier(n_estimators=50,
random_state=456)
forest.fit(X_train, y_train)
forest.score(X_test, y_test)
```
Wow, 99.3% accuracy score is incredible when the random forest classifier was allowed to be as deep as possible.
As a last step in this chapter, let's talk about how to combine different types of models into
an ensemble.
| github_jupyter |
# List Comprehensions Lab
Complete the following set of exercises to solidify your knowledge of list comprehensions.
```
import os
import numpy as np
import pandas as pd
```
### 1. Use a list comprehension to create and print a list of consecutive integers starting with 1 and ending with 50.
### 2. Use a list comprehension to create and print a list of even numbers starting with 2 and ending with 200.
### 3. Use a list comprehension to create and print a list containing all elements of the 10 x 4 Numpy array below.
```
# Fixed 10 x 4 sample array for the list-comprehension exercises below.
a = np.array([[0.84062117, 0.48006452, 0.7876326 , 0.77109654],
              [0.44409793, 0.09014516, 0.81835917, 0.87645456],
              [0.7066597 , 0.09610873, 0.41247947, 0.57433389],
              [0.29960807, 0.42315023, 0.34452557, 0.4751035 ],
              [0.17003563, 0.46843998, 0.92796258, 0.69814654],
              [0.41290051, 0.19561071, 0.16284783, 0.97016248],
              [0.71725408, 0.87702738, 0.31244595, 0.76615487],
              [0.20754036, 0.57871812, 0.07214068, 0.40356048],
              [0.12149553, 0.53222417, 0.9976855 , 0.12536346],
              [0.80930099, 0.50962849, 0.94555126, 0.33364763]])
```
### 4. Add a condition to the list comprehension above so that only values greater than or equal to 0.5 are printed.
### 5. Use a list comprehension to create and print a list containing all elements of the 5 x 2 x 3 Numpy array below.
```
# Fixed 5 x 2 x 3 sample array for the nested-comprehension exercises below.
b = np.array([[[0.55867166, 0.06210792, 0.08147297],
               [0.82579068, 0.91512478, 0.06833034]],
              [[0.05440634, 0.65857693, 0.30296619],
               [0.06769833, 0.96031863, 0.51293743]],
              [[0.09143215, 0.71893382, 0.45850679],
               [0.58256464, 0.59005654, 0.56266457]],
              [[0.71600294, 0.87392666, 0.11434044],
               [0.8694668 , 0.65669313, 0.10708681]],
              [[0.07529684, 0.46470767, 0.47984544],
               [0.65368638, 0.14901286, 0.23760688]]])
```
### 5. Add a condition to the list comprehension above so that the last value in each subarray is printed, but only if it is less than or equal to 0.5.
### 6. Use a list comprehension to select and print the names of all CSV files in the */data* directory.
### 7. Use a list comprehension and the Pandas `read_csv` and `concat` methods to read all CSV files in the */data* directory and combine them into a single data frame. Display the top 10 rows of the resulting data frame.
### 8. Use a list comprehension to select and print the column numbers for columns from the data set whose median is less than 0.48.
### 9. Use a list comprehension to add a new column (20) to the data frame whose values are the values in column 19 minus 0.1. Display the top 10 rows of the resulting data frame.
### 10. Use a list comprehension to extract and print all values from the data set that are between 0.7 and 0.75.
| github_jupyter |
# An Introduction to FEAST v2.0
FEAST v2.0 is a Python implementation of the Fugitive Emissions Abatement Simulation Toolkit (FEAST) published by the Environmental Assessment and Optimization group at Stanford University. FEAST v2.0 generates similar results to FEAST v1.0 and includes some updates to the code structure to make the model more accessible. Extended documentation of FEAST is available [here](https://github.com/EAOgroup/FEAST/blob/master/Archive/FEAST_v1.0/FEASTDocumentation.pdf).
This tutorial gives an example of how to generate a realization of the default scenario in FEAST v2.0, analyze results, and change settings to generate a custom realization. The tutorial is interactive, so feel free to experiment with the code cells and discover how your changes affect the results.
## Running the default scenario
The default scenario simulates four leak detection and repair (LDAR) programs over a 10 year period. Leak distribution data sets, LDAR parameters and gas field properties are all assumed in order to generate the results.
Producing a single realization of the default scenario requires two lines of code: one to load the function *field_simulation* to the active python kernel, and the second to call the function. The code cell below illustrates the commands. The optional argument *dir_out* specifies the directory in which to save results from the simulation. It will take about one minute to complete the simulation.
```
# Run one realization of the default 10-year scenario; results are written to ../Results.
from field_simulation import field_simulation
field_simulation(dir_out='../Results')
```
Each new realization is saved under the name "realization0," and the final integer is incremented by one with each new realization generated. The results can be viewed by using the built-in plotting functions. There are three plotting functions available. The first produces a time series of the leakage in a single realization file. It is shown in the code cell below.
```
# First the necessary functions are loaded to the active kernel
from GeneralClassesFunctions import plotting_functions
# Then the time series plotting function is called with a path to a
# specific results file (realizations are numbered realization0, realization1, ...)
plotting_functions.time_series('../Results/realization0.p')
```
The other two plotting functions accumulate the data from all realizations in a directory. In order to illustrate their utility, multiple realizations should be used. For illustration purposes, four more realizations are generated below. To suppress the time step updates from *field_simulation()*, the optional command *display_status=False* was added.
```
# Generate four more realizations; display_status=False suppresses per-timestep output.
for ind in range(0,4):
    print("Currently evaluating iteration number " + str(ind))
    field_simulation(display_status=False, dir_out='../Results')
```
Now there are five realizations of the default scenario in the "Results" folder. The *summary_plotter* function compiles results from all five to show the mean net present value, the estimated uncertainty in the sample mean from the mean of infinite realizations of the same scenario, and the types of costs and benefits that contributed to the net present value. *summary_plotter* was already loaded to the kernel as part of the *plotting_functions* module, so it is called directly in the cell below.
```
# summary_plotter requires a path to a results directory as an input;
# it aggregates every realization file found there.
plotting_functions.summary_plotter('../Results')
```
*hist_plotter* allows the leak repair performance of each LDAR program to be evaluated without regard to financial value. The function generates a histogram of the sizes of leaks found by each program. Like *summary_plotter*, *hist_plotter* combines results from all realizations in a directory. Unlike *summary_plotter*, *hist_plotter* generates the plots in separate windows from the notebook by default. An optional *inline=True* command was added to ensure that the plots pop up in this notebook.
```
# Histogram of detected leak sizes per LDAR program; inline=True keeps plots in the notebook.
plotting_functions.hist_plotter('../Results', inline=True)
```
FEAST has the capability to rapidly calculate the value of improving detection technology or changing operating procedures. Users can define any parameters they choose in existing LDAR program simulations, and more ambitious users can create their own LDAR program modules. The cell below illustrates how unique technology instances can be generated and simulated simultaneously for easy comparison. The call to *field_simulation* uses the optional argument *dir_out* to define a directory to place the results in.
```
# This cell compares the performance of three AIR LDAR programs
# with varying camera sensitivities.
# First, the modules needed to create the AIR objects must be
# imported to the kernel
from DetectionModules import ir
from GeneralClassesFunctions import simulation_classes
# The loop is used to generate 5 independent realizations of the
# desired simulation
for ind in range(0,5):
    print("Currently evaluating iteration number " + str(ind))
    # Before creating the LDAR objects, a few properties of the
    # simulation need to be set.
    # The default GasField settings are used
    gas_field = simulation_classes.GasField()
    # A time step of 10 days is specified (instead of the default
    # timestep of 1 day) to speed up the simulation
    time = simulation_classes.Time(delta_t = 10)
    # Each camera is defined below by its noise equivalent
    # temperature difference (netd).
    # In the default scenario, the netd is 0.015 K
    Default_AIR = ir.AIR(time=time, gas_field=gas_field)
    Better_AIR = ir.AIR(time=time, gas_field=gas_field, netd=0.005)
    Best_AIR = ir.AIR(time=time, gas_field=gas_field, netd=0.001)
    # All of the technologies are combined into a dict to be passed
    # to field_simulation()
    tech_dict = {'Default_AIR': Default_AIR, 'Better_AIR': Better_AIR,
                 'Best_AIR': Best_AIR}
    # field_simulation is called with the predefined objects,
    # and an output directory is specified
    field_simulation(time=time, gas_field=gas_field, tech_dict=tech_dict,
                     dir_out='../Results/AIR_Sample', display_status=False)
```
The function *hist_plotter* shows how the improved sensitivity affects the size of leaks detected:
```
# Leak-size histograms for the three camera sensitivities simulated above.
plotting_functions.hist_plotter('../Results/AIR_Sample',inline=True)
```
*summary_plotter* is used to illustrate the financial value of improving camera sensitivity.
```
# Net-present-value comparison of the three camera sensitivities.
plotting_functions.summary_plotter('../Results/AIR_Sample')
```
The above AIR example gives a glimpse into the possible analyses using FEAST v2.0. Any of the default parameters in FEAST v2.0 can be modified from the command line, stored in an object and used in a gas field simulation. The model is open source and freely available so that code can be customized and new technology modules can be added by private users.
The default parameters in FEAST v2.0 are intended to provide a realistic starting point but should be customized to accurately portray any particular gas field or LDAR program. In this tutorial, a sample size of five realizations was used to demonstrate the plotting functions, but a larger sample size should be used in any rigorous analysis in order to understand the stochastic error in the model.
Please contact chandler.kemp@gmail.com with any questions or suggestions regarding the code contained in FEAST.
| github_jupyter |
```
import os
# Pin this job to GPU 1 and make CUDA device numbering match nvidia-smi output.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
import torch
from allennlp.nn import util
import sys
# Make the gector package and the project root importable from this notebook.
sys.path.insert(0, "../../gector")
sys.path.insert(0, "../../")
from gector.gec_model import GecBERTModel
# Baseline checkpoint: RoBERTa-base GECToR trained with cold-step schedule and fixed vocab.
vocab_path = "../../data/output_vocabulary"
model_paths = "../../models/Exp_005_roberta_base_coldstep_2_fixed_vocab/best.th"
model_name = 'roberta'
model_1 = GecBERTModel(vocab_path=vocab_path,
                       model_paths=[model_paths],
                       max_len=50,
                       min_len=3,
                       iterations=5,
                       min_error_probability=0.0,
                       min_probability=0.0,
                       lowercase_tokens=0,
                       model_name= model_name,
                       special_tokens_fix=1,
                       log=False,
                       confidence=0,
                       is_ensemble=0,
                       weigths=None,  # (sic) the parameter name is misspelled in the GecBERTModel API
                       use_cpu=False)
# Comparison checkpoint: same architecture fine-tuned on gold data (stage 3).
vocab_path = "../../data/output_vocabulary"
model_paths = "../../models/Exp_049_2_roberta_base_stage_3_gold/model_state_epoch_3.th"
model_name = 'roberta'
model_2 = GecBERTModel(vocab_path=vocab_path,
                       model_paths=[model_paths],
                       max_len=50,
                       min_len=3,
                       iterations=5,
                       min_error_probability=0.0,
                       min_probability=0.0,
                       lowercase_tokens=0,
                       model_name= model_name,
                       special_tokens_fix=1,
                       log=False,
                       confidence=0,
                       is_ensemble=0,
                       weigths=None,  # (sic) the parameter name is misspelled in the GecBERTModel API
                       use_cpu=False)
def get_embedings_for_batch(words_batch, model):
    """Embed a batch of tokenized sentences and return one mean-pooled
    numpy vector per sentence (masked positions are excluded from the mean)."""
    preprocessed = model.preprocess(words_batch)
    device = 0 if torch.cuda.is_available() else -1
    tensor_dict = util.move_to_device(preprocessed[0].as_tensor_dict(), device)
    embeddings = model.models[0].text_field_embedder(tensor_dict['tokens'])
    mask = tensor_dict['tokens']['mask']
    return [
        embeddings[i][mask[i] == 1].mean(dim=0).cpu().detach().numpy()
        for i in range(len(words_batch))
    ]
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import numpy as np
import os
import glob
def read_lines(fn):
    """Read a UTF-8 text file and return its lines, dropping a single
    trailing empty line caused by a final newline. Missing file -> []."""
    if not os.path.exists(fn):
        return []
    with open(fn, 'r', encoding='utf-8') as f:
        lines = f.read().split("\n")
    return lines[:-1] if lines[-1] == '' else lines
from tqdm.auto import tqdm
def get_embedings_for_text(src_text, model, batch_size=32):
    """Embed every sentence of src_text in mini-batches of batch_size.

    Each sentence is whitespace-tokenized before embedding.
    Returns a list with one pooled embedding per sentence, in input order.
    """
    embedings = []
    batch = []
    for sent in tqdm(src_text):
        batch.append(sent.split())
        if len(batch) == batch_size:
            embedings.extend(get_embedings_for_batch(batch, model))
            batch = []
    # BUG FIX: only flush the remainder when it is non-empty. The original
    # unconditionally embedded the leftover batch, which passed an empty
    # list to the model whenever len(src_text) was a multiple of batch_size.
    if batch:
        embedings.extend(get_embedings_for_batch(batch, model))
    return embedings
```
### Fce
```
# Embed the FCE training sources with both checkpoints and compare them
# sentence-by-sentence via cosine similarity of the pooled embeddings.
fce_texts_src = read_lines("../../data_parallel/fce/fce_train_src")
fce_embed_1 = get_embedings_for_text(fce_texts_src, model_1, batch_size=32)
fce_embed_2 = get_embedings_for_text(fce_texts_src, model_2, batch_size=32)
# Diagonal = similarity of the two models' embeddings for the same sentence.
fce_cos = np.diag(cosine_similarity(fce_embed_1,fce_embed_2))
import pickle
# Persist embeddings and similarities for the "large" model pair.
with open("fce_embed_1_large.pickle", "wb") as f:
    pickle.dump(fce_embed_1, f)
with open("fce_embed_2_large.pickle", "wb") as f:
    pickle.dump(fce_embed_2, f)
with open("fce_cos_large.pickle", "wb") as f:
    pickle.dump(fce_cos, f)
```
for base
```
# Reload the base-model FCE embeddings from disk and recompute similarities.
with open("fce_embed_1.pickle", "rb") as f:
    fce_embed_1 = pickle.load(f)
with open("fce_embed_2.pickle", "rb") as f:
    fce_embed_2 = pickle.load(f)
fce_cos = np.diag(cosine_similarity(fce_embed_1,fce_embed_2))
with open("fce_cos.pickle", "wb") as f:
    pickle.dump(fce_cos, f)
# Per-sentence similarity summary statistics (base models).
fce_cos.min()
fce_cos.max()
fce_cos.mean()
```
for large
```
# Per-sentence similarity summary statistics (large models).
fce_cos.min()
fce_cos.max()
fce_cos.mean()
```
### Nucle
```
# Same embedding/similarity analysis on the NUCLE corpus.
nucle_texts_src = read_lines("../../data_parallel/nucle/nucle_src")
nucle_embed_1 = get_embedings_for_text(nucle_texts_src, model_1, batch_size=32)
nucle_embed_2 = get_embedings_for_text(nucle_texts_src, model_2, batch_size=32)
with open("nucle_embed_2_large.pickle", "wb") as f:
    pickle.dump(nucle_embed_2, f)
with open("nucle_embed_1_large.pickle", "wb") as f:
    pickle.dump(nucle_embed_1, f)
nucle_cos = np.diag(cosine_similarity(nucle_embed_1,nucle_embed_2))
with open("nucle_cos_large.pickle", "wb") as f:
    pickle.dump(nucle_cos, f)
# Per-sentence similarity summary statistics.
nucle_cos.min()
nucle_cos.max()
nucle_cos.mean()
```
### Lang8
```
# Same embedding/similarity analysis on the Lang-8 corpus (base and large pairs).
lang8_texts_src = read_lines("../../data_parallel/lang8/lang8_src")
lang8_embed_1 = get_embedings_for_text(lang8_texts_src, model_1, batch_size=32)
lang8_embed_2 = get_embedings_for_text(lang8_texts_src, model_2, batch_size=32)
with open("lang8_embed_2.pickle", "wb") as f:
    pickle.dump(lang8_embed_2, f)
with open("lang8_embed_1.pickle", "wb") as f:
    pickle.dump(lang8_embed_1, f)
#lang8_cos = np.diag(cosine_similarity(lang8_embed_1,lang8_embed_2))
# NOTE(review): lang8_cos is pickled below but its computation above is
# commented out -- this only works if lang8_cos survives from an earlier
# session; confirm before re-running the notebook top-to-bottom.
with open("lang8_cos.pickle", "wb") as f:
    pickle.dump(lang8_cos, f)
#lang8_cos.min()
#lang8_cos.max()
#lang8_cos.mean()
lang8_embed_1_large = get_embedings_for_text(lang8_texts_src, model_1, batch_size=32)
lang8_embed_2_large = get_embedings_for_text(lang8_texts_src, model_2, batch_size=32)
with open("lang8_embed_2_large.pickle", "wb") as f:
    pickle.dump(lang8_embed_2_large, f)
with open("lang8_embed_1_large.pickle", "wb") as f:
    pickle.dump(lang8_embed_1_large, f)
lang8_cos_large = np.diag(cosine_similarity(lang8_embed_1_large,lang8_embed_2_large))
with open("lang8_cos_large.pickle", "wb") as f:
    pickle.dump(lang8_cos_large, f)
# Free the embedding lists before loading the memory-heavy large models below.
del lang8_embed_2
del lang8_embed_1
del fce_embed_2
del fce_embed_1
del nucle_embed_2
del nucle_embed_1
del fce_cos
del nucle_cos
#del lang8_cos
```
### Try large models
```
# Large-model pair: RoBERTa-large GECToR baseline checkpoint.
vocab_path = "../../data/output_vocabulary"
model_paths = "../../models/Exp_008_roberta_large/best.th"
model_name = 'roberta-large'
model_1 = GecBERTModel(vocab_path=vocab_path,
                       model_paths=[model_paths],
                       max_len=50,
                       min_len=3,
                       iterations=5,
                       min_error_probability=0.0,
                       min_probability=0.0,
                       lowercase_tokens=0,
                       model_name= model_name,
                       special_tokens_fix=1,
                       log=False,
                       confidence=0,
                       is_ensemble=0,
                       weigths=None,  # (sic) the parameter name is misspelled in the GecBERTModel API
                       use_cpu=False)
# Large-model pair: RoBERTa-large fine-tuned at stage 3 (epoch 1 checkpoint).
vocab_path = "../../data/output_vocabulary"
model_paths = "../../models/Exp_037_roberta_large_st3/model_state_epoch_1.th"
model_name = 'roberta-large'
model_2 = GecBERTModel(vocab_path=vocab_path,
                       model_paths=[model_paths],
                       max_len=50,
                       min_len=3,
                       iterations=5,
                       min_error_probability=0.0,
                       min_probability=0.0,
                       lowercase_tokens=0,
                       model_name= model_name,
                       special_tokens_fix=1,
                       log=False,
                       confidence=0,
                       is_ensemble=0,
                       weigths=None,  # (sic) the parameter name is misspelled in the GecBERTModel API
                       use_cpu=False)
# Scratch cell: experiments with a Ukrainian RoBERTa checkpoint.
# from transformers import pipeline, RobertaForMaskedLM, RobertaTokenizer
# model = RobertaForMaskedLM.from_pretrained("youscan/ukr-roberta-base")
# tokenizer = RobertaTokenizer.from_pretrained("youscan/ukr-roberta-base")
#os.listdir('../../models/Exp_005_roberta_base_coldstep_2_fixed_vocab/')
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModel
# from transformers import pipeline, RobertaForMaskedLM, RobertaTokenizer
# model = RobertaForMaskedLM.from_pretrained("ukr-roberta-base")
# tokenizer = RobertaTokenizer.from_pretrained("ukr-roberta-base")
# Only the tokenizer is actually loaded here.
tokenizer = AutoTokenizer.from_pretrained("youscan/ukr-roberta-base")
# input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
# outputs = model(input_ids)
# last_hidden_states = outputs[0]
#sorted(os.listdir("../models/Exp_046_roberta_base_stage_2_new/"))
```
| github_jupyter |
```
import types
def all_saptak():
    """Return the 36 swar names: mandra (lowercase), madhya (mixed case)
    and tar (uppercase) saptak, 12 notes each, as one tuple."""
    madhya = ["Sa", "Re_", "Re", "Ga_", "Ga", "Ma", "Ma__", "Pa", "Dha_", "Dha", "Ni_", "Ni"]
    lower_octave = [name.lower() for name in madhya]
    upper_octave = [name.upper() for name in madhya]
    return tuple(lower_octave + madhya + upper_octave)
def window(item, items, width=7):
    """Return the slice of items within `width` positions of item,
    clipped to the bounds of the sequence."""
    center = items.index(item)
    lo = max(center - width, 0)
    hi = min(center + width + 1, len(items))
    return items[lo:hi]
# Sanity check: the neighborhood of "Sa" within the full 36-swar scale.
window("Sa", all_saptak())
import random
import numpy as np
def take(seq, n):
    """Consume and return the next n items from the iterator seq."""
    return [next(seq) for _ in range(n)]
def get_next(probs):
    """Sample the next swar from all_saptak() using the weights in probs
    (inverse-CDF sampling; the last swar absorbs any probability shortfall)."""
    remaining = random.random()
    chosen = len(probs) - 1
    for pos, weight in enumerate(probs):
        remaining -= weight
        if remaining < 0:
            chosen = pos
            break
    return all_saptak()[chosen]
def aalap(initial, beats=4, transition_up=None, transition_down=None):
    """Endlessly improvise an aalap: flip a coin every `beats` notes to
    choose ascent or descent, then sample each note from the corresponding
    transition table (swar -> {swar: probability})."""
    current = initial
    scale = all_saptak()
    yield initial
    while True:
        # Coin flip: ascend (aroha) or descend for the next group of beats.
        aroha = random.choice([True, False])
        for i in range(beats):
            if aroha:
                current = get_next([transition_up[current][v] for v in scale])
            else:
                current = get_next([transition_down[current][v] for v in scale])
            yield current
# Load the precomputed probability matrix; each column is the transition
# distribution for one swar, so transpose to get one row per swar.
probs = [row.strip().split(",") for row in open("/home/vikrant/Downloads/prob_matrix.txt")]
probs = [[float(f) for f in row ]for row in probs]
transpose = list(zip(*probs))
def add(a, b):
    """Return a + b (numeric sum or sequence concatenation)."""
    result = a + b
    return result
# Quick demos: positional vs. unpacked arguments, and zip over parallel lists.
add(1, 2)
args = [1,2]
add(*args)
add
list(zip([1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]))
transpose[5]
def read_prob_from_file(filename):
    """
    file is csv with every column as transition probability for one swar.
    for 36 swar there are 36 columns and every column contains 36 rows.
    Returns the transposed matrix (one tuple of floats per swar).
    """
    with open(filename) as fh:
        rows = [[float(cell) for cell in line.strip().split(",")] for line in fh]
    return list(zip(*rows))
def convert_to_transition(matrix):
    """
    matrix rows are transition probabilities for a given swar (36 x 36).
    Returns nested dicts: {from_swar: {to_swar: probability}}.
    """
    names = all_saptak()
    table = {}
    for i, row in enumerate(matrix):
        table[names[i]] = dict(zip(names, row))
    return table
# Build the swar->swar transition table and improvise a few phrases from it.
transition_up = convert_to_transition(transpose)
transition_up
a = aalap("Sa", 4, transition_up, transition_up)
take(a, 16)
take(a, 16)
transition_up['Dha']
# The file-parsing path should agree with the precomputed table.
convert_to_transition(read_prob_from_file("/home/vikrant/Downloads/prob_matrix.txt"))["Dha"]
```
### Functions to work out probabilities from notations ###
```
def old_all_octaves(file):
    """Read a legacy-notation file and return its comma/newline-separated tokens."""
    with open(file) as fh:
        contents = fh.read().strip()
    return contents.replace("\n", ",").split(",")
def old_octaves():
    """Return the 36 swar names of the legacy notation, one octave per list:
    lower, middle, upper (12 names each)."""
    lower = ['sa','lre','re','lga','ga','ma','mau','pa','lda','da','lni','ni']
    middle = ['sA','lrE','rE','lgA','gA','mA','mAu','pA','ldA','dA','lnI','nI']
    upper = ['SA','lRE','RE','lGA','GA','MA','MAu','PA','lDA','DA','lNI','NI']
    return lower + middle + upper
def old_to_new(file):
    """
    sa, lre, re ... -> sa, re_, re, ga_, ga...

    Translate a notation file from the legacy names (old_octaves) to the
    new names (all_saptak) by positional correspondence.
    """
    with open(file) as f:
        s = f.read()
    newoctave = all_saptak()
    # NOTE(review): sequential str.replace relies on earlier replacements not
    # creating substrings matched by later patterns -- this appears to hold
    # for these token sets, but verify if either naming scheme changes.
    for i, item in enumerate(old_octaves()):
        s = s.replace(item, newoctave[i])
    return s
def transiotion_hist_up(data):
    """
    data is a sargam string read from file.
    Count forward transitions between consecutive tokens; line breaks become
    '' sentinels so transitions never cross lines.
    """
    saptak = all_saptak() + ('',)
    tokens = data.strip().replace("\n", ",,").split(",")
    hist = {a: {b: 0 for b in saptak} for a in saptak}
    for current, following in zip(tokens, tokens[1:]):
        hist[current][following] += 1
    return hist
def remove_empty(data):
    """Strip the '' sentinel key from every inner dict, then from the outer
    dict, in place; returns the same dict for chaining."""
    for inner in data.values():
        inner.pop('')
    data.pop('')
    return data
def compute_prob(hist):
    """Normalize a transition histogram into probabilities.

    Drops the '' sentinel entries, then divides every count by its row total.
    Rows with no observed transitions map every target to 0.
    """
    def divide(a, b):
        # Guard against rows whose counts sum to zero.
        return a / b if b > 0 else 0
    hist = remove_empty(hist)
    probs = {}
    for swar, counts in hist.items():
        # Hoisted: the original recomputed sum(counts.values()) for every
        # entry of the comprehension (O(n^2) per row).
        total = sum(counts.values())
        probs[swar] = {target: divide(c, total) for target, c in counts.items()}
    return probs
def transiotion_hist_down(data):
    """
    data is a sargam string read from file.
    Count backward transitions (each swar -> its predecessor); line breaks
    become '' sentinels so transitions never cross lines.
    """
    saptak = all_saptak() + ('',)
    data = data.strip().replace("\n", ",,").split(",")
    hist = {i: {j: 0 for j in saptak} for i in saptak}
    # BUG FIX: with enumerate(data[1:]), s is data[i+1], so its predecessor
    # is data[i]. The original used data[i-1], which is off by one and wraps
    # to data[-1] (the last token) on the first iteration.
    for i, s in enumerate(data[1:]):
        hist[s][data[i]] += 1
    return hist
def transiotion_prob_up(file):
    """Forward transition probabilities. NOTE: despite the parameter name,
    callers pass the sargam *string* itself, not a filename."""
    return compute_prob(transiotion_hist_up(file))
def transiotion_prob_down(file):
    """Backward transition probabilities. NOTE: despite the parameter name,
    callers pass the sargam *string* itself, not a filename."""
    return compute_prob(transiotion_hist_down(file))
def test_probs():
    """Cross-check: probabilities derived from the Bhoop notation file must
    match the precomputed matrix to within 1e-3 for every transition."""
    p1 = transiotion_prob_up(old_to_new("/home/vikrant/Downloads/Bhoop1.txt"))
    p2 = convert_to_transition(read_prob_from_file("/home/vikrant/Downloads/prob_matrix.txt"))
    for k in p1:
        v1 = p1[k]
        v2 = p2[k]
        for j in v1:
            assert abs(v1[j] - v2[j])<= 0.001
# Ad-hoc exploration of the helpers defined above.
old_all_octaves("/home/vikrant/Downloads/AllOctaves.txt" )
help([].extend)
old_to_new("/home/vikrant/Downloads/Bhoop1.txt").strip().replace("\n",",").replace(",,",",").split(",")
p1 = transiotion_prob_down(old_to_new("/home/vikrant/Downloads/Bhoop1.txt"))
p2 = convert_to_transition(read_prob_from_file("/home/vikrant/Downloads/prob_matrix.txt"))
len(p1) == len(p2)
p1['Dha']
p2['Dha']
test_probs()
%%file bhoop.csv
SA,SA,Dha,Pa,Ga,Re,Sa,Re,Ga,Ga,Pa,Ga,Dha,Pa,Ga,Ga
Ga,Pa,Dha,SA,RE,SA,Dha,Pa,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Ga,Pa,Dha,Pa,SA,SA,SA,Dha,Dha,SA,RE,GA,RE,SA,Dha
GA,GA,RE,SA,RE,RE,SA,Dha,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Re,Ga,Ga,Sa,Re,Sa,Sa,Sa,Sa,Sa,dha,Sa,Re,Ga,Ga
Pa,Ga,Pa,Pa,Dha,Dha,Pa,Pa,Ga,Pa,Dha,SA,Dha,Pa,Ga,Sa
Pa,Ga,Ga,Re,Ga,Pa,SA,Dha,SA,SA,SA,SA,Dha,Re,SA,SA
Dha,Dha,Dha,Dha,SA,RE,GA,RE,SA,SA,Dha,Pa,Dha,SA,Dha,Pa
Ga,Re,Ga,Ga,Ga,Re,Pa,Ga,Dha,Pa,Dha,SA,Dha,Pa,Ga,Sa
Sa,Re,Ga,Pa,Ga,Re,Sa,Sa,Re,Pa,Pa,Pa,Re,Ga,Ga,Re
Ga,Ga,Pa,Ga,Re,Ga,Pa,Dha,SA,SA,SA,SA,Dha,Dha,Pa,Ga,Pa
Dha,RE,SA,SA,Dha,Dha,Pa,Ga,Re,Ga,Pa,Dha,SA,Pa,Dha,SA,Dha,SA,Dha,Pa,Ga,Re,Sa
Pa,Ga,Ga,Ga,Pa,Pa,SA,Dha,SA,SA,SA,SA,SA,RE,GA,RE,SA,SA
SA,Dha,Dha,SA,SA,SA,RE,RE,Dha,SA,Pa,Dha,SA,SA,Dha,Dha,Pa
Ga,Ga,Pa,Ga,Re,Ga,Pa,Dha,SA,SA,RE,GA,RE,SA,Dha,Pa,Dha,SA,Dha,Pa,Ga,Re,Ga,Pa,Ga,Re,Sa
Sa,dha,dha,Sa
dha,Sa,Re
Sa,Re
dha,Sa
Sa,Re,Ga,Re,Ga,Sa,Re,dha,Sa
Sa,Re,Ga,Re,Ga,Pa,Ga,Re,Pa,Ga,dha,dha,Sa
Ga,Pa,Dha,Ga,Ga,Ga,Pa
Ga,Pa,Dha,Pa,Ga,Re,Sa
Ga,Pa,Dha,SA,SA,Dha,Pa,Ga,Re,Ga,Re,Pa,Ga,Re,Sa
Ga,Re,Sa,Re,Ga,Pa,Dha,SA,Pa,Dha,SA,RE,GA,RE,SA
Dha,SA,RE,SA,Dha,SA,Dha,Pa,Ga,Pa,Dha,Pa,Ga,Pa,Ga,Re,Sa,dha,dha,Sa
# Train transition tables on the Bhoop corpus written above and improvise from them.
with open("bhoop.csv") as f:
    data = f.read()
a = aalap("Sa", 8, transiotion_prob_up(data), transiotion_prob_down(data))
transiotion_prob_down(data)['Ga']
take(a, 16)
```
### Aalap with nyaas ###
```
def aalap_nyaas(initial, beats=8, nyaas = None, transition_up=None, transition_down=None):
    """Aalap variant that only reconsiders direction at nyaas (resting) swars.

    NOTE(review): indentation was reconstructed -- the direction flip is
    assumed to happen only when the current swar is a nyaas. Also, if the
    initial swar is not in nyaas, `aroha` is used before assignment; all
    callers in this notebook pass an initial contained in nyaas.
    """
    current = initial
    scale = all_saptak()
    yield initial
    while True:
        if current in nyaas:
            aroha = random.choice([True, False])
        for i in range(beats):
            if aroha:
                current = get_next([transition_up[current][v] for v in scale])
            else:
                current = get_next([transition_down[current][v] for v in scale])
            yield current
# Improvise 32 notes with nyaas on Sa/Re/Ga, printing beat numbers.
a = aalap_nyaas("Sa", beats=8, nyaas=['sa','Sa','SA','re','Re','RE','ga','Ga','GA'],
                transition_up=transiotion_prob_up(data), transition_down=transiotion_prob_down(data))
for i,item in enumerate(take(a, 32)):
    print(i+1, item)
from collections import deque
def search(seq, subseq, end=100):
    """Yield offsets (scanning at most `end` notes) where any phrase from
    subseq occurs in the infinite swar stream seq.

    seq yields swar names; subseq is a list of phrases (lists of swar names).
    Matching is case-insensitive on the joined strings. After a hit the
    window is refilled so overlapping matches are not reported.
    """
    def compare(source, dest):
        # CLEANUP: the original wrapped the `any(...)` in a `for item in dest:`
        # loop that always returned on its first iteration (dead loop).
        joined = "".join(source).lower()
        return any("".join(phrase).lower() in joined for phrase in dest)
    n = len(max(subseq, key=len))
    win = deque(take(seq, n), n)
    for i in range(n, end):
        if compare(win, subseq):
            yield i - n
            win = deque(take(seq, n), n)
        else:
            win.append(next(seq))
def count(seq):
    """Return the number of items produced by the iterable seq."""
    total = 0
    for _ in seq:
        total += 1
    return total
# Average number of pakad (signature phrase) occurrences within 64 notes,
# estimated over 1000 improvisations.
a = aalap_nyaas("Sa", beats=8, nyaas=['sa','Sa','SA','re','Re','RE','ga','Ga','GA'],
                transition_up=transiotion_prob_up(data), transition_down=transiotion_prob_down(data))
pakad = [["dha","dha","sa"],["ga","re","pa","ga"],["dha","pa","ga","re"]]
sum([count(search(a,pakad, 64)) for i in range(1000)])/1000
1024/16
def subset_prob(probs, start, end):
    """Renormalize probs over the slice [start, end) and zero everything
    outside it, keeping the list length unchanged."""
    chosen = probs[start:end]
    total = sum(chosen)
    scaled = [p / total for p in chosen]
    return [0] * start + scaled + [0] * (len(probs) - end)
# Example: renormalize the first three entries, zero the rest.
subset_prob([0.1,0.2,0.3,0.1,0.2,0.2],0,3)
def aalap_bounded(beats=8, top_bound = 5, transition_up=None, transition_down=None):
    """Improvise from 'Sa' while clipping the reachable range to
    index + top_bound; direction flips downward once the bound is hit.

    NOTE(review): the descending branch reuses the *upper* bound
    (index + top_bound) for its subset -- verify this is intentional and
    not meant to be a lower bound.
    """
    initial = 'Sa'
    scale = all_saptak()
    yield initial
    current = initial
    index = scale.index(initial)
    if top_bound > 0:
        aroha = True
    else:
        aroha = False
    for i in range(beats):
        if aroha:
            current = get_next(subset_prob([transition_up[current][v] for v in scale], 0, index + top_bound))
            # Debug trace: announce when the melody reaches the ceiling.
            if scale.index(current) == index + top_bound:
                print(current, scale.index(current), top_bound+index)
                aroha = False
        else:
            current = get_next(subset_prob([transition_down[current][v] for v in scale], 0, index + top_bound))
        yield current
# Bounded-aalap demo, then a plain improvisation using the ascending table twice.
a = aalap_bounded(beats=64, top_bound=12,
                  transition_up=transiotion_prob_up(data), transition_down=transiotion_prob_down(data))
for i,j in enumerate(a):
    print(i, j)
a = aalap("Sa",8,transiotion_prob_up(data), transiotion_prob_up(data))
take(a, 16)
take(a, 32)
def transition_probability(data):
    """Build first-order transition probabilities from a comma/newline
    separated sargam string.

    Line breaks act as hard stops: transitions across them are not counted.
    Returns {from_swar: {to_swar: probability}} with only observed pairs.
    """
    tokens = data.strip().replace("\n", ",,").split(",")
    hist = {}
    for current, following in zip(tokens, tokens[1:]):
        if current and following:
            row = hist.setdefault(current, {})
            row[following] = row.get(following, 0) + 1
    prob = {}
    for swar, counts in hist.items():
        total = sum(counts.values())
        prob[swar] = {target: c / total for target, c in counts.items()}
    return prob
# Transition table learned directly from the corpus (sparse dict form).
p = transition_probability(data)
p.keys()
p
def sample(items, probs):
    """Draw one element of items according to the weights in probs
    (inverse-CDF sampling; the last item absorbs any weight shortfall)."""
    remaining = random.random()
    chosen = len(probs) - 1
    for pos, weight in enumerate(probs):
        remaining -= weight
        if remaining < 0:
            chosen = pos
            break
    return items[chosen]
def aalap_(initial, probs):
    """Endlessly generate swars by walking the sparse transition table probs
    ({swar: {swar: probability}}), starting from initial."""
    current = initial
    while True:
        yield current
        row = probs[current]
        choices = [target for target in row]
        weights = [row[target] for target in choices]
        current = sample(choices, weights)
# Sample a successor of 'Sa', then measure pakad frequency for this chain.
sample(list(p['Sa'].keys()), [p['Sa'][k] for k in p['Sa'].keys()])
a = aalap_("Sa", p)
sum([count(search(a,pakad,32)) for i in range(1000)])/1000
%%file bhoop1.csv
SA,SA,Dha,Pa,Ga,Re,Sa,Re,Ga,Ga,Pa,Ga,Dha,Pa,Ga,Ga
Ga,Pa,Dha,SA,RE,SA,Dha,Pa,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Ga,Pa,Dha,Pa,SA,SA,SA,Dha,Dha,SA,RE,GA,RE,SA,Dha
GA,GA,RE,SA,RE,RE,SA,Dha,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Re,Ga,Ga,Sa,Re,Sa,Sa,Sa,Sa,Sa,dha,Sa,Re,Ga,Ga
Pa,Ga,Pa,Pa,Dha,Dha,Pa,Pa,Ga,Pa,Dha,SA,Dha,Pa,Ga,Sa
Pa,Ga,Ga,Re,Ga,Pa,SA,Dha,SA,SA,SA,SA,Dha,Re,SA,SA
Dha,Dha,Dha,Dha,SA,RE,GA,RE,SA,SA,Dha,Pa,Dha,SA,Dha,Pa
Ga,Re,Ga,Ga,Ga,Re,Pa,Ga,Dha,Pa,Dha,SA,Dha,Pa,Ga,Sa
Sa,Re,Ga,Pa,Ga,Re,Sa,Sa,Re,Pa,Pa,Pa,Re,Ga,Ga,Re
Ga,GaPa,Ga,Re,Ga,Pa,Dha,SA,SA,SA,SA,Dha,Dha,Pa,Ga,Pa
DhaRE,SA,SA,Dha,Dha,Pa,Ga,Re,GaPa,DhaSA,PaDha,SA,DhaSA,DhaPa,GaRe,Sa
Pa,Ga,Ga,Ga,Pa,Pa,SA,Dha,SA,SA,SA,SA,SARE,GARE,SA,SA
SA,Dha,Dha,SA,SA,SA,RE,RE,DhaSA,PaDha,SA,SA,Dha,Dha,Pa
Ga,GaPa,Ga,Re,Ga,Pa,Dha,SA,SARE,GARE,SA,DhaPa,DhaSA,DhaPa,GaRe,GaPa,GaRe,Sa
Sa,dha,dha,Sa
dha,Sa,Re
Sa,Re
dha,Sa
Sa,Re,Ga,Re,Ga,Sa,Re,dha,Sa
Sa,Re,Ga,Re,Ga,Pa,Ga,Re,Pa,Ga,dha,dha,Sa
Ga,Pa,Dha,Ga,Ga,Ga,Pa
Ga,Pa,Dha,Pa,Ga,Re,Sa
Ga,Pa,Dha,SA,SA,Dha,Pa,Ga,Re,Ga,Re,Pa,Ga,Re,Sa
Ga,Re,Sa,Re,Ga,Pa,Dha,SA,Pa,Dha,SA,RE,GA,RE,SA
Dha,SA,RE,SA,Dha,SA,Dha,Pa,Ga,Pa,Dha,Pa,Ga,Pa,Ga,Re,Sa,dha,dha,Sa
# Repeat the experiment with the richer bhoop1 corpus (includes fused swar pairs).
bhoop1 = transition_probability(open("bhoop1.csv").read())
a = aalap_("Sa", bhoop1)
sum([count(search(a,pakad,32)) for i in range(1000)])/1000
a = aalap_("Sa", bhoop1)
take(a, 32)
bhoop1
# Reference rendition of Bhoop used for the melodic contour plot below.
tune = """
SA,SA,Dha,Pa,Ga,Re,Sa,Re,Ga,Ga,Pa,Ga,Dha,Pa,Ga,Ga
Ga,Pa,Dha,SA,RE,SA,Dha,Pa,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Ga,Pa,Dha,Pa,SA,SA,SA,Dha,Dha,SA,RE,GA,RE,SA,Dha
GA,GA,RE,SA,RE,RE,SA,Dha,SA,Pa,Dha,Pa,Ga,Re,Sa,Sa
Ga,Re,Ga,Ga,Sa,Re,Sa,Sa,Sa,Sa,Sa,dha,Sa,Re,Ga,Ga
Pa,Ga,Pa,Pa,Dha,Dha,Pa,Pa,Ga,Pa,Dha,SA,Dha,Pa,Ga,Sa
Pa,Ga,Ga,Re,Ga,Pa,SA,Dha,SA,SA,SA,SA,Dha,Re,SA,SA
Dha,Dha,Dha,Dha,SA,RE,GA,RE,SA,SA,Dha,Pa,Dha,SA,Dha,Pa
Ga,Re,Ga,Ga,Ga,Re,Pa,Ga,Dha,Pa,Dha,SA,Dha,Pa,Ga,Sa
Sa,dha,dha,Sa
dha,Sa,Re
Sa,Re
dha,Sa
Sa,Re,Ga,Re,Ga,Sa,Re,dha,Sa
Sa,Re,Ga,Re,Ga,Pa,Ga,Re,Pa,Ga,dha,dha,Sa
Ga,Pa,Dha,Ga,Ga,Ga,Pa
Ga,Pa,Dha,Pa,Ga,Re,Sa
Ga,Pa,Dha,SA,SA,Dha,Pa,Ga,Re,Ga,Re,Pa,Ga,Re,Sa
Ga,Re,Sa,Re,Ga,Pa,Dha,SA,Pa,Dha,SA,RE,GA,RE,SA
Dha,SA,RE,SA,Dha,SA,Dha,Pa,Ga,Pa,Dha,Pa,Ga,Pa,Ga,Re,Sa,dha,dha,Sa
"""
# Flatten the notation into a single token list.
tune = tune.strip().replace("\n",",").replace(",,",",").split(",")
from matplotlib import pyplot
# Plot the melodic contour: y = position of each swar within the 36-note scale.
scale = all_saptak()
pyplot.plot([scale.index(s) for s in tune])
tune
```
| github_jupyter |
# AIMSim Demo
This notebook demonstrates the key uses of _AIMSim_ as a graphical user interface, command line tool, and scripting utility. For detailed explanations and to view the source code for _AIMSim_, visit our [documentation page](https://vlachosgroup.github.io/AIMSim/).
## Installing _AIMSim_
For users with Python already in use on their devices, it is _highly_ recommended to first create a virtual environment before installing _AIMSim_. This package has a large number of dependencies with only a handful of versions supported, so conflicts are likely unless a virtual environment is used.
For new Python users, the authors recommend installing Anaconda Navigator to manage dependencies for _AIMSim_ and make installation easier overall. Once Anaconda Navigator is ready, create a new environment with Python 3.7, open a terminal or command prompt in this environment, and follow the instructions below.
We recommend installing _AIMSim_ using the commands shown below (omit the exclamation points and the %%capture, unless you are running in a Jupyter notebook):
```
%%capture
!pip install aimsim
```
Now, start the _AIMSim_ GUI by typing `python -m aimsim` or simply `aimsim` into the command line.
## Graphical User Interface Walkthrough
For most users, the Graphical User Interface (GUI) will provide access to all the key functionalities in _AIMSim_. The GUI works by serving the user with drop downs and text fields which represent settings that would otherwise need to be configured in a file by hand. This file is written to the disk by the GUI as part of execution so that the file can be used as a 'starting point' for more advanced use cases.
**Important Note**: Jupyter Notebook _cannot_ run _AIMSim_ from Binder. In order to actually run the _AIMSim_ GUI alongside this tutorial, you will need to download this notebook and run it from a local installation of Jupyter, or follow the installation instructions above and start _AIMSim_ from there. You can install Jupyter [here](https://jupyter.org/install).
<div>
<img src="attachment:image-6.png" width="250"/>
</div>
### A. Database File
This field accepts a file or directory path containing an input set of molecules in one of the accepted formats: SMILES strings, Protein Data Bank files, and excel files containing these data types.
Example:
`/Users/chemist/Desktop/SMILES_database.smi`
#### A1. Similarity Plots
Checking this box will generate a similarity distribution with _AIMSim's_ default color scheme and labels. To customize this plot further, edit the configuration file produced by _AIMSim_ by clicking `Open Config`, then re-submit the file through the command line interface.
Example:
<div>
<img src="attachment:image-4.png" width="200"/>
</div>
In addition to the similarity distribution, this will create a heatmap showing pairwise comparisons between the two species. As above, edit the configuration file to control the appearance of this plot.
Example:
<div>
<img src="attachment:image-5.png" width="200"/>
</div>
#### A2. Property Similarity Checkboxes
Like in the previous two examples, checking this box will create a plot showing how a provided molecular property varies according to the chosen molecular fingerprint. For this to work, data must be provided in a comma-separated value format (which can be generated using Excel with Save As... -> CSV) where the rightmost column is a numerical value (the property of interest).
Example:
| SMILES | Boiling Point |
|--------|---------------|
| C | -161.6 |
| CC | -89 |
| CCC | -42 |
### B. Target Molecule
Provide a SMILES string representing a single molecule for comparison to the provided database of molecules. In the screenshot above, the provided molecule is "CO", methanol. Any valid SMILES strings are accepted, and any errors in the SMILES string will not affect the execution of other tasks.
#### B1. Similarity Heatmap
Like the similarity heatmap shown above, this checkbox will generate a similarity distribution for the single target molecule specified above to the entire molecular database. This is particularly useful when considering a new addition to a dataset, where _AIMSim_ can help in determining if the provided molecule's structural motifs are already well represented in the data.
### C. Similarity Measure
This dropdown includes all of the similarity metrics currently implemented in _AIMSim_. The default selected metric is likely a great starting point for most users, and the additional metrics are provided for advanced users or more specific use cases.
Available Similarity Measures are automatically updated according to the fingerprint currently selected. Not all metrics are compatible with all fingerprints, and _AIMSim_ recognizes this and will only allow the user to select valid combinations.
Below is a complete list of all similarity measures currently implemented in _AIMSim_.
| # | Name | Input Aliases |
| -- | ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | l0\_similarity | \- |
| 2 | l1\_similarity | manhattan\_similarity, taxicab\_similarity, city\_block\_similarity, snake\_similarity |
| 3 | l2\_similarity | euclidean\_similarity |
| 4 | cosine | driver-kroeber, ochiai |
| 5 | dice | sorenson, gleason |
| 6 | dice\_2 | \- |
| 7 | dice\_3 | \- |
| 8 | tanimoto | jaccard-tanimoto |
| 9 | simple\_matching | sokal-michener, rand |
| 10 | rogers-tanimoto | \- |
| 11 | russel-rao | \- |
| 12 | forbes | \- |
| 13 | simpson | \- |
| 14 | braun-blanquet | \- |
| 15 | baroni-urbani-buser | \- |
| 16 | kulczynski | \- |
| 17 | sokal-sneath | sokal-sneath\_1 |
| 18 | sokal-sneath\_2 | sokal-sneath-2, symmetric\_sokal\_sneath, symmetric-sokal-sneath, |
| 19 | sokal-sneath\_3 | sokal-sneath-3 |
| 20 | sokal-sneath\_4 | sokal-sneath-4 |
| 21 | jaccard | \- |
| 22 | faith | \- |
| 23 | michael | \- |
| 24 | mountford | \- |
| 25 | rogot-goldberg | \- |
| 26 | hawkins-dotson | \- |
| 27 | maxwell-pilliner | \- |
| 28 | harris-lahey | \- |
| 29 | consonni−todeschini\_1 | consonni−todeschini-1 |
| 30 | consonni−todeschini\_2 | consonni−todeschini-2 |
| 31 | consonni−todeschini\_3 | consonni−todeschini-3 |
| 32 | consonni−todeschini\_4 | consonni−todeschini-4 |
| 33 | consonni−todeschini\_5 | consonni−todeschini-5 |
| 34 | austin-colwell | \- |
| 35 | yule\_1 | yule-1 |
| 36 | yule\_2 | yule-2 |
| 37 | holiday-fossum | fossum, holiday\_fossum |
| 38 | holiday-dennis | dennis, holiday\_dennis |
| 39 | cole\_1 | cole-1 |
| 40 | cole\_2 | cole-2 |
| 41 | dispersion | choi |
| 42 | goodman-kruskal | goodman\_kruskal |
| 43 | pearson-heron | pearson\_heron |
| 44 | sorgenfrei | \- |
| 45 | cohen | \- |
| 46 | peirce\_1 | peirce-1 |
| 47 | peirce\_2 | peirce-2 |
### D. Molecular Descriptor
This dropdown includes all of the molecular descriptors, mainly fingerprints, currently implemented in _AIMSim_:
|#|Fingerprint|
|---|---|
|1|morgan|
|2|topological|
|3|daylight|
Each of these fingerprints should be generally applicable for chemical problems, though they are all provided to serve as an easy way to compare the results according to fingerprinting approach.
Additional descriptors are included with _AIMSim_ which are not mathematically compatible with some of the similarity measures. When such a descriptor is selected, the corresponding similarity measure will be removed from the dropdown.
#### D1. Show Experimental Descriptors
This checkbox adds additional molecular descriptors into the `Molecular Descriptor` dropdown. These are marked as _experimental_ because they are generated using third-party libraries over which we have very little or no control. The descriptors generated by these libraries should be used only when the user has a very specific need for a descriptor as implemented in one of the packages below:
- [ccbmlib](https://doi.org/10.12688/f1000research.22292.2): All molecular fingerprints included in the `ccbmlib` library have been reproduced in _AIMSim_. Read about these fingerprints [in the `ccbmlib` repository](https://github.com/vogt-m/ccbmlib).
- [mordred](https://doi.org/10.1186/s13321-018-0258-y): All 1000+ descriptors included in `mordred` are available in _AIMSim_, though as of January 2022 it seems that `mordred` is no longer being maintained and has a significant number of bugs. Use at your own risk.
- [PaDELPy](https://doi.org/10.1002/jcc.21707): [This package](https://github.com/ecrl/padelpy) provides access to all of the molecular descriptors included as part of the PaDEL-Descriptor standalone Java program.
### E. Run
Pressing this button will call a number of input checkers to verify that the information entered into the fields above is valid, and then the tasks will be passed into _AIMSim_ for execution. Additional input to _AIMSim_ needed for some tasks may be requested from the command line.
For large collections of molecules with substantial run times, your operating system may report that _AIMSim_ has stopped responding and should be closed. This is likely not the case, and _AIMSim_ is simply executing your requested tasks. If unsure, try checking the `Verbose` checkbox discussed below, which will provide near-constant output while _AIMSim_ is running.
### F. Open Config
Using your system's default text editor, this button will open the configuration file generated by _AIMSim_ after pressing the run button. This is useful for fine-tuning your plots or re-running the exact same tasks in the future. This configuration file can also access additional functionality present in _AIMSim_ which is not included in the GUI, such as the sampling ratio for the data (covered in greater depth in the __Command Line and Configuration Files__ section below). To use this configuration file, include the name of the file after your call to _AIMSim_ on the command line, i.e.:
`aimsim aimsim-ui-config.yaml` or `python -m aimsim aimsim-ui-config.yaml`
Because of the way Python installs libraries like _AIMSim_, this file will likely be saved somewhere difficult to find among many other internal Python files. It is highly recommended to make a copy of this file in a more readily accessible location, or copy the contents of this file into another one. The name of the file can also be changed to something more meaningful (i.e., JWB-Solvent-Screen-123.yaml) as long as the file extension (.yaml) is still included.
### G. Verbose
Selecting this checkbox will cause _AIMSim_ to emit near-constant updates to the command line on its status during execution. This is useful to confirm that _AIMSim_ is executing and has not crashed, and also to provide additional information about errors in the input data.
For large datasets, this may generate a _significant_ amount of command line output. A pairwise comparison of 10,000 molecules would require 100,000,000 (10,000 \* 10,000) operations, generating at least that many lines of text in the console.
Example __Verbose__ output:
```
Reading SMILES strings from C:\path\to\file\small.smi
Processing O=S(C1=CC=CC=C1)(N2CCOCC2)=O (1/5)
Processing O=S(C1=CC=C(C(C)(C)C)C=C1)(N2CCOCC2)=O (2/5)
Processing O=S(C1=CC=C(C2=CC=CC=C2)C=C1)(N3CCOCC3)=O (3/5)
Processing O=S(C1=CC=C(OC)C=C1)(N2CCOCC2)=O (4/5)
Processing O=S(C1=CC=C(SC)C=C1)(N2CCOCC2)=O (5/5)
Computing similarity of molecule num 1 against 1
Computing similarity of molecule num 2 against 1
Computing similarity of molecule num 3 against 1
Computing similarity of molecule num 4 against 1
Computing similarity of molecule num 5 against 1
Computing similarity of molecule num 1 against 2
```
### H. Outlier Check
Checking this will have _AIMSim_ create an Isolation Forest (read more about this in [Sklearn's documentation](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html)) to identify possible outliers in the input database of molecules. The results from this approach are _non-deterministic_ because of the underlying algorithm driving the Isolation Forest, so this feature is intended to be a "sanity check" rather than a quantitative measure of 'outlier-ness'. To truly determine how different a single example molecule is to a set of molecules, use the `Compare Target Molecule` functionality discussed above.
### I. Enable Multiple Workers
This checkbox will enable multiprocessing, speeding up execution time on the data. By default, _AIMSim_ will use __all__ physical cores available on your machine, which may impact performance of other programs.
The user should only enable this option with datasets of a few hundred or more molecules. This is because there is additional processing time associated with creating and destroying multiple processes, so for small data sets it is faster to simply execute the comparisons serially.
## Command Line and Configuration Files
For users who prefer to use _AIMSim_ without a user interface, a command line interface is provided. This requires the user to manually write configuration files, but allows access to more granular control and some additional features which are not included in the GUI. This can be invoked by typing `aimsim config.yaml` into your terminal or command window, where `config.yaml` is a configuration file you have provided or copied from the _AIMSim_ repository.
Below is a 'maximum specification' file to be used with _AIMSim_, showing all possible settings and tasks which _AIMSim_ can ingest. Any overall settings which are left out will be inferred by _AIMSim_, and any tasks which are not included will simply not be executed. Each field used in the file is explained afterward.
### Maximum Specification File
```
is_verbose (bool):
molecule_database (str): # path to excel / csv/ text file
molecule_database_source_type (str): # Type of source file. 'excel', 'csv', 'text'
similarity_measure (str): #Set to determine if auto identification required
fingerprint_type (str): # Set to determine if auto identification required
measure_id_subsample (float): # [0, 1] Subsample used for measure search
sampling_ratio (float): # [0, 1] Subsample used for all tasks
n_workers (int / str): # [int, 'auto'] number of processes, or let AIMSim decide
global_random_seed (int / str): # int or 'random'
tasks:
compare_target_molecule:
target_molecule_smiles (str):
draw_molecule (bool): # If true, strucures of target, most and least similar molecules are displayed
similarity_plot_settings:
plot_color (str): # Set a color recognized by matplotlib
shade (bool): # If true, the similarity density is shaded
plot_title (str):
log_file_path (str):
visualize_dataset:
heatmap_plot_settings:
cmap (str): # matplotlib recognized cmap (color map) used for heatmap.
plot_title (str):
annotate (bool): # If true, heatmap is annotated
similarity_plot_settings:
plot_color (str):
shade (bool): # If true, the similarity density is shaded
embedding_plot_settings:
plot_title (str):
embedding:
method (str): # algorithm used for embedding molecule set in 2 dimensions.
params: # method specific parameters
random_state (int): #used for seeding stochastic algorithms
see_property_variation_w_similarity:
log_file_path (str):
property_plot_settings:
plot_color (str): # Set a color recognized by matplotlib
identify_outliers:
random_state (int):
output (str): # filepath or "terminal" to control where results are shown
plot_outliers (bool):
pair_similarity_plot_settings: # Only meaningful if plot_outliers is True
plot_color (str): # Set a color recognized by matplotlib
cluster:
n_clusters (int):
clustering_method (str):
log_file_path (str):
cluster_file_path (str):
cluster_plot_settings:
cluster_colors (list): # Ensure len(list) >= n_cluster
embedding_plot_settings:
plot_title (str):
embedding:
method (str): # algorithm used for embedding molecule set in 2 dimensions.
params: # method specific parameters
random_state (int): #used for seeding stochastic algorithms
```
#### Overall _AIMSim_ Settings
These settings impact how all tasks run by _AIMSim_ will be executed.
- `is_verbose`: Must be either `True` or `False`. When `True`, _AIMSim_ will emit text updates of during execution to the command line, useful for debugging.
- `molecule_database`: A file path to an Excel workbook, text file containing SMILES strings, or PDB file surrounded by single quotes, i.e. `'/User/my_user/smiles_database.smi'`. Can also point to a directory containing a group of PDB files, but the file path must end with a '/' (or '\' for Windows).
- `molecule_database_source_type`: The type of data to be input to _AIMSim_, being either `text`, `excel`, or `pdb`.
- `similarity_measure`: The similarity measure to be used during all tasks, chosen from the list of supported similarity measures. Automatic similarity measure determination is also supported, and can be performed by specifying `determine`.
- `fingerprint_type`: The fingerprint type or molecular descriptor to be used during all tasks, chosen from the list of supported descriptors. Automatic determination is also supported, and can be performed by specifying `determine`.
- `measure_id_subsample`: A decimal number between 0 and 1 specifying what fraction of the dataset to use for automatic determination of similarity measure and fingerprint. For a dataset of 10,000 molecules, setting this to `0.1` would run only 1000 randomly selected molecules, dramatically reducing runtime. This field is only needed if `determine` is used in either of the prior fields.
- `sampling_ratio`: A decimal number between 0 and 1 specifying what fraction of the dataset to use for tasks. For a dataset of 10,000 molecules, setting this to `0.1` would run only 1000 randomly selected molecules, dramatically reducing runtime.
- `n_workers`: Either an integer or the string 'auto'. With an integer, _AIMSim_ will create that many processes for its operation. This number should be less than or equal to the number of _physical_ CPU cores in your computer. Set this option to 'auto' to let _AIMSim_ configure multiprocessing for you.
- `global_random_seed`: Integer to be passed to all non-deterministic functions in _AIMSim_. By default, this value is 42 to ensure consistent results between subsequent executions of _AIMSim_. This seed will override the random seeds provided to any other _AIMSim_ tasks. Alternatively, specify 'random' to allow _AIMSim_ to randomly generate a seed.
#### Task-Specific Settings
The settings fields below dictate the behavior of _AIMSim_ when performing its various tasks.
##### Compare Target Molecule
Generates a similarity distribution for the dataset compared to an individual molecule.
- `target_molecule_smiles`: SMILES string for the molecule used in comparison to the dataset.
- `draw_molecule`: If this is set to True, then _AIMSim_ draws the structure of the target molecule, and of the molecule most and least similar to it.
- `similarity_plot_settings`: Controls the appearance of the distribution.
- `plot_color`: Can be any color recognized by the _matplotlib_ library.
- `shade`: `True` or `False`, whether or not to shade in the area under the curve.
- `plot_title`: String containing text to be written above the plot.
- `log_file_path`: String specifying a file to write output to for the execution of this task. Useful for debugging.
##### Visualize Dataset
Generates a pairwise comparison matrix for all molecules in the dataset.
- `heatmap_plot_settings`: Control the appearance of the plot.
- `cmap`: _matplotlib_ recognized cmap (color map) used for heatmap.
- `plot_title`: String containing text to be written above the plot.
- `annotate`: `True` or `False`, controls whether or not _AIMSim_ will write annotations over the heatmap.
- `similarity_plot_settings`: Controls the appearance of the distribution.
- `plot_color`: Can be any color recognized by the _matplotlib_ library.
- `shade`: `True` or `False`, whether or not to shade in the area under the curve.
- `embedding_plot_settings`: Controls the lower-dimensional embedding of the dataset.
- `plot_title`: String containing text to be written above the plot.
- `embedding`: Set the algorithmic aspects of the embedding
- `method`: Label specifying the algorithm embedding the molecule set in 2 dimensions.
- `params`: Specific hyperparameters which are passed through to the underlying implementation
- `random_state`: Number used for seeding stochastic algorithms
##### Property Variation Visualization
Creates a plot of how a given property in the input molecule set varies according to the structural fingerprint chosen.
- `log_file_path`: String specifying a file to write output to for the execution of this task. Useful for debugging or retrospection.
- `property_plot_settings`: Control the appearance of the plot.
- `plot_color`: Any color recognized by the _matplotlib_ library.
##### Identify Outliers
Trains an [IsolationForest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html) on the input data to check for potential outliers.
- `random_state`: An integer to pass through to random_state in sklearn. _AIMSim_ sets this to 42 by default.
- `output`: A string which specifies where the output of the outlier search should go. This can be either a filepath or "terminal" to write the output directly to the terminal.
- `plot_outliers`: Set this to `True` to generate a 2D plot of which molecules are potential outliers.
- `pair_similarity_plot_settings`: Only meaningful if plot_outliers is True, allows access to plot settings.
- `plot_color`: Any color recognized by the _matplotlib_ library.
##### Cluster
Use a clustering algorithm to make groups from the database of molecules.
- `n_clusters`: The number of clusters to group the molecules into.
- `clustering_method`: Optional string specifying a clustering method implemented in `sklearn`, one of `kmedoids`, `ward`, or `complete_linkage`. `complete_linkage` will be chosen by default if no alternative is provided.
- `log_file_path`: String specifying a file to write output to for the execution of this task. Useful for debugging.
- `cluster_file_path`: String specifying a file path where _AIMSim_ will output the result of clustering. Useful for comparing multiple clustering approaches or saving the results of large data sets.
- `cluster_plot_settings`: Control the appearance of the clustering plot.
- `cluster_colors`: A list of strings, each of which is a color recognized by _matplotlib_ to use for the clusters. Must specify at least as many colors as there are clusters. Additional colors will be ignored.
- `embedding_plot_settings`: Controls the lower-dimensional embedding of the dataset.
- `plot_title`: String containing text to be written above the plot.
- `embedding`: Set the algorithmic aspects of the embedding
- `method`: Label specifying the algorithm embedding the clustered molecule set in 2 dimensions.
- `params`: Specific hyperparameters which are passed through to the underlying implementation
- `random_state`: Number used for seeding stochastic algorithms
## Writing Scripts with _AIMSim_
Advanced users may wish to use _AIMSim_ to create their own descriptors, use the descriptors provided in _AIMSim_ for something else entirely, or utilize the various similarity scores. Brief explanations for how to access the core functionalities of _AIMSim_ from a Python script are shown below.
### Making Custom Descriptors
Any arbitrary numpy array can be provided as a molecular descriptor, though correct function with the similarity metrics provided with _AIMSim_ is not guaranteed.
```
# Descriptor wraps a molecular fingerprint/descriptor; start empty and
# assign a value later.
from aimsim.ops.descriptor import Descriptor
desc = Descriptor()
```
With the `Descriptor` class instantiated, one can then call the methods to set the value(s) of the descriptor.
```
import numpy as np
# Any numpy array can serve as a hand-made descriptor value.
custom_desc = np.array([1, 2, 3])
desc.set_manually(custom_desc)
# Read the stored value back as a numpy array (notebook cell output).
desc.numpy_
```
This same function can be achieved by passing in a numpy array for the keyword argument `value` in the constructor for `Descriptor`, as shown below:
```
# Equivalent shortcut: pass the array straight to the constructor.
desc = Descriptor(custom_desc)
desc.numpy_
```
The above code is useful for individually changing a descriptor for one molecule in a `MoleculeSet` but is obviously not practical for bulk custom descriptors. To assign descriptors for an entire set of molecules at once, instantiate the `MoleculeSet` class and call the `_set_descriptor` method passing in a 2-dimensional numpy array of descriptors.
```
# The package installs as lowercase "aimsim" (see `pip install aimsim`
# above); the original imported from "AIMSim", which fails on
# case-sensitive installations.  Import path fixed to match the rest of
# this notebook.
from aimsim.chemical_datastructures.molecule_set import MoleculeSet
# Positional args: database path, source type, is_verbose, similarity
# measure -- NOTE(review): confirm this ordering against the MoleculeSet
# constructor signature.
molset = MoleculeSet(
    '/path/to/databse/smiles.txt',
    'text',
    False,
    'tanimoto'
)
# Bulk-assign custom descriptors: one row per molecule in the set.
molset._set_descriptor([[1, 2, 3], [4, 5, 6]])
```
### Generating Descriptors with _AIMSim_
Because _AIMSim_ is able to generate such a wide variety of molecular fingerprints and descriptors from only the SMILES strings, you may want to avoid re-inventing the wheel and use the descriptors that it generates. There are two general approaches to doing this, and the approach used depends on what other code you already have in place:
1. If you have only SMILES strings to turn into fingerprints/descriptors, the `Molecule` class should be used to handle generating the molecule object and generating the descriptors.
2. If you have already created a molecule using `RDKit`, you must provide the existing molecule in your call to the constructor in `Molecule`.
These approaches are covered in this order below.
```
# Route 1: build the Molecule directly from a SMILES string.
# with a SMILES string
smiles = "CO"
from aimsim.chemical_datastructures.molecule import Molecule
mol = Molecule(mol_smiles=smiles)
mol.set_descriptor(fingerprint_type="atom-pair_fingerprint")
# Fetch the computed descriptor value (notebook cell output).
mol.get_descriptor_val()
# Route 2: wrap an existing RDKit molecule instead.
# with an RDKit molecule
from rdkit import Chem
mol_graph = Chem.MolFromSmiles(smiles)
mol = Molecule(mol_graph=mol_graph)
# "mordred:nAtom" selects the mordred-provided atom-count descriptor.
mol.set_descriptor(fingerprint_type="mordred:nAtom")
mol.get_descriptor_val()
```
### Accessing _AIMSim_ Similarity Metrics
As of January 2022, _AIMSim_ implements 47 unique similarity metrics for use in comparing two numbers and/or two sets of numbers. These metrics were pulled from a variety of sources, including some original implementations, so it may be of interest to use this code in your own work.
All of the similarity metrics can be accessed through the `SimilarityMeasure` class, as shown below.
```
from aimsim.ops.similarity_measures import SimilarityMeasure
# Descriptor is used below; import it explicitly so this snippet is
# self-contained (the original relied on an earlier cell's import).
from aimsim.ops.descriptor import Descriptor
from rdkit.Chem import MolFromSmiles
# "driver-kroeber" is an input alias for the cosine similarity measure
# (see the table in the GUI walkthrough above).
sim_mes = SimilarityMeasure("driver-kroeber")
# Morgan fingerprints for two small molecules to compare.
desc_1 = Descriptor()
desc_1.make_fingerprint(
    MolFromSmiles("COC"),
    "morgan_fingerprint",
)
desc_2 = Descriptor()
desc_2.make_fingerprint(
    MolFromSmiles("CCCC"),
    "morgan_fingerprint",
)
# SimilarityMeasure instances are callable on a pair of Descriptors.
out = sim_mes(
    desc_1,
    desc_2,
)
# Display the similarity score (notebook cell output).
out
```
A complete list of supported similarity measures and the names by which _AIMSim_ recognizes them is listed in the GUI walkthrough section.
## Using AIMSim Tasks inside custom Python pipelines
In this section we will take a look at using some of the Tasks provided by AIMSim inside custom Python scripts.
### Visualizing a Dataset
First we create the dataset which consists of 100 samples, each containing 3 features. We will first create an Excel file and load that file via _AIMSim_ to visualize it. <b>Note that </b> columns corresponding to sample names or features in the Excel have to be prefixed by <i>'feature_'</i>
```
%%capture
!pip install openpyxl # for using the excel writer
import pandas as pd
from numpy.random import random
# Synthetic dataset: 100 samples x 3 features drawn uniformly from
# [0, 1).  Feature columns must carry the "feature_" prefix so AIMSim
# treats them as features (see the note above this cell).
n_samples = 100
dataset = {'feature_f1': random(size=n_samples),
           'feature_f2': random(size=n_samples),
           'feature_f3': random(size=n_samples)}
df = pd.DataFrame(dataset)
dataset_file = 'dataset.xlsx'
# Write to Excel so it can be re-loaded through AIMSim's 'excel' reader.
df.to_excel(dataset_file)
```
First we load the data into a MoleculeSet object. We use the arbitrary features defined above and L2- similarity to define the similarity in this feature space.
```
from aimsim.chemical_datastructures import MoleculeSet
# load a MoleculeSet from the file
# L2 (Euclidean) similarity defines "closeness" in the arbitrary
# feature space created above; no molecular fingerprint is computed.
molecule_set = MoleculeSet(molecule_database_src=dataset_file,
                           molecule_database_src_type='excel',
                           similarity_measure='l2_similarity',
                           is_verbose=False)
```
Now we visualize it using the VisualizeDataset Task.
Note that the arguments to the VisualizeDataset constructor are used to edit the plot settings (such as colors and axis labels) as well as the type and parameters of the 2D embedding (here we use PCA to embed the dataset in 2 dimensions). A complete list of the keywords accepted and their default values can be found in the docstring of the constructor in our [documentation page](https://vlachosgroup.github.io/AIMSim/).
```
from aimsim.tasks import VisualizeDataset
# instantiate the task
# PCA embeds the dataset in 2 dimensions for plotting.
viz = VisualizeDataset(embedding_plot_settings={"embedding": {"method": "pca"}})
# Calling the task on a MoleculeSet renders the plots.
viz(molecule_set)
```
### Clustering
The dataset can also be clustered using the ClusterData Task in _AIMSim_. The following code snippet clusters the dataset using the K-Medoids algorithm. Note that we reuse the MoleculeSet object, therefore we are still using the L2 similarity for clustering. The data is clustered into 5 clusters and the 2D embedding is again generated using PCA. A complete list of the keywords accepted by the ClusterData constructor and their default values can be found in the docstring of the constructor in our [documentation page](https://vlachosgroup.github.io/AIMSim/).
```
from aimsim.tasks import ClusterData
# K-Medoids clustering into 5 groups, reusing the MoleculeSet above
# (and therefore its L2 similarity); PCA supplies the 2-D embedding.
clustering = ClusterData(n_clusters=5, # data is clustered into 5 clusters
                         clustering_method='kmedoids',
                         embedding_plot_settings={"embedding": {"method": "pca"}}
                         )
# Run the clustering and render the cluster plot.
clustering(molecule_set)
```
| github_jupyter |
# Section 3.3 Single Model Numerical Diagnostics
```
import os
import arviz as az
# Change working directory
# NOTE(review): this moves up one level whenever the cwd is NOT named
# "notebooks"; the intent (resolving inference_data/ relative to the
# repo layout) should be confirmed -- the condition reads as inverted.
if os.path.split(os.getcwd())[-1] != "notebooks":
    os.chdir(os.path.join(".."))
# Directory holding the saved InferenceData NetCDF files.
NETCDF_DIR = "inference_data"
# Use ArviZ's white plotting style for all figures in this notebook.
az.style.use('arviz-white')
```
## What happened to hard numbers?
One criticism of visual plots is that their interpretation is subjective. When running one model it's relatively simple to visually inspect the results, but if testing out various models, looking over many traceplots and autocorrelation diagrams becomes statistician time intensive. (We'll talk more about multiple models in Section 5). As far as it is possible to automate model checking, we would like to.
## $\hat{R}$ and Effective Sample Size
Recall our two pertinent questions MCMC practitioners should ask when making posterior estimates:
* Did the chains mix well?
* Did we get enough samples?
These questions are paraphrased from the paper published in March 2019: **Rank-normalization, folding, and localization: An improved $\hat{R}$ for assessing convergence of MCMC** by [Vehtari et.al.](https://arxiv.org/abs/1903.08008), and thankfully the paper provides two numbers -- $\hat{R}$, and effective sample size (ESS) -- as tools to help answer these questions.
### Warning: Active Research Zone
Wow! A paper from 2019! Bayesian statistics is an academically active field and numerous versions of $\hat{R}$ and effective sample size calculations have been proposed over the years, the first of which was published in 1992. In this tutorial we will be covering the calculation from the 2019 paper (linked again [here](https://arxiv.org/abs/1903.08008)).
Just be mindful that when looking at older papers or results the diagnostics will answer the same question, but the exact calculation may differ.
Some prior papers are linked here for reference.
[Gelman and Rubin (1992)](https://projecteuclid.org/euclid.ss/1177011136)
[Brooks and Gelman (1998)](http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/ConvergeDiagnostics/BrooksGelman.pdf)
[Gelman et al. Bayesian Data Analysis (3 ed, 2014)](http://www.stat.columbia.edu/~gelman/book/)
### $\hat{R}$ (say "R hat")
The first question we'll try and answer is if the chains have mixed well. The summarized formula is
$$ \Large \hat{R} = \sqrt{\frac{\hat{\text{var}}^{+}(\theta \mid y)}{W}}$$
While the details of the calculation can be found in the paper, it's using the *between chain variance* with *in chain variance* to calculate $\hat{R}$. The idea is that if all the chains have converged, the variance should be similar across all chains, and the pooled sample of all chains.
### Effective sample size (also known as ESS, also known as $S_{eff}$)
As the name suggests effective sample size helps answer the question "Did we get enough samples?" The summarized formula is
$$ \large S_{\text{eff}} = \frac{NM}{\hat{\tau}} $$
Where N is computation draws, M is the number of chains and $\hat{\tau}$ is a number derived from the chain autocorrelations. The idea here is that in highly autocorrelated chains, while the computer *is* drawing samples they're not effective because they're not doing much to help estimate the posterior.
Let's walk through an example
## Reliving the horror. Naive Metropolis Hastings with Bad Initialization
In Section 3.1 we performed an Inference Run with nightmareish results. Let's load the data again here and plot the visual diagnostics once again.
```
# Reload the badly-initialized Metropolis-Hastings run from Section 3.1.
data_bad_init = az.from_netcdf(os.path.join(NETCDF_DIR, "data_bad_init.nc"))
# Visual diagnostics first, before computing the numerical ones.
az.plot_trace(data_bad_init)
```
Looking again at the trace plots we can "see" that the results look bad, but like true statisticians let's use our numerical tools to quantify the results.
```
# R-hat: values near 1 indicate well-mixed chains.
az.rhat(data_bad_init)
# Effective sample size of the draws.
# NOTE(review): newer ArviZ releases rename this function to `az.ess` --
# confirm against the installed ArviZ version.
az.effective_sample_size(data_bad_init)
```
According to the math $\hat{R} = 6.95$ and $S_{\text{eff}} = 2.33$, but this begs the question: is this good or bad? Subjectively speaking, these are bad. Generally speaking
* We want $\hat{R}$ to be to close to 1 as possible
* We want ESS to be as close to the number of simulation draws as possible
$\hat{R}$ is telling us that the variances are not very consistent and $S_{\text{eff}}$ is telling us that the 400 draws (200 draws over 2 chains) we took were as useful as ~2 independent draws from the true distribution (for ESS this small, such an interpretation is necessarily silly).
In Aki's paper there is specific advice for these diagnostics
* Run at least 4 chains
* $\hat{R}$ should be less than 1.01
* ESS should be 400 "before we expect $\hat{R}$ to be useful"
In Notebook 3.4 we'll be using $\hat{R}$ and $S_{\text{eff}}$ to compare each inference run
| github_jupyter |
# Part 2 - Refine Data
The second step for analyzing the data is to perform some additional preparations and enrichments. While the first step of storing the data into the structured zone should be mainly a technical conversion without losing any information, this next step will integrate some data and also preaggregate weather data to simplify working with it.
# 0 Prepare Python Environment
## 0.1 Spark Session
```
from pyspark.sql import SparkSession
import pyspark.sql.functions as f

# Reuse a session the environment may already provide (e.g. a managed
# notebook); otherwise start a local one with a generous driver heap.
if 'spark' not in locals():
    spark = (
        SparkSession.builder
        .master("local[*]")
        .config("spark.driver.memory", "64G")
        .getOrCreate()
    )

spark.version
```
## 0.2 Matplotlib
```
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Register converters so pandas datetimes plot cleanly on matplotlib axes.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
```
# 1 Read Taxi Data
Now we can read in the taxi data from the structured zone.
## 1.1 Trip Data
Let us load the NYC Taxi trip data from the Hive table `taxi.trip` and let us display the first 10 records.
```
# Load the trip table from the structured zone.
trip_data = spark.read.table("taxi.trip")
# Show the first 10 records (pandas renders nicely in the notebook).
trip_data.limit(10).toPandas()
```
Just to be sure, let us inspect the schema. It should match exactly the specified one.
```
# Verify the schema matches the specification.
trip_data.printSchema()
```
## 1.2 Fare information
Now we read in the second table `taxi.fare` containing the trips fare information.
```
# Load the fare table and inspect a sample plus its schema.
fare_data = spark.read.table("taxi.fare")
fare_data.limit(10).toPandas()
fare_data.printSchema()
```
## 1.3 Join datasets
We can now join both the trip information and the fare information together in order to get a complete picture. Since the trip records do not contain a technical unique key, we use the following columns as the composite primary key of each trip:
* medallion
* hack_license
* vendor_id
* pickup_datetime
Finally the result is stored into the refined zone into the Hive table `refined.taxi_trip`.
```
# Create the Hive database backing the refined zone if it is missing.
spark.sql("CREATE DATABASE IF NOT EXISTS refined")

# Trips carry no technical key, so join trip and fare records on the
# composite key; a left outer join keeps trips without fare information.
join_key = ["medallion", "hack_license", "vendor_id", "pickup_datetime"]
taxi_trips = trip_data.join(fare_data, join_key, how="left_outer")

# Persist the combined records as a Parquet-backed Hive table.
taxi_trips.write.format("parquet").saveAsTable("refined.taxi_trip")
```
### Read from Refined Zone
```
# Read the joined trips back from the refined zone.
taxi_trips = spark.read.table("refined.taxi_trip")
taxi_trips.limit(10).toPandas()
```
Let us have a look at the schema of the refined table
```
# Schema of the refined table (trip + fare columns).
taxi_trips.printSchema()
```
Let us count the number of records in the table
```
# Total number of joined trip records.
taxi_trips.count()
```
# 2. Weather Data
The weather data also requires some additional preprocessing, especially when we want to join against weather data. The primary problem of all measurements is, that they might happen at different time intervals and not all measurements contain all metrics. Therefore we preaggregate the weather data to hourly and daily measurements, which can directly be used for joining.
## 2.1 Weather Data
We already have weather data, but only individual measurements. We do not know how many measurements there are per hour and per day, so the raw table is not very useable for joining. Instead we'd like to have an hourly and a daily weather table containing average temperature, wind speed and precipitation. Since we are only interested in the year 2013, we also only load that specific year.
```
# Raw ISD weather measurements, restricted to the year of interest (2013).
weather = spark.read.table("isd.weather").where(f.col("year") == 2013)
weather.limit(10).toPandas()
```
## 2.2 Calculate derived metrics and preaggregate data
In order to simplify joining against weather data, we now preaggregate weather measurements to a single record per weather station and hour or per day.
### Hourly Preaggregation
For the hourly aggregation, we want to get the following columns
* `date` - day of the measurements. The day can be extracted from the timestamp column `ts` by using the Spark function `to_date` (available in the imported module `f`)
* `hour` - hour of the measurements. The hour can be extracted using the Spark function `hour`
* Grouping should be performed on the weather station IDs `usaf` and `wban` together with both extracted time columns `date` and `hour`
* For the following metrics, we are interested in the grouped averages: `wind_speed`, `air_temperature` and `precipitation_depth`
When performing the aggregation, you should ignore invalid measurements. This can be done by using the PySpark function `f.when` to conditionally only aggregate values where the corresponding quality flag (`wind_speed_qual` and `air_temperature_qual`) is not `9`. Note that it is enough to pick up only the valid values and let the `when` function return `NULL` for invalid values, since `NULL` is ignored in aggregations.
For averaging the precipitation, you should also only pick values where `precipitation_hours` equals `1`.
The final DataFrame should have the following columns (you might need to specify explicit aliases):
* `usaf`
* `wban`
* `date`
* `hour` (0-23)
* `wind_speed`
* `temperature`
* `precipitation`
```
# One record per station and hour. Invalid readings (quality flag 9)
# become NULL via f.when and are therefore ignored by avg(); precipitation
# is only averaged for one-hour measurement intervals.
valid_wind = f.when(weather["wind_speed_qual"] != 9, weather["wind_speed"])
valid_temp = f.when(weather["air_temperature_qual"] != 9, weather["air_temperature"])
hourly_precip = f.when(weather["precipitation_hours"] == 1, weather["precipitation_depth"])

hourly_weather = (
    weather
    .withColumn("date", f.to_date(weather["ts"]))
    .withColumn("hour", f.hour(weather["ts"]))
    .groupBy("usaf", "wban", "date", "hour")
    .agg(
        f.avg(valid_wind).alias("wind_speed"),
        f.avg(valid_temp).alias("temperature"),
        f.avg(hourly_precip).alias("precipitation"),
    )
)

hourly_weather.limit(10).toPandas()
```
### Daily Preaggregation
In addition to the hourly metrics, we also preaggregate the data to daily records. This can easily be performed based on the hourly aggregations with a grouping on `usaf`, `wban` and `date`. Again we want to have the metrics `temperature`, `wind_speed` and `precipitation`. For the first two metrics, we are interested in the average (as this seems to make sense), while for precipitation we are interested in the sum (total amount of rainfall per day).
```
# Roll the hourly records up to one per station and day: mean temperature
# and wind speed, total precipitation for the day.
daily_weather = (
    hourly_weather
    .groupBy("usaf", "wban", "date")
    .agg(
        f.avg("temperature").alias("temperature"),
        f.avg("wind_speed").alias("wind_speed"),
        f.sum("precipitation").alias("precipitation"),
    )
)

daily_weather.limit(10).toPandas()
```
### Save Preaggregated Weather
Finally we save both tables (hourly and daily weather), so we can directly reuse the data in the next steps.
```
# Persist both aggregates to the refined zone, overwriting prior runs.
hourly_weather.write.format("parquet").mode("overwrite").saveAsTable("refined.weather_hourly")
daily_weather.write.format("parquet").mode("overwrite").saveAsTable("refined.weather_daily")
```
## 2.3 Reload Data and draw Pictures
Now let us reload the data (just to make sure everything worked out nicely) and let's draw some pictures. We use a single station (which, by pure incident, is a weather station in NYC)
```
# Reload the daily aggregates and pull one station (which happens to be a
# weather station in NYC) into pandas, ordered by date, for plotting.
daily_weather = spark.read.table("refined.weather_daily")

nyc_station_usaf = "725053"
nyc_station_wban = "94728"

is_nyc_station = (
    (daily_weather["usaf"] == nyc_station_usaf)
    & (daily_weather["wban"] == nyc_station_wban)
)
pdf = daily_weather.filter(is_nyc_station).orderBy("date").toPandas()
```
### Wind Speed
The first picture will simply contain the wind speed for every day in 2013.
```
# Make a Plot: daily wind speed over 2013 for the NYC station.
plt.figure(figsize=(16, 6), dpi=80, facecolor='w', edgecolor='k')
plt.plot(pdf["date"],pdf["wind_speed"])
```
### Air Temperature
The next picture contains the average air temperature for every day in 2013.
```
# Make a Plot: daily average air temperature over 2013.
plt.figure(figsize=(16, 6), dpi=80, facecolor='w', edgecolor='k')
plt.plot(pdf["date"],pdf["temperature"])
```
### Precipitation
The last picture contains the precipitation for every day in 2013.
```
# Make a Plot: total daily precipitation over 2013.
plt.figure(figsize=(16, 6), dpi=80, facecolor='w', edgecolor='k')
plt.plot(pdf["date"],pdf["precipitation"])
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from __future__ import division
import pandas as pd
# Ideal single-diode IV curve: photocurrent minus the diode term,
# with hard-coded parameters; axes limited to the region of interest.
V = np.linspace(0,1000,1000)
plt.plot(V, 6.43 - 5e-14*(np.exp(V/2.6) - 1)) #in V and A
plt.ylim(0,10)
plt.xlim(0,100)
# Same curve with named parameters (R_s and R_sh are defined but not
# used in this plot).
V = np.linspace(0,100,1000)
I_o = 5e-14 #A  saturation current
I_L = 6.43 #A  photogenerated current
R_s = 0 #ohm  series resistance
R_sh = 1e6 #ohm  shunt resistance
plt.plot(V, I_L - I_o*(np.exp(V/2.6) - 1)) #in V and A
plt.ylim(0,10)
plt.xlim(0,100)
I = 100
# NOTE(review): plt.plot receives a single array, so the x-axis is the
# sample index rather than voltage; with I = 100 the expression looks like
# an abandoned experiment -- confirm intent before reusing.
plt.plot(I - I_L - I_o*(np.exp((V+I*R_s)/2.6) - 1)) #this
from sympy import solve, Symbol, exp
# Sanity-check sympy on a small 2x2 linear system.
x = Symbol('x')
y = Symbol('y')
solve([x + 5*y - 2, -3*x + 6*y - 15], [x, y])
# Symbolic diode-equation setup; the solve call is commented out,
# presumably because the implicit equation in I has no closed form.
V = Symbol('V')
I = Symbol('I')
I_o = 5e-14 #A
I_L = 6.43 #A
R_s = 10 #mohm
R_sh = 1e5 #mohm
n = 1
#solve(I - I_L - I_o*(exp( (V + I * R_s) /(26*n)) - 1) , I)
from scipy.optimize import fsolve
import math

def equations(p):
    """Residuals of the 2-equation nonlinear system at point p = (x, y)."""
    x, y = p
    return (x+y**2-4, math.exp(x) + x*y - 3)

x, y = fsolve(equations, (1, 1))
# Residuals at the root should be ~0. (Fixed: the original used a
# Python-2 `print` statement, a SyntaxError under Python 3.)
print(equations((x, y)))
import scipy.optimize as optimize
from math import sqrt

def f(c):
    """Euclidean distance from point c to (0, 0, 2); minimized at that point."""
    return sqrt(c[0]**2 + c[1]**2 + (c[2]-2)**2)

result = optimize.minimize(f, [1,1,1])
# Print the minimizer. (Fixed: `print result.values()[6]` was Python-2
# syntax and relied on unspecified OptimizeResult dict ordering.)
print(result.x)
import scipy.optimize as optimize
from math import sqrt
# Intended mapping of parameters onto the optimization vector c:
# I, c[0]
I_L = 6.43 #A
# I_o = 5e-14 #A, c[2]
# n = 1, c[2]
V = 1 #mV
# R_s = 1 #mohm, c[3]
# R_sh = 1e5 #mohm c[4]
# NOTE(review): this function is broken exploratory code -- it ignores its
# argument c, evaluates (and discards) a sympy expression built from the
# symbols defined in the previous cell, and returns the Symbol I, so the
# minimize call below cannot work. The final line is also Python-2 `print`
# syntax. Kept as-is; the intended objective needs to be reconstructed.
def f(c):
    I - I_L - I_o*(exp( (V + I * R_s) /(26*n)) - 1)
    return I

result = optimize.minimize(f, [1,1,1])
print result.values()[6]
# ASTM G173 reference solar spectra (wavelength vs irradiance columns).
irrad_df = pd.read_csv('data/ASTMG173.csv')
irrad_df.head()
irrad_df['globaltilt'].plot()
# Measured external quantum efficiency (EQE) of a SunPower cell.
eqe_df = pd.read_csv('data/eqe_sunpower_25.csv')
eqe_df.head()
eqe_df['percent'].values
from scipy import interpolate

# Interpolate the measured EQE curve onto a uniform 0.5 nm wavelength grid.
x = eqe_df['wavelength'].values
y = eqe_df['percent'].values
f = interpolate.interp1d(x, y)
wav_new = np.arange(300,1180, 0.5)
# Fixed: the original evaluated f(xnew), an undefined name (NameError);
# the new grid is wav_new.
eqe_new = f(wav_new)  # use interpolation function returned by `interp1d`
plt.plot(x, y, 'o', wav_new, eqe_new, '-')
plt.show()
# Locate the spectrum rows bounding the 300-1180 nm interpolation window.
irrad_df[irrad_df['wavelength']==300]
irrad_df[irrad_df['wavelength']==1180]
from scipy import interpolate

# Interpolate AM1.5 global-tilt irradiance onto the same 0.5 nm grid used
# for the EQE data (rows 40:1021 cover 300-1180 nm).
x = irrad_df['wavelength'][40:1021].values
irrad_global = irrad_df['globaltilt'][40:1021].values #AM1.5 spectrum
f = interpolate.interp1d(x, irrad_global)
wav_new = np.arange(300,1180, 0.5) #300 nm to 1180 nm with 0.5 nm spacing
# Fixed: the original evaluated f(xnew), an undefined name (NameError).
irrad_new = f(wav_new) #recreate AM1.5 with 0.5 nm spacing
plt.plot(x, irrad_global, 'o', wav_new, irrad_new, '-')
plt.show()
# Spectral response integrand, then the short-circuit current density
# estimate via trapezoid-like summation over the 0.5 nm grid.
plt.plot(wav_new,eqe_new*irrad_new*wav_new)
(1/1240)*sum(eqe_new*irrad_new*wav_new)*.5/1e3 #mA/cm^2
# Measured IV curve (dashed red) vs the single-diode model fit.
iv_df = pd.read_csv('data/i_v_sunpower_25.csv')
plt.plot(iv_df.voltage,iv_df.current, 'r--')
I_o = 3.6e-10 #mA/cm^2
I_L = 41.74 #mA/cm^2
plt.plot(iv_df.voltage, I_L - I_o*(np.exp(iv_df.voltage/.0283) - 1)) #in V and A
plt.ylim(0,50)
```
| github_jupyter |
# Amazon SageMaker Processing と AWS Step Functions Data Science SDK で機械学習ワークフローを構築する
Amazon SageMaker Processing を使うと、データの前/後処理やモデル評価のワークロードを Amazon SageMaker platform 上で簡単に実行することができます。Processingジョブは Amazon Simple Storage Service (Amazon S3) から入力データをダウンロードし、処理結果を Amazon S3 にアップロードします。
Step Functions SDK は AWS Step Function と Amazon SageMaker を使って、データサイエンティストが機械学習ワークフローを簡単に作成して実行するためのものです。詳しい情報は以下のドキュメントをご参照ください。
* [AWS Step Functions](https://aws.amazon.com/step-functions/)
* [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html)
* [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io)
AWS Step Functions Data Science SDK の SageMaker Processing Step [ProcessingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/stable/sagemaker.html#stepfunctions.steps.sagemaker.ProcessingStep) によって、AWS Step Functions ワークフローで実装された Sageaker Processing を機械学習エンジニアが直接システムに統合することができます。
このノートブックは、SageMaker Processing Job を使ってデータの前処理、モデルの学習、モデルの精度評価の機械学習ワークフローを AWS Step Functions Data Science SDK を使って作成する方法をご紹介します。大まかな流れは以下の通りです。
1. AWS Step Functions Data Science SDK の `ProcessingStep` を使ってデータの前処理、特徴量エンジニアリング、学習用とテスト用への分割を行う scikit-learn スクリプトを実行する SageMaker Processing Job を実行
1. AWS Step Functions Data Science SDK の `TrainingStep` を使って前処理された学習データを使ったモデルの学習を実行
1. AWS Step Functions Data Science SDK の `ProcessingStep` を使って前処理したテスト用データを使った学習済モデルの評価を実行
このノートブックで使用するデータは [Census-Income KDD Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29) です。このデータセットから特徴量を選択し、データクレンジングを実施し、二値分類モデルの利用できる形にデータを変換し、最後にデータを学習用とテスト用に分割します。このノートブックではロジスティック回帰モデルを使って、国勢調査の回答者の収入が 5万ドル以上か 5万ドル未満かを予測します。このデータセットはクラスごとの不均衡が大きく、ほとんどのデータに 5万ドル以下というラベルが付加されています。
## Setup
このノートブックを実行するのに必要なライブラリをインストールします。
```
# Import the latest sagemaker, stepfunctions and boto3 SDKs
import sys

# Upgrade pip, then install the SageMaker SDK (v2+) and the Step Functions
# Data Science SDK; the final line prints the installed versions.
!{sys.executable} -m pip install --upgrade pip
!{sys.executable} -m pip install -qU awscli boto3 "sagemaker>=2.0.0"
!{sys.executable} -m pip install -qU "stepfunctions>=2.0.0"
!{sys.executable} -m pip show sagemaker stepfunctions
```
### 必要なモジュールのインポート
```
import io
import logging
import os
import random
import time
import uuid
import boto3
import stepfunctions
from stepfunctions import steps
from stepfunctions.inputs import ExecutionInput
from stepfunctions.steps import (
Chain,
ChoiceRule,
ModelStep,
ProcessingStep,
TrainingStep,
TransformStep,
)
from stepfunctions.template import TrainingPipeline
from stepfunctions.template.utils import replace_parameters_with_jsonpath
from stepfunctions.workflow import Workflow
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import image_uris
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.s3 import S3Uploader
from sagemaker.sklearn.processing import SKLearnProcessor
# SageMaker Session and the region it is bound to.
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_region_name

# SageMaker Execution Role used by the processing/training jobs.
# You can use sagemaker.get_execution_role() if running inside sagemaker's notebook instance
role = get_execution_role()
```
次に、ノートブックから Step Functions を実行するための IAM ロール設定を行います。
## ノートブックインスタンスの IAM ロールに権限を追加
以下の手順を実行して、ノートブックインスタンスに紐づけられた IAM ロールに、AWS Step Functions のワークフローを作成して実行するための権限を追加してください。
1. [Amazon SageMaker console](https://console.aws.amazon.com/sagemaker/) を開く
2. **ノートブックインスタンス** を開いて現在使用しているノートブックインスタンスを選択する
3. **アクセス許可と暗号化** の部分に表示されている IAM ロールへのリンクをクリックする
4. IAM ロールの ARN は後で使用するのでメモ帳などにコピーしておく
5. **ポリシーをアタッチします** をクリックして `AWSStepFunctionsFullAccess` を検索する
6. `AWSStepFunctionsFullAccess` の横のチェックボックスをオンにして **ポリシーのアタッチ** をクリックする
もしこのノートブックを SageMaker のノートブックインスタンス以外で実行している場合、その環境で AWS CLI 設定を行ってください。詳細は [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) をご参照ください。
次に Step Functions で使用する実行ロールを作成します。
## Step Functions の実行ロールの作成
作成した Step Functions ワークフローは、AWS の他のサービスと連携するための IAM ロールを必要とします。
1. [IAM console](https://console.aws.amazon.com/iam/) にアクセス
2. 左側のメニューの **ロール** を選択し **ロールの作成** をクリック
3. **ユースケースの選択** で **Step Functions** をクリック
4. **次のステップ:アクセス権限** **次のステップ:タグ** **次のステップ:確認**をクリック
5. **ロール名** に `AmazonSageMaker-StepFunctionsWorkflowExecutionRole` と入力して **ロールの作成** をクリック
Next, attach an AWS Managed IAM policy to the role you created, as per the steps below.
次に、作成したロールに AWS マネージド IAM ポリシーをアタッチします。
1. [IAM console](https://console.aws.amazon.com/iam/) にアクセス
2. 左側のメニューの **ロール** を選択
3. 先ほど作成した `AmazonSageMaker-StepFunctionsWorkflowExecutionRole`を検索
4. **ポリシーをアタッチします** をクリックして `CloudWatchEventsFullAccess` を検索
5. `CloudWatchEventsFullAccess` の横のチェックボックスをオンにして **ポリシーのアタッチ** をクリック
次に、別の新しいポリシーをロールにアタッチします。ベストプラクティスとして、以下のステップで特定のリソースのみのアクセス権限とこのサンプルを実行するのに必要なアクションのみを有効にします。
1. 左側のメニューの **ロール** を選択
1. 先ほど作成した `AmazonSageMaker-StepFunctionsWorkflowExecutionRole`を検索
1. **ポリシーをアタッチします** をクリックして **ポリシーの作成** をクリック
1. **JSON** タブをクリックして以下の内容をペースト<br>
NOTEBOOK_ROLE_ARN の部分をノートブックインスタンスで使用している IAM ロールの ARN に置き換えてください。
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"events:PutTargets",
"events:DescribeRule",
"events:PutRule"
],
"Resource": [
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule",
"arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule"
]
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": "NOTEBOOK_ROLE_ARN",
"Condition": {
"StringEquals": {
"iam:PassedToService": "sagemaker.amazonaws.com"
}
}
},
{
"Sid": "VisualEditor2",
"Effect": "Allow",
"Action": [
"batch:DescribeJobs",
"batch:SubmitJob",
"batch:TerminateJob",
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"ecs:DescribeTasks",
"ecs:RunTask",
"ecs:StopTask",
"glue:BatchStopJobRun",
"glue:GetJobRun",
"glue:GetJobRuns",
"glue:StartJobRun",
"lambda:InvokeFunction",
"sagemaker:CreateEndpoint",
"sagemaker:CreateEndpointConfig",
"sagemaker:CreateHyperParameterTuningJob",
"sagemaker:CreateModel",
"sagemaker:CreateProcessingJob",
"sagemaker:CreateTrainingJob",
"sagemaker:CreateTransformJob",
"sagemaker:DeleteEndpoint",
"sagemaker:DeleteEndpointConfig",
"sagemaker:DescribeHyperParameterTuningJob",
"sagemaker:DescribeProcessingJob",
"sagemaker:DescribeTrainingJob",
"sagemaker:DescribeTransformJob",
"sagemaker:ListProcessingJobs",
"sagemaker:ListTags",
"sagemaker:StopHyperParameterTuningJob",
"sagemaker:StopProcessingJob",
"sagemaker:StopTrainingJob",
"sagemaker:StopTransformJob",
"sagemaker:UpdateEndpoint",
"sns:Publish",
"sqs:SendMessage"
],
"Resource": "*"
}
]
}
```
5. **次のステップ:タグ** **次のステップ:確認**をクリック
6. **名前** に `AmazonSageMaker-StepFunctionsWorkflowExecutionPolicy` と入力して **ポリシーの作成** をクリック
7. 左側のメニューで **ロール** を選択して `AmazonSageMaker-StepFunctionsWorkflowExecutionRole` を検索
8. **ポリシーをアタッチします** をクリック
9. 前の手順で作成した `AmazonSageMaker-StepFunctionsWorkflowExecutionPolicy` ポリシーを検索してチェックボックスをオンにして **ポリシーのアタッチ** をクリック
10. AmazonSageMaker-StepFunctionsWorkflowExecutionRole の **Role ARN** をコピーして以下のセルにペースト
```
# paste the AmazonSageMaker-StepFunctionsWorkflowExecutionRole ARN from above
# NOTE(review): this sample value points at a different account and at a role
# named "StepFunctionsWorkflowExecutionRole" rather than the
# "AmazonSageMaker-StepFunctionsWorkflowExecutionRole" created in the steps
# above -- replace it with the ARN you copied.
workflow_execution_role = "arn:aws:iam::420964472730:role/StepFunctionsWorkflowExecutionRole"
```
### Step Functions ワークフロー実行時の入力スキーマ作成
Step Functions ワークフローを実行する際に、パラメタなどを引数として渡すことができます。ここではそれらの引数のスキーマを作成します。
```
# SageMaker requires a globally unique name per job, so suffix each job
# name with a fresh uuid.
training_job_name = f"scikit-learn-training-{uuid.uuid1().hex}"
preprocessing_job_name = f"scikit-learn-sm-preprocessing-{uuid.uuid1().hex}"
evaluation_job_name = f"scikit-learn-sm-evaluation-{uuid.uuid1().hex}"

# SageMaker expects unique names for each job, model and endpoint; reusing
# a name makes the execution fail. Declare placeholders so fresh names can
# be passed in dynamically on every workflow execution.
execution_input = ExecutionInput(
    schema={
        "PreprocessingJobName": str,
        "TrainingJobName": str,
        "EvaluationProcessingJobName": str,
    }
)
```
## データの前処理と特徴量エンジニアリング
データクレンジング、前処理、特徴量エンジニアリングのスクリプトの前に、データセットの初めの 10行をのぞいてみましょう。ターゲット変数は `income` 列です。選択する特徴量は `age`, `education`, `major industry code`, `class of worker`, `num persons worked for employer`, `capital gains`, `capital losses`, `dividends from stocks` です。
```
import pandas as pd

# Preview the first 10 rows of the raw Census-Income CSV straight from the
# regional SageMaker sample-data bucket.
input_data = "s3://sagemaker-sample-data-{}/processing/census/census-income.csv".format(region)
df = pd.read_csv(input_data, nrows=10)
df.head(n=10)
```
scikit-learn の前処理スクリプトを実行するために `SKLearnProcessor`を作成します。これは、SageMaker が用意している scikit-learn のコンテナイメージを使って Processing ジョブを実行するためのものです。
```
# Processor that runs scikit-learn scripts in the SageMaker-managed
# sklearn 0.20.0 container on a single ml.m5.xlarge instance.
sklearn_processor = SKLearnProcessor(
    framework_version="0.20.0",
    role=role,
    instance_type="ml.m5.xlarge",
    instance_count=1,
    max_runtime_in_seconds=1200,  # hard stop after 20 minutes
)
```
以下のセルを実行すると `preprocessing.py` が作成されます。これは前処理のためのスクリプトです。以下のセルを書き換えて実行すれば、`preprocessing.py` が上書き保存されます。このスクリプトでは、以下の処理が実行されます。
In this script, the following steps are performed:
* 重複データやコンフリクトしているデータの削除
* ターゲット変数 `income` 列をカテゴリ変数から 2つのラベルを持つ列に変換
* `age` と `num persons worked for employer` をビニングして数値からカテゴリ変数に変換
* 連続値である`capital gains`, `capital losses`, `dividends from stocks` を学習しやすいようスケーリング
* `education`, `major industry code`, `class of worker`を学習しやすいようエンコード
* データを学習用とテスト用に分割し特徴量とラベルの値をそれぞれ保存
学習スクリプトでは、前処理済みの学習用データとラベル情報を使用してモデルを学習します。また、モデル評価スクリプトでは学習済みモデルと前処理済みのテスト用データとラベル情報を使用してモデルを評価します。
```
%%writefile preprocessing.py
"""Clean the Census-Income data, engineer features, and write train/test splits."""
import argparse
import os
import warnings

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelBinarizer, KBinsDiscretizer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.compose import make_column_transformer

from sklearn.exceptions import DataConversionWarning

warnings.filterwarnings(action="ignore", category=DataConversionWarning)

# Feature columns kept from the raw file; "income" is the target.
columns = [
    "age",
    "education",
    "major industry code",
    "class of worker",
    "num persons worked for employer",
    "capital gains",
    "capital losses",
    "dividends from stocks",
    "income",
]
# Raw target labels (note the leading spaces present in the source data).
class_labels = [" - 50000.", " 50000+."]


def print_shape(df):
    """Print the frame's shape together with its positive/negative label counts."""
    negative_examples, positive_examples = np.bincount(df["income"])
    print(
        "Data shape: {}, {} positive examples, {} negative examples".format(
            df.shape, positive_examples, negative_examples
        )
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Fraction of rows held out for the test split (default 30%).
    parser.add_argument("--train-test-split-ratio", type=float, default=0.3)
    args, _ = parser.parse_known_args()

    print("Received arguments {}".format(args))

    # SageMaker Processing mounts the S3 input under /opt/ml/processing/input.
    input_data_path = os.path.join("/opt/ml/processing/input", "census-income.csv")

    print("Reading input data from {}".format(input_data_path))
    df = pd.read_csv(input_data_path)
    df = pd.DataFrame(data=df, columns=columns)
    df.dropna(inplace=True)
    df.drop_duplicates(inplace=True)
    # Map the string class labels to 0 (<= 50k) / 1 (> 50k).
    df.replace(class_labels, [0, 1], inplace=True)
    negative_examples, positive_examples = np.bincount(df["income"])
    print(
        "Data after cleaning: {}, {} positive examples, {} negative examples".format(
            df.shape, positive_examples, negative_examples
        )
    )

    split_ratio = args.train_test_split_ratio
    print("Splitting data into train and test sets with ratio {}".format(split_ratio))
    X_train, X_test, y_train, y_test = train_test_split(
        df.drop("income", axis=1), df["income"], test_size=split_ratio, random_state=0
    )

    # (columns, transformer) argument order is the scikit-learn 0.20 API used
    # by this container image; later releases reversed the order.
    preprocess = make_column_transformer(
        (
            ["age", "num persons worked for employer"],
            KBinsDiscretizer(encode="onehot-dense", n_bins=10),
        ),
        (
            ["capital gains", "capital losses", "dividends from stocks"],
            StandardScaler(),
        ),
        (
            ["education", "major industry code", "class of worker"],
            OneHotEncoder(sparse=False),
        ),
    )
    print("Running preprocessing and feature engineering transformations")
    # Fit only on the training split to avoid leaking test statistics.
    train_features = preprocess.fit_transform(X_train)
    test_features = preprocess.transform(X_test)

    print("Train data shape after preprocessing: {}".format(train_features.shape))
    print("Test data shape after preprocessing: {}".format(test_features.shape))

    # Outputs written under /opt/ml/processing/{train,test} are uploaded to S3.
    train_features_output_path = os.path.join("/opt/ml/processing/train", "train_features.csv")
    train_labels_output_path = os.path.join("/opt/ml/processing/train", "train_labels.csv")

    test_features_output_path = os.path.join("/opt/ml/processing/test", "test_features.csv")
    test_labels_output_path = os.path.join("/opt/ml/processing/test", "test_labels.csv")

    print("Saving training features to {}".format(train_features_output_path))
    pd.DataFrame(train_features).to_csv(train_features_output_path, header=False, index=False)

    print("Saving test features to {}".format(test_features_output_path))
    pd.DataFrame(test_features).to_csv(test_features_output_path, header=False, index=False)

    print("Saving training labels to {}".format(train_labels_output_path))
    y_train.to_csv(train_labels_output_path, header=False, index=False)

    print("Saving test labels to {}".format(test_labels_output_path))
    y_test.to_csv(test_labels_output_path, header=False, index=False)
```
前処理用スクリプトを S3 にアップロードします。
```
# Upload the preprocessing script to the session's default bucket so the
# processing container can fetch it.
PREPROCESSING_SCRIPT_LOCATION = "preprocessing.py"

input_code = sagemaker_session.upload_data(
    PREPROCESSING_SCRIPT_LOCATION,
    bucket=sagemaker_session.default_bucket(),
    key_prefix="data/sklearn_processing/code",
)
```
Processing ジョブの出力を保存する S3 パスを作成します。
```
# S3 locations for the preprocessing job's outputs.
s3_bucket_base_uri = f"s3://{sagemaker_session.default_bucket()}"
output_data = f"{s3_bucket_base_uri}/data/sklearn_processing/output"
preprocessed_training_data = f"{output_data}/train_data"
```
### `ProcessingStep` の作成
それでは、SageMaker Processing ジョブを起動するための [ProcessingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/stable/sagemaker.html#stepfunctions.steps.sagemaker.ProcessingStep) を作成しましょう。
このステップは、前の手順で定義した SKLearnProcessor に入力と出力の情報を追加して使用します。
#### [ProcessingInputs](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.processing.ProcessingInput) と [ProcessingOutputs](https://sagemaker.readthedocs.io/en/stable/api/training/processing.html#sagemaker.processing.ProcessingOutput) オブジェクトを作成して SageMaker Processing ジョブに入力と出力の情報を追加
```
# Inputs: the raw CSV and the preprocessing script, mounted into the container.
inputs = [
    ProcessingInput(
        source=input_data, destination="/opt/ml/processing/input", input_name="input-1"
    ),
    ProcessingInput(
        source=input_code,
        destination="/opt/ml/processing/input/code",
        input_name="code",
    ),
]

# Outputs: the train/test splits the script writes, uploaded back to S3.
outputs = [
    ProcessingOutput(
        source="/opt/ml/processing/train",
        destination="{}/{}".format(output_data, "train_data"),
        output_name="train_data",
    ),
    ProcessingOutput(
        source="/opt/ml/processing/test",
        destination="{}/{}".format(output_data, "test_data"),
        output_name="test_data",
    ),
]
```
#### `ProcessingStep` の作成
```
# preprocessing_job_name = generate_job_name()
# Step Functions state that launches the preprocessing job; the job name is
# a placeholder filled in from the workflow's execution input at run time.
processing_step = ProcessingStep(
    "SageMaker pre-processing step",
    processor=sklearn_processor,
    job_name=execution_input["PreprocessingJobName"],
    inputs=inputs,
    outputs=outputs,
    container_arguments=["--train-test-split-ratio", "0.2"],
    container_entrypoint=["python3", "/opt/ml/processing/input/code/preprocessing.py"],
)
```
## 前処理済みデータを使ったモデルの学習
学習スクリプト `train.py` を使って学習ジョブを実行するための `SKLearn` インスタンスを作成します。これはあとで `TrainingStep` を作成する際に使用します。
```
from sagemaker.sklearn.estimator import SKLearn

# Estimator that runs train.py in the SageMaker sklearn 0.20.0 container.
# Fixed: this notebook installs sagemaker>=2.0.0, where the SDK v1 keyword
# `train_instance_type` was renamed to `instance_type`; the old name is
# rejected by SDK v2.
sklearn = SKLearn(
    entry_point="train.py",
    instance_type="ml.m5.xlarge",
    role=role,
    framework_version="0.20.0",
    py_version="py3",
)
```
学習スクリプト `train.py` は、ロジスティック回帰モデルを学習し、学習済みモデルを `/opt/ml/model` に保存します。Amazon SageMaker は、学習ジョブの最後にそこに保存されているモデルを `model.tar.gz` に圧縮して S3 にアップロードします。
```
%%writefile train.py
"""Train a logistic-regression classifier on the preprocessed features."""
import os

import pandas as pd
from sklearn.linear_model import LogisticRegression
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23+;
# acceptable here because the job runs in the framework_version 0.20.0 container.
from sklearn.externals import joblib

if __name__ == "__main__":
    # SageMaker mounts the "train" channel at this path.
    training_data_directory = "/opt/ml/input/data/train"
    train_features_data = os.path.join(training_data_directory, "train_features.csv")
    train_labels_data = os.path.join(training_data_directory, "train_labels.csv")
    print("Reading input data")
    X_train = pd.read_csv(train_features_data, header=None)
    y_train = pd.read_csv(train_labels_data, header=None)

    # class_weight="balanced" compensates for the heavy label imbalance.
    model = LogisticRegression(class_weight="balanced", solver="lbfgs")
    print("Training LR model")
    model.fit(X_train, y_train)
    # Anything saved under /opt/ml/model is packaged as model.tar.gz in S3.
    model_output_directory = os.path.join("/opt/ml/model", "model.joblib")
    print("Saving model to {}".format(model_output_directory))
    joblib.dump(model, model_output_directory)
```
### `TrainingStep` の作成
```
# Step Functions state that launches the training job on the "train"
# channel produced by preprocessing; blocks until the job completes.
training_step = steps.TrainingStep(
    "SageMaker Training Step",
    estimator=sklearn,
    data={"train": sagemaker.TrainingInput(preprocessed_training_data, content_type="text/csv")},
    job_name=execution_input["TrainingJobName"],
    wait_for_completion=True,
)
```
## モデルの評価
`evaluation.py` はモデル評価用のスクリプトです。このスクリプトは scikit-learn を用いるため、以前の手順で使用した`SKLearnProcessor` を使用します。このスクリプトは学習済みモデルとテスト用データセットを入力として受け取り、各分類クラスの分類評価メトリクス、precision、リコール、F1スコア、accuracy と ROC AUC が記載された JSON ファイルを出力します。
```
%%writefile evaluation.py
"""Evaluate the trained model on the held-out test set and write a JSON report."""
import json
import os
import tarfile

import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score

if __name__ == "__main__":
    # The model artifact is mounted by the processing job as a tarball.
    model_path = os.path.join("/opt/ml/processing/model", "model.tar.gz")
    print("Extracting model from path: {}".format(model_path))
    with tarfile.open(model_path) as tar:
        tar.extractall(path=".")
    print("Loading model")
    model = joblib.load("model.joblib")

    print("Loading test input data")
    test_features_data = os.path.join("/opt/ml/processing/test", "test_features.csv")
    test_labels_data = os.path.join("/opt/ml/processing/test", "test_labels.csv")

    X_test = pd.read_csv(test_features_data, header=None)
    y_test = pd.read_csv(test_labels_data, header=None)
    predictions = model.predict(X_test)

    print("Creating classification evaluation report")
    report_dict = classification_report(y_test, predictions, output_dict=True)
    report_dict["accuracy"] = accuracy_score(y_test, predictions)
    report_dict["roc_auc"] = roc_auc_score(y_test, predictions)

    print("Classification report:\n{}".format(report_dict))

    evaluation_output_path = os.path.join("/opt/ml/processing/evaluation", "evaluation.json")
    print("Saving classification report to {}".format(evaluation_output_path))

    with open(evaluation_output_path, "w") as f:
        f.write(json.dumps(report_dict))

# NOTE(review): %%writefile captures the ENTIRE cell, so the upload code
# below is written into evaluation.py too (and would raise NameError on
# `sagemaker_session` when the script runs in the container). These lines
# likely belong in a separate cell -- confirm against the original notebook.
MODELEVALUATION_SCRIPT_LOCATION = "evaluation.py"

input_evaluation_code = sagemaker_session.upload_data(
    MODELEVALUATION_SCRIPT_LOCATION,
    bucket=sagemaker_session.default_bucket(),
    key_prefix="data/sklearn_processing/code",
)
```
モデル評価用の ProcessingStep の入力と出力オブジェクトを作成します。
```
# S3 locations for the evaluation step: preprocessed test data, the trained
# model artifact, and the destination for the evaluation report.
preprocessed_testing_data = "{}/{}".format(output_data, "test_data")
model_data_s3_uri = "{}/{}/{}".format(s3_bucket_base_uri, training_job_name, "output/model.tar.gz")
output_model_evaluation_s3_uri = "{}/{}/{}".format(
    s3_bucket_base_uri, training_job_name, "evaluation"
)
# Inputs are mounted into the processing container at the given destinations,
# matching the paths hard-coded in evaluation.py.
inputs_evaluation = [
    ProcessingInput(
        source=preprocessed_testing_data,
        destination="/opt/ml/processing/test",
        input_name="input-1",
    ),
    ProcessingInput(
        source=model_data_s3_uri,
        destination="/opt/ml/processing/model",
        input_name="input-2",
    ),
    ProcessingInput(
        source=input_evaluation_code,
        destination="/opt/ml/processing/input/code",
        input_name="code",
    ),
]
outputs_evaluation = [
    ProcessingOutput(
        source="/opt/ml/processing/evaluation",
        destination=output_model_evaluation_s3_uri,
        output_name="evaluation",
    ),
]
# Same sklearn 0.20 processor configuration as the preprocessing step.
model_evaluation_processor = SKLearnProcessor(
    framework_version="0.20.0",
    role=role,
    instance_type="ml.m5.xlarge",
    instance_count=1,
    max_runtime_in_seconds=1200,
)
processing_evaluation_step = ProcessingStep(
    "SageMaker Processing Model Evaluation step",
    processor=model_evaluation_processor,
    job_name=execution_input["EvaluationProcessingJobName"],
    inputs=inputs_evaluation,
    outputs=outputs_evaluation,
    container_entrypoint=["python3", "/opt/ml/processing/input/code/evaluation.py"],
)
```
いずれかのステップが失敗したときにワークフローが失敗だとわかるように `Fail` 状態を作成します。
```
# Terminal Fail state the workflow transitions to when any step errors.
failed_state_sagemaker_processing_failure = stepfunctions.steps.states.Fail(
    "ML Workflow failed", cause="SageMakerProcessingJobFailed"
)
```
#### ワークフローの中のエラーハンドリングを追加
エラーハンドリングのために [Catch Block](https://aws-step-functions-data-science-sdk.readthedocs.io/en/stable/states.html#stepfunctions.steps.states.Catch) を使用します。もし Processing ジョブステップか学習ステップが失敗したら、`Fail` 状態に遷移します。
```
# Route any task failure (States.TaskFailed) from the three job steps
# to the Fail state.
catch_state_processing = stepfunctions.steps.states.Catch(
    error_equals=["States.TaskFailed"],
    next_step=failed_state_sagemaker_processing_failure,
)
processing_step.add_catch(catch_state_processing)
processing_evaluation_step.add_catch(catch_state_processing)
training_step.add_catch(catch_state_processing)
```
## `Workflow` の作成と実行
```
# Chain the three steps into a linear workflow and create it in Step Functions.
workflow_graph = Chain([processing_step, training_step, processing_evaluation_step])
branching_workflow = Workflow(
    name="SageMakerProcessingWorkflow",
    definition=workflow_graph,
    role=workflow_execution_role,
)
branching_workflow.create()
# branching_workflow.update(workflow_graph)
# Execute workflow
execution = branching_workflow.execute(
    inputs={
        "PreprocessingJobName": preprocessing_job_name,  # Each pre processing job (SageMaker processing job) requires a unique name,
        "TrainingJobName": training_job_name,  # Each Sagemaker Training job requires a unique name,
        "EvaluationProcessingJobName": evaluation_job_name,  # Each SageMaker processing job requires a unique name,
    }
)
# Block until the workflow finishes, then render its progress diagram.
execution_output = execution.get_output(wait=True)
execution.render_progress()
```
### ワークフローの出力を確認
Amazon S3 から `evaluation.json` を取得して確認します。ここにはモデルの評価レポートが書かれています。なお、以下のセルは Step Functions でワークフローの実行が完了してから(`evaluation.json` が出力されてから)実行してください。
```
# Fetch the workflow output and locate the S3 URI of evaluation.json
# among the evaluation step's outputs.
workflow_execution_output_json = execution.get_output(wait=True)
from sagemaker.s3 import S3Downloader
import json
evaluation_output_config = workflow_execution_output_json["ProcessingOutputConfig"]
for output in evaluation_output_config["Outputs"]:
    if output["OutputName"] == "evaluation":
        evaluation_s3_uri = "{}/{}".format(output["S3Output"]["S3Uri"], "evaluation.json")
        break
# Download and pretty-print the evaluation report.
evaluation_output = S3Downloader.read_file(evaluation_s3_uri)
evaluation_output_dict = json.loads(evaluation_output)
print(json.dumps(evaluation_output_dict, sort_keys=True, indent=4))
```
## リソースの削除
このノートブックの実行が終わったら、不要なリソースを削除することを忘れないでください。以下のコードのコメントアウトを外してから実行すると、このノートブックで作成した Step Functions のワークフローを削除することができます。ノートブックインスタンス、各種データを保存した S3 バケットも不要であれば削除してください。
```
# branching_workflow.delete()
```
| github_jupyter |
This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
In case you are facing issues, please read the following two documents first:
https://github.com/IBM/skillsnetwork/wiki/Environment-Setup
https://github.com/IBM/skillsnetwork/wiki/FAQ
Then, please feel free to ask:
https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all
Please make sure to follow the guidelines before asking a question:
https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me
If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
```
from IPython.display import Markdown, display
def printmd(string):
    """Render *string* as a red level-1 Markdown heading in the notebook."""
    display(Markdown('# <span style="color:red">'+string+'</span>'))

# Warn if a SparkContext already exists (i.e. running in a Spark runtime).
if ('sc' in locals() or 'sc' in globals()):
    printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')

!pip install pyspark==2.4.5

try:
    from pyspark import SparkContext, SparkConf
    from pyspark.sql import SparkSession
except ImportError as e:
    printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')

# Local-mode Spark for test purposes only (not for production).
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
    .builder \
    .getOrCreate()
```
In case you want to learn how ETL is done, please run the following notebook first and update the file name below accordingly
https://github.com/IBM/coursera/blob/master/coursera_ml/a2_w1_s3_ETL.ipynb
```
# delete files from previous runs
!rm -f hmp.parquet*
# download the file containing the data in PARQUET format
!wget https://github.com/IBM/coursera/raw/master/hmp.parquet
# create a dataframe out of it
df = spark.read.parquet('hmp.parquet')
# register a corresponding query table
df.createOrReplaceTempView('df')
df_energy = spark.sql("""
select sqrt(sum(x*x)+sum(y*y)+sum(z*z)) as label, class from df group by class
""")
df_energy.createOrReplaceTempView('df_energy')
df_join = spark.sql('select * from df inner join df_energy on df.class=df_energy.class')
splits = df_join.randomSplit([0.8, 0.2])
df_train = splits[0]
df_test = splits[1]
df_train.count()
df_test.count()
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import MinMaxScaler
vectorAssembler = VectorAssembler(inputCols=["x","y","z"],
outputCol="features")
normalizer = MinMaxScaler(inputCol="features", outputCol="features_norm")
from pyspark.ml.regression import LinearRegression
lr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[vectorAssembler, normalizer,lr])
model = pipeline.fit(df_train)
model.stages[2].summary.r2
model = pipeline.fit(df_test)
model.stages[2].summary.r2
```
| github_jupyter |
```
import tensorflow
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
# BUG FIX: `pd` was first bound only in a later cell (`import pandas as pd`),
# so `pd.read_pickle` below raised NameError when the notebook was run
# top-to-bottom. Import it here as well.
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

# Hourly ride counts joined with weather features, prepared elsewhere.
rides_weather = pd.read_pickle("rides_weather.pkl")
rides_weather
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
# from tf.keras.models import Sequential  # This does not work!
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
print("tensorflow version", tf.__version__)
#print("keras version", tf.keras.__version__)
print("pandas version", pd.__version__)

# potentially add day and hour later
# df['Various', 'Day'] = df.index.dayofyear
# df['Various', 'Hour'] = df.index.hour
rides_weather

# The first three columns are the series to forecast.
target_names = rides_weather.columns[:3]
target_names

# Predict 24 hours ahead: targets are the same columns shifted back one day.
shift_days = 1
shift_steps = shift_days * 24  # Number of hours.
df_targets = rides_weather[target_names].shift(-shift_steps)
rides_weather[target_names].head(shift_steps + 5)
df_targets.head()

# Drop the trailing rows whose shifted targets are NaN.
# NOTE(review): x_data uses only the target columns as input signals, so the
# weather features are not actually fed to the model here -- confirm intended.
x_data = rides_weather[target_names].values[0:-shift_steps]
print(type(x_data))
print("Shape:", x_data.shape)
y_data = df_targets.values[:-shift_steps]
print(type(y_data))
print("Shape:", y_data.shape)

# 90/10 chronological train/test split (no shuffling for time series).
num_data = len(x_data)
train_split = 0.9
num_train = int(train_split * num_data)
num_test = num_data - num_train
# Define x
x_train = x_data[0:num_train]
x_test = x_data[num_train:]
len(x_train) + len(x_test)
# Define y
y_train = y_data[0:num_train]
y_test = y_data[num_train:]
len(y_train) + len(y_test)
num_x_signals = x_data.shape[1]
num_x_signals
num_y_signals = y_data.shape[1]
num_y_signals
print("Min:", np.min(x_train))
print("Max:", np.max(x_train))
print("Min:", np.min(x_test))
print("Max:", np.max(x_test))

# Scale from 0 to 1; scalers are fit on the training split only.
x_scaler = MinMaxScaler()
x_train_scaled = x_scaler.fit_transform(x_train)
x_test_scaled = x_scaler.transform(x_test)
y_scaler = MinMaxScaler()
y_train_scaled = y_scaler.fit_transform(y_train)
y_test_scaled = y_scaler.transform(y_test)
print(x_train_scaled.shape)
print(y_train_scaled.shape)
def batch_generator(batch_size, sequence_length):
    """Yield an endless stream of randomly positioned training batches.

    Each yielded pair (x, y) holds float16 arrays of shape
    (batch_size, sequence_length, num_signals), cut from the scaled
    training arrays at random start offsets.
    """
    while True:
        # Fresh zeroed buffers for this batch.
        x_batch = np.zeros((batch_size, sequence_length, num_x_signals),
                           dtype=np.float16)
        y_batch = np.zeros((batch_size, sequence_length, num_y_signals),
                           dtype=np.float16)
        for row in range(batch_size):
            # Random window start somewhere inside the training data.
            start = np.random.randint(num_train - sequence_length)
            stop = start + sequence_length
            x_batch[row] = x_train_scaled[start:stop]
            y_batch[row] = y_train_scaled[start:stop]
        yield (x_batch, y_batch)
# One batch holds 214 random windows of one week of hourly data each.
batch_size = 214
sequence_length = 24 * 7
generator = batch_generator(batch_size=batch_size,
                            sequence_length=sequence_length)
# Sanity-check the generator output shapes and plot one sequence.
x_batch, y_batch = next(generator)
print(x_batch.shape)
print(y_batch.shape)
batch = 0   # First sequence in the batch.
signal = 0  # First signal from the 20 input-signals.
seq = x_batch[batch, :, signal]
plt.plot(seq)
seq = y_batch[batch, :, signal]
plt.plot(seq)
# Whole (scaled) test series as a single validation batch.
validation_data = (np.expand_dims(x_test_scaled, axis=0),
                   np.expand_dims(y_test_scaled, axis=0))
# Single GRU layer followed by a sigmoid output per target signal.
model = Sequential()
model.add(GRU(units=512,return_sequences=True,input_shape=(None,num_x_signals)))
model.add(Dense(num_y_signals, activation='sigmoid'))
# Number of initial time steps the loss ignores (model warm-up).
warmup_steps=24
def loss_mse_warmup(y_true, y_pred):
    """Mean squared error that skips the first `warmup_steps` time steps.

    Both tensors are shaped [batch_size, sequence_length, num_y_signals];
    the warm-up part of every sequence is excluded from the loss because
    the recurrent state is still settling there.
    """
    # Slice away the warm-up portion of each sequence.
    true_tail = y_true[:, warmup_steps:, :]
    pred_tail = y_pred[:, warmup_steps:, :]
    # Element-wise MSE over the remaining steps (TF1-style losses API).
    stepwise_loss = tf.losses.mean_squared_error(labels=true_tail,
                                                 predictions=pred_tail)
    # Collapse to one scalar so Keras gets an unambiguous loss value.
    return tf.reduce_mean(stepwise_loss)
# Compile with the custom warm-up loss.
optimizer = RMSprop(lr=1e-3)
model.compile(loss=loss_mse_warmup, optimizer=optimizer)
model.summary()

# Keep only the best weights (lowest validation loss) on disk.
path_checkpoint = '23_checkpoint.keras'
callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                      monitor='val_loss',
                                      verbose=1,
                                      save_weights_only=True,
                                      save_best_only=True)
callback_early_stopping = EarlyStopping(monitor='val_loss',
                                        patience=5, verbose=1)
callback_tensorboard = TensorBoard(log_dir='./23_logs/',
                                   histogram_freq=0,
                                   write_graph=False)
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       min_lr=1e-4,
                                       patience=0,
                                       verbose=1)
callbacks = [callback_early_stopping,
             callback_checkpoint,
             callback_tensorboard,
             callback_reduce_lr]

%%time
model.fit_generator(generator=generator,
                    epochs=20,
                    steps_per_epoch=100,
                    validation_data=validation_data,
                    callbacks=callbacks)

# Restore the best checkpoint before evaluating on the test series.
try:
    model.load_weights(path_checkpoint)
except Exception as error:
    print("Error trying to load checkpoint.")
    print(error)
result = model.evaluate(x=np.expand_dims(x_test_scaled, axis=0),
                        y=np.expand_dims(y_test_scaled, axis=0))
print("loss (test-set):", result)
def plot_comparison(start_idx, length=100, train=True):
    """Plot the predicted and true output-signals.

    :param start_idx: Start-index for the time-series.
    :param length: Sequence-length to process and plot.
    :param train: Boolean whether to use training- or test-set.
    """
    # Pick the data split; predictions use scaled inputs, the truth curve
    # stays in original units.
    if train:
        x_source, y_source = x_train_scaled, y_train
    else:
        x_source, y_source = x_test_scaled, y_test

    # Cut the requested window out of the series.
    end_idx = start_idx + length
    window_x = x_source[start_idx:end_idx]
    window_true = y_source[start_idx:end_idx]

    # Run the model on the window (batch dimension added/removed around
    # the call) and map the sigmoid output back to the original scale.
    predicted = model.predict(np.expand_dims(window_x, axis=0))
    pred_rescaled = y_scaler.inverse_transform(predicted[0])

    # One figure per target signal: truth vs. prediction, with the
    # warm-up period shaded grey.
    for idx in range(len(target_names)):
        plt.figure(figsize=(15,5))
        plt.plot(window_true[:, idx], label='true')
        plt.plot(pred_rescaled[:, idx], label='pred')
        plt.axvspan(0, warmup_steps, facecolor='black', alpha=0.15)
        plt.ylabel(target_names[idx])
        plt.legend()
        plt.show()
# Compare predictions against ground truth on a training and a test window,
# then persist the trained model to disk.
plot_comparison(start_idx=100, length=200, train=True)
plot_comparison(start_idx=200, length=1000, train=False)
model.save("3loc_model.model")
```
| github_jupyter |

# NYC Taxi Data Regression Model
This is an [Azure Machine Learning Pipelines](https://aka.ms/aml-pipelines) version of two-part tutorial ([Part 1](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-data-prep), [Part 2](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-auto-train-models)) available for Azure Machine Learning.
You can combine the two part tutorial into one using AzureML Pipelines as Pipelines provide a way to stitch together various steps involved (like data preparation and training in this case) in a machine learning workflow.
In this notebook, you learn how to prepare data for regression modeling by using open source library [pandas](https://pandas.pydata.org/). You run various transformations to filter and combine two different NYC taxi datasets. Once you prepare the NYC taxi data for regression modeling, then you will use [AutoMLStep](https://docs.microsoft.com/python/api/azureml-train-automl-runtime/azureml.train.automl.runtime.automl_step.automlstep?view=azure-ml-py) available with [Azure Machine Learning Pipelines](https://aka.ms/aml-pipelines) to define your machine learning goals and constraints as well as to launch the automated machine learning process. The automated machine learning technique iterates over many combinations of algorithms and hyperparameters until it finds the best model based on your criterion.
After you complete building the model, you can predict the cost of a taxi trip by training a model on data features. These features include the pickup day and time, the number of passengers, and the pickup location.
## Prerequisite
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the configuration Notebook located at https://github.com/Azure/MachineLearningNotebooks first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.
## Prepare data for regression modeling
First, we will prepare data for regression modeling. We will leverage the convenience of Azure Open Datasets along with the power of Azure Machine Learning service to create a regression model to predict NYC taxi fare prices. Perform `pip install azureml-opendatasets` to get the open dataset package. The Open Datasets package contains a class representing each data source (NycTlcGreen and NycTlcYellow) to easily filter date parameters before downloading.
### Load data
Begin by creating a dataframe to hold the taxi data. When working in a non-Spark environment, Open Datasets only allows downloading one month of data at a time with certain classes to avoid MemoryError with large datasets. To download a year of taxi data, iteratively fetch one month at a time, and before appending it to green_df_raw, randomly sample 500 records from each month to avoid bloating the dataframe. Then preview the data. To keep this process short, we are sampling data of only 1 month.
Note: Open Datasets has mirroring classes for working in Spark environments where data size and memory aren't a concern.
```
import azureml.core
# Check core SDK version number
print("SDK version:", azureml.core.VERSION)

from azureml.opendatasets import NycTlcGreen, NycTlcYellow
import pandas as pd
from datetime import datetime
from dateutil.relativedelta import relativedelta

# Download one month of green-taxi data and keep a random sample per month.
green_df_raw = pd.DataFrame([])
start = datetime.strptime("1/1/2016","%m/%d/%Y")
end = datetime.strptime("1/31/2016","%m/%d/%Y")
number_of_months = 1
sample_size = 5000
for sample_month in range(number_of_months):
    temp_df_green = NycTlcGreen(start + relativedelta(months=sample_month), end + relativedelta(months=sample_month)) \
        .to_pandas_dataframe()
    green_df_raw = green_df_raw.append(temp_df_green.sample(sample_size))

# Same for yellow-taxi data, with a smaller sample per month.
yellow_df_raw = pd.DataFrame([])
start = datetime.strptime("1/1/2016","%m/%d/%Y")
end = datetime.strptime("1/31/2016","%m/%d/%Y")
sample_size = 500
for sample_month in range(number_of_months):
    temp_df_yellow = NycTlcYellow(start + relativedelta(months=sample_month), end + relativedelta(months=sample_month)) \
        .to_pandas_dataframe()
    yellow_df_raw = yellow_df_raw.append(temp_df_yellow.sample(sample_size))
```
### See the data
```
from IPython.display import display
# Preview the first rows of both raw datasets.
display(green_df_raw.head(5))
display(yellow_df_raw.head(5))
```
### Download data locally and then upload to Azure Blob
This is a one-time process to save the data in the default datastore.
```
import os

# Local staging folders for the raw data before uploading to the datastore.
dataDir = "data"
if not os.path.exists(dataDir):
    os.mkdir(dataDir)
greenDir = dataDir + "/green"
yelloDir = dataDir + "/yellow"
if not os.path.exists(greenDir):
    os.mkdir(greenDir)
if not os.path.exists(yelloDir):
    os.mkdir(yelloDir)
greenTaxiData = greenDir + "/unprepared.parquet"
yellowTaxiData = yelloDir + "/unprepared.parquet"
# NOTE(review): the files carry a .parquet extension but are written as CSV
# via to_csv; the later Dataset.Tabular.from_delimited_files call matches
# that, so this works, but the extension is misleading.
green_df_raw.to_csv(greenTaxiData, index=False)
yellow_df_raw.to_csv(yellowTaxiData, index=False)
print("Data written to local folder.")

from azureml.core import Workspace
ws = Workspace.from_config()
print("Workspace: " + ws.name, "Region: " + ws.location, sep = '\n')

# Default datastore
default_store = ws.get_default_datastore()
# Upload both files into the workspace's default datastore.
default_store.upload_files([greenTaxiData],
                           target_path = 'green',
                           overwrite = True,
                           show_progress = True)
default_store.upload_files([yellowTaxiData],
                           target_path = 'yellow',
                           overwrite = True,
                           show_progress = True)
print("Upload calls completed.")
```
### Create and register datasets
By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. You can learn more about the what subsetting capabilities are supported by referring to [our documentation](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabular_dataset.tabulardataset?view=azure-ml-py#remarks). The data remains in its existing location, so no extra storage cost is incurred.
```
from azureml.core import Dataset
# The staged files are CSV despite the .parquet name, hence
# from_delimited_files rather than from_parquet_files.
green_taxi_data = Dataset.Tabular.from_delimited_files(default_store.path('green/unprepared.parquet'))
yellow_taxi_data = Dataset.Tabular.from_delimited_files(default_store.path('yellow/unprepared.parquet'))
```
Register the taxi datasets with the workspace so that you can reuse them in other experiments or share with your colleagues who have access to your workspace.
```
# Register both datasets so they can be reused across experiments.
green_taxi_data = green_taxi_data.register(ws, 'green_taxi_data')
yellow_taxi_data = yellow_taxi_data.register(ws, 'yellow_taxi_data')
```
### Setup Compute
#### Create new or use an existing compute
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your CPU cluster
amlcompute_cluster_name = "cpu-cluster"

# Verify that cluster does not exist already
try:
    aml_compute = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # Provision a new autoscaling CPU cluster (up to 4 nodes).
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                           max_nodes=4)
    aml_compute = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
# Waiting on an already-ready cluster returns immediately.
aml_compute.wait_for_completion(show_output=True)
```
#### Define RunConfig for the compute
We will also use `pandas`, `scikit-learn` and `automl`, `pyarrow` for the pipeline steps. Defining the `runconfig` for that.
```
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

# Shared run configuration reused by every PythonScriptStep below.
# Create a new runconfig object
aml_run_config = RunConfiguration()

# Use the aml_compute you created above.
aml_run_config.target = aml_compute

# Enable Docker
aml_run_config.environment.docker.enabled = True

# Set Docker base image to the default CPU-based image
aml_run_config.environment.docker.base_image = "mcr.microsoft.com/azureml/base:0.2.1"

# Use conda_dependencies.yml to create a conda environment in the Docker image for execution
aml_run_config.environment.python.user_managed_dependencies = False

# Specify CondaDependencies obj, add necessary packages
aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(
    conda_packages=['pandas','scikit-learn'],
    pip_packages=['azureml-sdk[automl,explain]', 'pyarrow'])

print ("Run configuration created.")
```
### Prepare data
Now we will prepare for regression modeling by using `pandas`. We run various transformations to filter and combine two different NYC taxi datasets.
We achieve this by creating a separate step for each transformation as this allows us to reuse the steps and saves us from running all over again in case of any change. We will keep data preparation scripts in one subfolder and training scripts in another.
> The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.
#### Define Useful Columns
Here we are defining a set of "useful" columns for both Green and Yellow taxi data.
```
display(green_df_raw.columns)
display(yellow_df_raw.columns)

# useful columns needed for the Azure Machine Learning NYC Taxi tutorial
# Serialized with ";" separators so the list survives being passed as a
# single command-line argument to the step scripts.
useful_columns = str(["cost", "distance", "dropoff_datetime", "dropoff_latitude",
                      "dropoff_longitude", "passengers", "pickup_datetime",
                      "pickup_latitude", "pickup_longitude", "store_forward", "vendor"]).replace(",", ";")
print("Useful columns defined.")
```
#### Cleanse Green taxi data
```
from azureml.pipeline.core import PipelineData
from azureml.pipeline.steps import PythonScriptStep

# python scripts folder
prepare_data_folder = './scripts/prepdata'

# rename columns as per Azure Machine Learning NYC Taxi tutorial
# (dict serialized to a ";"-separated string for safe argument passing)
green_columns = str({
    "vendorID": "vendor",
    "lpepPickupDatetime": "pickup_datetime",
    "lpepDropoffDatetime": "dropoff_datetime",
    "storeAndFwdFlag": "store_forward",
    "pickupLongitude": "pickup_longitude",
    "pickupLatitude": "pickup_latitude",
    "dropoffLongitude": "dropoff_longitude",
    "dropoffLatitude": "dropoff_latitude",
    "passengerCount": "passengers",
    "fareAmount": "cost",
    "tripDistance": "distance"
}).replace(",", ";")

# Define output after cleansing step
cleansed_green_data = PipelineData("cleansed_green_data", datastore=default_store).as_dataset()
print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# cleansing step creation
# See the cleanse.py for details about input and output
cleansingStepGreen = PythonScriptStep(
    name="Cleanse Green Taxi Data",
    script_name="cleanse.py",
    arguments=["--useful_columns", useful_columns,
               "--columns", green_columns,
               "--output_cleanse", cleansed_green_data],
    inputs=[green_taxi_data.as_named_input('raw_data')],
    outputs=[cleansed_green_data],
    compute_target=aml_compute,
    runconfig=aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("cleansingStepGreen created.")
```
#### Cleanse Yellow taxi data
```
# Column renames for the yellow dataset, mirroring green_columns above.
yellow_columns = str({
    "vendorID": "vendor",
    "tpepPickupDateTime": "pickup_datetime",
    "tpepDropoffDateTime": "dropoff_datetime",
    "storeAndFwdFlag": "store_forward",
    "startLon": "pickup_longitude",
    "startLat": "pickup_latitude",
    "endLon": "dropoff_longitude",
    "endLat": "dropoff_latitude",
    "passengerCount": "passengers",
    "fareAmount": "cost",
    "tripDistance": "distance"
}).replace(",", ";")

# Define output after cleansing step
cleansed_yellow_data = PipelineData("cleansed_yellow_data", datastore=default_store).as_dataset()
print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# cleansing step creation
# See the cleanse.py for details about input and output
cleansingStepYellow = PythonScriptStep(
    name="Cleanse Yellow Taxi Data",
    script_name="cleanse.py",
    arguments=["--useful_columns", useful_columns,
               "--columns", yellow_columns,
               "--output_cleanse", cleansed_yellow_data],
    inputs=[yellow_taxi_data.as_named_input('raw_data')],
    outputs=[cleansed_yellow_data],
    compute_target=aml_compute,
    runconfig=aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("cleansingStepYellow created.")
```
#### Merge cleansed Green and Yellow datasets
We are creating a single data source by merging the cleansed versions of Green and Yellow taxi data.
```
# Define output after merging step
merged_data = PipelineData("merged_data", datastore=default_store).as_dataset()
print('Merge script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# merging step creation
# See the merge.py for details about input and output
# Combines the cleansed green and yellow datasets into a single output.
mergingStep = PythonScriptStep(
    name="Merge Taxi Data",
    script_name="merge.py",
    arguments=["--output_merge", merged_data],
    inputs=[cleansed_green_data.parse_parquet_files(file_extension=None),
            cleansed_yellow_data.parse_parquet_files(file_extension=None)],
    outputs=[merged_data],
    compute_target=aml_compute,
    runconfig=aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("mergingStep created.")
```
#### Filter data
This step filters out coordinates for locations that are outside the city border. We use a TypeConverter object to change the latitude and longitude fields to decimal type.
```
# Define output after filter step
filtered_data = PipelineData("filtered_data", datastore=default_store).as_dataset()
print('Filter script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# filter step creation
# See the filter.py for details about input and output
# Drops records whose coordinates fall outside the city border.
filterStep = PythonScriptStep(
    name="Filter Taxi Data",
    script_name="filter.py",
    arguments=["--output_filter", filtered_data],
    inputs=[merged_data.parse_parquet_files(file_extension=None)],
    outputs=[filtered_data],
    compute_target=aml_compute,
    runconfig = aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("FilterStep created.")
```
#### Normalize data
In this step, we split the pickup and dropoff datetime values into the respective date and time columns and then we rename the columns to use meaningful names.
```
# Define output after normalize step
normalized_data = PipelineData("normalized_data", datastore=default_store).as_dataset()
print('Normalize script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# normalize step creation
# See the normalize.py for details about input and output
# Splits pickup/dropoff datetimes into date and time columns.
normalizeStep = PythonScriptStep(
    name="Normalize Taxi Data",
    script_name="normalize.py",
    arguments=["--output_normalize", normalized_data],
    inputs=[filtered_data.parse_parquet_files(file_extension=None)],
    outputs=[normalized_data],
    compute_target=aml_compute,
    runconfig = aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("normalizeStep created.")
```
#### Transform data
Transform the normalized taxi data to the final required format. This step does the following:
- Split the pickup and dropoff date further into the day of the week, day of the month, and month values.
- To get the day of the week value, uses the derive_column_by_example() function. The function takes an array parameter of example objects that define the input data, and the preferred output. The function automatically determines the preferred transformation. For the pickup and dropoff time columns, split the time into the hour, minute, and second by using the split_column_by_example() function with no example parameter.
- After new features are generated, use the drop_columns() function to delete the original fields as the newly generated features are preferred.
- Rename the rest of the fields to use meaningful descriptions.
```
# Define output after transform step
transformed_data = PipelineData("transformed_data", datastore=default_store).as_dataset()
print('Transform script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# transform step creation
# See the transform.py for details about input and output
# Derives weekday/day/month/time features and drops the raw datetime fields.
transformStep = PythonScriptStep(
    name="Transform Taxi Data",
    script_name="transform.py",
    arguments=["--output_transform", transformed_data],
    inputs=[normalized_data.parse_parquet_files(file_extension=None)],
    outputs=[transformed_data],
    compute_target=aml_compute,
    runconfig = aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True
)
print("transformStep created.")
```
### Split the data into train and test sets
This function segregates the data into dataset for model training and dataset for testing.
```
# Training scripts live in a separate folder so each step's snapshot stays small.
train_model_folder = './scripts/trainmodel'

# train and test splits output
output_split_train = PipelineData("output_split_train", datastore=default_store).as_dataset()
output_split_test = PipelineData("output_split_test", datastore=default_store).as_dataset()
print('Data spilt script is in {}.'.format(os.path.realpath(train_model_folder)))

# test train split step creation
# See the train_test_split.py for details about input and output
testTrainSplitStep = PythonScriptStep(
    name="Train Test Data Split",
    script_name="train_test_split.py",
    arguments=["--output_split_train", output_split_train,
               "--output_split_test", output_split_test],
    inputs=[transformed_data.parse_parquet_files(file_extension=None)],
    outputs=[output_split_train, output_split_test],
    compute_target=aml_compute,
    runconfig = aml_run_config,
    source_directory=train_model_folder,
    allow_reuse=True
)
print("testTrainSplitStep created.")
```
## Use automated machine learning to build regression model
Now we will use **automated machine learning** to build the regression model. We will use [AutoMLStep](https://docs.microsoft.com/python/api/azureml-train-automl-runtime/azureml.train.automl.runtime.automl_step.automlstep?view=azure-ml-py) in AML Pipelines for this part. Perform `pip install azureml-sdk[automl]`to get the automated machine learning package. These functions use various features from the data set and allow an automated model to build relationships between the features and the price of a taxi trip.
### Automatically train a model
#### Create experiment
```
from azureml.core import Experiment
experiment = Experiment(ws, 'NYCTaxi_Tutorial_Pipelines')
print("Experiment created")
```
#### Define settings for autogeneration and tuning
Here we define the experiment parameter and model settings for autogeneration and tuning. We can specify automl_settings as **kwargs as well.
Use your defined training settings as a parameter to an `AutoMLConfig` object. Additionally, specify your training data and the type of model, which is `regression` in this case.
Note: When using AmlCompute, we can't pass Numpy arrays directly to the fit method.
```
import logging
from azureml.train.automl import AutoMLConfig
# Change iterations to a reasonable number (50) to get better accuracy
automl_settings = {
"iteration_timeout_minutes" : 10,
"iterations" : 2,
"primary_metric" : 'spearman_correlation',
"n_cross_validations": 5
}
training_dataset = output_split_train.parse_parquet_files(file_extension=None).keep_columns(['pickup_weekday','pickup_hour', 'distance','passengers', 'vendor', 'cost'])
automl_config = AutoMLConfig(task = 'regression',
debug_log = 'automated_ml_errors.log',
path = train_model_folder,
compute_target = aml_compute,
featurization = 'auto',
training_data = training_dataset,
label_column_name = 'cost',
**automl_settings)
print("AutoML config created.")
```
#### Define AutoMLStep
```
from azureml.pipeline.steps import AutoMLStep
trainWithAutomlStep = AutoMLStep(name='AutoML_Regression',
automl_config=automl_config,
allow_reuse=True)
print("trainWithAutomlStep created.")
```
#### Build and run the pipeline
```
from azureml.pipeline.core import Pipeline
from azureml.widgets import RunDetails
pipeline_steps = [trainWithAutomlStep]
pipeline = Pipeline(workspace = ws, steps=pipeline_steps)
print("Pipeline is built.")
pipeline_run = experiment.submit(pipeline, regenerate_outputs=False)
print("Pipeline submitted for execution.")
RunDetails(pipeline_run).show()
```
### Explore the results
```
# Before we proceed we need to wait for the run to complete.
pipeline_run.wait_for_completion()
# functions to download output to local and fetch as dataframe
def get_download_path(download_path, output_name):
    """Return the local path of a downloaded pipeline-step output.

    Azure ML downloads a step's output under ``<download_path>/azureml/<run-folder>/``;
    this locates the (single) run folder and appends the output name.
    Uses os.path.join instead of manual '/' concatenation for portability.
    """
    azureml_dir = os.path.join(download_path, 'azureml')
    output_folder = os.listdir(azureml_dir)[0]
    return os.path.join(azureml_dir, output_folder, output_name)
def fetch_df(step, output_name):
    """Download the named output of a pipeline step and load it as a DataFrame."""
    local_dir = './outputs/' + output_name
    step.get_output_data(output_name).download(local_dir, overwrite=True)
    parquet_file = get_download_path(local_dir, output_name) + '/processed.parquet'
    return pd.read_parquet(parquet_file)
```
#### View cleansed taxi data
```
green_cleanse_step = pipeline_run.find_step_run(cleansingStepGreen.name)[0]
yellow_cleanse_step = pipeline_run.find_step_run(cleansingStepYellow.name)[0]
cleansed_green_df = fetch_df(green_cleanse_step, cleansed_green_data.name)
cleansed_yellow_df = fetch_df(yellow_cleanse_step, cleansed_yellow_data.name)
display(cleansed_green_df.head(5))
display(cleansed_yellow_df.head(5))
```
#### View the combined taxi data profile
```
merge_step = pipeline_run.find_step_run(mergingStep.name)[0]
combined_df = fetch_df(merge_step, merged_data.name)
display(combined_df.describe())
```
#### View the filtered taxi data profile
```
filter_step = pipeline_run.find_step_run(filterStep.name)[0]
filtered_df = fetch_df(filter_step, filtered_data.name)
display(filtered_df.describe())
```
#### View normalized taxi data
```
normalize_step = pipeline_run.find_step_run(normalizeStep.name)[0]
normalized_df = fetch_df(normalize_step, normalized_data.name)
display(normalized_df.head(5))
```
#### View transformed taxi data
```
transform_step = pipeline_run.find_step_run(transformStep.name)[0]
transformed_df = fetch_df(transform_step, transformed_data.name)
display(transformed_df.describe())
display(transformed_df.head(5))
```
#### View training data used by AutoML
```
split_step = pipeline_run.find_step_run(testTrainSplitStep.name)[0]
train_split = fetch_df(split_step, output_split_train.name)
display(train_split.describe())
display(train_split.head(5))
```
#### View the details of the AutoML run
```
from azureml.train.automl.run import AutoMLRun
#from azureml.widgets import RunDetails
# workaround to get the automl run as its the last step in the pipeline
# and get_steps() returns the steps from latest to first
for step in pipeline_run.get_steps():
automl_step_run_id = step.id
print(step.name)
print(automl_step_run_id)
break
automl_run = AutoMLRun(experiment = experiment, run_id=automl_step_run_id)
#RunDetails(automl_run).show()
```
#### Retrieve all Child runs
We use SDK methods to fetch all the child runs and see individual metrics that we log.
```
children = list(automl_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    # Keep only scalar (float) metrics so every cell of the table is comparable.
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics
# Pass axis as a keyword: positional sort_index(1) is deprecated and removed in pandas 2.0.
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
```
### Retrieve the best model
Uncomment the below cell to retrieve the best model
```
# best_run, fitted_model = automl_run.get_output()
# print(best_run)
# print(fitted_model)
```
### Test the model
#### Get test data
Uncomment the below cell to get test data
```
# split_step = pipeline_run.find_step_run(testTrainSplitStep.name)[0]
# x_test = fetch_df(split_step, output_split_test.name)[['distance','passengers', 'vendor','pickup_weekday','pickup_hour']]
# y_test = fetch_df(split_step, output_split_test.name)[['cost']]
# display(x_test.head(5))
# display(y_test.head(5))
```
#### Test the best fitted model
Uncomment the below cell to test the best fitted model
```
# y_predict = fitted_model.predict(x_test)
# y_actual = y_test.values.tolist()
# display(pd.DataFrame({'Actual':y_actual, 'Predicted':y_predict}).head(5))
# import matplotlib.pyplot as plt
# fig = plt.figure(figsize=(14, 10))
# ax1 = fig.add_subplot(111)
# distance_vals = [x[0] for x in x_test.values]
# ax1.scatter(distance_vals[:100], y_predict[:100], s=18, c='b', marker="s", label='Predicted')
# ax1.scatter(distance_vals[:100], y_actual[:100], s=18, c='r', marker="o", label='Actual')
# ax1.set_xlabel('distance (mi)')
# ax1.set_title('Predicted and Actual Cost/Distance')
# ax1.set_ylabel('Cost ($)')
# plt.legend(loc='upper left', prop={'size': 12})
# plt.rcParams.update({'font.size': 14})
# plt.show()
```
| github_jupyter |
**Chapter 3 – Classification**
_This notebook contains all the sample code and solutions to the exercises in chapter 3._
# Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd
import os
# to make this notebook's output stable across runs
rnd.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "classification"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG under images/<chapter>/.

    Creates the target directory if it does not already exist; the original
    version crashed on a fresh checkout where images/classification/ is missing.
    """
    folder = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
    os.makedirs(folder, exist_ok=True)
    path = os.path.join(folder, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```
# MNIST
```
from shutil import copyfileobj
from six.moves import urllib
from sklearn.datasets.base import get_data_home
import os
def fetch_mnist(data_home=None):
    """Fetch the original MNIST .mat file into scikit-learn's data home.

    Downloads from the GitHub mirror only when the file is not already cached.
    """
    mnist_alternative_url = "https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat"
    mldata_dir = os.path.join(get_data_home(data_home=data_home), 'mldata')
    if not os.path.exists(mldata_dir):
        os.makedirs(mldata_dir)
    mnist_save_path = os.path.join(mldata_dir, "mnist-original.mat")
    if os.path.exists(mnist_save_path):
        return  # already cached, nothing to download
    with open(mnist_save_path, "wb") as matlab_file:
        copyfileobj(urllib.request.urlopen(mnist_alternative_url), matlab_file)
fetch_mnist()
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata("MNIST original")
# from six.moves import urllib
# from sklearn.datasets import fetch_mldata
# try:
# mnist = fetch_mldata('MNIST original')
# except urllib.error.HTTPError as ex:
# print("Could not download MNIST data from mldata.org, trying alternative...")
# # Alternative method to load MNIST, if mldata.org is down
# from scipy.io import loadmat
# mnist_alternative_url = "https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat"
# mnist_path = "./mnist-original.mat"
# response = urllib.request.urlopen(mnist_alternative_url)
# with open(mnist_path, "wb") as f:
# content = response.read()
# f.write(content)
# mnist_raw = loadmat(mnist_path)
# mnist = {
# "data": mnist_raw["data"].T,
# "target": mnist_raw["label"][0],
# "COL_NAMES": ["label", "data"],
# "DESCR": "mldata.org dataset: mnist-original",
# }
# print("Success!")
mnist
X, y = mnist["data"], mnist["target"]
X.shape
y.shape
28*28
def plot_digit(data):
    """Render one flattened 28x28 MNIST digit in grayscale with axes hidden."""
    plt.imshow(data.reshape(28, 28), cmap=matplotlib.cm.binary,
               interpolation="nearest")
    plt.axis("off")
some_digit_index = 36000
some_digit = X[some_digit_index]
plot_digit(some_digit)
save_fig("some_digit_plot")
plt.show()
# EXTRA
def plot_digits(instances, images_per_row=10, **options):
    """Draw several flattened 28x28 digits as one tiled grid image.

    A blank strip is appended so the last row concatenates to full width;
    extra keyword options are forwarded to plt.imshow.
    """
    size = 28
    n_instances = len(instances)
    images_per_row = min(n_instances, images_per_row)
    n_rows = (n_instances - 1) // images_per_row + 1
    tiles = [inst.reshape(size, size) for inst in instances]
    # Filler so slicing the final row always yields a full-width strip.
    n_empty = n_rows * images_per_row - n_instances
    tiles.append(np.zeros((size, size * n_empty)))
    rows = [np.concatenate(tiles[r * images_per_row:(r + 1) * images_per_row], axis=1)
            for r in range(n_rows)]
    grid = np.concatenate(rows, axis=0)
    plt.imshow(grid, cmap=matplotlib.cm.binary, **options)
    plt.axis("off")
plt.figure(figsize=(9,9))
example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]
plot_digits(example_images, images_per_row=10)
save_fig("more_digits_plot")
plt.show()
y[some_digit_index]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = rnd.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
```
# Binary classifier
```
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([some_digit])
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, random_state=42)
for train_index, test_index in skfolds.split(X_train, y_train_5):
clone_clf = clone(sgd_clf)
X_train_folds = X_train[train_index]
y_train_folds = (y_train_5[train_index])
X_test_fold = X_train[test_index]
y_test_fold = (y_train_5[test_index])
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
n_correct = sum(y_pred == y_test_fold)
print(n_correct / len(y_pred))
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
    """Baseline classifier that predicts "not 5" for every sample.

    Used to show that plain accuracy is misleading on an imbalanced task:
    always predicting False is already ~90% accurate on the 5-vs-rest split.
    """

    def fit(self, X, y=None):
        # Nothing to learn; return self to follow the scikit-learn estimator
        # API (fit must return the estimator so calls can be chained).
        return self

    def predict(self, X):
        # One boolean column per sample, all False ("not a 5").
        return np.zeros((len(X), 1), dtype=bool)
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring="accuracy")
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_5, y_train_pred)
from sklearn.metrics import precision_score, recall_score
precision_score(y_train_5, y_train_pred)
4344 / (4344 + 1307)
recall_score(y_train_5, y_train_pred)
4344 / (4344 + 1077)
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
4344 / (4344 + (1077 + 1307)/2)
y_scores = sgd_clf.decision_function([some_digit])
y_scores
threshold = 0
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
threshold = 200000
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall value than
    # thresholds, so the last point is dropped to align the arrays.
    for series, fmt, name in ((precisions[:-1], "b--", "Precision"),
                              (recalls[:-1], "g-", "Recall")):
        plt.plot(thresholds, series, fmt, label=name, linewidth=2)
    plt.xlabel("Threshold", fontsize=16)
    plt.legend(loc="center left", fontsize=16)
    plt.ylim([0, 1])
plt.figure(figsize=(8, 4))
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.xlim([-700000, 700000])
save_fig("precision_recall_vs_threshold_plot")
plt.show()
(y_train_pred == (y_scores > 0)).all()
y_train_pred_90 = (y_scores > 70000)
precision_score(y_train_5, y_train_pred_90)
recall_score(y_train_5, y_train_pred_90)
def plot_precision_vs_recall(precisions, recalls):
    """Plot the precision/recall trade-off curve on the unit square."""
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    plt.axis([0, 1, 0, 1])
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
save_fig("precision_vs_recall_plot")
plt.show()
```
# ROC curves
```
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
def plot_roc_curve(fpr, tpr, **options):
    """Plot an ROC curve; extra keyword options are forwarded to plt.plot."""
    plt.plot(fpr, tpr, linewidth=2, **options)
    plt.plot([0, 1], [0, 1], 'k--')  # diagonal = random-classifier baseline
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
save_fig("roc_curve_plot")
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state=42)
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba")
y_scores_forest = y_probas_forest[:, 1] # score = proba of positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, "b:", linewidth=2, label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, label="Random Forest")
plt.legend(loc="lower right", fontsize=16)
save_fig("roc_curve_comparison_plot")
plt.show()
roc_auc_score(y_train_5, y_scores_forest)
y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)
precision_score(y_train_5, y_train_pred_forest)
recall_score(y_train_5, y_train_pred_forest)
```
# Multiclass classification
```
sgd_clf.fit(X_train, y_train)
sgd_clf.predict([some_digit])
some_digit_scores = sgd_clf.decision_function([some_digit])
some_digit_scores
np.argmax(some_digit_scores)
sgd_clf.classes_
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))
ovo_clf.fit(X_train, y_train)
ovo_clf.predict([some_digit])
len(ovo_clf.estimators_)
forest_clf.fit(X_train, y_train)
forest_clf.predict([some_digit])
forest_clf.predict_proba([some_digit])
cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy")
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
def plot_confusion_matrix(matrix):
    """If you prefer color and a colorbar"""
    # Bug fix: the original plotted the global `conf_mx` instead of the
    # `matrix` argument, so the parameter was silently ignored.
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    cax = ax.matshow(matrix)
    fig.colorbar(cax)
plt.matshow(conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_plot", tight_layout=False)
plt.show()
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
save_fig("confusion_matrix_errors_plot", tight_layout=False)
plt.show()
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
plt.figure(figsize=(8,8))
plt.subplot(221)
plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222)
plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223)
plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224)
plot_digits(X_bb[:25], images_per_row=5)
save_fig("error_analysis_digits_plot")
plt.show()
```
# Multilabel classification
```
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
knn_clf.predict([some_digit])
y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_train, cv=3)
f1_score(y_train, y_train_knn_pred, average="macro")
```
# Multioutput classification
```
noise = rnd.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
noise = rnd.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_train_mod = X_train
y_test_mod = X_test
some_index = 5500
plt.subplot(121); plot_digit(X_test_mod[some_index])
plt.subplot(122); plot_digit(y_test_mod[some_index])
save_fig("noisy_digit_example_plot")
plt.show()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plot_digit(clean_digit)
save_fig("cleaned_digit_example_plot")
plt.show()
```
# Extra material
## Dummy (ie. random) classifier
```
from sklearn.dummy import DummyClassifier
dmy_clf = DummyClassifier()
y_probas_dmy = cross_val_predict(dmy_clf, X_train, y_train_5, cv=3, method="predict_proba")
y_scores_dmy = y_probas_dmy[:, 1]
fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dmy)
plot_roc_curve(fprr, tprr)
```
## KNN classifier
```
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_jobs=-1, weights='distance', n_neighbors=4)
knn_clf.fit(X_train, y_train)
y_knn_pred = knn_clf.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_knn_pred)
from scipy.ndimage.interpolation import shift
def shift_digit(digit_array, dx, dy, new=0):
    """Translate a flattened 28x28 digit by (dx, dy) pixels.

    Pixels shifted in from outside the image are filled with `new`.
    Returns the result re-flattened to length 784.
    """
    as_image = digit_array.reshape(28, 28)
    shifted = shift(as_image, [dy, dx], cval=new)
    return shifted.reshape(784)
plot_digit(shift_digit(some_digit, 5, 1, new=100))
X_train_expanded = [X_train]
y_train_expanded = [y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
shifted_images = np.apply_along_axis(shift_digit, axis=1, arr=X_train, dx=dx, dy=dy)
X_train_expanded.append(shifted_images)
y_train_expanded.append(y_train)
X_train_expanded = np.concatenate(X_train_expanded)
y_train_expanded = np.concatenate(y_train_expanded)
X_train_expanded.shape, y_train_expanded.shape
knn_clf.fit(X_train_expanded, y_train_expanded)
y_knn_expanded_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_knn_expanded_pred)
ambiguous_digit = X_test[2589]
knn_clf.predict_proba([ambiguous_digit])
plot_digit(ambiguous_digit)
```
# Exercise solutions
**Coming soon**
| github_jupyter |
# Tutorial 7: Graph Neural Networks

**Filled notebook:**
[](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial7/GNN_overview.ipynb)
[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial7/GNN_overview.ipynb)
**Pre-trained models:**
[](https://github.com/phlippe/saved_models/tree/main/tutorial7)
[](https://drive.google.com/drive/folders/1DOTV_oYt5boa-MElbc2izat4VMSc1gob?usp=sharing)
**Recordings:**
[](https://youtu.be/fK7d56Ly9q8)
[](https://youtu.be/ZCNSUWe4a_Q)
In this tutorial, we will discuss the application of neural networks on graphs. Graph Neural Networks (GNNs) have recently gained increasing popularity in both applications and research, including domains such as social networks, knowledge graphs, recommender systems, and bioinformatics. While the theory and math behind GNNs might first seem complicated, the implementation of those models is quite simple and helps in understanding the methodology. Therefore, we will discuss the implementation of basic network layers of a GNN, namely graph convolutions, and attention layers. Finally, we will apply a GNN on a node-level, edge-level, and graph-level tasks.
Below, we will start by importing our standard libraries. We will use PyTorch Lightning as already done in Tutorial 5 and 6.
```
## Standard libraries
import os
import json
import math
import numpy as np
import time
## Imports for plotting
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
sns.set()
## Progress bar
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
# Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install --quiet pytorch-lightning>=1.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial7"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility.
# Bug fix: the attribute was misspelled "determinstic", which silently created a
# new, unused attribute instead of enabling deterministic cuDNN kernels.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print(device)
```
We also have a few pre-trained models we download below.
```
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial7/"
# Files to download
pretrained_files = ["NodeLevelMLP.ckpt", "NodeLevelGNN.ckpt", "GraphLevelGraphConv.ckpt"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name)
if "/" in file_name:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
```
## Graph Neural Networks
### Graph representation
Before starting the discussion of specific neural network operations on graphs, we should consider how to represent a graph. Mathematically, a graph $\mathcal{G}$ is defined as a tuple of a set of nodes/vertices $V$, and a set of edges/links $E$: $\mathcal{G}=(V,E)$. Each edge is a pair of two vertices, and represents a connection between them. For instance, let's look at the following graph:
<center width="100%" style="padding:10px"><img src="example_graph.svg" width="250px"></center>
The vertices are $V=\{1,2,3,4\}$, and edges $E=\{(1,2), (2,3), (2,4), (3,4)\}$. Note that for simplicity, we assume the graph to be undirected and hence don't add mirrored pairs like $(2,1)$. In applications, vertices and edges can often have specific attributes, and edges can even be directed. The question is how we could represent this diversity in an efficient way for matrix operations. Usually, for the edges, we decide between two variants: an adjacency matrix, or a list of paired vertex indices.
The **adjacency matrix** $A$ is a square matrix whose elements indicate whether pairs of vertices are adjacent, i.e. connected, or not. In the simplest case, $A_{ij}$ is 1 if there is a connection from node $i$ to $j$, and otherwise 0. If we have edge attributes or different categories of edges in a graph, this information can be added to the matrix as well. For an undirected graph, keep in mind that $A$ is a symmetric matrix ($A_{ij}=A_{ji}$). For the example graph above, we have the following adjacency matrix:
$$
A = \begin{bmatrix}
0 & 1 & 0 & 0\\
1 & 0 & 1 & 1\\
0 & 1 & 0 & 1\\
0 & 1 & 1 & 0
\end{bmatrix}
$$
While expressing a graph as a list of edges is more efficient in terms of memory and (possibly) computation, using an adjacency matrix is more intuitive and simpler to implement. In our implementations below, we will rely on the adjacency matrix to keep the code simple. However, common libraries use edge lists, which we will discuss later more.
Alternatively, we could also use the list of edges to define a sparse adjacency matrix with which we can work as if it was a dense matrix, but allows more memory-efficient operations. PyTorch supports this with the sub-package `torch.sparse` ([documentation](https://pytorch.org/docs/stable/sparse.html)) which is however still in a beta-stage (API might change in future).
### Graph Convolutions
Graph Convolutional Networks have been introduced by [Kipf et al.](https://openreview.net/pdf?id=SJU4ayYgl) in 2016 at the University of Amsterdam. He also wrote a great [blog post](https://tkipf.github.io/graph-convolutional-networks/) about this topic, which is recommended if you want to read about GCNs from a different perspective. GCNs are similar to convolutions in images in the sense that the "filter" parameters are typically shared over all locations in the graph. At the same time, GCNs rely on message passing methods, which means that vertices exchange information with the neighbors, and send "messages" to each other. Before looking at the math, we can try to visually understand how GCNs work. The first step is that each node creates a feature vector that represents the message it wants to send to all its neighbors. In the second step, the messages are sent to the neighbors, so that a node receives one message per adjacent node. Below we have visualized the two steps for our example graph.
<center width="100%" style="padding:10px"><img src="graph_message_passing.svg" width="700px"></center>
If we want to formulate that in more mathematical terms, we need to first decide how to combine all the messages a node receives. As the number of messages vary across nodes, we need an operation that works for any number. Hence, the usual way to go is to sum or take the mean. Given the previous features of nodes $H^{(l)}$, the GCN layer is defined as follows:
$$H^{(l+1)} = \sigma\left(\hat{D}^{-1/2}\hat{A}\hat{D}^{-1/2}H^{(l)}W^{(l)}\right)$$
$W^{(l)}$ is the weight parameters with which we transform the input features into messages ($H^{(l)}W^{(l)}$). To the adjacency matrix $A$ we add the identity matrix so that each node sends its own message also to itself: $\hat{A}=A+I$. Finally, to take the average instead of summing, we calculate the matrix $\hat{D}$ which is a diagonal matrix with $D_{ii}$ denoting the number of neighbors node $i$ has. $\sigma$ represents an arbitrary activation function, and not necessarily the sigmoid (usually a ReLU-based activation function is used in GNNs).
When implementing the GCN layer in PyTorch, we can take advantage of the flexible operations on tensors. Instead of defining a matrix $\hat{D}$, we can simply divide the summed messages by the number of neighbors afterward. Additionally, we replace the weight matrix with a linear layer, which additionally allows us to add a bias. Written as a PyTorch module, the GCN layer is defined as follows:
```
class GCNLayer(nn.Module):
    """Single graph-convolution layer.

    Mean-aggregates linearly projected neighbor features using an adjacency
    matrix that is assumed to already contain self-connections.
    """

    def __init__(self, c_in, c_out):
        super().__init__()
        self.projection = nn.Linear(c_in, c_out)

    def forward(self, node_feats, adj_matrix):
        """
        Inputs:
            node_feats - Tensor with node features of shape [batch_size, num_nodes, c_in]
            adj_matrix - Batch of adjacency matrices of the graph. If there is an edge from i to j,
                         adj_matrix[b,i,j]=1 else 0. Supports directed edges by non-symmetric
                         matrices. Assumes to already have added the identity connections.
                         Shape: [batch_size, num_nodes, num_nodes]
        """
        messages = self.projection(node_feats)
        aggregated = torch.bmm(adj_matrix, messages)
        # Number of incoming edges per node; keepdim so the division broadcasts.
        degree = adj_matrix.sum(dim=-1, keepdim=True)
        return aggregated / degree
```
To further understand the GCN layer, we can apply it to our example graph above. First, let's specify some node features and the adjacency matrix with added self-connections:
```
node_feats = torch.arange(8, dtype=torch.float32).view(1, 4, 2)
adj_matrix = torch.Tensor([[[1, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]]])
print("Node features:\n", node_feats)
print("\nAdjacency matrix:\n", adj_matrix)
```
Next, let's apply a GCN layer to it. For simplicity, we initialize the linear weight matrix as an identity matrix so that the input features are equal to the messages. This makes it easier for us to verify the message passing operation.
```
layer = GCNLayer(c_in=2, c_out=2)
layer.projection.weight.data = torch.Tensor([[1., 0.], [0., 1.]])
layer.projection.bias.data = torch.Tensor([0., 0.])
with torch.no_grad():
out_feats = layer(node_feats, adj_matrix)
print("Adjacency matrix", adj_matrix)
print("Input features", node_feats)
print("Output features", out_feats)
```
As we can see, the first node's output values are the average of itself and the second node. Similarly, we can verify all other nodes. However, in a GNN, we would also want to allow feature exchange between nodes beyond its neighbors. This can be achieved by applying multiple GCN layers, which gives us the final layout of a GNN. The GNN can be build up by a sequence of GCN layers and non-linearities such as ReLU. For a visualization, see below (figure credit - [Thomas Kipf, 2016](https://tkipf.github.io/graph-convolutional-networks/)).
<center width="100%" style="padding: 10px"><img src="gcn_network.png" width="600px"></center>
However, one issue we can see from looking at the example above is that the output features for nodes 3 and 4 are the same because they have the same adjacent nodes (including itself). Therefore, GCN layers can make the network forget node-specific information if we just take a mean over all messages. Multiple possible improvements have been proposed. While the simplest option might be using residual connections, the more common approach is to either weigh the self-connections higher or define a separate weight matrix for the self-connections. Alternatively, we can re-visit a concept from the last tutorial: attention.
### Graph Attention
If you remember from the last tutorial, attention describes a weighted average of multiple elements with the weights dynamically computed based on an input query and elements' keys (if you haven't read Tutorial 6 yet, it is recommended to at least go through the very first section called [What is Attention?](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html#What-is-Attention?)). This concept can be similarly applied to graphs, one of such is the Graph Attention Network (called GAT, proposed by [Velickovic et al., 2017](https://arxiv.org/abs/1710.10903)). Similarly to the GCN, the graph attention layer creates a message for each node using a linear layer/weight matrix. For the attention part, it uses the message from the node itself as a query, and the messages to average as both keys and values (note that this also includes the message to itself). The score function $f_{attn}$ is implemented as a one-layer MLP which maps the query and key to a single value. The MLP looks as follows (figure credit - [Velickovic et al.](https://arxiv.org/abs/1710.10903)):
<center width="100%" style="padding:10px"><img src="graph_attention_MLP.svg" width="250px"></center>
$h_i$ and $h_j$ are the original features from node $i$ and $j$ respectively, and represent the messages of the layer with $\mathbf{W}$ as weight matrix. $\mathbf{a}$ is the weight matrix of the MLP, which has the shape $[1,2\times d_{\text{message}}]$, and $\alpha_{ij}$ the final attention weight from node $i$ to $j$. The calculation can be described as follows:
$$\alpha_{ij} = \frac{\exp\left(\text{LeakyReLU}\left(\mathbf{a}\left[\mathbf{W}h_i||\mathbf{W}h_j\right]\right)\right)}{\sum_{k\in\mathcal{N}_i} \exp\left(\text{LeakyReLU}\left(\mathbf{a}\left[\mathbf{W}h_i||\mathbf{W}h_k\right]\right)\right)}$$
The operator $||$ represents the concatenation, and $\mathcal{N}_i$ the indices of the neighbors of node $i$. Note that in contrast to usual practice, we apply a non-linearity (here LeakyReLU) before the softmax over elements. Although it seems like a minor change at first, it is crucial for the attention to depend on the original input. Specifically, let's remove the non-linearity for a second, and try to simplify the expression:
$$
\begin{split}
\alpha_{ij} & = \frac{\exp\left(\mathbf{a}\left[\mathbf{W}h_i||\mathbf{W}h_j\right]\right)}{\sum_{k\in\mathcal{N}_i} \exp\left(\mathbf{a}\left[\mathbf{W}h_i||\mathbf{W}h_k\right]\right)}\\[5pt]
& = \frac{\exp\left(\mathbf{a}_{:,:d/2}\mathbf{W}h_i+\mathbf{a}_{:,d/2:}\mathbf{W}h_j\right)}{\sum_{k\in\mathcal{N}_i} \exp\left(\mathbf{a}_{:,:d/2}\mathbf{W}h_i+\mathbf{a}_{:,d/2:}\mathbf{W}h_k\right)}\\[5pt]
& = \frac{\exp\left(\mathbf{a}_{:,:d/2}\mathbf{W}h_i\right)\cdot\exp\left(\mathbf{a}_{:,d/2:}\mathbf{W}h_j\right)}{\sum_{k\in\mathcal{N}_i} \exp\left(\mathbf{a}_{:,:d/2}\mathbf{W}h_i\right)\cdot\exp\left(\mathbf{a}_{:,d/2:}\mathbf{W}h_k\right)}\\[5pt]
& = \frac{\exp\left(\mathbf{a}_{:,d/2:}\mathbf{W}h_j\right)}{\sum_{k\in\mathcal{N}_i} \exp\left(\mathbf{a}_{:,d/2:}\mathbf{W}h_k\right)}\\
\end{split}
$$
We can see that without the non-linearity, the attention term with $h_i$ actually cancels itself out, resulting in the attention being independent of the node itself. Hence, we would have the same issue as the GCN of creating the same output features for nodes with the same neighbors. This is why the LeakyReLU is crucial and adds some dependency on $h_i$ to the attention.
Once we obtain all attention factors, we can calculate the output features for each node by performing the weighted average:
$$h_i'=\sigma\left(\sum_{j\in\mathcal{N}_i}\alpha_{ij}\mathbf{W}h_j\right)$$
$\sigma$ is yet another non-linearity, as in the GCN layer. Visually, we can represent the full message passing in an attention layer as follows (figure credit - [Velickovic et al.](https://arxiv.org/abs/1710.10903)):
<center width="100%"><img src="graph_attention.jpeg" width="400px"></center>
To increase the expressiveness of the graph attention network, [Velickovic et al.](https://arxiv.org/abs/1710.10903) proposed to extend it to multiple heads similar to the Multi-Head Attention block in Transformers. This results in $N$ attention layers being applied in parallel. In the image above, it is visualized as three different colors of arrows (green, blue, and purple) that are afterward concatenated. The average is only applied for the very final prediction layer in a network.
After having discussed the graph attention layer in detail, we can implement it below:
```
class GATLayer(nn.Module):
    """Graph attention (GAT) layer after Velickovic et al. (2017), operating on a
    dense adjacency matrix, with support for multiple attention heads whose
    outputs are either concatenated or averaged.
    """

    def __init__(self, c_in, c_out, num_heads=1, concat_heads=True, alpha=0.2):
        """
        Inputs:
            c_in - Dimensionality of input features
            c_out - Dimensionality of output features
            num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The
                        output features are equally split up over the heads if concat_heads=True.
            concat_heads - If True, the output of the different heads is concatenated instead of averaged.
            alpha - Negative slope of the LeakyReLU activation.
        """
        super().__init__()
        self.num_heads = num_heads
        self.concat_heads = concat_heads
        if self.concat_heads:
            assert c_out % num_heads == 0, "Number of output features must be a multiple of the count of heads."
            # Each head produces c_out/num_heads features; concatenation restores c_out.
            c_out = c_out // num_heads
        # Sub-modules and parameters needed in the layer
        self.projection = nn.Linear(c_in, c_out * num_heads)  # shared projection W, one slice per head
        self.a = nn.Parameter(torch.Tensor(num_heads, 2 * c_out))  # attention vector a, one per head
        self.leakyrelu = nn.LeakyReLU(alpha)
        # Initialization from the original implementation
        nn.init.xavier_uniform_(self.projection.weight.data, gain=1.414)
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

    def forward(self, node_feats, adj_matrix, print_attn_probs=False):
        """
        Inputs:
            node_feats - Input features of the nodes. Shape: [batch_size, num_nodes, c_in]
            adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes]
                         NOTE(review): entries are assumed to be exactly 0 or 1 -- the masking
                         below compares against 1, so weighted adjacency matrices would break it.
            print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)
        """
        batch_size, num_nodes = node_feats.size(0), node_feats.size(1)
        # Apply linear layer and sort nodes by head
        node_feats = self.projection(node_feats)
        node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)
        # We need to calculate the attention logits for every edge in the adjacency matrix
        # Doing this on all possible combinations of nodes is very expensive
        # => Create a tensor of [W*h_i||W*h_j] with i and j being the indices of all edges
        edges = adj_matrix.nonzero(as_tuple=False) # Returns indices where the adjacency matrix is not 0 => edges
        node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)
        # Flatten (batch, node) pairs to single row indices for source (i) and target (j) of each edge
        edge_indices_row = edges[:,0] * num_nodes + edges[:,1]
        edge_indices_col = edges[:,0] * num_nodes + edges[:,2]
        a_input = torch.cat([
            torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0),
            torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0)
        ], dim=-1) # Index select returns a tensor with node_feats_flat being indexed at the desired positions along dim=0
        # Calculate attention MLP output (independent for each head)
        attn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)  # shape: [num_edges, num_heads]
        attn_logits = self.leakyrelu(attn_logits)
        # Map list of attention values back into a matrix; -9e15 acts as -inf so that
        # non-edges get ~zero probability after the softmax
        attn_matrix = attn_logits.new_zeros(adj_matrix.shape+(self.num_heads,)).fill_(-9e15)
        attn_matrix[adj_matrix[...,None].repeat(1,1,1,self.num_heads) == 1] = attn_logits.reshape(-1)
        # Weighted average of attention: normalize over the neighbor dimension j
        attn_probs = F.softmax(attn_matrix, dim=2)
        if print_attn_probs:
            print("Attention probs\n", attn_probs.permute(0, 3, 1, 2))
        node_feats = torch.einsum('bijh,bjhc->bihc', attn_probs, node_feats)
        # If heads should be concatenated, we can do this by reshaping. Otherwise, take mean
        if self.concat_heads:
            node_feats = node_feats.reshape(batch_size, num_nodes, -1)
        else:
            node_feats = node_feats.mean(dim=2)
        return node_feats
```
Again, we can apply the graph attention layer on our example graph above to understand the dynamics better. As before, the input layer is initialized as an identity matrix, but we set $\mathbf{a}$ to be a vector of arbitrary numbers to obtain different attention values. We use two heads to show the parallel, independent attention mechanisms working in the layer.
```
# Instantiate a 2-head GAT layer on 2-dimensional features and pin its weights to
# hand-picked values so the attention probabilities are easy to inspect by hand.
# `node_feats` and `adj_matrix` come from the example graph defined earlier in the notebook.
layer = GATLayer(2, 2, num_heads=2)
layer.projection.weight.data = torch.Tensor([[1., 0.], [0., 1.]])  # identity projection
layer.projection.bias.data = torch.Tensor([0., 0.])
layer.a.data = torch.Tensor([[-0.2, 0.3], [0.1, -0.1]])  # one attention vector per head
with torch.no_grad():  # inference only - no gradients needed
    out_feats = layer(node_feats, adj_matrix, print_attn_probs=True)
print("Adjacency matrix", adj_matrix)
print("Input features", node_feats)
print("Output features", out_feats)
```
We recommend that you try to calculate the attention matrix at least for one head and one node for yourself. The entries are 0 where there does not exist an edge between $i$ and $j$. For the others, we see a diverse set of attention probabilities. Moreover, the output features of node 3 and 4 are now different although they have the same neighbors.
## PyTorch Geometric
We had mentioned before that implementing graph networks with adjacency matrix is simple and straight-forward but can be computationally expensive for large graphs. Many real-world graphs can reach over 200k nodes, for which adjacency matrix-based implementations fail. There are a lot of optimizations possible when implementing GNNs, and luckily, there exist packages that provide such layers. The most popular packages for PyTorch are [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/) and the [Deep Graph Library](https://www.dgl.ai/) (the latter being actually framework agnostic). Which one to use depends on the project you are planning to do and personal taste. In this tutorial, we will look at PyTorch Geometric as part of the PyTorch family. Similar to PyTorch Lightning, PyTorch Geometric is not installed by default on GoogleColab (and actually also not in our `dl2020` environment due to many dependencies that would be unnecessary for the practicals). Hence, let's import and/or install it below:
```
# torch geometric: import if available, otherwise install the wheels matching the
# current PyTorch/CUDA combination (notebook-only; `!` lines are IPython shell magics).
try:
    import torch_geometric
except ModuleNotFoundError:
    # Installing torch geometric packages with specific CUDA+PyTorch version.
    # See https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html for details
    TORCH = torch.__version__.split('+')[0]  # e.g. "1.7.0+cu101" -> "1.7.0"
    CUDA = 'cu' + torch.version.cuda.replace('.','')  # e.g. "10.1" -> "cu101"
    # NOTE(review): assumes a CUDA build of PyTorch; torch.version.cuda is None on
    # CPU-only installs, where .replace() would fail -- confirm the target environment.
    !pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
    !pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
    !pip install torch-cluster -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
    !pip install torch-spline-conv -f https://pytorch-geometric.com/whl/torch-{TORCH}+{CUDA}.html
    !pip install torch-geometric
import torch_geometric
import torch_geometric.nn as geom_nn
import torch_geometric.data as geom_data
```
PyTorch Geometric provides us a set of common graph layers, including the GCN and GAT layer we implemented above. Additionally, similar to PyTorch's torchvision, it provides the common graph datasets and transformations on those to simplify training. Compared to our implementation above, PyTorch Geometric uses a list of index pairs to represent the edges. The details of this library will be explored further in our experiments.
In our tasks below, we want to allow us to pick from a multitude of graph layers. Thus, we define again below a dictionary to access those using a string:
```
# Lookup table mapping a layer-name string to the corresponding
# PyTorch Geometric convolution class.
gnn_layer_by_name = dict(
    GCN=geom_nn.GCNConv,
    GAT=geom_nn.GATConv,
    GraphConv=geom_nn.GraphConv,
)
```
Additionally to GCN and GAT, we added the layer `geom_nn.GraphConv` ([documentation](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.GraphConv)). GraphConv is a GCN with a separate weight matrix for the self-connections. Mathematically, this would be:
$$
\mathbf{x}_i^{(l+1)} = \mathbf{W}^{(l + 1)}_1 \mathbf{x}_i^{(l)} + \mathbf{W}^{(\ell + 1)}_2 \sum_{j \in \mathcal{N}_i} \mathbf{x}_j^{(l)}
$$
In this formula, the neighbor's messages are added instead of averaged. However, PyTorch Geometric provides the argument `aggr` to switch between summing, averaging, and max pooling.
## Experiments on graph structures
Tasks on graph-structured data can be grouped into three groups: node-level, edge-level and graph-level. The different levels describe on which level we want to perform classification/regression. We will discuss all three types in more detail below.
### Node-level tasks: Semi-supervised node classification
Node-level tasks have the goal to classify nodes in a graph. Usually, we have given a single, large graph with >1000 nodes of which a certain amount of nodes are labeled. We learn to classify those labeled examples during training and try to generalize to the unlabeled nodes.
A popular example that we will use in this tutorial is the Cora dataset, a citation network among papers. The Cora consists of 2708 scientific publications with links between each other representing the citation of one paper by another. The task is to classify each publication into one of seven classes. Each publication is represented by a bag-of-words vector. This means that we have a vector of 1433 elements for each publication, where a 1 at feature $i$ indicates that the $i$-th word of a pre-defined dictionary is in the article. Binary bag-of-words representations are commonly used when we need very simple encodings, and already have an intuition of what words to expect in a network. There exist much better approaches, but we will leave this to the NLP courses to discuss.
We will load the dataset below:
```
# Download (if needed) and load the Cora citation-network dataset into DATASET_PATH.
cora_dataset = torch_geometric.datasets.Planetoid(root=DATASET_PATH, name="Cora")
```
Let's look at how PyTorch Geometric represents the graph data. Note that although we have a single graph, PyTorch Geometric returns a dataset for compatibility to other datasets.
```
# Inspect the single graph in the dataset (a torch_geometric.data.Data object).
cora_dataset[0]
```
The graph is represented by a `Data` object ([documentation](https://pytorch-geometric.readthedocs.io/en/latest/modules/data.html#torch_geometric.data.Data)) which we can access as a standard Python namespace. The edge index tensor is the list of edges in the graph and contains the mirrored version of each edge for undirected graphs. The `train_mask`, `val_mask`, and `test_mask` are boolean masks that indicate which nodes we should use for training, validation, and testing. The `x` tensor is the feature tensor of our 2708 publications, and `y` the labels for all nodes.
After having seen the data, we can implement a simple graph neural network. The GNN applies a sequence of graph layers (GCN, GAT, or GraphConv), ReLU as activation function, and dropout for regularization. See below for the specific implementation.
```
class GNNModel(nn.Module):
    """Stack of graph convolution layers with ReLU and dropout between them.

    The convolution type (GCN/GAT/GraphConv) is selected by name via `gnn_layer_by_name`.
    """

    def __init__(self, c_in, c_hidden, c_out, num_layers=2, layer_name="GCN", dp_rate=0.1, **kwargs):
        """
        Inputs:
            c_in - Dimension of input features
            c_hidden - Dimension of hidden features
            c_out - Dimension of the output features. Usually number of classes in classification
            num_layers - Number of "hidden" graph layers
            layer_name - String of the graph layer to use
            dp_rate - Dropout rate to apply throughout the network
            kwargs - Additional arguments for the graph layer (e.g. number of heads for GAT)
        """
        super().__init__()
        layer_cls = gnn_layer_by_name[layer_name]
        modules = []
        width = c_in
        # num_layers-1 hidden blocks of (graph conv -> ReLU -> dropout)
        for _ in range(num_layers - 1):
            modules.append(layer_cls(in_channels=width, out_channels=c_hidden, **kwargs))
            modules.append(nn.ReLU(inplace=True))
            modules.append(nn.Dropout(dp_rate))
            width = c_hidden
        # Final graph layer maps to the output dimension, without activation/dropout
        modules.append(layer_cls(in_channels=width, out_channels=c_out, **kwargs))
        self.layers = nn.ModuleList(modules)

    def forward(self, x, edge_index):
        """
        Inputs:
            x - Input features per node
            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
        """
        for module in self.layers:
            # Graph layers (subclasses of MessagePassing) additionally need the edge list,
            # while ReLU/Dropout only take the node features.
            if isinstance(module, geom_nn.MessagePassing):
                x = module(x, edge_index)
            else:
                x = module(x)
        return x
```
Good practice in node-level tasks is to create an MLP baseline that is applied to each node independently. This way we can verify whether adding the graph information to the model indeed improves the prediction, or not. It might also be that the features per node are already expressive enough to clearly point towards a specific class. To check this, we implement a simple MLP below.
```
class MLPModel(nn.Module):
    """Node-wise MLP baseline: ignores the graph structure entirely.

    Serves as a reference point to check whether graph information helps.
    """

    def __init__(self, c_in, c_hidden, c_out, num_layers=2, dp_rate=0.1):
        """
        Inputs:
            c_in - Dimension of input features
            c_hidden - Dimension of hidden features
            c_out - Dimension of the output features. Usually number of classes in classification
            num_layers - Number of hidden layers
            dp_rate - Dropout rate to apply throughout the network
        """
        super().__init__()
        modules = []
        width = c_in
        # num_layers-1 hidden blocks of (linear -> ReLU -> dropout)
        for _ in range(num_layers - 1):
            modules.extend([
                nn.Linear(width, c_hidden),
                nn.ReLU(inplace=True),
                nn.Dropout(dp_rate),
            ])
            width = c_hidden
        # Output projection without activation (logits)
        modules.append(nn.Linear(width, c_out))
        self.layers = nn.Sequential(*modules)

    def forward(self, x, *args, **kwargs):
        """
        Inputs:
            x - Input features per node
        (Extra positional/keyword args such as edge_index are accepted and ignored
        so the MLP is call-compatible with the GNN models.)
        """
        return self.layers(x)
```
Finally, we can merge the models into a PyTorch Lightning module which handles the training, validation, and testing for us.
```
class NodeLevelGNN(pl.LightningModule):
    """Lightning module for semi-supervised node classification on a single graph.

    Wraps either the graph-agnostic MLP baseline or a GNNModel; the loss and
    accuracy are computed only on the nodes selected by the split mask.
    """

    def __init__(self, model_name, **model_kwargs):
        """
        Inputs:
            model_name - "MLP" selects the baseline; any other value builds a GNNModel
            model_kwargs - Forwarded to the underlying model constructor
        """
        super().__init__()
        # Saving hyperparameters
        self.save_hyperparameters()
        if model_name == "MLP":
            self.model = MLPModel(**model_kwargs)
        else:
            self.model = GNNModel(**model_kwargs)
        self.loss_module = nn.CrossEntropyLoss()

    def forward(self, data, mode="train"):
        """Return (loss, accuracy) on the nodes of the split selected by `mode`."""
        x, edge_index = data.x, data.edge_index
        x = self.model(x, edge_index)
        # Only calculate the loss on the nodes corresponding to the mask
        if mode == "train":
            mask = data.train_mask
        elif mode == "val":
            mask = data.val_mask
        elif mode == "test":
            mask = data.test_mask
        else:
            # Raise instead of `assert False`: asserts are stripped under `python -O`,
            # which would silently fall through with `mask` undefined.
            raise ValueError(f"Unknown forward mode: {mode}")
        loss = self.loss_module(x[mask], data.y[mask])
        acc = (x[mask].argmax(dim=-1) == data.y[mask]).sum().float() / mask.sum()
        return loss, acc

    def configure_optimizers(self):
        # We use SGD here, but Adam works as well
        optimizer = optim.SGD(self.parameters(), lr=0.1, momentum=0.9, weight_decay=2e-3)
        return optimizer

    def training_step(self, batch, batch_idx):
        loss, acc = self.forward(batch, mode="train")
        self.log('train_loss', loss)
        self.log('train_acc', acc)
        return loss

    def validation_step(self, batch, batch_idx):
        _, acc = self.forward(batch, mode="val")
        self.log('val_acc', acc)

    def test_step(self, batch, batch_idx):
        _, acc = self.forward(batch, mode="test")
        self.log('test_acc', acc)
```
Additionally to the Lightning module, we define a training function below. As we have a single graph, we use a batch size of 1 for the data loader and share the same data loader for the train, validation, and test set (the mask is picked inside the Lightning module). Besides, we set the argument `progress_bar_refresh_rate` to zero as it usually shows the progress per epoch, but an epoch only consists of a single step. The rest of the code is very similar to what we have seen in Tutorial 5 and 6 already.
```
def train_node_classifier(model_name, dataset, **model_kwargs):
    """Train (or load a pretrained) node-classification model and evaluate it.

    Inputs:
        model_name - "MLP" or "GNN"; also used in checkpoint/log directory names
        dataset - PyTorch Geometric dataset holding a single graph with split masks
        model_kwargs - Forwarded to the NodeLevelGNN constructor
    Returns:
        (model, result) where result maps "train"/"val"/"test" to accuracies.
    """
    pl.seed_everything(42)
    # Single graph => batch size 1; the same loader serves train/val/test because
    # the split is selected via masks inside the Lightning module.
    node_data_loader = geom_data.DataLoader(dataset, batch_size=1)
    # Create a PyTorch Lightning trainer with the generation callback
    root_dir = os.path.join(CHECKPOINT_PATH, "NodeLevel" + model_name)
    os.makedirs(root_dir, exist_ok=True)
    trainer = pl.Trainer(default_root_dir=root_dir,
                         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
                         gpus=1 if str(device).startswith("cuda") else 0,
                         max_epochs=200,
                         progress_bar_refresh_rate=0) # 0 because epoch size is 1
    trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
    # Check whether pretrained model exists. If yes, load it and skip training
    pretrained_filename = os.path.join(CHECKPOINT_PATH, f"NodeLevel{model_name}.ckpt")
    if os.path.isfile(pretrained_filename):
        print("Found pretrained model, loading...")
        model = NodeLevelGNN.load_from_checkpoint(pretrained_filename)
    else:
        pl.seed_everything()  # NOTE(review): called without a seed here (unlike 42 above) -- confirm intended
        model = NodeLevelGNN(model_name=model_name, c_in=dataset.num_node_features, c_out=dataset.num_classes, **model_kwargs)
        trainer.fit(model, node_data_loader, node_data_loader)
        model = NodeLevelGNN.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    # Test best model on the test set
    test_result = trainer.test(model, test_dataloaders=node_data_loader, verbose=False)
    # Recompute train/val accuracy directly from one forward pass on the single graph
    batch = next(iter(node_data_loader))
    batch = batch.to(model.device)
    _, train_acc = model.forward(batch, mode="train")
    _, val_acc = model.forward(batch, mode="val")
    result = {"train": train_acc,
              "val": val_acc,
              "test": test_result[0]['test_acc']}
    return model, result
```
Finally, we can train our models. First, let's train the simple MLP:
```
# Small helper for printing the accuracy scores of a result dict
def print_results(result_dict):
    """Print train/val/test accuracies (fractions in [0, 1]) as percentages."""
    # Train/val entries are optional; the test entry is required.
    for key, label in (("train", "Train accuracy"), ("val", "Val accuracy")):
        if key in result_dict:
            print(f"{label}: {(100.0*result_dict[key]):4.2f}%")
    print(f"Test accuracy: {(100.0*result_dict['test']):4.2f}%")
# Train the graph-agnostic MLP baseline on Cora and report its accuracies.
node_mlp_model, node_mlp_result = train_node_classifier(model_name="MLP",
                                                        dataset=cora_dataset,
                                                        c_hidden=16,
                                                        num_layers=2,
                                                        dp_rate=0.1)
print_results(node_mlp_result)
```
Although the MLP can overfit on the training dataset because of the high-dimensional input features, it does not perform too well on the test set. Let's see if we can beat this score with our graph networks:
```
# Train the GCN-based node classifier with the same hyperparameters as the MLP baseline.
node_gnn_model, node_gnn_result = train_node_classifier(model_name="GNN",
                                                        layer_name="GCN",
                                                        dataset=cora_dataset,
                                                        c_hidden=16,
                                                        num_layers=2,
                                                        dp_rate=0.1)
print_results(node_gnn_result)
```
As we would have hoped for, the GNN model outperforms the MLP by quite a margin. This shows that using the graph information indeed improves our predictions and lets us generalize better.
The hyperparameters in the model have been chosen to create a relatively small network. This is because the first layer with an input dimension of 1433 can be relatively expensive to perform for large graphs. In general, GNNs can become relatively expensive for very big graphs. This is why such GNNs either have a small hidden size or use a special batching strategy where we sample a connected subgraph of the big, original graph.
### Edge-level tasks: Link prediction
In some applications, we might have to predict on an edge-level instead of node-level. The most common edge-level task in GNN is link prediction. Link prediction means that given a graph, we want to predict whether there will be/should be an edge between two nodes or not. For example, in a social network, this is used by Facebook and co to propose new friends to you. Again, graph level information can be crucial to perform this task. The output prediction is usually done by performing a similarity metric on the pair of node features, which should be 1 if there should be a link, and otherwise close to 0. To keep the tutorial short, we will not implement this task ourselves. Nevertheless, there are many good resources out there if you are interested in looking closer at this task.
Tutorials and papers for this topic include:
* [PyTorch Geometric example](https://github.com/rusty1s/pytorch_geometric/blob/master/examples/link_pred.py)
* [Graph Neural Networks: A Review of Methods and Applications](https://arxiv.org/pdf/1812.08434.pdf), Zhou et al. 2019
* [Link Prediction Based on Graph Neural Networks](https://papers.nips.cc/paper/2018/file/53f0d7c537d99b3824f0f99d62ea2428-Paper.pdf), Zhang and Chen, 2018.
### Graph-level tasks: Graph classification
Finally, in this part of the tutorial, we will have a closer look at how to apply GNNs to the task of graph classification. The goal is to classify an entire graph instead of single nodes or edges. Therefore, we are also given a dataset of multiple graphs that we need to classify based on some structural graph properties. The most common task for graph classification is molecular property prediction, in which molecules are represented as graphs. Each atom is linked to a node, and edges in the graph are the bonds between atoms. For example, look at the figure below.
<center width="100%"><img src="molecule_graph.svg" width="600px"></center>
On the left, we have an arbitrary, small molecule with different atoms, whereas the right part of the image shows the graph representation. The atom types are abstracted as node features (e.g. a one-hot vector), and the different bond types are used as edge features. For simplicity, we will neglect the edge attributes in this tutorial, but you can include them by using methods like the [Relational Graph Convolution](https://arxiv.org/abs/1703.06103) that uses a different weight matrix for each edge type.
The dataset we will use below is called the MUTAG dataset. It is a common small benchmark for graph classification algorithms, and contains 188 graphs with 18 nodes and 20 edges on average for each graph. The graph nodes have 7 different labels/atom types, and the binary graph labels represent "their mutagenic effect on a specific gram negative bacterium" (the specific meaning of the labels is not too important here). The dataset is part of a large collection of different graph classification datasets, known as the [TUDatasets](https://chrsmrrs.github.io/datasets/), which is directly accessible via `torch_geometric.datasets.TUDataset` ([documentation](https://pytorch-geometric.readthedocs.io/en/latest/modules/datasets.html#torch_geometric.datasets.TUDataset)) in PyTorch Geometric. We can load the dataset below.
```
# Download (if needed) and load the MUTAG molecule-classification dataset (188 graphs).
tu_dataset = torch_geometric.datasets.TUDataset(root=DATASET_PATH, name="MUTAG")
```
Let's look at some statistics for the dataset:
```
# Dataset statistics: the concatenated storage object, the number of graphs,
# and the fraction of graphs with label 1 (a quick class-balance check).
print("Data object:", tu_dataset.data)
print("Length:", len(tu_dataset))
print(f"Average label: {tu_dataset.data.y.float().mean().item():4.2f}")
```
The first line shows how the dataset stores different graphs. The nodes, edges, and labels of each graph are concatenated to one tensor, and the dataset stores the indices where to split the tensors correspondingly. The length of the dataset is the number of graphs we have, and the "average label" denotes the percentage of graphs with label 1. As the percentage is close to 0.5, we have a relatively balanced dataset. It happens quite often that graph datasets are very imbalanced, hence checking the class balance is always a good thing to do.
Next, we will split our dataset into a training and test part. Note that we do not use a validation set this time because of the small size of the dataset. Therefore, our model might overfit slightly on the validation set due to the noise of the evaluation, but we still get an estimate of the performance on untrained data.
```
# Shuffle the graphs reproducibly, then take a 150/38 train/test split.
# Fix: PyTorch Geometric's `Dataset.shuffle()` is NOT in-place -- it returns a
# shuffled copy, so the result must be reassigned. Without the reassignment the
# split below would be taken from the original (unshuffled) dataset order.
torch.manual_seed(42)
tu_dataset = tu_dataset.shuffle()
train_dataset = tu_dataset[:150]
test_dataset = tu_dataset[150:]
```
When using a data loader, we encounter a problem with batching $N$ graphs. Each graph in the batch can have a different number of nodes and edges, and hence we would require a lot of padding to obtain a single tensor. Torch geometric uses a different, more efficient approach: we can view the $N$ graphs in a batch as a single large graph with concatenated node and edge list. As there is no edge between the $N$ graphs, running GNN layers on the large graph gives us the same output as running the GNN on each graph separately. Visually, this batching strategy is visualized below (figure credit - PyTorch Geometric team, [tutorial here](https://colab.research.google.com/drive/1I8a0DfQ3fI7Njc62__mVXUlcAleUclnb?usp=sharing#scrollTo=2owRWKcuoALo)).
<center width="100%"><img src="torch_geometric_stacking_graphs.png" width="600px"></center>
The adjacency matrix is zero for any nodes that come from two different graphs, and otherwise according to the adjacency matrix of the individual graph. Luckily, this strategy is already implemented in torch geometric, and hence we can use the corresponding data loader:
```
# PyG's DataLoader merges the graphs of a batch into one large disconnected graph
# (no padding needed); only the training loader shuffles.
graph_train_loader = geom_data.DataLoader(train_dataset, batch_size=64, shuffle=True)
graph_val_loader = geom_data.DataLoader(test_dataset, batch_size=64) # Additional loader if you want to change to a larger dataset
graph_test_loader = geom_data.DataLoader(test_dataset, batch_size=64)
```
Let's load a batch below to see the batching in action:
```
# Peek at one test batch: `batch.batch` maps every node to the index of its graph
# within the batch, which is what the pooling layer uses later.
batch = next(iter(graph_test_loader))
print("Batch:", batch)
print("Labels:", batch.y[:10])
print("Batch indices:", batch.batch[:40])
```
We have 38 graphs stacked together for the test dataset. The batch indices, stored in `batch`, show that the first 12 nodes belong to the first graph, the next 22 to the second graph, and so on. These indices are important for performing the final prediction. To perform a prediction over a whole graph, we usually perform a pooling operation over all nodes after running the GNN model. In this case, we will use the average pooling. Hence, we need to know which nodes should be included in which average pool. Using this pooling, we can already create our graph network below. Specifically, we re-use our class `GNNModel` from before, and simply add an average pool and single linear layer for the graph prediction task.
```
class GraphGNNModel(nn.Module):
    """Graph-level classifier: node-level GNN -> mean pool per graph -> linear head."""

    def __init__(self, c_in, c_hidden, c_out, dp_rate_linear=0.5, **kwargs):
        """
        Inputs:
            c_in - Dimension of input features
            c_hidden - Dimension of hidden features
            c_out - Dimension of output features (usually number of classes)
            dp_rate_linear - Dropout rate before the linear layer (usually much higher than inside the GNN)
            kwargs - Additional arguments for the GNNModel object
        """
        super().__init__()
        # The GNN outputs c_hidden features per node; the prediction itself
        # happens in the head after pooling.
        self.GNN = GNNModel(c_in=c_in, c_hidden=c_hidden, c_out=c_hidden, **kwargs)
        self.head = nn.Sequential(
            nn.Dropout(dp_rate_linear),
            nn.Linear(c_hidden, c_out),
        )

    def forward(self, x, edge_index, batch_idx):
        """
        Inputs:
            x - Input features per node
            edge_index - List of vertex index pairs representing the edges in the graph (PyTorch geometric notation)
            batch_idx - Index of batch element for each node
        """
        node_embeddings = self.GNN(x, edge_index)
        # Average all node embeddings belonging to the same graph
        graph_embeddings = geom_nn.global_mean_pool(node_embeddings, batch_idx)
        return self.head(graph_embeddings)
```
Finally, we can create a PyTorch Lightning module to handle the training. It is similar to the modules we have seen before and does nothing surprising in terms of training. As we have a binary classification task, we use the Binary Cross Entropy loss.
```
class GraphLevelGNN(pl.LightningModule):
    """Lightning module for graph-level classification with a GraphGNNModel.

    Uses BCE-with-logits for binary tasks (c_out == 1) and cross-entropy otherwise.
    """

    def __init__(self, **model_kwargs):
        super().__init__()
        # Saving hyperparameters
        self.save_hyperparameters()
        self.model = GraphGNNModel(**model_kwargs)
        self.loss_module = nn.BCEWithLogitsLoss() if self.hparams.c_out == 1 else nn.CrossEntropyLoss()

    def forward(self, data, mode="train"):
        """Return (loss, accuracy) for one batched graph object `data`."""
        x, edge_index, batch_idx = data.x, data.edge_index, data.batch
        x = self.model(x, edge_index, batch_idx)
        x = x.squeeze(dim=-1)  # in the binary case: [batch] logits instead of [batch, 1]
        if self.hparams.c_out == 1:
            preds = (x > 0).float()  # sigmoid(logit) > 0.5  <=>  logit > 0
            data.y = data.y.float()  # BCEWithLogitsLoss needs float targets (mutates the batch in place)
        else:
            preds = x.argmax(dim=-1)
        loss = self.loss_module(x, data.y)
        acc = (preds == data.y).sum().float() / preds.shape[0]
        return loss, acc

    def configure_optimizers(self):
        optimizer = optim.AdamW(self.parameters(), lr=1e-2, weight_decay=0.0) # High lr because of small dataset and small model
        return optimizer

    def training_step(self, batch, batch_idx):
        loss, acc = self.forward(batch, mode="train")
        self.log('train_loss', loss)
        self.log('train_acc', acc)
        return loss

    def validation_step(self, batch, batch_idx):
        _, acc = self.forward(batch, mode="val")
        self.log('val_acc', acc)

    def test_step(self, batch, batch_idx):
        _, acc = self.forward(batch, mode="test")
        self.log('test_acc', acc)
```
Below we train the model on our dataset. It resembles the typical training functions we have seen so far.
```
def train_graph_classifier(model_name, **model_kwargs):
    """Train (or load a pretrained) graph-level classifier and evaluate it.

    Inputs:
        model_name - Used in checkpoint/log directory names
        model_kwargs - Forwarded to the GraphLevelGNN constructor
    Returns:
        (model, result) where result maps "train"/"test" to accuracies.
    NOTE(review): relies on the module-level loaders (graph_train_loader,
    graph_val_loader, graph_test_loader) and tu_dataset defined above.
    """
    pl.seed_everything(42)
    # Create a PyTorch Lightning trainer with the generation callback
    root_dir = os.path.join(CHECKPOINT_PATH, "GraphLevel" + model_name)
    os.makedirs(root_dir, exist_ok=True)
    trainer = pl.Trainer(default_root_dir=root_dir,
                         callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc")],
                         gpus=1 if str(device).startswith("cuda") else 0,
                         max_epochs=500,
                         progress_bar_refresh_rate=0)
    trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
    # Check whether pretrained model exists. If yes, load it and skip training
    pretrained_filename = os.path.join(CHECKPOINT_PATH, f"GraphLevel{model_name}.ckpt")
    if os.path.isfile(pretrained_filename):
        print("Found pretrained model, loading...")
        model = GraphLevelGNN.load_from_checkpoint(pretrained_filename)
    else:
        pl.seed_everything(42)
        # Binary tasks use a single output logit, multi-class tasks one per class
        model = GraphLevelGNN(c_in=tu_dataset.num_node_features,
                              c_out=1 if tu_dataset.num_classes==2 else tu_dataset.num_classes,
                              **model_kwargs)
        trainer.fit(model, graph_train_loader, graph_val_loader)
        model = GraphLevelGNN.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
    # Test best model on validation and test set
    train_result = trainer.test(model, test_dataloaders=graph_train_loader, verbose=False)
    test_result = trainer.test(model, test_dataloaders=graph_test_loader, verbose=False)
    result = {"test": test_result[0]['test_acc'], "train": train_result[0]['test_acc']}
    return model, result
```
Finally, let's perform the training and testing. Feel free to experiment with different GNN layers, hyperparameters, etc.
```
# Train a 3-layer GraphConv model; dropout is applied only before the linear head.
model, result = train_graph_classifier(model_name="GraphConv",
                                       c_hidden=256,
                                       layer_name="GraphConv",
                                       num_layers=3,
                                       dp_rate_linear=0.5,
                                       dp_rate=0.0)
print(f"Train performance: {100.0*result['train']:4.2f}%")
print(f"Test performance: {100.0*result['test']:4.2f}%")
```
The test performance shows that we obtain quite good scores on an unseen part of the dataset. It should be noted that as we have been using the test set for validation as well, we might have overfitted slightly to this set. Nevertheless, the experiment shows us that GNNs can be indeed powerful to predict the properties of graphs and/or molecules.
## Conclusion
In this tutorial, we have seen the application of neural networks to graph structures. We looked at how a graph can be represented (adjacency matrix or edge list), and discussed the implementation of common graph layers: GCN and GAT. The implementations showed the practical side of the layers, which is often easier than the theory. Finally, we experimented with different tasks, on node-, edge- and graph-level. Overall, we have seen that including graph information in the predictions can be crucial for achieving high performance. There are a lot of applications that benefit from GNNs, and the importance of these networks will likely increase over the next years.
| github_jupyter |
# Numerical Differentiation
Teng-Jui Lin
Content adapted from UW AMATH 301, Beginning Scientific Computing, in Spring 2020.
- Numerical differentiation
- First order methods
- Forward difference
- Backward difference
- Second order methods
- Central difference
- Other second order methods
- Errors
- `numpy` implementation
- Data differentiation by [`numpy.gradient()`](https://numpy.org/doc/stable/reference/generated/numpy.gradient.html)
## Numerical differentiation of known function
From the definition of derivative, the forward difference approximation is given by
$$
f'(x) = \dfrac{f(x+\Delta x) - f(x)}{\Delta x}
$$
The backward difference approximation is given by
$$
f'(x) = \dfrac{f(x) - f(x-\Delta x)}{\Delta x}
$$
The central difference approximation is given by
$$
f'(x) = \dfrac{f(x + \Delta x) - f(x-\Delta x)}{2\Delta x}
$$
which is the average of forward and backward difference.
Forward and backward difference are $\mathcal{O}(\Delta x)$, i.e. first-order methods. Central difference is $\mathcal{O}(\Delta x^2)$, a second-order method. Note that we also have second-order methods at the left and right end points:
$$
f'(x) = \dfrac{-3f(x) + 4f(x+\Delta x) - f(x+2\Delta x)}{2\Delta x}
$$
$$
f'(x) = \dfrac{3f(x) - 4f(x-\Delta x) + f(x-2\Delta x)}{2\Delta x}
$$
### Implementation
**Problem Statement.** Find the derivative of the function
$$
f(x) = \sin x
$$
using the analytic expression, forward difference, backward difference, and central difference. Compare their accuracy using a plot.
```
import numpy as np
import matplotlib.pyplot as plt
# target function
f = lambda x : np.sin(x)
df = lambda x : np.cos(x) # analytic for comparison
x = np.arange(0, 2*np.pi, 0.1)
def forward_diff(f, x, dx):
    """First-order forward difference approximation of f'(x).

    :param f: callable to differentiate
    :param x: point(s) at which to evaluate the derivative
    :param dx: step size
    :returns: (f(x + dx) - f(x)) / dx
    """
    ahead = f(x + dx)
    here = f(x)
    return (ahead - here) / dx
def backward_diff(f, x, dx):
    """First-order backward difference approximation of f'(x).

    :param f: callable to differentiate
    :param x: point(s) at which to evaluate the derivative
    :param dx: step size
    :returns: (f(x) - f(x - dx)) / dx
    """
    here = f(x)
    behind = f(x - dx)
    return (here - behind) / dx
def central_diff(f, x, dx):
    """Second-order central difference approximation of f'(x).

    Average of the forward and backward differences; error is O(dx**2).

    :param f: callable to differentiate
    :param x: point(s) at which to evaluate the derivative
    :param dx: step size
    :returns: (f(x + dx) - f(x - dx)) / (2 * dx)
    """
    right = f(x + dx)
    left = f(x - dx)
    return (right - left) / (2.0 * dx)
dx = 0.1
forward_df = forward_diff(f, x, dx)
backward_df = backward_diff(f, x, dx)
central_df = central_diff(f, x, dx)
# plot settings
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
plt.rcParams.update({
'font.family': 'Arial', # Times New Roman, Calibri
'font.weight': 'normal',
'mathtext.fontset': 'cm',
'font.size': 18,
'lines.linewidth': 2,
'axes.linewidth': 2,
'axes.spines.top': False,
'axes.spines.right': False,
'axes.titleweight': 'bold',
'axes.titlesize': 18,
'axes.labelweight': 'bold',
'xtick.major.size': 8,
'xtick.major.width': 2,
'ytick.major.size': 8,
'ytick.major.width': 2,
'figure.dpi': 80,
'legend.framealpha': 1,
'legend.edgecolor': 'black',
'legend.fancybox': False,
'legend.fontsize': 14
})
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(x, df(x), label='Analytic', color='black')
ax.plot(x, forward_df, '--', label='Forward')
ax.plot(x, backward_df, '--', label='Backward')
ax.plot(x, central_df, '--', label='Central')
ax.set_xlabel('$x$')
ax.set_ylabel('$f\'(x)$')
ax.set_title('Numerical differentiation methods')
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(-1, 1)
ax.legend()
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(x, df(x), label='Analytic', color='black')
ax.plot(x, forward_df, '--', label='Forward')
ax.plot(x, backward_df, '--', label='Backward')
ax.plot(x, central_df, '--', label='Central')
ax.set_xlabel('$x$')
ax.set_ylabel('$f\'(x)$')
ax.set_title('Numerical differentiation methods')
ax.set_xlim(1.5, 2.5)
ax.set_ylim(-0.9, 0.5)
ax.legend()
```
### Error and method order
**Problem Statement.** Compare the error of forward difference, backward difference, and central difference with analytic derivative of the function
$$
f(x) = \sin x
$$
Compare the error of the methods using a plot.
```
# target function
f = lambda x : np.sin(x)
df = lambda x : np.cos(x) # analytic for comparison
x = np.arange(0, 2*np.pi, 0.1)
dx = np.array([0.1 / 2**i for i in range(5)])
forward_errors = np.zeros(len(dx))
backward_errors = np.zeros(len(dx))
central_errors = np.zeros(len(dx))
for i in range(len(dx)):
forward_df = forward_diff(f, x, dx[i])
backward_df = backward_diff(f, x, dx[i])
central_df = central_diff(f, x, dx[i])
forward_errors[i] = np.linalg.norm(df(x) - forward_df)
backward_errors[i] = np.linalg.norm(df(x) - backward_df)
central_errors[i] = np.linalg.norm(df(x) - central_df)
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(dx, forward_errors, '.-', label='Forward')
ax.plot(dx, backward_errors, 'o-.', label='Backward', alpha=0.5)
ax.plot(dx, central_errors, 'o--', label='Central', alpha=0.8)
ax.set_xlabel('$dx$')
ax.set_ylabel('Error')
ax.set_title('Error of numerical methods')
# ax.set_xlim(1.5, 2.5)
# ax.set_ylim(-0.9, 0.5)
ax.legend()
```
## Numerical differentiation of data
### Implementation
**Problem Statement.** The Gaussian function has the form
$$
f(x) = \dfrac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\dfrac{(x-\mu)^2}{2\sigma^2}\right)
$$
(a) Generate an equidistant Gaussian dataset of such form in the domain $[0, 5]$ with $\sigma = 1, \mu = 2.5$.
(b) Find the numerical derivative of the data points using second order methods and [`numpy.gradient()`](https://numpy.org/doc/stable/reference/generated/numpy.gradient.html). Plot the data and the derivative.
```
# generate data
gaussian = lambda x, sigma, mu : 1/np.sqrt(2*np.pi*sigma**2) * np.exp(-(x - mu)**2 / (2*sigma**2))
gaussian_data_x = np.linspace(0, 5, 50)
gaussian_data_y = np.array([gaussian(i, 1, 2.5) for i in gaussian_data_x])
def numerical_diff(data_x, data_y):
    """Differentiate equidistant data points with second-order formulas.

    Central difference is used for the interior points; one-sided
    second-order forward/backward stencils are used at the two ends,
    so the whole result is O(dx**2) accurate.

    :param data_x: x-coordinates of data points (assumed equidistant)
    :param data_y: y-coordinates of data points
    :returns: array of numerical derivatives, same shape as data_x
    """
    # Spacing inferred from the first pair; equidistance is assumed.
    dx = data_x[1] - data_x[0]
    deriv = np.zeros_like(data_x)
    # Interior: central difference over the two neighbours.
    deriv[1:-1] = (data_y[2:] - data_y[:-2]) / (2.0 * dx)
    # Left end: second-order forward stencil (-3, 4, -1).
    deriv[0] = (-3.0 * data_y[0] + 4.0 * data_y[1] - data_y[2]) / (2.0 * dx)
    # Right end: second-order backward stencil (3, -4, 1).
    deriv[-1] = (3.0 * data_y[-1] - 4.0 * data_y[-2] + data_y[-3]) / (2.0 * dx)
    return deriv
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(gaussian_data_x, gaussian_data_y, 'o', label='Data points')
ax.plot(gaussian_data_x, numerical_diff(gaussian_data_x, gaussian_data_y), '.', label='Derivative')
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x), f\'(x)$')
ax.set_title('Numerical differentiation of data')
# ax.set_xlim(1.5, 2.5)
# ax.set_ylim(-0.9, 0.5)
ax.legend()
```
### Numerical differentiation of data with `numpy`
[`numpy.gradient()`](https://numpy.org/doc/stable/reference/generated/numpy.gradient.html) has similar implementation as above. It uses central difference in the middle, and forward and backward differences at the end points.
```
dx = gaussian_data_x[1] - gaussian_data_x[0]
gaussian_df = np.gradient(gaussian_data_y, dx)
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(gaussian_data_x, gaussian_data_y, 'o', label='Data points')
ax.plot(gaussian_data_x, gaussian_df, '.', label='Derivative')
ax.set_xlabel('$x$')
ax.set_ylabel('$f(x), f\'(x)$')
ax.set_title('Numerical differentiation of data')
# ax.set_xlim(1.5, 2.5)
# ax.set_ylim(-0.9, 0.5)
ax.legend()
```
| github_jupyter |
```
from typing import List
from collections import defaultdict
from functools import lru_cache
class Solution:
    def catMouseGame(self, graph: List[List[int]]) -> int:
        """Cat-and-mouse game (LeetCode 913), solved with memoized DFS.

        State is (mouse node, cat node, step). Mouse moves on even steps,
        cat on odd steps; the cat may never enter the hole (node 0).

        :returns: 1 if the mouse wins, 2 if the cat wins, 0 for a draw
        """
        limit = len(graph) * 2  # depth cutoff: longer games are treated as draws

        @lru_cache(None)
        def outcome(mouse, cat, step):
            if step > limit:
                return 0
            if cat == mouse:
                return 2
            if mouse == 0:
                return 1
            if step % 2 == 0:
                # Mouse's turn: take any immediate win, otherwise prefer a draw.
                saw_draw = False
                for nxt in graph[mouse]:
                    result = outcome(nxt, cat, step + 1)
                    if result == 1:
                        return 1
                    if result == 0:
                        saw_draw = True
                return 0 if saw_draw else 2
            # Cat's turn: the hole (node 0) is forbidden for the cat.
            saw_draw = False
            for nxt in graph[cat]:
                if nxt == 0:
                    continue
                result = outcome(mouse, nxt, step + 1)
                if result == 2:
                    return 2
                if result == 0:
                    saw_draw = True
            return 0 if saw_draw else 1

        return outcome(1, 2, 0)
from typing import List
from collections import defaultdict, deque
class Solution:
    def catMouseGame(self, graph: List[List[int]]) -> int:
        """Cat-and-mouse game (LeetCode 913) via retrograde (bottom-up) BFS.

        States are triples (mouse, cat, turn) with turn 1 = mouse to move,
        turn 2 = cat to move. Outcomes of terminal states are known, and the
        BFS propagates them backwards to predecessor states.
        Returns 1 if the mouse wins, 2 if the cat wins, 0 for a draw.
        """
        # Propagate from known states to unknown ones.
        def getPreState(m, c, t):
            # List every state from which (m, c, t) is reachable in one move.
            pos = []
            if t == 1:  # mouse to move now, so the previous move was the cat's
                for nc in graph[c]:
                    if nc == 0:  # the cat may never occupy the hole
                        continue
                    pos.append((m, nc, 2))
            else:  # cat to move now, so the previous move was the mouse's
                for nm in graph[m]:
                    pos.append((nm, c, 1))
            return pos

        def mustLoss(m, c, t):
            # True iff every move available to the player on turn t leads to
            # a state already decided in the opponent's favour.
            if t == 1:
                for nm in graph[m]:
                    if res[(nm, c, 2)] != 2:
                        return False
            else:
                for nc in graph[c]:
                    if nc == 0:  # hole forbidden to the cat
                        continue
                    if res[(m, nc, 1)] != 1:
                        return False
            return True

        res = defaultdict(int)  # outcome of each state: 0 undecided, 1 mouse, 2 cat
        queue = deque()
        n = len(graph)
        # Seed the queue with terminal states for both turn values.
        for t in range(1, 3):  # 1: mouse move, 2: cat move
            for i in range(1, n):
                res[(0, i, t)] = 1  # mouse already in the hole: mouse wins
                queue.append((0, i, t))
                res[(i, i, t)] = 2  # cat on the mouse's node: cat wins
                queue.append((i, i, t))
        while queue:
            m, c, t = queue.popleft()
            ans = res[(m, c, t)]  # this state's outcome is already decided
            for pre in getPreState(m, c, t):  # push the result to predecessors
                m2, c2, t2 = pre  # a predecessor state
                if res[pre] != 0:  # already decided, skip
                    continue
                if ans == t2:
                    # The player to move in `pre` can move into a state they win.
                    res[pre] = ans
                    queue.append(pre)
                elif mustLoss(m2, c2, t2):
                    # All of that player's moves lose, so the opponent wins.
                    res[pre] = 3 - t2
                    queue.append(pre)
        # Undecided states remain 0 (draw). Initial state: mouse at 1, cat at 2,
        # mouse to move.
        return res[(1, 2, 1)]
solution = Solution()
solution.catMouseGame([[1,3],[0],[3],[0,2]])
```
| github_jupyter |
```
import os
os.chdir("../../scVI/")
os.getcwd()
import torch
import pickle
import seaborn as sns
import numpy as np
import pandas as pd
from umap import UMAP
from sklearn.cluster import SpectralClustering
from scvi.inference import UnsupervisedTrainer
from scvi.models import VAE
save_path = '../CSF/Notebooks/'
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
%matplotlib inline
from random import sample
from numpy.random import permutation
%matplotlib inline
celllabels = np.load(save_path + 'meta/celllabels.npy')
isCD4 = celllabels=='CD4'
clusters = np.load(save_path + 'meta/CD4.clusters.npy')
isMS = np.load(save_path+'meta/isMS.npy')[isCD4]
isCSF = np.load(save_path+'meta/isCSF.npy')[isCD4]
def ES_fast(score, s, p, interval):
    """Running GSEA-style enrichment score over a ranked list.

    :param score: ranking scores, ordered from high to low
    :param s: binary membership labels (1 = in the gene/cell set)
    :param p: weighting exponent applied to |score|
    :param interval: number of consecutive ranks folded in per step
    :returns: list of cumulative enrichment values, starting at 0
    """
    n_total = len(s)
    n_hits = np.sum(s == 1)
    miss_penalty = 1 / (n_total - n_hits)  # uniform penalty per non-member
    weights = np.abs(score) ** p
    weights = weights / np.sum(weights[s == 1])  # normalise by in-set weight mass
    curve = [0]
    running_hit = 0
    running_miss = 0
    # NOTE(review): the loop bound excludes the final `interval`-sized chunk,
    # so the last rank(s) never enter the curve — confirm this is intended.
    for start in np.arange(0, (len(weights) - interval), interval):
        idx = np.arange(start, start + interval, 1)
        chunk_labels = s[idx]
        running_hit = running_hit + np.sum(weights[idx][chunk_labels == 1])
        running_miss = running_miss + miss_penalty * np.sum(chunk_labels == 0)
        curve.append(running_hit - running_miss)
    return curve
VisionScore = pd.read_csv('../CSF/signatures/sigScore.csv')
s = isMS[isCSF==True]
score = np.asarray(VisionScore['TFH'])[isCSF==True]
ranked = pd.DataFrame(np.asarray([s, score]).T,columns=['s','score'])
ranked = ranked.sample(frac=1)
ranked = ranked.sort_values(by='score',ascending=False)
s = np.asarray(ranked['s'])
score = np.asarray(ranked['score'])
sns.distplot(score[s==False], kde=True, rug=False,color='green',label='control')
sns.distplot(score[s==True], kde=True, rug=False,color='orange',label='MS')
# plt.legend()
plt.savefig(save_path+'figures/SupFigure8/scoredist.MSinCSF.TFH.pdf')
from random import sample
scorerank = np.argsort(np.argsort(score))
sns.rugplot(sample(list(scorerank[s==True]),1000), label = 'MS', color='orange', linewidth = 0.1)
sns.rugplot(sample(list(scorerank[s==False]),1000), label = 'Control', color = 'green', linewidth = 0.1)
plt.axis('off')
# plt.legend()
plt.savefig(save_path+'figures/SupFigure8/rugplot.MSinCSF.TFH.pdf')
ES = ES_fast(score,s,1,1)
from random import sample
control = [ES_fast(score,np.asarray(sample(list(s),len(s))),1,1) for i in range(100)]
control_score = pd.read_csv('../CSF/signatures/sigScore.TFH.matched.csv')
control_score = control_score.loc[isCSF==True]
control_score = control_score[control_score.columns[1:]]
control_score.loc[:5]
control2 = []
for x in control_score.columns:
score = control_score[x]
ranked = pd.DataFrame(np.asarray([s, score]).T,columns=['s','score'])
ranked = ranked.sample(frac=1)
ranked = ranked.sort_values(by='score',ascending=False)
s = np.asarray(ranked['s'])
score = np.asarray(ranked['score'])
control2.append(ES_fast(score,s,1,1))
plt.plot(np.arange(len(ES)),ES,'r-')
for i in range(100):
plt.plot(np.arange(len(ES)),control2[i],'b-',alpha=0.1)
plt.axvline(x=np.argmax(ES),color='black')
# plt.title("Diseased Cell Set")
plt.xlabel("Rank in Ordered Dataset")
plt.ylabel("Enrichment Score (ES)")
plt.savefig(save_path+'figures/SupFigure8/ES.MSinCSF.TFH.pdf')
with open(save_path + 'CSEA/TFH.MSinCSF.pkl', 'wb') as f:
pickle.dump((ES,control2), f)
```
### Pvalue
```
np.mean(np.asarray([np.max(x) for x in control2[1:]]) > np.max(ES))
```
### Leading Edge
```
np.argmax(ES)
```
# in blood
```
s = isMS[isCSF==False]
score = np.asarray(VisionScore['TFH'])[isCSF==False]
ranked = pd.DataFrame(np.asarray([s, score]).T,columns=['s','score'])
ranked = ranked.sample(frac=1)
ranked = ranked.sort_values(by='score',ascending=False)
s = np.asarray(ranked['s'])
score = np.asarray(ranked['score'])
sns.distplot(score[s==False], kde=True, rug=False,color='green',label='control')
sns.distplot(score[s==True], kde=True, rug=False,color='orange',label='MS')
# plt.legend()
plt.savefig(save_path+'figures/SupFigure8/scoredist.MSinPBMC.TFH.pdf')
scorerank = np.argsort(np.argsort(score))
sns.rugplot(sample(list(scorerank[s==True]),1000), label = 'MS', color='orange', linewidth = 0.1)
sns.rugplot(sample(list(scorerank[s==False]),1000), label = 'Control', color = 'green', linewidth = 0.1)
plt.axis('off')
# plt.legend()
plt.savefig(save_path+'figures/SupFigure8/rugplot.MSinPBMC.TFH.pdf')
ES = ES_fast(score,s,1,1)
from random import sample
control = [ES_fast(score,np.asarray(sample(list(s),len(s))),1,1) for i in range(100)]
control_score = pd.read_csv('../CSF/signatures/sigScore.TFH.matched.csv')
control_score = control_score.loc[isCSF==False]
control_score = control_score[control_score.columns[1:]]
control2 = []
for x in control_score.columns:
score = control_score[x]
ranked = pd.DataFrame(np.asarray([s, score]).T,columns=['s','score'])
ranked = ranked.sample(frac=1)
ranked = ranked.sort_values(by='score',ascending=False)
s = np.asarray(ranked['s'])
score = np.asarray(ranked['score'])
control2.append(ES_fast(score,s,1,1))
plt.plot(np.arange(len(ES)),ES,'r-')
for i in range(100):
plt.plot(np.arange(len(ES)),control2[i],'b-',alpha=0.1)
plt.axvline(x=np.argmax(ES),color='black')
# plt.title("Diseased Cell Set")
plt.xlabel("Rank in Ordered Dataset")
plt.ylabel("Enrichment Score (ES)")
plt.savefig(save_path+'figures/SupFigure8/ES.MSinPBMC.TFH.pdf')
```
# Significance value
0.889 in blood
0.002 in CSF
```
np.mean(np.asarray([np.max(x) for x in control2]) > np.max(ES))
```
### Leading Edge
```
np.argmax(ES)
```
# show TFH cell origins
```
latent_u = np.load(save_path + 'UMAP/all_dataset.umap.npy')
celllabels = np.load(save_path + 'meta/celllabels.npy')
celltype, labels = np.unique(celllabels, return_inverse=True)
isMS = np.load(save_path+'meta/isMS.npy')
isCSF = np.load(save_path+'meta/isCSF.npy')
validclusters = (celllabels!='Mono Doublet') & \
(celllabels!='contamination1') & \
(celllabels!='doublet') & \
(celllabels!='B cell doublets') & \
(celllabels!='RBC')
isCD4 = (celllabels=='CD4')
latent_u = latent_u[celllabels=='CD4',:]
isMS = isMS[celllabels=='CD4']
isCSF = isCSF[celllabels=='CD4']
edgethres = np.quantile(VisionScore['TFH'],(1-(587+135)/25105))
TFH = np.asarray(VisionScore['TFH']>edgethres)
len(isCSF)
fig, ax = plt.subplots(figsize=(5, 5),facecolor='white')
plt.scatter(latent_u[:, 0], latent_u[:, 1],c='lightgray',s=5)
plt.scatter(latent_u[:, 0][TFH & isCSF], latent_u[:, 1][TFH & isCSF],c='orange',s=3,label='CSF')
plt.scatter(latent_u[:, 0][TFH & (isCSF==False)], latent_u[:, 1][TFH & (isCSF==False)],c='green',s=3,label='PBMC')
plt.title('TFH',fontsize=30)
plt.axis("off")
# plt.legend()
plt.tight_layout()
plt.savefig(save_path+'figures/SupFigure8/TFH.CSF_PBMC.pdf')
fig, ax = plt.subplots(figsize=(5, 5),facecolor='white')
plt.scatter(latent_u[:, 0], latent_u[:, 1],c='lightgray',s=5)
plt.scatter(latent_u[:, 0][TFH & isCSF & isMS], latent_u[:, 1][TFH & isCSF & isMS],c='orange',s=3,label='MS')
plt.scatter(latent_u[:, 0][TFH & isCSF & (isMS==False)], latent_u[:, 1][TFH & isCSF & (isMS==False)],c='green',s=3,label='control')
plt.title('TFH',fontsize=30)
plt.axis("off")
# plt.legend()
plt.tight_layout()
plt.savefig(save_path+'figures/SupFigure8/TFH.MSinCSF.pdf')
fig, ax = plt.subplots(figsize=(5, 5),facecolor='white')
plt.scatter(latent_u[:, 0], latent_u[:, 1],c='lightgray',s=5)
plt.scatter(latent_u[:, 0][TFH & (isCSF==False) & isMS], latent_u[:, 1][TFH & (isCSF==False) & isMS],c='orange',s=3,label='MS')
plt.scatter(latent_u[:, 0][TFH & (isCSF==False) & (isMS==False)], latent_u[:, 1][TFH & (isCSF==False) & (isMS==False)],c='green',s=3,label='control')
plt.title('TFH',fontsize=30)
plt.axis("off")
# plt.legend()
plt.tight_layout()
plt.savefig(save_path+'figures/SupFigure8/TFH.MSinPBMC.pdf')
```
| github_jupyter |
```
import sys
sys.path.append('/home/bibek/projects/DEEPL')
from helpers.deep import get_deep_data, get_classifier
#data = get_deep_data(debug=False, filepath='/home/bibek/projects/DEEPL/_playground/sample_data/nlp_out.csv')
import pandas as pd
df = pd.read_csv('/home/bibek/projects/DEEPL/_playground/sample_data/processed_sectors_subsectors.csv')
#################################################
### COMPARE KEYWORDS EXTRACTION VS SIMPLE method
#################################################
# create a function to extract keywords from document(will return list)
def keywords_extracter(doc):
    """Extract the unique words appearing in the document's key n-grams.

    Collects every token from the top 1-, 2- and 3-grams returned by
    get_key_ngrams, de-duplicated while preserving first-seen order.

    :param doc: document text to extract keywords from
    :returns: list of unique keyword strings
    """
    ngrams = get_key_ngrams(doc, 3)
    seen = {}  # dict used as an ordered set
    for entry in ngrams['1grams']:
        seen[entry[0]] = True
    for entry in ngrams['2grams']:
        first, second = entry[0].split()
        seen[first] = True
        seen[second] = True
    for entry in ngrams['3grams']:
        first, second, third = entry[0].split()
        seen[first] = True
        seen[second] = True
        seen[third] = True
    return list(seen.keys())
# convert doc to str and then split
str_split = compose(str.split, str)
# stemmer
stemmer = PorterStemmer()
# split and stem
#split_and_stem = lambda x: list(map(stemmer.stem,x))
rm_stop_list = curried_map(rm_stop_words_txt)
rm_punc_list = curried_map(rm_punc_not_nums)
rm_punc_nums_list = curried_map(remove_punc_and_nums)
lower_list = curried_map(str.lower)
composed_list_processor= compose(remove_punc_and_nums, rm_stop_words_txt, str.lower)
# rm_punc_nums_processor
punc_nums_preprocessor = compose(list, curried_filter(lambda x: x.strip()!=''), curried_map(composed_list_processor), str.split, str)
processed = df['excerpt'].apply(punc_nums_preprocessor)
print(processed)
assert False
# keywords pre processor
kw_preprocessor = compose(list, rm_punc_list , rm_stop_list,lower_list,keywords_extracter,str)
# simple pre processor
simple_preprocessor = compose(list, rm_punc_list , rm_stop_list,lower_list,str.split,str)
kw_processed = [(kw_preprocessor(ex), l) for (ex, l) in data] # if langid.classify(str(ex))[0] == 'en']
simple_processed = [(simple_preprocessor(ex), l) for (ex, l) in data] # if langid.classify(str(ex))[0] == 'en']
punc_nums_processed = [(punc_nums_preprocessor(ex), l) for (ex, l) in data] # if langid.classify(str(ex))[0] == 'en']
from classifier.feature_selectors import UnigramFeatureSelector, BigramFeatureSelector
from classifier.NaiveBayes_classifier import NaiveBayesClassifier
#processed_data = kw_processed
#processed_data = simple_processed
def get_avg_accuracy(iters, size, processed_data, feature_selector=UnigramFeatureSelector):
    """Average classifier accuracy over random train/test splits.

    Each iteration draws a fresh random subset of ``size`` items, holds out
    25% as a test set, trains a NaiveBayesClassifier on the rest, and
    records its accuracy.

    :param iters: number of shuffle/train/evaluate iterations
    :param size: number of items sampled per iteration
    :param processed_data: list of (token_list, label) pairs; not mutated
    :param feature_selector: feature selector class exposing ``.new(...)``
    :returns: (mean accuracy, list of per-iteration accuracies)
    """
    sum_accuracy = 0
    accuracies = []
    for _ in range(iters):
        # BUG FIX: the original did `random.shuffle(processed_data)` followed by
        # `processed_data = processed_data[:size]`, which (a) mutated the
        # caller's list and (b) permanently truncated the pool after the first
        # iteration, so later iterations resampled the same small subset.
        # Shuffle a copy each time so every iteration draws a fresh sample.
        sample_data = list(processed_data)
        random.shuffle(sample_data)
        sample_data = sample_data[:size]
        data_len = len(sample_data)
        test_len = int(data_len * 0.25)
        train_data = sample_data[test_len:]
        test_data = sample_data[:test_len]
        selector = feature_selector.new(corpus=sample_data, top=2000)  # use top 2000 words
        classifier = NaiveBayesClassifier.new(selector, train_data)
        accuracy = classifier.get_accuracy(test_data)
        accuracies.append(accuracy)
        sum_accuracy += accuracy
    return sum_accuracy / iters, accuracies
SIZE = 200
ITERS = 2
kw_accuracy = get_avg_accuracy(ITERS, SIZE, kw_processed)
punc_nums_accuracy = get_avg_accuracy(ITERS, SIZE, punc_nums_processed)
simple_accuracy = get_avg_accuracy(ITERS, SIZE, simple_processed)
simple_bigram_accuracy = get_avg_accuracy(ITERS, SIZE, simple_processed, BigramFeatureSelector)
print('KEYWORDS:', kw_accuracy)
print('PUNC NUMS:', punc_nums_accuracy)
print('SIMPLE: ', simple_accuracy)
print('SIMPLE BIGRAM: ', simple_bigram_accuracy)
```
| github_jupyter |
#### Xl Juleoriansyah Nksrsb / 13317005
#### Muhamad Asa Nurrizqita Adhiem / 13317018
#### Oktoni Nur Pambudi / 13317022
#### Bernardus Rendy / 13317041
# Definisi Masalah
#### Dalam tangki dengan luas permukaan A, luas luaran a, dalam percepatan gravitasi g [Parameter A,a,g]
#### Diisi dengan flow fluida Vin (asumsi fluida incompressible) sehingga terdapat ketinggian h [Variabel Input Vin dan output h]
#### Akan memiliki luaran $V_{out}$ dengan
$V_{out} = a \sqrt{2gh} $
#### Sehingga akan didapat hubungan persamaan diferensial non-linear
$ \frac {dh}{dt} = \frac {V_{in}}{A} - \frac {a}{A}\sqrt{2gh}$
<img src="./dinsis_nonlinear.png" style="width:50%;">
#### Sumber Gambar: Slide Kuliah Dinamika Sistem dan Simulasi (Eko M. Budi & Estiyanti Ekawati) System Modeling: Fluidic Systems
```
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def dhdt_non(h, t, Vin, A, a, g):
    """Nonlinear tank-level ODE: dh/dt = Vin/A - (a/A)*sqrt(2*g*h).

    :param h: liquid level
    :param t: time (unused; required by odeint's signature)
    :param Vin: inflow rate
    :param A: tank cross-sectional area
    :param a: outlet area
    :param g: gravitational acceleration
    :returns: rate of change of the level
    """
    inflow_rate = Vin / A
    outflow_rate = (a / A) * np.sqrt(2 * g * h)
    return inflow_rate - outflow_rate
# initial condition
h0 = 0
# Parameter
A = 1
g = 9.8
Vin =100
a = 1
# time points
t = np.linspace(0,100)
# solve ODEs
hnon = odeint(dhdt_non,h0,t,args=(Vin,A,a,g))
# plot results
plt.plot(t,hnon,'r-',linewidth=2,label='h_non_linear')
plt.xlabel('time')
plt.ylabel('h(t)')
plt.legend()
plt.show()
```
# Alternatif Penyelesaian: Linearisasi
#### Dalam sebuah persamaan diferensial non-linear, sulit ditentukan fungsi transfer (karena h dalam akar sehingga tidak dapat dikeluarkan dengan mudah) dan penyelesaian analitik tanpa menggunakan numerik sehingga dibentuk suatu metode bernama linearisasi. Linearisasi juga mengimplikasi operasi matematika yang jauh lebih mudah
#### Linearisasi menggunakan ekspansi taylor orde 1 untuk mengubah persamaan diferensial $ \frac {dh(t)}{dt} = \frac {q_i(t)}{A} - \frac {a}{A}\sqrt{2gh(t)}$ menjadi linear
<img src="./dinsis_linear1.png" style="width:50%">
#### Sumber Gambar: Slide Kuliah Dinamika Sistem dan Simulasi (Eko M. Budi & Estiyanti Ekawati) System Modeling: Fluidic Systems
#### Menghasilkan (dengan catatan qi adalah Vin)
# $ \frac {dh}{dt} - \frac {d\bar h}{dt} = \frac {V_{in}- \bar {V_{in}}}{A} - (\frac {a \sqrt {2g}}{2A \sqrt {\bar h}})(h - \bar h) $
#### Setelah linearisasi, dihasilkan persamaan diferensial linear yang dapat beroperasi dekat $ \bar h $
#### Secara sederhana, ditulis
# $ \frac {d\hat h}{dt} = \frac {\hat {V_{in}}}{A} - \frac{\hat h}{R} $
#### Dimana
### $ \hat h = h-\bar h $
### $ \hat {V_{in}} = V_{in} - \bar {V_{in}} $
### $ R=\frac {A \sqrt {2 \bar {h}}}{a \sqrt{g}} $
#### Sehingga harus dipilih kondisi dimana $ \bar h $ sesuai untuk daerah operasi persamaan
#### Terlihat bahwa persamaan digunakan pada 0 hingga steady state, saat steady state
# $ \frac {dh}{dt} = 0 $
#### Berdasarkan persamaan
# $ \frac {dh}{dt} = \frac {V_{in}}{A} - \frac {a}{A}\sqrt{2gh}$
# $ 0 = V_{in} - a \sqrt{2g\bar {h}} $
# $ \bar {h} = \frac {V_{in}^2}{2ga^2} $
#### Juga harus dipilih kondisi dimana $ \bar V_{in} $ sesuai untuk daerah operasi persamaan
#### Terlihat bahwa jika input merupakan fungsi step,
# $ \bar V_{in} = V_{in} $
#### Karena $ V_{in} $ konstan, maka pada kondisi akhir dimana $ \bar V_{in} $ beroperasi, juga tetap sama dengan $ V_{in} $
#### Menggunakan persamaan yang sebelumnya telah diturunkan
# $ \frac {d\hat h}{dt} = \frac {\hat {V_{in}}}{A} - \frac{\hat h}{R} $
#### Dimana
### $ \hat h = h-\bar h $
### $ \hat {V_{in}} = V_{in} - \bar {V_{in}} $
### $ R=\frac {A \sqrt {2 \bar {h}}}{a \sqrt{g}} $
```
def dhhatdt_lin(hhat, t, Vinhat, A, a, g, R):
    """Linearized tank ODE in deviation variables: d(hhat)/dt = Vinhat/A - hhat/R.

    :param hhat: level deviation from the linearization point
    :param t: time (unused; required by odeint's signature)
    :param Vinhat: inflow deviation from its steady value
    :param A: tank cross-sectional area
    :param a: outlet area (unused in the linear form; kept for symmetry)
    :param g: gravitational acceleration (unused here)
    :param R: linearized outflow resistance
    :returns: rate of change of the level deviation
    """
    inflow_term = Vinhat / A
    drain_term = hhat / R
    return inflow_term - drain_term
# Initial condition
h0 = 0
# Input
Vin=100
# Parameter
A = 1
g = 9.8
a = 1
hbar = Vin**2/(2*g*a**2)
R=(A*np.sqrt(2*hbar))/(a*np.sqrt(g))
hhat0 = h0-hbar
Vinbar= Vin
Vinhat= Vin-Vinbar
# time points
t = np.linspace(0,100)
# solve ODEs, karena hasil ODE yang didapat adalah untuk hhat, maka harus dilakukan penambahan hbar karena h = hhat+hbar
hlin = odeint(dhhatdt_lin,hhat0,t,args=(Vinhat,A,a,g,R))
hlin = hlin+hbar
# plot results
plt.plot(t,hlin,'b-',linewidth=2,label='h_linear')
plt.xlabel('time')
plt.ylabel('h(t)')
plt.legend()
plt.show()
```
# Perbandingan Non-linear dan Linearisasi
```
plt.plot(t,hnon,'r-',linewidth=2,label='h_non_linear')
plt.plot(t,hlin,'b-',linewidth=2,label='h_linear')
plt.xlabel('time')
plt.ylabel('h(t)')
plt.legend()
plt.show()
```
#### Terlihat perbedaan respon sistem ketika dilakukan aproksimasi linearisasi terhadap dhdt
#### Walaupun terjadi perbedaan, perbedaan tersebut kurang signifikan pada sistem ini dengan Sum Squared Error sebesar:
```
err=hnon-hlin
err=err*err
sum(err)
```
# Interface Parameter
```
from ipywidgets import interact,fixed,widgets
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from ipywidgets import interact,fixed,widgets,Button,Layout
def dhhatdt_lin(hhat, t, Vinhat, A, a, g, R):
    """Linearized tank ODE in deviation variables: d(hhat)/dt = Vinhat/A - hhat/R."""
    fill = Vinhat / A
    drain = hhat / R
    return fill - drain
def dhdt_non(h, t, Vin, A, a, g):
    """Nonlinear tank-level ODE: dh/dt = Vin/A - (a/A)*sqrt(2*g*h)."""
    fill = Vin / A
    drain = (a / A) * np.sqrt(2 * g * h)
    return fill - drain
g = 9.8
range_A = widgets.FloatSlider(
value=2.,
min=1.,
max=10.0,
step=0.1,
description='Luas Alas Tangki ($dm^2$):',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
range_a = widgets.FloatSlider(
value=2.,
min=0.1, max=+3., step=0.1,
description='Luas Pipa ($dm^2$) :',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
range_Vin = widgets.FloatSlider(
value= 2.,
min=0.1,
max=100.0,
step=0.1,
description='Debit Fluida Masuk ($dm^2 / s$)',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
range_h0 = widgets.FloatSlider(
value= 2.,
min=0.,
max=500.0,
step=0.1,
description='Ketinggian Mula-Mula ($dm$):',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
range_amplitude = widgets.FloatSlider(
value= 2.,
min=0.,
max=100.0,
step=0.1,
description='Amplituda Gangguan Sinusoidal:',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
time_slider = widgets.IntSlider(
min=100, max=1000, step=1, value=100,
description='Waktu Maksimum ($s$):',
layout=Layout(width='80%', height='50px'),
style={'description_width': '200px'},
readout_format='.1f',
)
max_err_button = widgets.Button(
description='Error Maksimum',
)
max_err_sin_button = widgets.Button(
description='Error Maksimum Sinusoidal',
)
min_err_button = widgets.Button(
description='Error Minimum',
)
tab1 = widgets.VBox(children=[range_A,range_a,range_Vin,range_h0,time_slider,max_err_button,min_err_button])
tab2 = widgets.VBox(children=[range_A,range_a,range_Vin,range_h0,range_amplitude,time_slider,max_err_sin_button,min_err_button])
tab = widgets.Tab(children=[tab1, tab2])
tab.set_title(0, 'Step')
tab.set_title(1, 'GangguanSinusoidal')
A = range_A.value
a = range_a.value
Vin = range_Vin.value
h0 = range_h0.value
tmax = time_slider.value
amp = range_amplitude.value
#Max error untuk step
def max_err_set(b=None):
range_A.value=10.0
range_a.value=0.1
range_Vin.value=100
range_h0.value=0
time_slider.value=1000
@max_err_button.on_click
def maximum_err_set(b):
max_err_set()
#Max error untuk sinusoidal
def max_err_sin_set(b=None):
range_A.value=10.0
range_a.value=2.9
range_Vin.value=100
range_h0.value=0
time_slider.value=150
range_amplitude.value=100
@max_err_sin_button.on_click
def maximum_err_sin_set(b):
max_err_sin_set()
#Min error untuk step dan sinusoidal
def min_err_set(b=None):
range_A.value=1.0
range_a.value=2.9
range_Vin.value=100
range_h0.value=50
time_slider.value=100
range_amplitude.value=0
@min_err_button.on_click
def minimum_err_set(b):
min_err_set()
def plot3(A, a, Vin, h0, amp, tmax):
    """Redraw the linear-vs-nonlinear tank response for the current widget values.

    Which model pair is integrated depends on the selected widget tab:
    tab 1 adds a rectified sinusoidal disturbance to the step input,
    tab 0 uses the plain step input.
    """
    # NOTE(review): the time axis starts at 50, not 0 — confirm this is intended.
    t = np.linspace(50, tmax, 1000)
    f, ax = plt.subplots(1, 1, figsize=(8, 6))
    if tab.selected_index == 1:
        # Step input plus rectified sinusoidal disturbance |amp*sin(pi*t)|.
        def dhdt_non_sin(h, t, Vin, A, a, g, amp):
            # Nonlinear model with the disturbed inflow.
            return ((Vin + abs(amp * np.sin(np.pi * t))) / A) - (a / A) * np.sqrt(2 * g * h)
        def dhhatdt_lin_sin(hhat, t, Vin, A, a, g, amp):
            # Linearized model. `hbar` is captured from the enclosing scope as a
            # closure and resolved at call time — i.e. after the assignment below.
            V = Vin + abs(amp * np.sin(np.pi * t))
            R = (A * np.sqrt(2 * hbar)) / (a * np.sqrt(g))
            Vinbar = Vin
            Vinhat = V - Vinbar
            return ((Vinhat / A) - (hhat / R))
        hbar = Vin ** 2 / (2 * g * a ** 2)  # steady-state level: linearization point
        hhat0 = h0 - hbar  # initial condition in deviation variables
        hlin = odeint(dhhatdt_lin_sin, hhat0, t, args=(Vin, A, a, g, amp))
        hlin = hlin + hbar  # convert deviation back to absolute level
        hnon = odeint(dhdt_non_sin, h0, t, args=(Vin, A, a, g, amp))
        ax.plot(t, hlin, color='blue', label='linier')
        ax.plot(t, hnon, color='red', label='non-linier')
        ax.title.set_text('Input Step dengan Gangguan Sinusoidal')
        ax.legend()
    if tab.selected_index == 0:
        # Plain step input; uses the module-level dhhatdt_lin / dhdt_non models.
        hbar = Vin ** 2 / (2 * g * a ** 2)  # steady-state level: linearization point
        R = (A * np.sqrt(2 * hbar)) / (a * np.sqrt(g))  # linearized outflow resistance
        hhat0 = h0 - hbar
        Vinbar = Vin
        Vinhat = Vin - Vinbar  # zero for a pure step (Vin equals its steady value)
        hlin = odeint(dhhatdt_lin, hhat0, t, args=(Vinhat, A, a, g, R))
        hlin = hlin + hbar  # convert deviation back to absolute level
        hnon = odeint(dhdt_non, h0, t, args=(Vin, A, a, g))
        ax.plot(t, hlin, color='blue', label='linier')
        ax.plot(t, hnon, color='red', label='non-linier')
        ax.title.set_text('Input Step')
        ax.legend()
ui = tab
out = widgets.interactive_output(plot3,{'A':range_A,'a':range_a,'Vin':range_Vin,'h0':range_h0,'amp':range_amplitude,'tmax':time_slider})
display(ui,out)
```
# Pembahasan
Dari grafik di atas: kurva biru (linear) dan merah (non linear), dapat dilihat bahwa kurva merah dan biru tersebut terkadang sama atau hampir berhimpit yang berarti error antara linear dan non-linear kecil, namun terkadang juga tidak berhimpit dan error antara linear dan non-linear menjadi besar. Dapat digunakan interaksi untuk meninjau efek perubahan parameter terhadap model respon sistem yang dibentuk dengan persamaan non-linear dan linear. Untuk dapat melihat perbedaan respon persamaan linar dan persamaan nonlinear serta menentukan keterbatasan persamaan hasil linearisasi, kita akan membuat error tersebut agar menjadi besar. Untuk error maksimum atau minimum dapat digunakan tombol "error maksimum" dan "error minimum". Adapun cara yang dilakukan adalah:
#### 1) Memperkecil ketinggian awal (h0) dari fluida di tabung, sehingga rentang h0 dan hfinal semakin besar
Hal ini akan menyebabkan h dan hbar memiliki perbedaan nilai yang besar saat proses transien. Ketika rentang h0 dan hfinal membesar, pada saat respon belum steady, h dan hbar akan semakin menjauh karena nilai hbar yang diambil adalah saat keadaan steady.
#### 2) Meningkatkan luas alas tabung (A)
Untuk nilai A, semakin besar A, maka akan semakin lama keadaan steady tercapai. Maka semakin lama proses h menuju hbar pada steady state, sehingga error semakin besar.
#### 3) Mengecilkan luas pipa luaran (a) [saat respon sisem sedang meningkat]
#### 4) Memperbesar luas pipa luaran (a) [saat respon sistem sedang menurun]
Kemudian untuk a, yang merupakan variabel penentu banyaknya fluida yang keluar dari tangki menentukan apakah respon tersebut menurun atau meningkat. Di faktor 2, 3, dan 4 ini kita juga mengetahui bahwa error akan terjadi saat keadaan transien akibat hbar diasumsikan saat keadaan steady. Saat respon sistem meningkat, jika a semakin kecil, perubahan
## $ \frac{dh}{dt} - \frac{\bar{dh}}{dt} $
semakin besar sehingga error semakin besar pada saat transien. Berlaku sebaliknya saat respon sistem sedang menurun.
#### 5) Vin meningkat (saat respon sedang meningkat)
#### 6) Vin menurun (saat respon sedang menurun)
Dari faktor 5 dan 6 dapat dilihat bahwa saat kita meningkatkan nilai Vin, kurva biru (linear) akan semakin memperlambat keadaan steady (kurva semakin ke kanan) yang berarti error dari linearisasi semakin besar. Hal ini berhubungan dengan asumsi hbar diambil saat keadaan steady.
#### 7) Amplitudo sinusoidal yang meningkat
Faktor 7 menjelaskan hubungan Vinbar dan Vin harus berbeda nilai sekecil mungkin dan harus sesuai dengan rentang kerja sistem.
| github_jupyter |
##### Copyright 2019 Qiyang Hu
```
#@title Licensed under MIT License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://huqy.github.io/idre_learning_machine_learning/LICENSE.md
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Useful Routines for Collab
## Start Collab with requesting a free GPU from Google Cloud
Edit -> Notebook settings -> Select GPU as Hardware accelerator
**OR**
1. Google Drive -> New -> More -> Connect more apps -> Collaboratory
2. Google Drive -> New -> More -> Collaboratory
3. Runtime -> Interrupt execution
4. Runtime -> Change runtime type -> Select GPU as Hardware accelerator
## Check the resources obtained from Collab
Google Colab is a free-to-use Jupyter notebook service that lets you use a free Tesla GPU (e.g. a T4). It also provides a total of about 12 GB of RAM, and a session can run for up to 12 hours in a row.
```
!lsb_release -a
!uname -r
!lscpu | grep 'Model name'
!lscpu | grep 'Socket(s):'
!lscpu | grep 'Thread(s) per core'
!lscpu | grep "L3 cache"
!cat /proc/meminfo | grep 'MemAvailable'
!df -h / | awk '{print $4}'
!nvidia-smi
import tensorflow as tf
tf.test.gpu_device_name()
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
```
## Check the libs in Colab
```
import sys
print('The python version is', sys.version)
import sklearn
print('The scikit-learn version is {}.'.format(sklearn.__version__))
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
```
## Mounting Google Drive to Collab's /content/drive
```
# Mount Google Drive at /content/drive (prompts for an authorization code).
from google.colab import drive
drive.mount('/content/drive')
```
## Using Kaggle API for Google Colaboratory
We need first to download the token from Kaggle:
1. Go to kaggle.com -> log in -> click "my account"
2. Scroll down to API and hit “Create New API Token.” It will prompt to download a file called **kaggle.json** to your local computer.
```
# Upload kaggle.json (the API token downloaded from kaggle.com), install it
# where the Kaggle CLI expects it with owner-only permissions, then exercise
# the CLI against the Titanic competition.
from google.colab import files
files.upload()
#!pip install -q kaggle
!mkdir -p /root/.kaggle
!cp kaggle.json /root/.kaggle
!chmod 600 /root/.kaggle/kaggle.json
# NOTE(review): the braces in -v{/content} look odd -- confirm the intended
# download path really is /content
!kaggle config set -n path -v{/content}
!kaggle competitions list -s titanic
!kaggle competitions download -c titanic -p /content
```
## Direct open a jupyter notebook with colab
1. Change URL from "https://github.com/..." to "https://colab.research.google.com/github/..."
2. OR: just use the "[Open in Colab](https://chrome.google.com/webstore/detail/open-in-colab/iogfkhleblhcpcekbiedikdehleodpjo)" Chrome extension
---
## References:
[1] https://www.kdnuggets.com/2018/02/google-colab-free-gpu-tutorial-tensorflow-keras-pytorch.html
[2] https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb#scrollTo=K-NVg7RjyeTk
| github_jupyter |
# Simple ray tracing
```
# setup path to ray_tracing package
import sys
sys.path.append('~/Documents/python/ray_tracing/')
import ray_tracing as rt
from matplotlib import rcParams
rcParams['figure.figsize'] = [8, 4]
import matplotlib.pyplot as plt
plt.ion()
```
## Principle
The package 'ray_tracing.py' provides you with the means to quickly plot an optical system. Currently included are Lenses (L) with and without an aperture and simple apertures (A), such as irises, that are separated by distances (D). All ray tracing is done according to the paraxial approximation: $\sin \alpha \approx \tan \alpha \approx \alpha$, the larger $\alpha$ the larger the error!
### Example 1: one lens
Lets look at one lens of focal length 100 mm, an object shall be placed 150 mm apart from it and we look at two rays, __[the marginal and the principle ray](https://en.wikipedia.org/wiki/Ray_(optics))__. A ray is given as a vector $(h, \varphi)$, where $h$ is the height of the starting point and $\varphi$ is the angle measured against the optical axis in rad.
```
# One f=100 mm lens 150 mm after the object, followed by 500 mm of free space.
osys = rt.OpticalSystem(' d150 | L100 | d500 ')

# A ray is a (height, angle) pair; angles are in rad (paraxial regime).
marginal_ray = (0.0, 0.005)
chief_ray = (1.0, -1/150)

ax = osys.plot_statics()
osys.plot_ray(marginal_ray, label="marginal ray")
osys.plot_ray(chief_ray, label="chief ray")
ax.legend()
```
You can see that the marginal ray (blue) crosses the optical axis again at 450 mm. This is where the image is formed. The height of the chief (orange) ray at that position is 2.0 mm. Let's check that:
```
# Thin-lens check: image position and magnification for the setup above.
rt.get_image_pos(object_distance=150, focal_length=100)
rt.get_image_size(object_size=1.0, object_distance=150, focal_length=100)
```
The image is formed 300 mm after the lens, hence at 450 mm and it's magnified twice.
### Example 2: two lens system
```
# Two-lens system: f=100 mm and f=50 mm lenses separated by 400 mm.
osys = rt.OpticalSystem(' d150 | L100 | d400 | L50 | d150 ')

# Rays are (height, angle) pairs, angles in rad.
marginal_ray = (0.0, 0.005)
off_axis_ray = (1.0, -1/150)

ax = osys.plot_statics()
osys.plot_ray(marginal_ray, label="marginal ray")
osys.plot_ray(off_axis_ray, label="meridional ray")
ax.legend();
```
### Example 3: two lens system with apertures
Let's now consider an optical sytem with lenses of finite size. Apertures of lenses can be added by '/' following a number. Apertures of lenses are plotted as thick black lines.
```
# Two-lens system with finite apertures ('/<radius>' after the focal length).
osys = rt.OpticalSystem(' d150 | L100/1 | d400 | L50/2 | d150 ')
# Rays are (height, angle) pairs; angles in rad (paraxial approximation).
height_1 = 0.0; phi_1 = 0.005;
ray_1 = (height_1, phi_1)
height_2 = 1.0; phi_2 = -1/150;
ray_2 = (height_2, phi_2)
# (a duplicated copy/paste re-definition of ray_2 was removed here)
height_3 = 0.5; phi_3 = -0.5/150;
ray_3 = (height_3, phi_3)
ax = osys.plot_statics()
osys.plot_ray(ray_1, label="marginal ray")
osys.plot_ray(ray_2, label="chief ray")
osys.plot_ray(ray_3, label="meridional ray")
ax.legend();
```
Rays that do not pass through an aperture are blocked and are not propagated further.
```
"""
ray traycing: led | d0 | l1 | d1 | l2 | d2 | d3 | l3 | d4 | l4 | d5 | d6 | obj | d7
"""
trace = 'd15 | L15/5.5 | d10 | L40/12.5 | d40 | d80 | L80/15 | d60 | L300/16 | d300 | d3.33 | L3.33/4.4 | d3.33'
sequence = rt.trace_parser(trace)
from numpy import arange
plt.ion()
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
for idx, h in enumerate(arange(-0.5, 0.6, 0.125)):
rt.plot_ray(h, sequence, axis=ax )
fig.subplots_adjust(right=0.8)
ax.legend(loc='center right', bbox_to_anchor=(1.3, 0.5));
```
| github_jupyter |
```
#default_exp utils
```
# Utility Functions
> Utility functions to help with downstream tasks
```
#hide
from nbdev.showdoc import *
from self_supervised.byol import *
from self_supervised.simclr import *
from self_supervised.swav import *
#export
from fastai.vision.all import *
```
## Loading Weights for Downstream Tasks
```
#export
def transfer_weights(learn:Learner, weights_path:Path, device:torch.device=None):
    """Load and freeze pretrained weights inplace from `weights_path` using `device`.

    Copies every backbone weight from the self-supervised checkpoint into
    `learn.model` (matching on name after re-prefixing with 'encoder.'),
    then freezes the learner.

    Raises `ValueError` on a shape mismatch and `Exception` when no weight
    names match at all.
    """
    if device is None: device = learn.dls.device
    new_state_dict = torch.load(weights_path, map_location=device)
    # allow for simply exporting the raw PyTorch model: Learner.save nests
    # the weights under a 'model' key, a bare state_dict has no such key
    if 'model' in new_state_dict: new_state_dict = new_state_dict['model']
    learn_state_dict = learn.model.state_dict()
    matched_layers = 0
    for name, param in learn_state_dict.items():
        # checkpoints saved by this library prefix the backbone with
        # 'encoder.'; strip the downstream model's leading '<idx>.' and
        # re-prefix to line the names up
        name = 'encoder.'+name[2:]
        if name in new_state_dict:
            matched_layers += 1
            input_param = new_state_dict[name]
            if input_param.shape == param.shape:
                param.copy_(input_param)
            else:
                raise ValueError(f'Shape mismatch at {name}, please ensure you have the same backbone')
        # names absent from the checkpoint (e.g. a new head) are left as-is
    if matched_layers == 0: raise Exception("No shared weight names were found between the models")
    learn.model.load_state_dict(learn_state_dict)
    learn.freeze()
    print("Weights successfully transferred!")
```
When training models with this library, the `state_dict` will change, so loading it back into `fastai` as an encoder won't be a perfect match. This helper function aims to make that simple.
Example usage:
First prepare the downstream-task dataset (`ImageWoof` is shown here):
```
def get_dls(bs:int=32):
    "Prepare `IMAGEWOOF` `DataLoaders` with `bs`"
    path = untar_data(URLs.IMAGEWOOF)
    # image pipeline + labels taken from the parent folder name
    tfms = [[PILImage.create], [parent_label, Categorize()]]
    item_tfms = [ToTensor(), Resize(224)]
    batch_tfms = [FlipItem(), RandomResizedCrop(224, min_scale=0.35),
                  IntToFloatTensor(), Normalize.from_stats(*imagenet_stats)]
    items = get_image_files(path)
    # train/valid split by grandparent directory ('val' holds the validation set)
    splits = GrandparentSplitter(valid_name='val')(items)
    dsets = Datasets(items, tfms, splits=splits)
    # bug fix: the batch size was hard-coded to 32, silently ignoring `bs`
    dls = dsets.dataloaders(after_item=item_tfms, after_batch=batch_tfms,
                            bs=bs)
    return dls
dls = get_dls(bs=32)
```
For the sake of example we will make and save a SWaV model trained for one epoch (in reality you'd want to train for many more):
```
# Build a SWAV self-supervised model on an xresnet34 backbone and save the
# (here untrained) Learner so its weights can be transferred below.
net = create_swav_model(arch=xresnet34, pretrained=False)
learn = Learner(dls, net, SWAVLoss(), cbs=[SWAV()])
learn.save('../../../swav_test');
```
Followed by a `Learner` designed for classification with a simple custom head for our `xresnet`:
```
learn = cnn_learner(dls, xresnet34, pretrained=False)
```
Before loading in all the weights:
```
transfer_weights(learn, '../../swav_test.pth')
```
Now we can do downstream tasks with our pretrained models!
| github_jupyter |
```
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.stats.proportion import proportion_confint
from statsmodels.stats.weightstats import CompareMeans, DescrStatsW, ztest
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from statsmodels.stats.weightstats import *
from statsmodels.stats.proportion import proportion_confint
import warnings
warnings.filterwarnings('ignore')
```
Прежде всего, скопируем нужные функции из учебного ноутбука. Они понадобятся.
```
def proportions_diff_confint_ind(sample1, sample2, alpha = 0.05):
    """Wald (1 - alpha) confidence interval for p1 - p2, independent samples.

    `sample1`/`sample2` are 0/1 sequences; returns (left, right) bounds.
    """
    # bug fix: the notebook imports `from scipy import stats`, so the bare
    # `scipy.stats.norm` reference raised NameError -- use `stats.norm`
    z = stats.norm.ppf(1 - alpha / 2.)
    p1 = float(sum(sample1)) / len(sample1)
    p2 = float(sum(sample2)) / len(sample2)
    se = np.sqrt(p1 * (1 - p1) / len(sample1) + p2 * (1 - p2) / len(sample2))
    return ((p1 - p2) - z * se, (p1 - p2) + z * se)
def proportions_diff_z_stat_ind(sample1, sample2):
    """Z-statistic for the difference of two independent proportions (pooled)."""
    n1, n2 = len(sample1), len(sample2)
    p1 = sum(sample1) / n1
    p2 = sum(sample2) / n2
    # pooled success proportion across both samples
    pooled = (p1 * n1 + p2 * n2) / (n1 + n2)
    se = np.sqrt(pooled * (1 - pooled) * (1. / n1 + 1. / n2))
    return (p1 - p2) / se
def proportions_diff_z_test(z_stat, alternative = 'two-sided'):
    """p-value of a z-statistic under the given alternative hypothesis.

    `alternative` is one of 'two-sided', 'less' or 'greater';
    raises ValueError otherwise.
    """
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    # bug fix: use the imported `stats` module; the file never imports
    # `scipy` itself, so `scipy.stats.norm` raised NameError
    if alternative == 'two-sided':
        return 2 * (1 - stats.norm.cdf(np.abs(z_stat)))
    if alternative == 'less':
        return stats.norm.cdf(z_stat)
    return 1 - stats.norm.cdf(z_stat)
def proportions_diff_confint_rel(sample1, sample2, alpha = 0.05):
    """Confidence interval for the difference of two related (paired) proportions."""
    # bug fix: `scipy` itself is never imported (only `from scipy import stats`)
    z = stats.norm.ppf(1 - alpha / 2.)
    pairs = list(zip(sample1, sample2))
    n = len(pairs)
    # discordant pairs: f counts (1, 0), g counts (0, 1)
    f = sum(1 for a, b in pairs if a == 1 and b == 0)
    g = sum(1 for a, b in pairs if a == 0 and b == 1)
    center = float(f - g) / n
    half_width = z * np.sqrt(float(f + g) / n**2 - float((f - g)**2) / n**3)
    return (center - half_width, center + half_width)
def proportions_diff_z_stat_rel(sample1, sample2):
    """Z-statistic for the difference of two related (paired) proportions."""
    pairs = list(zip(sample1, sample2))
    n = len(pairs)
    # discordant pairs: f counts (1, 0), g counts (0, 1)
    f = sum(1 for a, b in pairs if a == 1 and b == 0)
    g = sum(1 for a, b in pairs if a == 0 and b == 1)
    return float(f - g) / np.sqrt(f + g - float((f - g)**2) / n )
```
В одном из выпусков программы "Разрушители легенд" проверялось, действительно ли заразительна зевота. В эксперименте участвовало 50 испытуемых, проходивших собеседование на программу. Каждый из них разговаривал с рекрутером; в конце 34 из 50 бесед рекрутер зевал. Затем испытуемых просили подождать решения рекрутера в соседней пустой комнате.
Во время ожидания 10 из 34 испытуемых экспериментальной группы и 4 из 16 испытуемых контрольной начали зевать. Таким образом, разница в доле зевающих людей в этих двух группах составила примерно 4.4%. Ведущие заключили, что миф о заразительности зевоты подтверждён.
Можно ли утверждать, что доли зевающих в контрольной и экспериментальной группах отличаются статистически значимо? Посчитайте достигаемый уровень значимости при альтернативе заразительности зевоты, округлите до четырёх знаков после десятичной точки.
Имеются данные измерений двухсот швейцарских тысячефранковых банкнот, бывших в обращении в первой половине XX века. Сто из банкнот были настоящими, и сто — поддельными.
Отделите 50 случайных наблюдений в тестовую выборку с помощью функции $\textbf{sklearn.model_selection.train_test_split}$ (в старых версиях scikit-learn — $\textbf{sklearn.cross_validation.train_test_split}$; зафиксируйте $\textbf{random state = 1)}$. На оставшихся $150$ настройте два классификатора поддельности банкнот:
1. логистическая регрессия по признакам $X_1,X_2,X_3$
2. логистическая регрессия по признакам $X_4,X_5,X_6$
Каждым из классификаторов сделайте предсказания меток классов на тестовой выборке. Одинаковы ли доли ошибочных предсказаний двух классификаторов? Проверьте гипотезу, вычислите достигаемый уровень значимости. Введите номер первой значащей цифры (например, если вы получили $5.5\times10^{-8}$, нужно ввести 8).
```
# Load the banknote measurements; `real` is the authenticity label.
df = pd.read_table('banknotes.txt')
y = df['real']
X = df.drop(['real'], axis=1)

# Hold out 50 random observations as the test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size=50)

# Two disjoint feature subsets, one per classifier.
X1_train, X1_test = X_train[['X1', 'X2', 'X3']], X_test[['X1', 'X2', 'X3']]
X2_train, X2_test = X_train[['X4', 'X5', 'X6']], X_test[['X4', 'X5', 'X6']]

logreg = LogisticRegression()
pred1 = logreg.fit(X1_train, y_train).predict(X1_test)
pred2 = logreg.fit(X2_train, y_train).predict(X2_test)

# 1 where the prediction matches the true label, 0 otherwise.
truth = np.array(y_test)
pred1_acc = np.array([int(p == t) for p, t in zip(pred1, truth)])
pred2_acc = np.array([int(p == t) for p, t in zip(pred2, truth)])

print('First prediction accuracy:', sum(pred1_acc)/len(pred1_acc),
      '\n','Second prediction accuracy:', sum(pred2_acc)/len(pred2_acc))
```
Вывод - доли ошибок не одинаковы
В предыдущей задаче посчитайте $95\%$ доверительный интервал для разности долей ошибок двух классификаторов. Чему равна его ближайшая к нулю граница? Округлите до четырёх знаков после десятичной точки.
Построим $95\%$ доверительный интервал для разницы предсказаний
```
# 95% CI for the difference in error shares of the two classifiers,
# plus the p-value of the paired (related-samples) z-test.
print('95%% доверительный интервал для разницы предсказаний: [%.4f, %.4f]' %
      proportions_diff_confint_rel(pred1_acc, pred2_acc))
print ("p-value: %f" % proportions_diff_z_test(proportions_diff_z_stat_rel(pred1_acc, pred2_acc)))
```
Ежегодно более 200000 людей по всему миру сдают стандартизированный экзамен GMAT при поступлении на программы MBA. Средний результат составляет 525 баллов, стандартное отклонение — 100 баллов.
Сто студентов закончили специальные подготовительные курсы и сдали экзамен. Средний полученный ими балл — 541.4. Проверьте гипотезу о неэффективности программы против односторонней альтернативы о том, что программа работает. Отвергается ли на уровне значимости 0.05 нулевая гипотеза? Введите достигаемый уровень значимости, округлённый до 4 знаков после десятичной точки.
```
# GMAT problem setup.
n = 100              # sample size (students who took the course)
mean_result = 525    # population mean score (null hypothesis)
stand_dev = 100      # population standard deviation
mean_spec = 541.4    # sample mean of the course graduates
alpha = 0.05         # significance level
```
Реализуем формулу: $Z(X^n) = \frac{\overline{X}-\mu_0}{\frac{\sigma}{\sqrt{n}}}$
```
def z_conf(mu, sigma, n, x_mean):
    """Z-statistic of a sample mean `x_mean` against hypothesised mean `mu`."""
    standard_error = sigma / np.sqrt(n)
    return (x_mean - mu) / standard_error
# One-sided p-value P(Z >= z) under H0, rounded to 4 decimal places.
print((z_conf(mu = mean_result, x_mean=mean_spec, n=n, sigma=stand_dev)))
print(round(1-stats.norm.cdf(z_conf(mu = mean_result, x_mean=mean_spec, n=n, sigma=stand_dev)),4))
```
Оцените теперь эффективность подготовительных курсов, средний балл 100 выпускников которых равен 541.5. Отвергается ли на уровне значимости 0.05 та же самая нулевая гипотеза против той же самой альтернативы? Введите достигаемый уровень значимости, округлённый до 4 знаков после десятичной точки.
```
# Same one-sided test with sample mean 541.5 instead of 541.4.
print((z_conf(mu = mean_result, x_mean=541.5, n=n, sigma=stand_dev)))
print(round(1-stats.norm.cdf(z_conf(mu = mean_result, x_mean=541.5, n=n, sigma=stand_dev)),4))
```
| github_jupyter |
## How-to guide for Customer Churn use-case on Abacus.AI platform
This notebook provides you with a hands on environment to build a customer churn prediction model using the Abacus.AI Python Client Library.
We'll be using the [Telco Customer Churn Dataset](https://s3.amazonaws.com//realityengines.exampledatasets/customer_churn/telco.csv), which contains information about multiple users, their attributes, and whether or not they churned.
1. Install the Abacus.AI library
```
!pip install abacusai
```
We'll also import pandas and pprint tools for visualization in this notebook.
```
import pandas as pd # A tool we'll use to download and preview CSV files
import pprint # A tool to pretty print dictionary outputs
# Shared pretty-printer for API responses throughout the notebook
pp = pprint.PrettyPrinter(indent=2)
```
2. Add your Abacus.AI [API Key](https://abacus.ai/app/profile/apikey) generated using the API dashboard as follows:
```
#@title Abacus.AI API Key
# Paste the key generated at https://abacus.ai/app/profile/apikey
api_key = '' #@param {type: "string"}
```
3. Import the Abacus.AI library and instantiate a client.
```
# Instantiate an authenticated Abacus.AI API client.
from abacusai import ApiClient
client = ApiClient(api_key)
```
## 1. Create a Project
Abacus.AI projects are containers that have ML features and trained models. By specifying a business **Use Case**, Abacus.AI tailors the deep learning algorithms to produce the best performing model catered specifically for your data.
We'll call the `list_use_cases` method to retrieve a list of the Use Cases currently available on the Abacus.AI platform.
```
client.list_use_cases()
```
In this notebook, we're going to create a customer churn prediction model using the Telco Customer Churn dataset. The 'CUSTOMER_CHURN' use case is best tailored for this situation. For the purpose of taking an example, we will be using the [Telco Customer Churn Dataset](https://s3.amazonaws.com//realityengines.exampledatasets/customer_churn/telco.csv) that has user information, attributes, and whether or not they churned.
```
#@title Abacus.AI Use Case
# Churn prediction maps onto the platform's CUSTOMER_CHURN use case.
use_case = 'CUSTOMER_CHURN' #@param {type: "string"}
```
By calling the `describe_use_case_requirements` method we can view what features are required for this use_case and what features are recommended.
```
# Show the required and recommended dataset types/columns for this use case.
for requirement in client.describe_use_case_requirements(use_case):
    pp.pprint(requirement.to_dict())
```
Finally, let's create the project.
```
# Create the project container that will hold feature groups and models.
churn_project = client.create_project(name='Customer Churn Prediction', use_case=use_case)
churn_project.to_dict()
```
**Note: When feature_groups_enabled is True then the use case supports feature groups (collection of ML features). Feature groups are created at the organization level and can be tied to a project to further use it for training ML models**
## 2. Add Datasets to your Project
Abacus.AI can read datasets directly from `AWS S3`, `Google Cloud Storage`, and other cloud storage buckets, you can also connect your dataset connector and pull your data from them (bigquery, snowflake, etc.). Otherwise you can also directly upload and store your datasets with Abacus.AI. For this notebook, we will have Abacus.AI read the datasets directly from a public S3 bucket's location.
We are using one dataset for this notebook. We'll tell Abacus.AI how the dataset should be used when creating it by tagging the dataset with a special Abacus.AI **Dataset Type**.
- [Telco Customer Churn Dataset](https://s3.amazonaws.com//realityengines.exampledatasets/customer_churn/telco.csv) (**USER_ATTRIBUTES**):
This dataset contains information about multiple users for a specified company, along with whether or not they churned.
### Add the dataset to Abacus.AI
First we'll use Pandas to preview the file, then add it to Abacus.AI.
```
pd.read_csv('https://s3.amazonaws.com//realityengines.exampledatasets/customer_churn/telco.csv')
```
Using the Create Dataset API, we can tell Abacus.AI the public S3 URI of where to find the datasets. We will also give each dataset a Refresh Schedule, which tells Abacus.AI when it should refresh the dataset (take an updated/latest copy of the dataset).
If you're unfamiliar with Cron Syntax, Crontab Guru can help translate the syntax back into natural language: [https://crontab.guru/#0_12_\*_\*_\*](https://crontab.guru/#0_12_*_*_*)
**Note: This cron string will be evaluated in UTC time zone**
```
# Register the S3-hosted dataset; the cron string '0 12 * * *' (UTC)
# refreshes it daily at noon.
churn_dataset = client.create_dataset_from_file_connector(name='Telco Customer Churn',
                                     location='s3://realityengines.exampledatasets/customer_churn/telco.csv', table_name='churn_prediction', refresh_schedule = '0 12 * * *')
datasets = [churn_dataset]
# Block until the platform has finished inspecting the dataset schema
for dataset in datasets:
    dataset.wait_for_inspection()
```
## 3. Create Feature Groups and add them to your Project
Datasets are created at the organization level and can be used to create feature groups as follows:
```
feature_group = client.create_feature_group(table_name='churn_pred_fg', sql='SELECT * FROM churn_prediction')
```
Adding Feature Group to the project:
```
client.add_feature_group_to_project(feature_group_id=feature_group.feature_group_id,project_id = churn_project.project_id)
```
Setting the Feature Group type according to the use case requirements:
```
client.set_feature_group_type(feature_group_id=feature_group.feature_group_id, project_id = churn_project.project_id, feature_group_type= "USER_ATTRIBUTES")
```
Check current Feature Group schema:
```
client.get_feature_group_schema(feature_group_id=feature_group.feature_group_id)
```
#### For each **Use Case**, there are special **Column Mappings** that must be applied to a column to fulfill use case requirements. We can find the list of available **Column Mappings** by calling the *Describe Use Case Requirements* API:
```
# Show the allowed column mappings, then map the churn label and user-id columns.
client.describe_use_case_requirements(use_case)[0].allowed_feature_mappings
client.set_feature_mapping(project_id = churn_project.project_id,feature_group_id= feature_group.feature_group_id, feature_name='Churn',feature_mapping='CHURNED_YN')
client.set_feature_group_column_mapping(project_id = churn_project.project_id,feature_group_id= feature_group.feature_group_id, column='customerID',column_mapping='USER_ID')
```
For each required Feature Group Type within the use case, you must assign the Feature group to be used for training the model:
```
client.use_feature_group_for_training(project_id = churn_project.project_id,feature_group_id= feature_group.feature_group_id)
```
Now that we've our feature groups assigned, we're almost ready to train a model!
To be sure that our project is ready to go, let's call project.validate to confirm that all the project requirements have been met:
```
churn_project.validate()
```
## 4. Train a Model
For each **Use Case**, Abacus.AI has a bunch of options for training. We can call the *Get Training Config Options* API to see the available options.
```
churn_project.get_training_config_options()
```
In this notebook, we'll just train with the default options, but definitely feel free to experiment, especially if you have familiarity with Machine Learning.
```
# Kick off training with default options; returns immediately with a model handle.
churn_model = churn_project.train_model(training_config={})
churn_model
```
After we start training the model, we can call this blocking call that routinely checks the status of the model until it is trained and evaluated:
```
churn_model.wait_for_evaluation()
```
**Note that model training might take some minutes to some hours depending upon the size of datasets, complexity of the models being trained and a variety of other factors**
## **Checkpoint** [Optional]
As model training can take hours to complete, your page could time out or you might end up hitting the refresh button; this section helps you restore your progress:
```
# Checkpoint: re-create the client and re-attach to the existing
# project/model after a page timeout or refresh.
!pip install abacusai
import pandas as pd
import pprint
pp = pprint.PrettyPrinter(indent=2)
api_key = '' #@param {type: "string"}
from abacusai import ApiClient
client = ApiClient(api_key)
churn_project = next(project for project in client.list_projects() if project.name == 'Customer Churn Prediction')
# assumes the most recently created model is the one trained above
churn_model = churn_project.list_models()[-1]
churn_model.wait_for_evaluation()
```
## 5. Evaluate your Model Metrics
After your model is done training you can inspect the model's quality by reviewing the model's metrics:
```
pp.pprint(churn_model.get_metrics().to_dict())
```
To get a better understanding on what these metrics mean, visit our [documentation](https://abacus.ai/app/help/useCases/CUSTOMER_CHURN/training) page.
## 6. Deploy Model
After the model has been trained, we need to deploy the model to be able to start making predictions. Deploying a model will reserve cloud resources to host the model for Realtime and/or batch predictions.
```
# Reserve serving resources for realtime/batch predictions and wait until live.
churn_deployment = client.create_deployment(name='Customer Churn Deployment', description='Customer Churn Prediction Model Deployment', model_id=churn_model.model_id)
churn_deployment.wait_for_deployment()
```
After the model is deployed, we need to create a deployment token for authenticating prediction requests. This token is only authorized to predict on deployments in this project, so it's safe to embed this token inside of a user-facing application or website.
```
# Deployment tokens are scoped to this project only, so they are safe to
# embed in a user-facing application.
deployment_token = churn_project.create_deployment_token().deployment_token
deployment_token
```
## 7. Predict
Now that you have an active deployment and a deployment token to authenticate requests, you can make the `predict_churn` API call below.
This command will return the probability of a user with specified attributes churning. The prediction would be performed based on the specified dataset, which, in this case, contains information about the user, their attributes, and whether or not they churned.
```
# Realtime prediction: returns the churn probability for one user record.
ApiClient().predict_churn(deployment_token=deployment_token,
               deployment_id=churn_deployment.deployment_id,
               query_data={"MonthlyCharges":69.7,"TotalCharges":560.85,"gender":"Male","SeniorCitizen":"1","Partner":"No","Dependents":"No","tenure":"8","PhoneService":"Yes","MultipleLines":"No","InternetService":"Fiber optic","OnlineSecurity":"No","OnlineBackup":"No","DeviceProtection":"No","TechSupport":"No","StreamingTV":"No","StreamingMovies":"No","Contract":"Month-to-month","PaperlessBilling":"Yes","PaymentMethod":"Electronic check"})
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.