text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
!wget https://nlp.stanford.edu/projects/snli/snli_1.0.zip
!wget http://nlp.stanford.edu/data/glove.840B.300d.zip
!unzip snli_1.0.zip
!unzip glove.840B.300d.zip
from os.path import join as pjoin, isfile
import json
import numpy as np
TEXT_DATA_DIR = 'snli_1.0'
def load_data(tier):
    """Load one SNLI split ('train', 'dev' or 'test') from TEXT_DATA_DIR.

    Skips examples whose gold label is '-' (no annotator consensus).

    Returns:
        (premise, hypothesis, label): three parallel lists of strings.
    """
    premise = []
    hypothesis = []
    label = []
    with open(pjoin(TEXT_DATA_DIR, 'snli_1.0_' + tier + '.jsonl')) as f:
        # Stream line by line instead of f.readlines() so the whole split
        # is never held in memory as raw text.
        for line in f:
            d = json.loads(line)
            if d['gold_label'] != '-':
                premise.append(d['sentence1'])
                hypothesis.append(d['sentence2'])
                label.append(d['gold_label'])
    # len(label) replaces the hand-maintained cnt counter.
    print('# of', tier, 'samples :', len(label), end=' | ')
    print('Entailment :', label.count('entailment'), '| Neutral :', label.count('neutral'), '| Contradiction :', label.count('contradiction'))
    return (premise, hypothesis, label)
# Load the three SNLI splits (parallel lists of premises, hypotheses, labels).
train = load_data('train')
dev = load_data('dev')
test = load_data('test')
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import keras.backend as K
from keras.layers import LSTM, Input, Dot, Softmax, Multiply, Concatenate, Subtract, Dense, Lambda, Embedding, Dropout
from keras.layers.wrappers import Bidirectional
from keras.models import Model, load_model
SentenceLen = 100  # max tokens per sentence after padding/truncation
WordVecLen = 300   # GloVe vector dimensionality
# Fit the tokenizer once over all splits and cache it on disk.
if not isfile('tokenizer.pickle'):
    # NOTE(review): num_words=SentenceLen caps the *vocabulary* at 100 words
    # (num_words limits vocabulary size, not sentence length), which conflicts
    # with the full-vocabulary embedding matrix built below -- confirm intent.
    tokenizer = Tokenizer(num_words=SentenceLen)
    tokenizer.fit_on_texts(train[0] + train[1] + dev[0] + dev[1] + test[0] + test[1])
    with open('tokenizer.pickle', 'wb') as handle:
        pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
# NOTE(review): this file handle is never closed explicitly.
tokenizer = pickle.load(open('tokenizer.pickle', 'rb'))
def PadSeq(text):
    """Map raw texts to integer id sequences padded/truncated to SentenceLen."""
    return pad_sequences(tokenizer.texts_to_sequences(text), maxlen=SentenceLen)
# Build the GloVe embedding matrix once and cache it as embeddings.npy.
if not isfile('embeddings.npy'):
    embeddings_index = {}
    f = open('glove.840B.300d.txt', encoding='utf8')
    for line in f:
        values = line.split()
        # A GloVe token may itself contain spaces (e.g. '. . .'), so take the
        # last 300 fields as the vector and join everything else as the token.
        word = ' '.join(values[:-300])
        coefs = np.asarray(values[-300:], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    word_index = tokenizer.word_index
    # Row 0 stays all-zero (padding index); words missing from GloVe also
    # keep an all-zero row.
    embedding_matrix = np.zeros((len(word_index) + 1, WordVecLen))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    np.save('embeddings.npy', embedding_matrix)
def load_embeddings():
    """Build a frozen (non-trainable) Keras Embedding layer from the cached
    GloVe matrix, sized to the tokenizer's full vocabulary."""
    embedding_matrix = np.load('embeddings.npy')
    embedding_layer = Embedding(len(tokenizer.word_index) + 1,
                                WordVecLen,
                                weights=[embedding_matrix],
                                input_length=SentenceLen,
                                trainable=False)
    return embedding_layer
embedding_layer = load_embeddings()
def labelToVec(labels):
    """One-hot encode SNLI gold labels.

    Index order matches the model's 3-way softmax output:
    0 = entailment, 1 = contradiction, 2 = neutral.

    Args:
        labels: iterable of label strings.

    Returns:
        np.ndarray of shape (len(labels), 3).

    Raises:
        ValueError: if a label is not one of the three known classes.
    """
    # Table lookup replaces the original if/elif chain.
    one_hot = {
        'entailment': [1.0, 0.0, 0.0],
        'contradiction': [0.0, 1.0, 0.0],
        'neutral': [0.0, 0.0, 1.0],
    }
    vec = []
    for label in labels:
        try:
            vec.append(one_hot[label])
        except KeyError:
            raise ValueError('Unknown label %s' % (label))
    return np.array(vec)
# One-hot labels and padded integer sequences for each split.
train_y = labelToVec(train[2])
train_x = [PadSeq(train[0]), PadSeq(train[1])]
dev_y = labelToVec(dev[2])
dev_x = [PadSeq(dev[0]), PadSeq(dev[1])]
test_y = labelToVec(test[2])
test_x = [PadSeq(test[0]), PadSeq(test[1])]
# Free the raw text and tokenizer -- only the numeric arrays are needed now.
del train
del dev
del test
del tokenizer
# --- ESIM-style NLI model: input encoding, soft attention alignment,
# local inference enhancement, composition, pooling, 3-way classifier. ---
bilstm1 = Bidirectional(LSTM(300, return_sequences=True))  # shared input encoder
bilstm2 = Bidirectional(LSTM(300, return_sequences=True))  # shared composition layer
i1 = Input(shape=(SentenceLen,), dtype='float32')  # premise token ids
i2 = Input(shape=(SentenceLen,), dtype='float32')  # hypothesis token ids
x1 = embedding_layer(i1)
x2 = embedding_layer(i2)
# Both sentences are encoded by the same BiLSTM (bilstm1).
x1 = bilstm1(x1)
x2 = bilstm1(x2)
# Attention energies: e[i, j] = dot product of premise step i and hypothesis step j.
e = Dot(axes=2)([x1, x2])
e1 = Softmax(axis=2)(e)  # premise -> hypothesis attention weights
e2 = Softmax(axis=1)(e)  # hypothesis -> premise attention weights
e1 = Lambda(K.expand_dims, arguments={'axis' : 3})(e1)
e2 = Lambda(K.expand_dims, arguments={'axis' : 3})(e2)
# _x1: hypothesis states softly aligned to each premise step (weighted sum).
_x1 = Lambda(K.expand_dims, arguments={'axis' : 1})(x2)
_x1 = Multiply()([e1, _x1])
_x1 = Lambda(K.sum, arguments={'axis' : 2})(_x1)
# _x2: premise states softly aligned to each hypothesis step.
_x2 = Lambda(K.expand_dims, arguments={'axis' : 2})(x1)
_x2 = Multiply()([e2, _x2])
_x2 = Lambda(K.sum, arguments={'axis' : 1})(_x2)
# Local inference enhancement: [a; aligned; a - aligned; a * aligned].
m1 = Concatenate()([x1, _x1, Subtract()([x1, _x1]), Multiply()([x1, _x1])])
m2 = Concatenate()([x2, _x2, Subtract()([x2, _x2]), Multiply()([x2, _x2])])
y1 = bilstm2(m1)
y2 = bilstm2(m2)
# Pooling over the time axis: max and mean for both sentences.
mx1 = Lambda(K.max, arguments={'axis' : 1})(y1)
av1 = Lambda(K.mean, arguments={'axis' : 1})(y1)
mx2 = Lambda(K.max, arguments={'axis' : 1})(y2)
av2 = Lambda(K.mean, arguments={'axis' : 1})(y2)
y = Concatenate()([av1, mx1, av2, mx2])
y = Dense(1024, activation='tanh')(y)
y = Dropout(0.5)(y)
y = Dense(1024, activation='tanh')(y)
y = Dropout(0.5)(y)
y = Dense(3, activation='softmax')(y)  # entailment / contradiction / neutral
model = Model(inputs=[i1, i2], outputs=y)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(train_x, train_y, epochs=10, validation_data=(dev_x, dev_y))
score = model.evaluate(test_x, test_y, verbose=1)
print('Test loss : ', score[0])
print('Test accuracy : ', score[1])
model.save_weights('NLI.h5')
```
| github_jupyter |
```
import datetime
# Age Finder: compute age from the current year and the entered birth year.
birth_year = input('what year were you born?')
age = (datetime.datetime.now().year) - int(birth_year)
# Fixed broken grammar in the user-facing message ("you are will celebrate").
print(f'this year you will celebrate {age} years!')
# Password Checker: echo a masked password and its length.
username = input('What is your name?')
password = input('What is your password?')
pass_len = len(password)
hashed_pass = pass_len * '*'
print(f'Hey {username}, your password {hashed_pass} is {pass_len} characters long.')
# Boolean logic / conditional demos.
is_old = True
age = 20
if is_old and age >= 21:
    print('yay! you can drink!')
elif is_old and age < 21:
    print('you are too young to drink! Here is some milk.')
a = False
b = False
if a and b:  # 'and' needs both truthy -- nothing printed here
    print('and')
c = False
d = True
if c or d:  # 'or' needs at least one truthy -- printed here
    print('or')
is_magician = True
is_expert = False
if is_magician and is_expert:
    print('you are a master magician.')
elif is_magician and not is_expert:
    print("at least you're getting there.")
elif not is_magician:
    print('you need magic powers.')
# Counter exercise: sum the list by accumulating into counter.
my_list = [1,2,3,4,5,6,7,8,9,10]
counter = 0
for num in my_list:
    counter = counter + num
print(counter)
# range(1, 10) yields 1..9 (stop is exclusive).
for num in range(1, 10):
    print(num)
# enumerate pairs each item with its index.
for i, char in enumerate('Hello'):
    print(i, char)
# For range(100) the index and the value coincide, so this prints "50 50".
for i, char in enumerate(list(range(100))):
    if char == 50:
        print(i, char)
#Exercise!
#Display the image below to the right hand side where the 0 is going to be ' ', and the 1 is going to be '*'. This will reveal an image!
picture = [
    [0,0,0,1,0,0,0],
    [0,0,1,1,1,0,0],
    [0,1,1,1,1,1,0],
    [1,1,1,1,1,1,1],
    [0,0,0,1,0,0,0],
    [0,0,0,1,0,0,0]
]
# Render row by row: 1 -> '*', 0 -> ' ' (relies on 0/1 truthiness).
for line in picture:
    new_line = ''
    for num in line:
        if num:
            new_line += '*'
        else:
            new_line += ' '
    print(new_line)
# Check for duplicates in list - don't use a Set
# A dict acts as the "seen" table: first sighting records the letter, every
# later sighting is appended to duplicates (so a triple would appear twice).
some_list = ['a', 'b', 'c', 'b', 'd', 'm', 'n', 'n']
hash_table = {}
duplicates = []
for letter in some_list:
    if letter in hash_table:
        duplicates.append(letter)
    else:
        hash_table[letter] = True
print('duplicates: ' + ', '.join(duplicates))
def checkDriverAge(age=0):
    """Print a driving-eligibility message for the given age (default 0)."""
    age = int(age)  # convert once instead of on every comparison
    if age < 18:
        print("Sorry, you are too young to drive this car. Powering off")
    elif age > 18:
        print("Powering On. Enjoy the ride!")
    else:  # exactly 18
        print("Congratulations on your first year of driving. Enjoy the ride!")
checkDriverAge(92)
#1. Wrap the above code in a function called checkDriverAge(). Whenever you call this function, you will get prompted for age.
#2 Instead of using the input(). Now, make the checkDriverAge() function accept an argument of age, so that if you enter:
#checkDriverAge(92);
#it returns "Powering On. Enjoy the ride!"
#also make it so that the default age is set to 0 if no argument is given.
def highest_even(li):
    """Return the largest even number in li, or None if there is none.

    The original sorted li in place, mutating the caller's list as a side
    effect; filtering + max() leaves the argument untouched.
    """
    evens = [num for num in li if num % 2 == 0]
    if evens:
        return max(evens)
    return None
print(highest_even([10, 2, 3, 4, 5, 8, 11]))
a = 'hellooooooo'
# Walrus operator (3.8+): bind len(a) to n inside the condition.
if ((n := len(a)) > 10):
    print(f'too long {n} elements')
```
| github_jupyter |
# S47 journeys
This notebook looks at S47 journeys and whether they follow the usual trajectory of S47 -> ICPC -> CPP start.
The input: main flatfile CIN with data from all LAs.
The output of this notebook is a table with the columns:
- Source
- Destination
- Count
- Local Authority
- Demographics (gender, age, ethnicity, disability)
This is the shape required to plug the table into a PowerBI Sankey diagram.
```
import os
import pandas as pd
import numpy as np
%run "00-config.ipynb"
%load_ext autoreload
%autoreload 2
```
### Config
#### Filepaths
```
# Input: the combined CIN flatfile; output: the PowerBI Sankey-shaped table.
input_file = os.path.join(flatfile_folder, 'main_flatcin.csv')
output_file = os.path.join(output_folder, 's47-sankey.csv')
```
#### Key assumptions
```
# Max days S47 -> CPP for both events to be considered linked
s47_cpp = 60
# Max days ICPC -> CPP for both events to be considered linked
icpc_cpp = 45
# NOTE(review): the two cutoff dates below hard-code "60 days"/"45 days"
# rather than reusing s47_cpp/icpc_cpp -- keep them in sync if changed.
# Date from which S47 is too recent to determine next journey
s47_max_date = cin_census_close - pd.Timedelta("60 days") # 60 days before CIN Census closes
# Date from which ICPC is too recent to determine next journey
icpc_max_date = cin_census_close - pd.Timedelta("45 days") # 45 days before CIN Census closes
```
### Data wrangling
```
# Load flatfile
df = pd.read_csv(input_file)
# Only keep 2 subsets: S47 events and CPP start events.
# Take explicit copies: a boolean-mask selection may be a view of df, so the
# in-place dropna below would raise SettingWithCopyWarning and could silently
# fail to modify the data.
s47 = df[df.Type == 'S47ActualStartDate'].copy()
cpp = df[df.Type == 'CPPstartDate'].copy()
# Drop empty cols
s47.dropna(axis=1, how='all', inplace=True)
cpp.dropna(axis=1, how='all', inplace=True)
# Merge S47 and CPP together, to match each S47 event with CPP events (if they occurred)
data = s47.merge(cpp[['LAchildID', 'LA', 'CPPstartDate']], how='left', on=['LAchildID', 'LA'])
# We might have merged some CPP events that happened to the child, but before the ICPC or much later.
# In that case, the ICPC and the CPP are not related.
# Rule: if the CPP happened before the ICPC, or more than X days later (defined at top of notebook), they are not related.
# Turn relevant columns into dates
data['S47ActualStartDate'] = pd.to_datetime(data['S47ActualStartDate'])
data['DateOfInitialCPC'] = pd.to_datetime(data['DateOfInitialCPC'])
data['CPPstartDate'] = pd.to_datetime(data['CPPstartDate'])
# Calculate icpc_to_cpp: length of time (whole days) between ICPC and CPP
data['icpc_to_cpp'] = (data['CPPstartDate'] - data['DateOfInitialCPC']).dt.days
# Calculate s47_to_cpp: length of time (whole days) between S47 and CPP
data['s47_to_cpp'] = (data['CPPstartDate'] - data['S47ActualStartDate']).dt.days
# Let's look at the distribution of days between ICPC and CPP start
data.hist(column='icpc_to_cpp')
# We can see that some pairs do not make sense: the CPP happened earlier or much later
# Only keep CPP matches:
#   CPP start within X days of ICPC (defined top of notebook), or
#   CPP start within X days of S47 (defined top of notebook)
cpp_match = data[((data.icpc_to_cpp >= 0) & (data.icpc_to_cpp <= icpc_cpp)) |
                 ((data.s47_to_cpp >= 0) & (data.s47_to_cpp <= s47_cpp))]
# Add the matches back to the list of S47s
s47_outcomes = s47.merge(cpp_match[['Date', 'LAchildID', 'CPPstartDate', 'icpc_to_cpp', 's47_to_cpp']], how='left', on=['Date', 'LAchildID'])
# Turn columns into dates
s47_outcomes['DateOfInitialCPC'] = pd.to_datetime(s47_outcomes['DateOfInitialCPC'])
s47_outcomes['S47ActualStartDate'] = pd.to_datetime(s47_outcomes['S47ActualStartDate'])
s47_outcomes['CPPstartDate'] = pd.to_datetime(s47_outcomes['CPPstartDate'])
```
### Shaping the PowerBI Sankey diagram input
PowerBI requires a dataset with a Source (start of flow) and Destination (end of flow), with a count of occurrences.
https://powerbi.microsoft.com/en-us/blog/visual-awesomeness-unlocked-sankey-diagram/
We have several Source -> Destination options:
- Step 1:
- S47 to ICPC
- S47 to CPP directly
- S47 to nothing (no ICPC nor CPP)
- S47 TBD (S47 occurred within 21 days of closing the CIN Census)
- Step 2:
- ICPC to CPP
- ICPC to no CPP
- ICPC TBD (ICPC occurred within 2 months of closing the CIN Census)
We also want to keep the Demographics columns (age, ethnicity, etc.) to be able to filter the sankeys.
```
# Generate Source and Destination for step 1
step1 = s47_outcomes.copy()
# The source is necessarily S47 for step 1
step1['Source'] = 'S47 strategy discussion'
# The destination varies
step1['Destination'] = np.nan # Create empty col
# S47 -> ICPC
step1.loc[step1['DateOfInitialCPC'].notnull(), 'Destination'] = 'ICPC'
# S47 -> CPP directly
step1.loc[step1['DateOfInitialCPC'].isnull() & step1['CPPstartDate'].notnull(), 'Destination'] = 'CPP start'
# S47 -> TBD (too recent)
step1.loc[step1['Destination'].isnull() & (step1['S47ActualStartDate'] >= s47_max_date), 'Destination'] = 'TBD - S47 too recent'
# S47 -> No ICPC
step1.loc[step1['Destination'].isnull(), 'Destination'] = 'No ICPC nor CPP'
# Look at resulting trends
step1.Destination.value_counts(dropna=False)
# Generate Source and Destination for step 2
# Step 2 starts from all S47 that got to ICPC in step 1.
# .copy() so the assignments below modify a real frame, not a view of step1
# (the original selection triggered pandas' SettingWithCopyWarning here).
step2 = step1[step1.Destination == 'ICPC'].copy()
# The source is necessarily ICPC for step 2
step2['Source'] = 'ICPC'
# The destination varies
step2['Destination'] = np.nan # Empty col
# ICPC -> CPP
step2.loc[step2['CPPstartDate'].notnull(), 'Destination'] = 'CPP start'
# ICPC -> TBD (too recent)
step2.loc[step2['Destination'].isnull() & (step2['DateOfInitialCPC'] >= icpc_max_date), 'Destination'] = 'TBD - ICPC too recent'
# ICPC -> No CPP
step2.loc[step2['Destination'].isnull(), 'Destination'] = 'No CPP'
# Look at resulting trends
step2.Destination.value_counts(dropna=False)
# Bring Steps 1 & 2 together
s47_journey = pd.concat([step1, step2])
# Calculate age of child during the S47, based on cin_census_close (defined in config)
# NOTE(review): this subtracts PersonBirthDate directly from the S47 *year*;
# it only yields an age if PersonBirthDate holds a year number -- confirm.
s47_journey['Age at S47'] = s47_journey['S47ActualStartDate'].dt.year - s47_journey['PersonBirthDate']
# Save
s47_journey.to_csv(output_file, index=False)
```
| github_jupyter |
# Extract Financial Statement Values
Financial statement analysis is critical to every organization to enable companies to make better economic decisions that yields more income in the future. For a given financial statement, a rulebook is followed to extract the values associated with accrual, audit status, balance sheet, measurement date and pension plans as shown (refer rulebook & sample data here)
## Task:
Build an NLP model which analyzes each document, looks for relevant financial terms (described below)
The NLP Model should learn on Documents available under Training data folder using Rules listed here
Apply the same model on Test Data documents to extract relevant financial information
The results need to be updated & uploaded in the sheet provided: ‘Results.csv’ (download dataset)
Note: No training labels are explicitly available for this problem statement. You will be able to test your model's accuracy by submitting values extracted for Test data.
## Data Description:
Columns
Description of Values
Credit Name, State, Security ID, Org ID, FYE
Identifiers from Documents provided to you already in “Results.csv”
Accounting Basis
Identify ‘Basis of Accounting’ as [‘Accrual’, ‘Cash’, ‘Modified Accrual’, ‘Modified Cash’, ‘Regulatory’]
Pension Plan 1 Name*
Pension Plan Identifier for Pension Plan with highest Total Pension Liability (0 if no date is specified)
Pension Plan 1 Measurement Date
Reporting Date for Pension Plan 1 (0 if no date is specified) [DD/MM/YYYY]
Pension Plan 1 Total Pension Liability
Total Pension Plan liability for Pension Plan 1 (0 if no liability is specified) [int64]
Balance Sheet Cash
Total value of Balance sheet-Governmental funds under ‘Cash & Cash Equivalents’ row. This will include all other row items as well which have been highlighted in the rules for cash and cash equivalent. (0 if no balance sheet amount is specified) [int64]
Pension Plan 2 Name*
Pension Plan Identifier for Pension Plan with second highest Total Pension Liability (0 if no date is specified)
Pension Plan 2 Measurement Date
Reporting Date for Pension Plan 2 (0 if no date is specified) [DD/MM/YYYY]
Pension Plan 2 Total Pension Liability
Total Pension Plan liability for Pension Plan 2 (0 if no liability is specified) [int64]
Note: *- Map Pension Plan Names to Industry Standards as specified here
### Data Volume:
Train Data: 479 Documents, 1.5GB
Test Data: 98 Documents, 373.7MB
Submission Format: You need to update Results.csv provided here, with your predictions and upload it online.
Data Files: Download Dataset (~1.7GB)
Evaluation Metric:
[ML_Model] accuracy_score
[Offline] Source Code, Models/Logic used
# Prequisites and Problem at hand
To understand the problem lets looks at given tasks
* Build an NLP model which analyzes each document, looks for relevant financial terms (described below)
* The NLP Model should learn on Documents available under Training data folder using Rules listed here
* Apply the same model on Test Data documents to extract relevant financial information
* The results need to be updated & uploaded in the sheet provided: ‘Results.csv’ (download dataset)
# Approach
### As
### "Note: No training labels are explicitly available for this problem statement. You will be able to test your model's accuracy by submitting values extracted for Test data"
* This was totally based on a **rule-based extraction methodology**: since no training labels were given to us for training a model and then predicting, it was impossible to use any kind of supervised mechanism.
* Though unsupervised methods could have worked but seeing the evaluation metric, rule based extraction best suited it.
* Org ID of "Jefferson Cnty GO _AL_**329986**_G O Municipality" is being changed to map with results.csv
```
from PIL import Image
import pytesseract
import sys
from pdf2image import convert_from_path
import os
import pandas as pd
import numpy as np
# Baseline submission: fill every prediction column with a constant guess
# ('Modified Accrual' + zeros, state code as plan name) to get a floor score.
res=pd.read_csv('../Results5cf9666.csv')
res.head()
res['Accounting Basis']='Modified Accrual'
res['Pension Plan 1 Total Pension Liability']=0
res['Pension Plan 2 Total Pension Liability']=0
res['Balance Sheet Cash']=0
res['Pension Plan 1 Name']=res['State']
res['Pension Plan 2 Name']=res['State']
res.to_csv('newSub4.csv',index=False)
res.head()
```
## Pension Abbs excel
```
# Pension-plan naming conventions: word->abbreviation and state lookup tables.
pen=pd.read_excel('../Pension Plan Naming Conventions.xlsx')
pen
```
### Total Words and their Abbreviation in a dict
```
# Map each full word to its industry-standard abbreviation (whitespace-trimmed).
d_pp={}
for i,k in pen[['Word','Abbreviation']].values:
    d_pp[i.strip()]=k.strip()
```
### Total States and their Abbreviation
```
# Collect all state abbreviations from both abbreviation columns.
pen[['State Abbreviation','State Abbreviation.1']].values
states=pen['State Abbreviation'].dropna().values.tolist()
states.extend(pen['State Abbreviation.1'].dropna().values.tolist())
```
### All test pdf
```
import os
# List the test documents; Org ID is one of the '_'-separated filename fields.
test_pdf=os.listdir('Test Data')
# test_pdf
pd.options.display.max_rows = 100
# res
'Jefferson Cnty GO _AL_10928_G O Municipality & County_County_2017.pdf'.split("_")
len(res.State.values)
```
## Pension Plan 1 and Pension Plan 2 Name Extraction
* Extract POS tagging NNP which are in Pension plan Conventions excel
* Get the sentence with highest NNP tags
```
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
stop_words = set(stopwords.words('english'))
# Sanity check: POS tags the tagger assigns to the convention words themselves.
nltk.pos_tag(d_pp.keys())
# nltk.download('averaged_perceptron_tagger')
def pos(txt):
    """Extract up to two candidate pension-plan name strings from text lines.

    Each line is tokenized, stopword-filtered and POS-tagged; proper nouns
    (NNP) that appear in the naming-convention table d_pp are kept.  Lines
    with at least 4 distinct hits are mapped to their abbreviations and
    joined; the two longest joined strings are returned.

    Returns:
        [name1, name2]; ['', ''] when nothing matched, [name1, '0'] when
        only one candidate was found.  (Word order within a candidate is
        set-dependent, as in the original.)
    """
    candidates = []
    for line in txt:
        words = nltk.word_tokenize(line)
        # removing stop words before tagging
        words = [w for w in words if w not in stop_words]
        tagged = nltk.pos_tag(words)
        hits = {x for x in tagged
                if x[1] == 'NNP' and x[0].capitalize() in d_pp}
        if len(hits) >= 4:
            candidates.append(" ".join(d_pp[x[0].capitalize()] for x in hits))
    # Two longest candidates; removed the unused f2 list and the duplicate
    # sort in the final branch of the original.
    ret_list = sorted(candidates, key=len, reverse=True)[0:2]
    if ret_list == []:
        return ['', '']
    elif len(ret_list) == 1:
        return [ret_list[0], '0']
    return ret_list
# print(test_pdf[40])
# for pdf in test_pdf:
# print(pdf)
# if pdf.startswith('.')==False:
# f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
# # print(f.readlines())
# print(pos(f.readlines()))
# f.close()
# break
```
## Accounting Basis Extraction
* Find the target phrase **basis of accounting** and take the text preceding and following it.
* Count occurrences of the specific words [‘Accrual’, ‘Cash’, ‘Modified Accrual’, ‘Modified Cash’, ‘Regulatory’] in that text
* find most common
```
#accrual, audited, actual , unaudited, estimate, budgeted, projected, training
# [‘Accrual’, ‘Cash’, ‘Modified Accrual’, ‘Modified Cash’, ‘Regulatory’]
from collections import Counter
# def acc_basis(t):
# t=" ".join(t)
# t=t.lower()
# d={}
# word_counts=Counter(t.split())
# d['cash']=word_counts.get('cash')
# d['accrual']=word_counts.get('accrual')
# d['modified accrual']=t.count('modified accrual')
# d['modified cash']=t.count('modified cash')
# d['regulatory']=word_counts.get('regulatory')
# for k,v in d.items():
# if v==None:
# d[k]=0
# d['accrual']=d['accrual']-d['modified accrual']
# accbase=sorted(d.items(), key = lambda x : -x[1])[0][0]
# if d['modified cash']!=0:
# return 'Modified Cash'
# return sorted(d.items(), key = lambda x : -x[1])
def acc_basis1(t):
    """Classify the basis of accounting from a document's text lines.

    Grabs a window around each occurrence of 'basis of accounting'
    (800 chars before, 300 after), counts mentions of each accounting
    basis in the combined windows, and returns the most frequent one.
    Token matching is case-sensitive on the window contents; ties fall
    back to dict insertion order ('cash' first).
    """
    windows = []
    for txt in t:
        idx = txt.lower().find('basis of accounting')
        if idx != -1:
            # Same slice expression as the original (a negative start for
            # idx < 800 relies on Python's slice clamping for short lines).
            windows.append(txt[idx - 800:idx + 300])
    joined = " ".join(windows)  # join once instead of three times
    word_counts = Counter(joined.split())
    d = {}
    d['cash'] = word_counts.get('cash')
    d['accrual'] = word_counts.get('accrual')
    d['modified accrual'] = joined.count('modified accrual')
    d['modified cash'] = joined.count('modified cash')
    d['regulatory'] = word_counts.get('regulatory')
    for k, v in d.items():
        if v is None:
            d[k] = 0
    # 'cash'/'accrual' tokens inside 'modified cash'/'modified accrual'
    # were counted twice above; subtract the modified variants out.
    d['cash'] = d['cash'] - d['modified cash']
    d['accrual'] = d['accrual'] - d['modified accrual']
    return sorted(d.items(), key=lambda x: -x[1])[0][0]
# # print(test_pdf[60])
# for pdf in test_pdf:
# print(pdf)
# if pdf.startswith('.')==False:
# f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
# # print(f.readlines())
# print(acc_basis1(f.readlines()))
# f.close()
# # break
# def pension_details(t):
# d={}
# word_counts=Counter(t.split())
# for k,v in d_pp.items():
# d[k]=word_counts.get(k)
# for k,v in d.items():
# if v==None:
# d[k]=0
# accbase=sorted(d.items(), key = lambda x : -x[1])[0][0]
# return sorted(d.items(), key = lambda x : -x[1])[:7]
```
## Pension Plan 1 and Pension Plan 2 Measurement date Extraction
* Find dates in the text which has **measurement date** in it
* Keep only those dates that fall within the years 2016, 2017, 2018, 2019, 2020
* Return them
```
!pip install datefinder
import dateutil.parser as dparser
from dateutil.parser import parse
def find_date(val):
    """Extract up to two pension-plan measurement dates from text lines.

    A date counts only if its line contains 'measurement date' and its year
    is in 2016-2020.  Relies on the third-party `datefinder` module and on
    `datetime` (imported in a later cell; resolved at call time).

    Returns:
        [date1, date2] as 'Month DD, YYYY' strings; missing slots are 0.
    """
    try:
        dat=[]
        for v in val:
            # Keep only the first qualifying date per line (arr[0]).
            arr=[x.strftime('%B %d, %Y') for x in datefinder.find_dates(v) if datetime.strptime(x.strftime('%B %d, %Y'), '%B %d, %Y').year in [2016,2017,2018,2019,2020] and v.lower().find('measurement date')!=-1]
            if arr!=[]:
                dat.append(arr[0])
        if dat==[]:
            return [0,0]
        elif len(list(set(dat)))==1:
            return [list(set(dat))[0],0]
        else:
            # NOTE(review): set() ordering is arbitrary, so which two dates
            # are returned (and in what order) is not deterministic.
            return list(set(dat))[:2]
    except:
        # Broad except: any parse failure collapses to "no dates found".
        return [0,0]
# # print(test_pdf[60])
# for pdf in test_pdf:
# print(pdf)
# if pdf.startswith('.')==False:
# f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
# # print(f.readlines())
# print(find_date(f.readlines()))
# f.close()
# break
```
## Pension Plan 1 and Pension Plan 2 Total pension Liability Extraction
```
# def tpl(t):
# try:
# amt=[]
# for g in range(len(t)):
# if t[g].lower().find('pension liability')!=-1:
# p=[x for x in nltk.pos_tag((t[g]+t[g-1]+t[g+1]).split()) if x[1]=='JJ' and x[0].startswith('$')]
# if p!=[]:
# amt.append(p[0][0][1:])
# if list(set(amt))==[]:
# return [0,0]
# elif len(list(set(amt)))==1:
# return [list(set(amt))[0],0]
# else:
# return list(set(amt))
# except:
# return [0,0]
def tpl(t):
    """Extract up to two 'total pension liability' dollar amounts.

    Merges the OCR lines pairwise, then for each merged line mentioning
    'pension liability' looks in a 3-line window for a '$'-prefixed token
    that nltk tagged as an adjective (JJ) -- an artifact of how the tagger
    handles '$1,234'-style tokens.  Amounts are normalised via toNumber()
    (defined in a later cell) and zero values dropped.

    Returns:
        [amt1, amt2] as comma-formatted strings; missing slots are 0.
    """
    try:
        # Fuse consecutive line pairs so amounts split across lines join up.
        t = [i + j for i, j in zip(t[::2], t[1::2])]
        amt=[]
        for g in range(len(t)):
            if t[g].lower().find('pension liability')!=-1:
                p=[x for x in nltk.pos_tag((t[g-1]+t[g]+t[g+1]).split()) if x[1]=='JJ' and x[0].startswith('$')]
                if p!=[]:
                    amt.append(p[0][0][1:])
        amt=["{:,}".format(toNumber(x)) for x in amt if "{:,}".format(toNumber(x))!='0']
        # print(amt)
        if list(set(amt))==[]:
            return [0,0]
        elif len(list(set(amt)))==1:
            return [list(set(amt))[0],0]
        else:
            # NOTE(review): set() ordering makes the returned pair order
            # non-deterministic, and t[g+1] can raise IndexError on the last
            # line -- the broad except silently turns that into [0,0].
            return list(set(amt))[:2]
    except:
        return [0,0]
# print(test_pdf[60])
# for pdf in test_pdf[10:20]:
# print(pdf)
# if pdf.startswith('.')==False:
# f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
# # print(f.readlines())
# g=f.readlines()
# print(tpl(g))
# f.close()
# # break
# Dodge City_KS_15059_G O Municipality & County_City _2017.pdf
# ['673,669.0', 0]
# Annapolis_MD_1784_G O Municipality & County_City_2018.pdf
# [0, 0]
# .DS_Store
# Sussex Cnty_DE_13218_G O Municipality & County_County_2018.pdf
# ['104,655,672.0', '138.0']
# Talladega_AL_8999_G O Municipality & County_City_2017.pdf
# ['1.0', '632,981.0']
# Kitsap Cnty_WA_11962_G O Municipality & County_County_2017.pdf
# ['13.0', '353,828.0']
# Sweetwater Cnty_WY_15662_G O Municipality & County_County_2017.pdf
# ['4,453,523.0', 0]
# East Haddam_CT_12833_G O Municipality & County_Town_2018.pdf
# ['84,856.0', 0]
# Pine Bluff_AR_14783_G O Municipality & County_City _2017.pdf
# ['16,573,277.0', '95,986.0']
# Sharon_PA_26199_G O Municipality & County_City_2017.pdf
# [0, 0]
```
## Balance Sheet
```
# Row labels that count as 'Cash & Cash Equivalents' on a governmental-funds
# balance sheet, per the extraction rulebook (matched case-insensitively).
cash_terms=['cash',
            'cash and demand accounts',
            'cash and investments',
            'cash and deposits',
            'cash and time deposits',
            'cash and equivalents',
            'cash in county treasury',
            'cash in revolving fund',
            'cash on hands',
            'cash on hand',
            'cash in banks',
            'collection awaiting deposit',
            'non-pooled cash', 'petty cash', 'pooled cash',
            'demand deposit',
            'demand accounts',
            'equity in cash and investments',
            'equity in pooled cash and cash equivalents']
import re
def toNumber(b):
    """Parse a human-formatted amount (e.g. '$1,234.50') into a float.

    Strips everything except digits and the decimal point, so currency
    symbols and thousands separators are ignored.  Returns 0 when the
    remainder is not a valid number (empty string, multiple dots, ...).
    """
    try:
        # Keep the decimal point: the old r'[^0-9]' pattern deleted it,
        # turning '673,669.0' into 6736690.0 (a 10x inflation).
        return float(re.sub(r'[^0-9.]', "", b))
    except (ValueError, TypeError):
        return 0
def ffff(ind,indgf,t):
    """Scan lines of t from the furthest collected position for a cash row.

    ind/indgf are the positions gathered by balance_sheet() for the
    'balance sheet' / 'governmental funds' keywords; scanning starts at the
    larger of the two maxima.  NOTE(review): the caller appends
    t[x].find(...) -- a *column* offset within a line -- yet this slices t
    by *line*; these values were probably meant to be line indexes; confirm.

    Returns the first large comma-formatted number (>= 6 chars, nltk-tagged
    CD) on a line mentioning any cash term, or None if nothing matches.
    """
    amt=0  # NOTE(review): unused leftover from an earlier version
    ind.sort(reverse=True)
    indgf.sort(reverse=True)
    ix=max(ind[0],indgf[0])
    t=t[ix:]
    c=0  # unused leftover
    for xx in t:
        if [g for g in cash_terms if g in xx.lower()]!=[] and [v for v in nltk.pos_tag(xx.split()) if v[1]=='CD' ]!=[]:
            bs=[b for b in nltk.pos_tag(xx.split()) if b[1]=='CD' and b[0].find(',')!=-1 and len(b[0])>=6]
            if bs!=[]:
                return "{:,}".format(toNumber(bs[0][0]))
            # c+=1
            # if c>=2:
            #     return "{:,}".format(amt)
def ffffxx(t):
    """Fallback scan over all lines of t for a cash-row amount.

    Same matching rule as ffff() but without restricting the start line:
    returns the first comma-formatted number (>= 6 chars, nltk-tagged CD)
    on any line mentioning a cash term, or None if nothing matches.
    """
    amt=0  # unused leftover
    c=0    # unused leftover
    for xx in t:
        if [g for g in cash_terms if g in xx.lower()]!=[] and [v for v in nltk.pos_tag(xx.split()) if v[1]=='CD' ]!=[]:
            bs=[b for b in nltk.pos_tag(xx.split()) if b[1]=='CD' and b[0].find(',')!=-1 and len(b[0])>=6 ]
            if bs!=[]:
                return "{:,}".format(toNumber(bs[0][0]))
            # c+=1
            # if c>=2:
            #     return "{:,}".format(amt)
def balance_sheet(t):
    """Find the 'cash & cash equivalents' amount on the balance sheet.

    Collects the line indexes where 'balance sheet' / 'governmental funds'
    occur and scans from the furthest such line (ffff); if either phrase is
    missing, scans the whole document instead (ffffxx).
    """
    ind = []
    indgf = []
    for x in range(len(t)):
        # BUG FIX: the original appended t[x].find(...) -- the column of the
        # phrase within the line (and searched the non-lowercased line, so
        # often -1) -- but ffff() uses these values as *line* indexes to
        # slice t.  Append the line index itself instead.
        if 'balance sheet' in t[x].lower():
            ind.append(x)
        if 'governmental funds' in t[x].lower():
            indgf.append(x)
    if ind != [] and indgf != []:
        return ffff(ind, indgf, t)
    else:
        return ffffxx(t)
# fil=76
# print(test_pdf[fil])
# for pdf in test_pdf:
# print(pdf)
# if pdf.startswith('.')==False:
# f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
# # print(f.readlines())
# print(balance_sheet(f.readlines()))
# f.close()
# break
```
# Final Prediction method
```
months = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
          'august', 'september', 'october', 'november', 'december']
import datefinder
from datetime import datetime
# Run every extractor over each document's OCR text, appending results in the
# row order of Results.csv (documents matched on the Org ID embedded in the
# '_'-separated PDF filename).
acc_basis_list=[]
pp1name=[]
pp2name=[]
mm1date=[]
mm2date=[]
pp1amt=[]
pp2amt=[]
bsam=[]
for st,org in res[['State','Org ID']].values:
    for pdf in test_pdf:
        # Match document to results row by Org ID; skip dotfiles like .DS_Store.
        if str(org) in pdf.split("_") and pdf.startswith('.')==False:
            print(pdf.split('&')[0],org)
            if os.path.exists(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt'):
                #reading text file (OCR output for this document)
                f=open(pdf.split('&')[0]+'/out_text'+pdf.split('&')[0]+'.txt',"r+")
                ftext=f.readlines()
                # Accounting basis
                acc_basis_list.append(acc_basis1(ftext))
                # Pension plan names, prefixed with the state code
                jj=pos(ftext)
                pp1name.append(st+'_'+jj[0])
                pp2name.append(st+'_'+jj[1])
                # Pension Plan Measurement date
                dt=find_date(ftext)
                mm1date.append(dt[0])
                mm2date.append(dt[1])
                # Pension Plan total pension liability
                ppamt=tpl(ftext)
                pp1amt.append(ppamt[0])
                pp2amt.append(ppamt[1])
                # Balance sheet
                bsam.append(balance_sheet(ftext))
                f.close()
            # Stop after the first matching document for this Org ID.
            break
        else:
            print('>>>>>>>>',pdf)
```
# Submission
```
# Reload the pristine results sheet before writing the submission variants,
# and check the extracted lists line up with its rows.
res=pd.read_csv('../Results5cf9666.csv')
res.head()
len(pp1name),res.shape
```
# All values - 1
```
# Variant 1: submit every extracted field.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=pp1amt
res['Pension Plan 2 Total Pension Liability']=pp2amt
res['Pension Plan 1 Measurement Date']=mm1date
res['Pension Plan 2 Measurement Date']=mm2date
res['Balance Sheet Cash']=bsam
res['Pension Plan 1 Name']=pp1name
res['Pension Plan 2 Name']=pp2name
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted1_bsam.csv',index=False)
res.head()
```
# Values -2
```
# Variant 2: accounting basis + plan names only; everything else zeroed.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=0
res['Pension Plan 2 Total Pension Liability']=0
res['Pension Plan 1 Measurement Date']=0
res['Pension Plan 2 Measurement Date']=0
res['Balance Sheet Cash']=0
res['Pension Plan 1 Name']=pp1name
res['Pension Plan 2 Name']=pp2name
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted_Zero_1.csv',index=False)
res.head()
#28.XX
```
# Values - 3 - Best
* Including any of the other extracted values decreases the score, so submitting only the accounting basis (with everything else zeroed) gives the best result.
```
# Variant 3 (best score): accounting basis only, every other field zeroed.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=0
res['Pension Plan 2 Total Pension Liability']=0
res['Pension Plan 1 Measurement Date']=0
res['Pension Plan 2 Measurement Date']=0
res['Balance Sheet Cash']=0
res['Pension Plan 1 Name']=0
res['Pension Plan 2 Name']=0
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted_Zero_All.csv',index=False)
res.head()
#35.7
```
# Values - 4
```
# Variant 4: accounting basis + balance-sheet cash; rest zeroed.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=0
res['Pension Plan 2 Total Pension Liability']=0
res['Pension Plan 1 Measurement Date']=0
res['Pension Plan 2 Measurement Date']=0
res['Balance Sheet Cash']=bsam
res['Pension Plan 1 Name']=0
res['Pension Plan 2 Name']=0
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted_Zero_bsam.csv',index=False)
res.head()
#reduced a bit 35.45
```
# Values - 5
```
# Variant 5: accounting basis + total pension liabilities; rest zeroed.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=pp1amt
# NOTE(review): Plan 2's liability is assigned pp1amt here as well -- this
# looks like a copy-paste slip (pp2amt was presumably intended); confirm.
res['Pension Plan 2 Total Pension Liability']=pp1amt
res['Pension Plan 1 Measurement Date']=0
res['Pension Plan 2 Measurement Date']=0
res['Balance Sheet Cash']=0
res['Pension Plan 1 Name']=0
res['Pension Plan 2 Name']=0
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted_Zero_tpl.csv',index=False)
res.head()
#reduced a bit 33.67
```
# Values 6
```
# Variant 6: accounting basis + measurement dates; rest zeroed.
res['Accounting Basis']=acc_basis_list
res['Pension Plan 1 Total Pension Liability']=0
res['Pension Plan 2 Total Pension Liability']=0
res['Pension Plan 1 Measurement Date']=mm1date
res['Pension Plan 2 Measurement Date']=mm2date
res['Balance Sheet Cash']=0
res['Pension Plan 1 Name']=0
res['Pension Plan 2 Name']=0
res['Accounting Basis']=res['Accounting Basis'].apply(lambda x: x.capitalize())
res.to_csv('submission_submitted_Zero_date.csv',index=False)
res.head()
#reduced a bit 33.54
```
| github_jupyter |
<img src="https://juniorworld.github.io/python-workshop-2018/img/portfolio/week10.jpg" width="350px">
---
# Supervised Machine Learning
- Train a model to give predictions based on **labeled data** (X + Y)
- Information Retrieval: KNN
- Regression:
- Linear regression
- Generalize Linear Model: logistic (binary), poisson (count)
- Classification:
- Binary Classification: Naive Bayesian classifier
- Multiclass Classification: Multinomial Bayesian classifier, KNN
- (Advanced) Deep Neural Network
## Procedure:
- STEP 1: Split dataset
- 2 parts: train vs test, e.g. 60:40 or 70:30
- 3 parts: train vs test vs validation: e.g. 60:30:10 or 70:20:10 [not usual]
- STEP 2: Train the model
- STEP 3: Test the model
- STEP 4: Parameter tuning. If the result is not satisfactory, retrain the model with new parameters and retest the newly trained model.
- STEP 5: Report the model performance with Test/Validation data.
- STEP 6: Apply the model to new data set.
## K-Nearest Neighbors (KNN)
- Distance-based Spatial Voting Model
- Purpose: Retrieve the most similar information from database + Classify a new point according its nearest neighbors
- Input: a set of data with labels
- Output: K nearest neighbors
- Assign the category according K's labels
- Parameter: K
|Movie|#Fight scenes|#Kiss scenes|Genre|
|-----|:-----------:|:----------:|:---:|
|California Man|3|104|Love|
|He's not that into you|2|100|Love|
|Beautiful Woman|1|81|Love|
|Kevin Longblade|101|10|Action|
|Robo Slayer 3000|99|5|Action|
|Amped II|98|2|Action|
|<font style='color:blue'>XXXXX</font>|<font style='color:blue'>18</font>|<font style='color:blue'>90</font>|<font style='color:red'>?</font>|
```
import pandas as pd
import numpy as np
from sklearn.metrics import *
from sklearn.datasets import load_iris
# Random 100/50 train/test split of the 150-sample iris dataset.
iris=load_iris()
train_index=np.random.choice(range(150),100,replace=False)
train_X=iris.data[train_index] #extract 100 data records as our training data
train_Y=iris.target[train_index] #training labels
test_index=[i for i in range(150) if i not in train_index]  # the 50 indices not drawn for training
test_X=iris.data[test_index]
test_Y=iris.target[test_index]
print(train_X.shape)
print(test_X.shape)
train_X[0] #first data point in training set
test_X[0] #first test point
train_Y[0]
#distance between first test data and first train data
distance=np.linalg.norm(test_X[0]-train_X[0])  # Euclidean (L2) distance
distance
#distances between first test data and ALL train data
distances=[]
for i in train_X:
    distance=np.linalg.norm(test_X[0]-i)
    distances.append(distance)
len(distances)  # one distance per training point -> 100
```
**Suppose K=4**
```
np.argsort([2,1,3,4])  # argsort returns the indices that would sort the array
np.argsort(distances)[:4] #extract the indexes of K smallest distances (K = 4)
#get the labels of those points
KNNs=train_Y[np.argsort(distances)[:4]]
KNNs
a=[1,2,3,4,5,5,4,4,4,4,2,1]
pd.Series(a).value_counts()  # value_counts sorts by frequency, most common first
#find the most frequent label and use it as your predicted label for first testing point
pd.Series(KNNs).value_counts().index[0]
#create a for loop to go over every testing data and predict their labels
predict_Y=[]
for j in test_X:
    distances=[]
    for i in train_X:
        distance=np.linalg.norm(j-i)
        distances.append(distance)
    KNN_index=np.argsort(distances)[:4]  # indices of the 4 nearest training points
    KNNs=train_Y[KNN_index]
    y=pd.Series(KNNs).value_counts().index[0]  # majority vote among neighbors
    predict_Y.append(y)
```
## Performance diagnosis
- **Accuracy** rate:
- formula: true predictions/total sample size
- **Precision** rate:
- formula: true positive/predicted positive
- **macro**: calculate the precision of each label and get their means
- micro: sum up the number of true positive and get the total precision [= accuracy]
- **Recall** rate:
- formula: true positive/real positive
- **macro**
- micro
- **F1** score
<img src='https://wikimedia.org/api/rest_v1/media/math/render/svg/057ffc6b4fa80dc1c0e1f2f1f6b598c38cdd7c23'>
```
# Confusion matrix: rows = predicted labels, columns = true labels.
diagnosis=pd.crosstab(np.array(predict_Y),test_Y, rownames=['predict'], colnames=['real'])
diagnosis
np.diagonal(diagnosis) #extract numbers on the diagonal (correct predictions per class)
#Metric1: ACC = correct predictions / total sample size
accuracy_rate=sum(np.diagonal(diagnosis))/len(predict_Y)
accuracy_rate
#Metric2: precision (macro-averaged)
precisions=np.diagonal(diagnosis)/np.sum(diagnosis,axis=1) #diagonals over row sums (predicted positives)
precision_rate=np.mean(precisions)
precision_rate
#Metric3: recall (macro-averaged)
recalls=np.diagonal(diagnosis)/np.sum(diagnosis,axis=0) #diagonals over col sums (real positives)
recall_rate=np.mean(recalls)
recall_rate
#Metric4: F1 = harmonic mean of precision and recall, per class, then averaged
f1s=2*precisions*recalls/(precisions+recalls)
F1=np.mean(f1s)
F1
# Cross-check the hand-rolled metrics against scikit-learn (macro averaging).
print(accuracy_score(test_Y, predict_Y))
print(precision_score(test_Y, predict_Y, average='macro'))
print(recall_score(test_Y, predict_Y, average='macro'))
print(f1_score(test_Y, predict_Y, average='macro'))
# With micro averaging, precision = recall = f1 = accuracy.
print(accuracy_score(test_Y, predict_Y))
print(precision_score(test_Y, predict_Y, average='micro'))
print(recall_score(test_Y, predict_Y, average='micro'))
print(f1_score(test_Y, predict_Y, average='micro'))
```
## Practice
Set K=6. Please apply KNN technique to digit data:
1. Split the data into training set (1000 samples) and testing set (797 samples).
2. Apply KNN
3. Report model performance metrics (accuracy, precision, recall, f1)
```
from sklearn.datasets import load_digits
digits = load_digits()
digits.data.shape  # (1797, 64): 8x8 grayscale digit images, flattened
#WRITE YOUR CODE HERE
# Practice solution: 1000/797 split, then KNN with K = 6.
train_index=np.random.choice(range(1797),1000,replace=False)
train_X=digits.data[train_index] #extract 1000 data records as our training data
train_Y=digits.target[train_index]
test_index=[i for i in range(1797) if i not in train_index]
test_X=digits.data[test_index]
test_Y=digits.target[test_index]
predict_Y=[]
for j in test_X:
    distances=[]
    for i in train_X:
        distance=np.linalg.norm(j-i)
        distances.append(distance)
    KNN_index=np.argsort(distances)[:6]  # K = 6 nearest neighbors
    KNNs=train_Y[KNN_index]
    y=pd.Series(KNNs).value_counts().index[0]  # majority vote
    predict_Y.append(y)
len(predict_Y)  # should equal the test-set size (797)
print(accuracy_score(test_Y, predict_Y))
print(precision_score(test_Y, predict_Y, average='macro'))
print(recall_score(test_Y, predict_Y, average='macro'))
print(f1_score(test_Y, predict_Y, average='macro'))
# Encode string labels as integers: each label maps to its position in the
# sorted list of unique labels (np.unique returns sorted values).
labels=['dem','rep','dem','rep','dem','dem']
unique_labels=list(np.unique(labels))
labels_num=[unique_labels.index(label) for label in labels]
labels_num
```
## Parameter Tuning
- Purpose: find the best parameter
- For KNN, the only parameter is K
```
#Wrap previous lines into a function
def KNN(K,train_X,train_Y,test_X,test_Y):
    """Predict a label for each row of test_X by majority vote among its
    K nearest (Euclidean) neighbors in train_X.

    test_Y is accepted but unused; it is kept for symmetry with the rest of
    the notebook's calls.
    """
    predictions = []
    for sample in test_X:
        # distance from this test sample to every training point
        dists = [np.linalg.norm(sample - neighbor) for neighbor in train_X]
        # labels of the K closest training points
        nearest_labels = train_Y[np.argsort(dists)[:K]]
        # the most frequent neighbor label wins
        predictions.append(pd.Series(nearest_labels).value_counts().index[0])
    return np.array(predictions)
# Parameter tuning: sweep K from 1 to 9 and compare macro-F1 on the digits split.
f1s=[]
for K in range(1,10):
    predict_Y=KNN(K,train_X,train_Y,test_X,test_Y)
    f1s.append(f1_score(test_Y, predict_Y, average='macro'))
f1s #K=1, nearest neighbor can inform us more accurately about labels
# Apply the same KNN classifier to the movie dataset (fight/kiss scene counts -> genre).
movie_df=pd.read_csv('doc/movies.csv')
movie_df.head()
len(np.unique(movie_df['Genre']))
train_index=np.random.choice(range(movie_df.shape[0]),20,replace=False)
train_X=movie_df.iloc[train_index,2:].get_values()  # NOTE(review): .get_values() was removed in pandas >= 1.0 — use .to_numpy() on modern pandas
train_Y=movie_df.iloc[train_index,1].get_values()
test_index=[i for i in range(movie_df.shape[0]) if i not in train_index]
test_X=movie_df.iloc[test_index,2:].get_values()
test_Y=movie_df.iloc[test_index,1].get_values()
KNN(1,train_X,train_Y,test_X,test_Y)
test_Y  # compare the predictions above against the true genres
```
---
## Break
---
## Decision Tree
<img src='https://cdn-images-1.medium.com/max/900/1*XMId5sJqPtm8-RIwVVz2tg.png' width='300px' align='left'>
- Decision Tree is a non-parametric supervised learning method.
- [Advanced] Random Forest
- STEP 1: Calculate the entropy of labels
- STEP 2: Choose a variable and split the dataset along that variable
- STEP 3: Calculate the entropy of splitting
- STEP 4: Repeat STEP 2-3 for all variables
- STEP 5: Find the variable with greatest information gain
- STEP 6: Add it to the root
- STEP 7: Repeat STEP 2-6 for all variables
- STEP 8: Build up the tree
### Entropy
- Entropy: Information load
- formula: <img src='https://wikimedia.org/api/rest_v1/media/math/render/svg/7de5d59a442f5305853d4392826b1f51dc43f6d0' width='200px'>
```
def entropy(data):
    """Shannon entropy (in bits) of the value distribution in `data`."""
    # relative frequency of each distinct value
    probs = pd.Series(data).value_counts(normalize=True)
    # H = -sum(p * log2(p))
    return (-probs * np.log2(probs)).sum()
# Entropy demos: more mixing -> higher entropy.
a=[2,2,2,2,2,2,2,2] #one group -> entropy 0
b=[1,1,1,1,2,2,2,2] #two balanced groups -> maximal entropy (1 bit)
c=[1,2,2,2,2,2,2,2] #two unbalanced groups
Ha=entropy(a)
Hb=entropy(b)
Hc=entropy(c)
print(Ha)
print(Hb)
print(Hc)
#to what degree one variable can inform us about another variable
var1=['red','blue','red','blue']
var2=[0,1,1,1]
df=pd.DataFrame({'color':var1,'number':var2})
df
#split the data into two parts and check the entropy of 'number'
H1=entropy(df[df['color']=='red']['number'])
H1
H2=entropy(df[df['color']=='blue']['number']) #extremely certain: all values identical
H2
H=H1+H2 #integrated entropy for two groups
# NOTE(review): this sums the two group entropies; standard information gain
# uses the size-weighted average of group entropies instead.
H
# Put iris into a DataFrame: four feature columns plus a 'label' column.
iris_df=pd.DataFrame(iris.data,columns=iris.feature_names)
iris_df['label']=iris.target
iris_df.head()
#Use threshold of sepal length at 5.0 first and find out the entropy of this splitting
threshold=5
H1=entropy(iris_df[iris_df['sepal length (cm)']>=threshold]['label'])
H2=entropy(iris_df[iris_df['sepal length (cm)']<threshold]['label'])
H1+H2
iris_df.iloc[:,0] #reference by column index, rather than column name
#Find out the threshold that can produce minimal entropy
def best_threshold(data,col): #col is the column index that you want to apply threshold searching
    """Scan every unique value of feature column `col` as a candidate split point.

    For each candidate the rows are split into a `>= threshold` group and a
    `< threshold` group, and the entropies of their 'label' columns are summed
    (the same unweighted scheme as the manual sepal-length example above).

    Returns (best threshold, minimal summed entropy).
    """
    thresholds=np.unique(data.iloc[:,col]) #threshold candidates
    Hs=[]
    for threshold in thresholds:
        # completed exercise: summed label entropy of the two groups this split produces
        H_upper=entropy(data[data.iloc[:,col]>=threshold]['label'])
        H_lower=entropy(data[data.iloc[:,col]<threshold]['label'])
        Hs.append(H_upper+H_lower)
    best_threshold=thresholds[np.argmin(Hs)] #best threshold is the one with minimal entropy
    min_Hs=min(Hs)
    return(best_threshold,min_Hs)
#run over all columns and find out the best feature with smallest entropy
Hs=[]
thresholds=[]
for i in range(4):  # the four iris feature columns (column 4 is the label)
    threshold,H=best_threshold(iris_df,i)
    thresholds.append(threshold)
    Hs.append(H)
Hs
np.argmin(Hs) #the root feature is petal length as it has the smallest entropy -> it can inform us about labels at most
def best_feature(data):
    """Return (column index, threshold) of the feature whose best split yields
    the lowest summed entropy.

    Generalized from the original hard-coded ``range(4)``: every column except
    the last is treated as a candidate feature (assumes 'label' is the final
    column, as in iris_df above).
    """
    Hs=[]
    thresholds=[]
    for i in range(data.shape[1]-1):  # all feature columns, excluding the trailing label column
        threshold,H=best_threshold(data,i)
        thresholds.append(threshold)
        Hs.append(H)
    best=np.argmin(Hs)  # index of the feature with minimal entropy (computed once)
    return(best,thresholds[best])
```
## Build up Decision Tree
- RULE: Assign the feature according to their entropy in ascending order
- Create a dictionary about Tree:
- Four elements
- 'col' is the column index used at current level
- 'threshold' is its threshold
- 'greater' contains a dictionary about its child leaf for data points greater than the threshold
- 'smaller' contains a dictionary about its child leaf for data points smaller than the threshold
>```python
{'col':2,
'threshold':thresholds[2],
'larger':{ #containing a dictionary for child leaf where data points greater than threshold will go
'col': ...,
'threshold': ...,
'larger': ...,
'smaller': ...
},
'smaller':{ #containing a dictionary for child leaf where data points smaller than threshold will go
'col': ...,
'threshold': ...,
'larger': ...,
'smaller': ...
}
}
```
```
def create_tree(data,max_group_size):
    """Recursively build a decision tree as a nested dict
    {'col', 'threshold', 'larger', 'smaller'}.

    Splitting stops — returning the majority label as a leaf — when the node
    is pure, when it has at most `max_group_size` rows, or when the best
    split fails to actually separate the data.
    """
    # Keep splitting only while labels are mixed AND the group is big enough.
    if len(np.unique(data['label']))>1 and data.shape[0]>max_group_size:
        feature,threshold=best_feature(data)
        feature_name=data.columns[feature]
        tree_dict={'col':feature,'threshold':threshold}
        #split dataset into two parts: larger and smaller than threshold
        #this is the larger part
        subset1=data[data[feature_name]>=threshold]
        if subset1.shape[0] == data.shape[0]: #check whether dataset is really split into two parts; if not, make this node a majority-label leaf
            return(data['label'].value_counts().index[0])
        tree_dict['larger']=create_tree(subset1,max_group_size)
        #this is the smaller part
        subset2=data[data[feature_name]<threshold]
        if subset2.shape[0] == data.shape[0]:
            return(data['label'].value_counts().index[0])
        tree_dict['smaller']=create_tree(subset2,max_group_size)
        return(tree_dict)
    else:
        # Leaf: the most frequent label in this group.
        return(data['label'].value_counts().index[0])
# NOTE(review): test_iris is sampled from the full iris_df, so it can overlap
# with train_iris — the accuracy measured below is optimistic; a disjoint
# split would be a fairer evaluation.
train_iris=iris_df.sample(n=100)
test_iris=iris_df.sample(n=50)
tree_dict=create_tree(train_iris,50)  # stop splitting groups of <= 50 rows (shallow tree)
tree_dict
tree_dict=create_tree(train_iris,30)  # smaller stopping size -> deeper tree
tree_dict
def apply_tree(point,tree,predicts):
    """Route one data point down `tree` and append the resulting leaf label
    to the `predicts` list (mutated in place)."""
    # pick the branch according to the threshold test at this node
    branch = 'larger' if point[tree['col']] >= tree['threshold'] else 'smaller'
    child = tree[branch]
    if np.isscalar(child):
        # reached a leaf: the stored value is the predicted class label
        predicts.append(child)
    else:
        # internal node: keep descending
        apply_tree(point, child, predicts)
# Classify every row of the test set and measure accuracy.
predicts=[]
for i in range(test_iris.shape[0]):
    apply_tree(test_iris.iloc[i],tree_dict,predicts)
accuracy_score(predicts,test_iris['label'])
```
| github_jupyter |
```
!git clone https://github.com/adobe-research/deft_corpus.git
!unzip src.zip
```
# Loading The Data
```
from source.data_loader import DeftCorpusLoader
# Load the DEFT corpus sentence-classification splits
# (binary task: does the sentence contain a definition?).
loader = DeftCorpusLoader('deft_corpus/data')
train_df, dev_df = loader.load_classification_data()
train_df.head()
```
# Imports
```
import pandas as pd
import numpy as np
from tqdm import tqdm
from gensim.models import Doc2Vec
from gensim.models import Word2Vec
from sklearn import utils
from sklearn.model_selection import train_test_split
import gensim
from sklearn.linear_model import LogisticRegression
from gensim.models.doc2vec import TaggedDocument
import re
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Bidirectional
from tensorflow.keras.layers import Embedding
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Activation, GlobalMaxPooling1D, Dropout, Flatten
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
```
# Data Preprocessing
```
# Tokenize/clean both splits in place using the corpus loader's helpers
# (adds the 'Parsed' token-list column used below).
loader.preprocess_data(train_df)
loader.clean_data(train_df)
loader.preprocess_data(dev_df)
loader.clean_data(dev_df)
train_df.head()
# Vocabulary statistics over the parsed token lists.
vocab = np.unique([y for x in train_df['Parsed'] for y in x])
vocab_size = len(vocab)
max_length = np.max([np.count_nonzero(x) for x in train_df['Parsed']])  # longest sentence (token count)
avg_length = int(np.ceil(np.average([np.count_nonzero(x) for x in train_df['Parsed']])))  # mean sentence length, rounded up
len(train_df['HasDef'])
# Class balance of the binary target.
train_positive_class_length = np.count_nonzero([x for x in train_df['HasDef'] if x == 1])
train_negative_class_length = np.abs(len(train_df['HasDef']) - train_positive_class_length)
MAX_NB_WORDS = vocab_size # max no. of words for tokenizer
MAX_SEQUENCE_LENGTH = avg_length # max length of each entry (sentence), including padding
EMBEDDING_DIM = 100 # embedding dimensions for word vectors (word2vec/GloVe)
GLOVE_DIR = "glove.6B."+str(EMBEDDING_DIM)+"d.txt"
```
# LSTM
Long short-term memory (LSTM) is an artificial recurrent neural network (RNN) architecture. A common LSTM unit is composed of a cell, an input gate, an output gate and a forget gate. The cell remembers values over arbitrary time intervals and the three gates regulate the flow of information into and out of the cell.
LSTM networks are well-suited to classifying, processing and making predictions based on time series data, since there can be lags of unknown duration between important events in a time series.
Intuitively, the cell is responsible for keeping track of the dependencies between the elements in the input sequence. The input gate controls the extent to which a new value flows into the cell, the forget gate controls the extent to which a value remains in the cell and the output gate controls the extent to which the value in the cell is used to compute the output activation of the LSTM unit. The activation function of the LSTM gates is often the logistic sigmoid function.

## Data Preprocessing for Model
To use keras and make a model with an embedding layer as an input layer we need each word in the vocab to be represented by a number so the Tokenizer class is used. The input, also, has to be a vector of numbers and that vector has to be of the same size for all the documents. The average length is taken and extra words are removed from the vectors and padding is used if the length is less than the average.
```
# Map words to integer ids and pad every sentence to the same length so the
# Embedding layer receives fixed-size input.
tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(train_df['Parsed'])
train_sequences = tokenizer.texts_to_sequences(train_df['Parsed'])
word_index = tokenizer.word_index  # 1-based word -> id mapping (0 is reserved for padding)
trunc_type = 'post'
padding_type = 'post'
train_padded = pad_sequences(train_sequences, maxlen=avg_length, padding=padding_type, truncating=trunc_type)
# Sanity checks: raw sequence lengths vary, padded lengths are all avg_length.
print(len(train_sequences[0]))
print(len(train_padded[0]))
print(len(train_sequences[1]))
print(len(train_padded[1]))
print(len(train_sequences[10]))
print(len(train_padded[10]))
print(train_padded[10])
dev_sequences = tokenizer.texts_to_sequences(dev_df['Parsed'])
# fix: pad dev to the SAME length as the training data (avg_length). The
# original used max_length here, so train and dev batches had different
# sequence lengths.
dev_padded = pad_sequences(dev_sequences, maxlen=avg_length, padding=padding_type, truncating=trunc_type)
print(len(dev_sequences))
print(dev_padded.shape)
```
## Word2Vec Model
The first approach to use with the LSTM is Word2Vec. It's used as the pretrained, freezed embeddings to the embeddings layer. Firstly, the word2vec continous bag of words model is trained on the train dataset. The embeddings of the dataset's vocab are added to the embedding layer and freezed.
```
# Train a Word2Vec model (100-dim vectors) on the parsed training sentences.
w2v_model = Word2Vec(size=100, min_count=2, window=5, iter=100)
w2v_model.build_vocab(train_df['Parsed'])
w2v_model.train(train_df['Parsed'], total_examples=w2v_model.corpus_count, epochs=w2v_model.epochs)
w2v_model.wv.init_sims(replace=True)  # L2-normalize the vectors in place
w2v_model.wv.syn0.shape  # NOTE(review): syn0/init_sims are gensim 3.x APIs, removed in 4.x (use .vectors)
w2v_pretrained_weights = w2v_model.wv.syn0
vocab_size, emdedding_size = w2v_pretrained_weights.shape  # NB: this overwrites the tokenizer vocab_size
print('Result embedding shape:', w2v_pretrained_weights.shape)
print('Checking similar words:')
for word in ['model', 'network', 'train', 'learn']:
    most_similar = ', '.join('%s (%.2f)' % (similar, dist) for similar, dist in w2v_model.most_similar(word)[:8])
    print('  %s -> %s' % (word, most_similar))
def word2idx(word):
    """Map a vocabulary word to its integer index in the trained Word2Vec model."""
    # fix: the model variable is `w2v_model`; the original referenced an
    # undefined name `word_model` and raised NameError when called.
    return w2v_model.wv.vocab[word].index
def idx2word(idx):
    """Inverse of word2idx: integer index -> vocabulary word."""
    return w2v_model.wv.index2word[idx]
```
## Glove Pretrained Embeddings
The second approach used with LSTM is using Glove's embeddings as the fixed embeddings of the embeddings layer. GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and the resulting representations showcase interesting linear substructures of the word vector space. The network is used with freezing the weights and with training them to experiment on the different results.
```
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip glove.6B.zip
# Build the GloVe embedding matrix for our vocabulary.
# (Removed the dead `word, i = zip(*word_index.items())` line — both names
# were immediately reassigned below and the tuple was never used.)
embeddings_index = {}
print('Loading GloVe from:', GLOVE_DIR,'...', end='')
# `with` + explicit encoding: the file is UTF-8 and this guarantees it is
# closed even if parsing fails.
with open(GLOVE_DIR, encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        embeddings_index[word] = np.asarray(values[1:], dtype='float32')
print("Done.\n Proceeding with Embedding Matrix...", end="")
# Unmatched words keep random vectors.
glove_embedding_matrix = np.random.random((len(word_index), EMBEDDING_DIM))
# NOTE(review): Keras word_index is 1-based (0 is reserved for padding), so
# storing vectors at i-1 shifts every word by one relative to the padded id
# sequences fed to the Embedding layer — confirm this offset is intended.
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        glove_embedding_matrix[i-1] = embedding_vector
print(" Completed!")
```
## Model Definition
```
def get_lstm_embeddings_model(weights=None, weights_trainable=False):
    """Build and compile a Bidirectional-LSTM binary classifier.

    weights: optional pre-trained embedding matrix (vocab_size x EMBEDDING_DIM).
             Default changed from the mutable `[]` (a Python anti-pattern; and
             `ndarray != []` relies on fragile NumPy broadcasting) to None,
             meaning randomly initialized embeddings.
    weights_trainable: whether the embedding weights are updated during training.
    """
    model = Sequential()
    # Keras expects a *list* of weight arrays for the `weights` argument.
    embedding_weights = None if weights is None else [weights]
    model.add(Embedding(input_dim=vocab_size, output_dim=EMBEDDING_DIM, weights=embedding_weights, trainable=weights_trainable))
    model.add(Bidirectional(LSTM(units=EMBEDDING_DIM)))
    model.add(Dense(vocab_size, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))  # binary HasDef output
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Use the first half of dev for validation during training; the second half is held out for final testing.
validation_index = int(len(dev_padded)/2)
```
## Training Using Our Word2Vec Pretrained Weights
```
# Train the LSTM with our Word2Vec embeddings, fine-tuned (trainable=True).
num_epochs = 3
word2VecLSTMModel = get_lstm_embeddings_model(w2v_pretrained_weights, True)
history = word2VecLSTMModel.fit(train_padded, train_df['HasDef'].values, epochs=num_epochs, validation_data=(dev_padded[:validation_index], dev_df['HasDef'].values[:validation_index]), verbose=2)
```
### Inference
```
from sklearn.metrics import classification_report
# Threshold the sigmoid outputs at 0.5 and report metrics on the held-out half of dev.
predicted_labels_word2vec_lstm = [1 if x>0.5 else 0 for x in word2VecLSTMModel.predict(dev_padded[validation_index:])]
print('Dev classification report:\n {}'.format(classification_report(dev_df['HasDef'].values[validation_index:], predicted_labels_word2vec_lstm)))
```
## Training Using Glove 6B 100 Dim Pretrained Weights
```
# Restore vocab_size to the tokenizer vocabulary size — it was overwritten
# above with the (smaller) Word2Vec vocabulary size.
vocab_size = len(vocab)
```
### Trainable Weights
```
# Train with GloVe embeddings, fine-tuned (trainable=True).
num_epochs = 3
glove_lstm_trainable_weights_model = get_lstm_embeddings_model(glove_embedding_matrix, True)
history = glove_lstm_trainable_weights_model.fit(train_padded, train_df['HasDef'].values, epochs=num_epochs, validation_data=(dev_padded[:validation_index], dev_df['HasDef'].values[:validation_index]), verbose=2)
```
#### Inference
```
from sklearn.metrics import classification_report
# Evaluate the fine-tuned GloVe model on the held-out half of dev (0.5 threshold).
predicted_labels_glove_lstm = [1 if x>0.5 else 0 for x in glove_lstm_trainable_weights_model.predict(dev_padded[validation_index:])]
print('Dev classification report:\n {}'.format(classification_report(dev_df['HasDef'].values[validation_index:], predicted_labels_glove_lstm)))
```
### Non Trainable Weights
```
# Train with GloVe embeddings kept frozen (trainable=False).
num_epochs = 3
glove_lstm_freezed_weights_model = get_lstm_embeddings_model(glove_embedding_matrix, False)
history = glove_lstm_freezed_weights_model.fit(train_padded, train_df['HasDef'].values, epochs=num_epochs, validation_data=(dev_padded[:validation_index], dev_df['HasDef'].values[:validation_index]), verbose=2)
```
#### Inference
```
from sklearn.metrics import classification_report
# Evaluate the frozen-embeddings GloVe model on the held-out half of dev (0.5 threshold).
predicted_labels_glove_lstm_freezed_weights = [1 if x>0.5 else 0 for x in glove_lstm_freezed_weights_model.predict(dev_padded[validation_index:])]
print('Dev classification report:\n {}'.format(classification_report(dev_df['HasDef'].values[validation_index:], predicted_labels_glove_lstm_freezed_weights)))
```
| github_jupyter |
### *The Leading Edge*
# Machine learning contest 2016
**Welcome to an experiment!**
Your mission, should you choose to accept it, is to make the best lithology prediction you can. We want you to try to beat the accuracy score Brendon Hall achieved in his Geophysical Tutorial (TLE, October 2016).
First, read the [open access](https://en.wikipedia.org/wiki/Open_access) tutorial by Brendon in [the October issue of *The Leading Edge*](http://library.seg.org/toc/leedff/35/8).
Here's the text of that box again:
> I hope you enjoyed this month's tutorial. It picks up on a recent wave of interest in artificial intelligence approaches to prediction. I love that Brendon shows how approachable the techniques are — the core part of the process only amounts to a few dozen lines of fairly readable Python code. All the tools are free and open source, it's just a matter of playing with them and learning a bit about data science.
> In the blind test, Brendon's model achieves an accuracy of 43% with exact facies. We think the readers of this column can beat this — and we invite you to have a go. The repository at [github.com/seg/2016-ml-contest](http://github.com/seg/2016-ml-contest) contains everything you need to get started, including the data and Brendon's code. We invite you to find a friend or two or more, and have a go!
> To participate, fork that repo, and add a directory for your own solution, naming it after your team. You can make pull requests with your contributions, which must be written in Python, R, or Julia. We'll run them against the blind well — the same one Brendon used in the article — and update the leaderboard. You can submit solutions as often as you like. We'll close the contest at **23:59 UT on 31 January 2017**. There will be a goody bag of completely awesome and highly desirable prizes for whoever is at the top of the leaderboard when the dust settles. The full rules are in the repo.
> Have fun with it, and good luck!
## Now for the code
All the code and data to reproduce *everything* in that article is right here in this repository. You can read the code in a [Jupyter Notebook](http://jupyter.org/) here...
<div style="width:50%; margin: 12px 0px 6px 20px; padding: 8px; border: 2px solid darkblue; border-radius: 6px; font-size: 125%; background: #EEEEFF;">
[**Facies_classification.ipynb**](Facies_classification.ipynb)
</div>
See [the February issue of *The Leading Edge*](http://library.seg.org/doi/abs/10.1190/tle35020190.1) for Matt Hall's user guide to the tutorials; it explains how to run a Jupyter Notebook.
See **Running the notebook live** (below) for information on running that notebook live right now this minute in your web browser.
## Entering the contest
- Find a friend or two or ten (optional) and form a team.
- To get a copy of the repo that you can make pull requests from (that is, notify us that you want to send us an entry to the contest), you need to [fork the repo](https://help.github.com/articles/fork-a-repo/)
- Use Jupyter Notebook (to make our life easy!) with Python, R, or Julia kernels, or write scripts and put them in the repo in a directory named after your team.
- When you have a good result, send it to us by [making a pull request](https://help.github.com/articles/about-pull-requests/).
- Everyone can see your entry. If you're not familiar with open source software, this might feel like a bug. It's not, it's a feature. If it's good, your contribution will improve others' results. Welcome to reproducible science!
## Running the notebook live
To make it even easier to try machine learning for yourself, you can launch this notebook on [**mybinder.org**](http://www.mybinder.org/) and run it there. You can load the data, change the code, and do everything... except enter the contest. Everything on your mybinder.org machine is **temporary**. If you make something awesome, be sure to use **File > Download as... > Notebook (.ipynb)** to save it locally. Then you can fork the repo in GitHub, add your new notebook, and make your pull request.
## Rules
We've never done anything like this before, so there's a good chance these rules will become clearer as we go. We aim to be fair at all times, and reserve the right to make judgment calls for dealing with unforeseen circumstances.
- You must submit your result as code and we must be able to run your code.
- The result we get with your code is the one that counts as your result.
- To make it more likely that we can run it, your code must be written in Python or R or Julia.
- The contest is over at 23:59:59 UT (i.e. midnight in London, UK) on 31 January 2017. Pull requests made after that time won't be eligible for the contest.
- If you can do even better with code you don't wish to share fully, that's really cool, nice work! But you can't enter it for the contest. We invite you to share your result through your blog or other channels... maybe a paper in *The Leading Edge*.
- This document and documents it links to will be the channel for communication of the leading solution and everything else about the contest.
- This document contains the rules. Our decision is final. No purchase necessary. Please exploit artificial intelligence responsibly.
<hr />
© 2016 SEG, licensed CC-BY, please share this!
| github_jupyter |
### Deep Kung-Fu with advantage actor-critic
In this notebook you'll build a deep reinforcement learning agent for atari [KungFuMaster](https://gym.openai.com/envs/KungFuMaster-v0/) and train it with advantage actor-critic.

```
from __future__ import print_function, division
from IPython.core import display
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
# If you are running on a server, launch xvfb to record game videos
# Please make sure you have xvfb installed
import os
# NOTE(review): isinstance(os.environ.get("DISPLAY"), str) would be the idiomatic check here.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
    !bash ../xvfb start
    %env DISPLAY = : 1
```
For starters, let's take a look at the game itself:
* Image resized to 42x42 and grayscale to run faster
* Rewards divided by 100 'cuz they are all divisible by 100
* Agent sees last 4 frames of game to account for object velocity
```
import gym
from atari_util import PreprocessAtari
def make_env():
    """Create the KungFuMaster environment wrapped for learning:
    42x42 grayscale frames, cropped playfield, a 4-frame stack (so the agent
    can infer velocities), and rewards scaled by 0.01."""
    env = gym.make("KungFuMasterDeterministic-v0")
    env = PreprocessAtari(env, height=42, width=42,
                          crop=lambda img: img[60:-30, 5:],  # drop the score bar and side margins
                          dim_order='tensorflow',
                          color=False, n_frames=4,
                          reward_scale=0.01)
    return env
env = make_env()
obs_shape = env.observation_space.shape
n_actions = env.action_space.n
print("Observation shape:", obs_shape)
print("Num actions:", n_actions)
print("Action names:", env.env.env.get_action_meanings())
# Play 100 random steps, then show both the raw frame and what the agent sees.
s = env.reset()
for _ in range(100):
    s, _, _, _ = env.step(env.action_space.sample())
plt.title('Game image')
plt.imshow(env.render('rgb_array'))
plt.show()
plt.title('Agent observation (4-frame buffer)')
plt.imshow(s.transpose([0, 2, 1]).reshape([42, -1]))  # lay the 4 stacked frames side by side
plt.show()
```
### Build an agent
We now have to build an agent for actor-critic training - a convolutional neural network that converts states into action probabilities $\pi$ and state values $V$.
Your assignment here is to build and apply a neural network - with any framework you want.
For starters, we want you to implement this architecture:

After you get above 50 points, we encourage you to experiment with model architecture to score even better.
```
import tensorflow as tf
# Start a fresh TF1 graph and an interactive session shared by the whole notebook.
tf.reset_default_graph()
sess = tf.InteractiveSession()
from keras.layers import Conv2D, Dense, Flatten
class Agent:
    def __init__(self, name, state_shape, n_actions, reuse=False):
        """A simple actor-critic agent: shared conv torso + policy/value heads."""
        with tf.variable_scope(name, reuse=reuse):
            # Exercise placeholders filled in with the architecture suggested
            # by the assignment figure: three strided conv layers, a dense
            # hidden layer, then separate policy (actor) and value (critic) heads.
            self.conv1 = Conv2D(32, (3, 3), strides=(2, 2), activation='relu')
            self.conv2 = Conv2D(32, (3, 3), strides=(2, 2), activation='relu')
            self.conv3 = Conv2D(32, (3, 3), strides=(2, 2), activation='relu')
            self.flatten = Flatten()
            self.hidden = Dense(128, activation='relu')
            self.logits_layer = Dense(n_actions)  # unnormalized action preferences
            self.value_layer = Dense(1)           # scalar state-value estimate

            # prepare a graph for agent step
            self.state_t = tf.placeholder(
                'float32', [None, ] + list(state_shape))
            self.agent_outputs = self.symbolic_step(self.state_t)

    def symbolic_step(self, state_t):
        """Takes agent's previous step and observation, returns next state and whatever it needs to learn (tf tensors)"""
        # Apply the shared torso, then the two heads. Reusing the same layer
        # objects on different inputs shares weights between the graphs built
        # for current and next states.
        nn = self.conv1(state_t)
        nn = self.conv2(nn)
        nn = self.conv3(nn)
        nn = self.flatten(nn)
        nn = self.hidden(nn)
        logits = self.logits_layer(nn)
        state_value = self.value_layer(nn)[:, 0]  # squeeze [None, 1] -> [None]
        assert tf.is_numeric_tensor(state_value) and state_value.shape.ndims == 1, \
            "please return 1D tf tensor of state values [you got %s]" % repr(
                state_value)
        assert tf.is_numeric_tensor(logits) and logits.shape.ndims == 2, \
            "please return 2d tf tensor of logits [you got %s]" % repr(logits)
        # hint: if you triggered state_values assert with your shape being [None, 1],
        # just select [:, 0]-th element of state values as new state values
        return (logits, state_value)

    def step(self, state_t):
        """Same as symbolic step except it operates on numpy arrays"""
        sess = tf.get_default_session()
        return sess.run(self.agent_outputs, {self.state_t: state_t})

    def sample_actions(self, agent_outputs):
        """pick actions given numeric agent outputs (np arrays)"""
        logits, state_values = agent_outputs
        # softmax over logits, then sample one action per environment
        policy = np.exp(logits) / np.sum(np.exp(logits),
                                         axis=-1, keepdims=True)
        return np.array([np.random.choice(len(p), p=p) for p in policy])
# Build the agent, initialize its variables, and sanity-check one forward pass.
agent = Agent("agent", obs_shape, n_actions)
sess.run(tf.global_variables_initializer())
state = [env.reset()]  # batch of one observation
logits, value = agent.step(state)
print("action logits:\n", logits)
print("state values:\n", value)
```
### Let's play!
Let's build a function that measures agent's average reward.
```
def evaluate(agent, env, n_games=1):
"""Plays an a game from start till done, returns per-game rewards """
game_rewards = []
for _ in range(n_games):
state = env.reset()
total_reward = 0
while True:
action = agent.sample_actions(agent.step([state]))[0]
state, reward, done, info = env.step(action)
total_reward += reward
if done:
break
game_rewards.append(total_reward)
return game_rewards
# Record three evaluation games to video files under kungfu_videos/.
env_monitor = gym.wrappers.Monitor(env, directory="kungfu_videos", force=True)
rw = evaluate(agent, env_monitor, n_games=3,)
env_monitor.close()
print(rw)
# show video
from IPython.display import HTML
import os
video_names = list(filter(lambda s: s.endswith(
    ".mp4"), os.listdir("./kungfu_videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./kungfu_videos/"+video_names[-1]))  # this may or may not be _last_ video. Try other indices
```
### Training on parallel games

To make actor-critic training more stable, we shall play several games in parallel. This means ya'll have to initialize several parallel gym envs, send agent's actions there and .reset() each env if it becomes terminated. To minimize learner brain damage, we've taken care of them for ya - just make sure you read it before you use it.
```
class EnvBatch:
    def __init__(self, n_envs=10):
        """Maintain a pool of `n_envs` parallel game environments."""
        self.envs = [make_env() for _ in range(n_envs)]

    def reset(self):
        """Restart every environment; return stacked initial observations [n_envs, *obs_shape]."""
        return np.array([env.reset() for env in self.envs])

    def step(self, actions):
        """
        Apply one action per environment.

        :returns: stacked (observations[n_envs, *obs_shape], rewards[n_envs],
            done[n_envs,], info[n_envs]). Any environment whose episode ended
            is reset automatically, and its fresh initial observation replaces
            the terminal one.
        """
        outcomes = [env.step(a) for env, a in zip(self.envs, actions)]
        new_obs, rewards, done, infos = map(np.array, zip(*outcomes))
        for idx, finished in enumerate(done):
            if finished:
                new_obs[idx] = self.envs[idx].reset()
        return new_obs, rewards, done, infos
```
__Let's try it out:__
```
env_batch = EnvBatch(10)
batch_states = env_batch.reset()
batch_actions = agent.sample_actions(agent.step(batch_states))
batch_next_states, batch_rewards, batch_done, _ = env_batch.step(batch_actions)
print("State shape:", batch_states.shape)
print("Actions:", batch_actions[:3])
print("Rewards:", batch_rewards[:3])
print("Done:", batch_done[:3])
```
# Actor-critic
Here we define a loss functions and learning algorithms as usual.
```
# These placeholders mean exactly the same as in "Let's try it out" section above
states_ph = tf.placeholder('float32', [None, ] + list(obs_shape))
next_states_ph = tf.placeholder('float32', [None, ] + list(obs_shape))
actions_ph = tf.placeholder('int32', (None,))
rewards_ph = tf.placeholder('float32', (None,))
is_done_ph = tf.placeholder('float32', (None,))
# logits[n_envs, n_actions]; state_values presumably [n_envs,] (one scalar
# value per state, given the ndims == 1 asserts below) — confirm against
# agent.symbolic_step
logits, state_values = agent.symbolic_step(states_ph)
next_logits, next_state_values = agent.symbolic_step(next_states_ph)
# Terminal states contribute no future value: mask out V(s') where done == 1.
next_state_values = next_state_values * (1 - is_done_ph)
# probabilities and log-probabilities for all actions
probs = tf.nn.softmax(logits)  # [n_envs, n_actions]
logprobs = tf.nn.log_softmax(logits)  # [n_envs, n_actions]
# log-probabilities only for agent's chosen actions
logp_actions = tf.reduce_sum(
    logprobs * tf.one_hot(actions_ph, n_actions), axis=-1)  # [n_envs,]
# compute advantage using rewards_ph, state_values and next_state_values:
# A(s, a) = r + gamma * V(s') - V(s)
gamma = 0.99
advantage = # YOUR CODE
assert advantage.shape.ndims == 1, "please compute advantage for each sample, vector of shape [n_envs,]"
# compute policy entropy given logits_seq: H = -sum_a p(a) log p(a). Mind the sign!
entropy = # YOUR CODE
assert entropy.shape.ndims == 1, "please compute pointwise entropy vector of shape [n_envs,] "
# Policy-gradient loss plus an entropy bonus; stop_gradient keeps the critic
# out of the actor's gradient path.
actor_loss = - tf.reduce_mean(logp_actions *
                              tf.stop_gradient(advantage)) - 0.001 * tf.reduce_mean(entropy)
# compute target state values using the temporal difference formula:
# target = r + gamma * V(s')  (use rewards_ph and next_state_values)
target_state_values = # YOUR CODE
# MSE critic loss; targets are treated as constants via stop_gradient.
critic_loss = tf.reduce_mean(
    (state_values - tf.stop_gradient(target_state_values))**2)
train_step = tf.train.AdamOptimizer(1e-4).minimize(actor_loss + critic_loss)
sess.run(tf.global_variables_initializer())
# Sanity checks to catch some errors. Specific to KungFuMaster in assignment's default setup.
l_act, l_crit, adv, ent = sess.run([actor_loss, critic_loss, advantage, entropy], feed_dict={
    states_ph: batch_states,
    actions_ph: batch_actions,
    next_states_ph: batch_states,
    rewards_ph: batch_rewards,
    is_done_ph: batch_done,
})
assert abs(l_act) < 100 and abs(l_crit) < 100, "losses seem abnormally large"
assert 0 <= ent.mean() <= np.log(
    n_actions), "impossible entropy value, double-check the formula pls"
if ent.mean() < np.log(n_actions) / 2:
    print("Entropy is too low for untrained agent")
print("You just might be fine!")
```
# Train
Just the usual - play a bit, compute loss, follow the gradients, repeat a few million times.

```
from IPython.display import clear_output
from tqdm import trange
from pandas import DataFrame

# Exponentially-weighted moving average used to smooth the training curves.
moving_average = lambda x, **kw: DataFrame(
    {'x': np.asarray(x)}).x.ewm(**kw).mean().values

env_batch = EnvBatch(10)
batch_states = env_batch.reset()
rewards_history = []
entropy_history = []
for i in trange(100000):
    # Sample one action per parallel environment and advance them all.
    batch_actions = agent.sample_actions(agent.step(batch_states))
    batch_next_states, batch_rewards, batch_done, _ = env_batch.step(
        batch_actions)
    feed_dict = {
        states_ph: batch_states,
        actions_ph: batch_actions,
        next_states_ph: batch_next_states,
        rewards_ph: batch_rewards,
        is_done_ph: batch_done,
    }
    batch_states = batch_next_states
    # One joint actor+critic gradient step; also fetch entropy for monitoring.
    _, ent_t = sess.run([train_step, entropy], feed_dict)
    entropy_history.append(np.mean(ent_t))
    if i % 500 == 0:
        if i % 2500 == 0:
            # Periodic evaluation on a single (non-batched) environment.
            rewards_history.append(np.mean(evaluate(agent, env, n_games=3)))
            if rewards_history[-1] >= 50:
                # BUGFIX: original did `print("..." % color)` with an undefined
                # `color` variable, which raised NameError at exactly the moment
                # the agent first succeeded.
                print("Your agent has earned the yellow belt")
        clear_output(True)
        plt.figure(figsize=[8, 4])
        plt.subplot(1, 2, 1)
        plt.plot(rewards_history, label='rewards')
        # BUGFIX: was `moving_aewmaverage`, an undefined name (typo for
        # `moving_average`), crashing the first plotting pass.
        plt.plot(moving_average(np.array(rewards_history),
                                span=10), marker='.', label='rewards ewma@10')
        plt.title("Session rewards")
        plt.grid()
        plt.legend()
        plt.subplot(1, 2, 2)
        plt.plot(entropy_history, label='entropy')
        plt.plot(moving_average(np.array(entropy_history),
                                span=1000), label='entropy ewma@1000')
        plt.title("Policy entropy")
        plt.grid()
        plt.legend()
        plt.show()
```
Relax and grab some refreshments while your agent is locked in an infinite loop of violence and death.
__How to interpret plots:__
The session reward is the easy thing: it should in general go up over time, but it's okay if it fluctuates ~~like crazy~~. It's also OK if the reward doesn't increase substantially before some 10k initial steps. However, if the reward reaches zero and doesn't seem to recover over 2-3 evaluations, there's something wrong happening.
Since we use a policy-based method, we also keep track of __policy entropy__ - the same one you used as a regularizer. The only important thing about it is that your entropy shouldn't drop too low (`< 0.1`) before your agent gets the yellow belt. Or at least it can drop there, but _it shouldn't stay there for long_.
If it does, the culprit is likely:
* Some bug in entropy computation. Remember that it is $-\sum_i p(a_i) \cdot \log p(a_i)$
* Your agent architecture converges too fast. Increase entropy coefficient in actor loss.
* Gradient explosion - just [clip gradients](https://stackoverflow.com/a/43486487) and maybe use a smaller network
* Us. Or TF developers. Or aliens. Or lizardfolk. Contact us on forums before it's too late!
If you're debugging, just run `logits, values = agent.step(batch_states)` and manually look into logits and values. This will reveal the problem 9 times out of 10: you'll likely see some NaNs or insanely large numbers or zeros. Try to catch the moment when this happens for the first time and investigate from there.
### "Final" evaluation
```
env_monitor = gym.wrappers.Monitor(env, directory="kungfu_videos", force=True)
final_rewards = evaluate(agent, env_monitor, n_games=20,)
env_monitor.close()
print("Final mean reward:", np.mean(final_rewards))
video_names = list(filter(lambda s: s.endswith(
".mp4"), os.listdir("./kungfu_videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./kungfu_videos/"+video_names[-1]))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./kungfu_videos/"+video_names[-2])) # try other indices
# if you don't see videos, just navigate to ./kungfu_videos and download .mp4 files from there.
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
### Now what?
Well, 5k reward is [just the beginning](https://www.buzzfeed.com/mattjayyoung/what-the-color-of-your-karate-belt-actually-means-lg3g). Can you get past 200? With recurrent neural network memory, chances are you can even beat 400!
| github_jupyter |
# OpenCV Filters Webcam
In this notebook, several filters will be applied to webcam images.
Those input sources and applied filters will then be displayed either directly in the notebook or on HDMI output.
To run all cells in this notebook a webcam and HDMI output monitor are required.
## 1. Start HDMI output
### Step 1: Load the overlay
```
from pynq.overlays.base import BaseOverlay
from pynq.lib.video import *
base = BaseOverlay("base.bit")
```
### Step 2: Initialize HDMI I/O
```
# monitor configuration: 640*480 @ 60Hz
Mode = VideoMode(640,480,24)
hdmi_out = base.video.hdmi_out
hdmi_out.configure(Mode,PIXEL_BGR)
hdmi_out.start()
```
## 2. Applying OpenCV filters on Webcam input
### Step 1: Specify webcam resolution
```
# camera (input) configuration
frame_in_w = 640
frame_in_h = 480
```
### Step 2: Initialize camera from OpenCV
```
import cv2
videoIn = cv2.VideoCapture(0)
videoIn.set(cv2.CAP_PROP_FRAME_WIDTH, frame_in_w);
videoIn.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_in_h);
print("capture device is open: " + str(videoIn.isOpened()))
```
### Step 3: Send webcam input to HDMI output
```
import numpy as np
ret, frame_vga = videoIn.read()
if (ret):
outframe = hdmi_out.newframe()
outframe[:] = frame_vga
hdmi_out.writeframe(outframe)
else:
raise RuntimeError("Error while reading from camera.")
```
### Step 4: Edge detection
Detecting edges on webcam input and display on HDMI out.
```
import time
num_frames = 20
readError = 0
start = time.time()
for i in range (num_frames):
# read next image
ret, frame_vga = videoIn.read()
if (ret):
outframe = hdmi_out.newframe()
laplacian_frame = cv2.Laplacian(frame_vga, cv2.CV_8U, dst=outframe)
hdmi_out.writeframe(outframe)
else:
readError += 1
end = time.time()
print("Frames per second: " + str((num_frames-readError) / (end - start)))
print("Number of read errors: " + str(readError))
```
### Step 5: Canny edge detection
Detecting edges on webcam input and display on HDMI out.
Any edges with intensity gradient more than maxVal are sure to be edges and those below minVal are sure to be non-edges, so discarded. Those that lie between these two thresholds are classified as edges or non-edges based on their connectivity. If they are connected to “sure-edge” pixels, they are considered to be part of edges. Otherwise, they are also discarded. As we only need a single output channel, reconfigure the HDMI output to work in grayscale mode. This means that our output frame is in the correct format for the edge detection algorithm.
```
num_frames = 20
# BUGFIX: reset the error counter for this cell. It was previously carried
# over from the Laplacian cell above, inflating "read errors" and deflating
# the FPS figure reported here.
readError = 0
# Reconfigure HDMI for single-channel (grayscale) output so Canny can write
# its edge map directly into the output frame.
Mode = VideoMode(640,480,8)
hdmi_out = base.video.hdmi_out
hdmi_out.configure(Mode,PIXEL_GRAY)
hdmi_out.start()
start = time.time()
for i in range (num_frames):
    # read next image
    ret, frame_webcam = videoIn.read()
    if (ret):
        outframe = hdmi_out.newframe()
        # Write edges straight into the HDMI frame buffer (no extra copy).
        cv2.Canny(frame_webcam, 100, 110, edges=outframe)
        hdmi_out.writeframe(outframe)
    else:
        readError += 1
end = time.time()
print("Frames per second: " + str((num_frames-readError) / (end - start)))
print("Number of read errors: " + str(readError))
```
### Step 6: Show results
Now use matplotlib to show filtered webcam input inside notebook.
```
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
frame_canny = cv2.Canny(frame_webcam, 100, 110)
plt.figure(1, figsize=(10, 10))
frame_vga = np.zeros((480,640,3)).astype(np.uint8)
frame_vga[:,:,0] = frame_canny
frame_vga[:,:,1] = frame_canny
frame_vga[:,:,2] = frame_canny
plt.imshow(frame_vga)
plt.show()
```
### Step 7: Release camera and HDMI
```
videoIn.release()
hdmi_out.stop()
del hdmi_out
```
Copyright (C) 2020 Xilinx, Inc
| github_jupyter |
# Module 2 Tutorial 1
There are numerous open-source libraries, collections of functions, that have been developed in Python that we will make use of in this course.
The first one is called NumPy and you can find the documentation [here](https://numpy.org/). It is one of the most widely-used libraries for scientific computing in Python. The second library we will use will be a module from Scipy, called scipy.stats ([scipy.stats documentation](https://docs.scipy.org/doc/scipy/reference/stats.html)), and the third is a library for handling database-like structures called Pandas for which you can find the documentation at this link: [Pandas documentation](https://pandas.pydata.org/docs/user_guide/index.html).
We import the libraries with the following statement:
```
import numpy
from scipy import stats
import pandas
```
Now we will start building our toolbox with some simple tools to describe our data:
## Confidence Intervals and Descriptive Statistics
In this module of the course one of the first things that is covered is confidence intervals. As we only have access to samples of data we assume that neither the population mean or the population standard deviation are known and we work with point estimates, sample mean, and sample standard deviation (also called standard error).
To build a confidence interval we must specify a confidence level and provide the sample of our data.
Below is a simple function to obtain the confidence interval of your sample.
```
def get_confidence_interval(data, confidence=0.95):
    """Compute a confidence interval for the mean of `data`.

    The population standard deviation is assumed unknown, so the interval
    is based on Student's t-distribution with n - 1 degrees of freedom.

    Args:
        data (single-column or list): The sample data.
        confidence (float): Confidence level for the interval.

    Returns:
        tuple: (lower, upper) bounds of the confidence interval.
    """
    sample_size = len(data)
    sample_mean = numpy.mean(data)
    standard_error = stats.sem(data)
    # Interval: mean +/- t_{alpha/2, n-1} * SE
    return stats.t.interval(confidence, sample_size - 1,
                            sample_mean, standard_error)
```
We can walk through the function above:
The name of the function is *get_confidence_interval* and the function takes two arguments, the first is the sample that you are interested in calculating the confidence interval for, and the second is the desired confidence level. The second argument is optional and will default to 95% if not specified. 95% is a very typical confidence level used in most applications.
Inside the function we first obtain *n*, the sample size. Then we calculate the sample mean using the numpy.mean function ([numpy.mean documentation](https://numpy.org/doc/stable/reference/generated/numpy.mean.html)), and the sample standard error with the scipy.stats.sem function ([scipy.stats.mean documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sem.html)).
Finally, we calculate the confidence interval using the scipy.stats.t.interval function ([scipy.stats.t documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.t.html)), this function needs the desired confidence level, the degrees of freedom, the sample mean, and the standard error, in order to calculate the upper and lower bounds of the confidence interval.
Let us illustrate this function with Example 12.6 from the course book: in this example both $\mu$ and $\sigma$, the population parameters are unknown. The sample data is given as {1, 0, 5} and the question asks for the 99% confidence interval for $\mu$ assuming a normally distributed population.
This is easily calculated using the function we defined above:
```
sample_data = [1, 0, 5]
get_confidence_interval(sample_data, confidence=0.99)
```
Another tool that could be useful in order to help us understand our data is provided by the pandas library. The .describe() function produces a statistical description of our sample. In order to call this function however our data needs to be in a pandas.Series or pandas.DataFrame object, and we need a column for each group we want to describe.
Let's say we have some data stored in two columns labeled "Day" and "Temperature", which contains the temperature readings at 6 set times over the course of two different days:
```
sample_dataframe = pandas.DataFrame(
{
"Day": ["Day 1"]*6 + ["Day 2"]*6,
"Temperature": [15, 17, 19, 19, 18, 16, 14, 15, 18, 19, 21, 18]
}
)
```
The DataFrame looks like this:
```
sample_dataframe
```
Now, because we created the DataFrame we know what is in it. But if we didn't, we could ask what the columns are:
```
column_names = sample_dataframe.columns
column_names
```
We can assign these column names as either independent or dependent to help us keep track of what we're doing:
```
independent_col = column_names[0]
dependent_col = column_names[1]
```
And we can also see how many values our independent variable takes and what they are by using the pandas.unique function:
```
independent_variable_values = pandas.unique(sample_dataframe[independent_col])
independent_variable_values
```
Now, we want to separate the samples corresponding to each independent variable and obtain a statistical description of them.
We do this as follows:
```
dependent_variable_data = pandas.DataFrame(columns=[day for day in independent_variable_values])
```
This is equivalent to pandas.DataFrame(columns=["Day 1", "Day 2"]), but the code in the block above would automatically create an additional column for any more days added to the dataset.
Right now the DataFrame looks like this:
```
dependent_variable_data
```
Let's put the correct data into it now.
It looks complicated, but don't worry too much.
If we unpack the lines below:
1. sample_dataframe[dependent_col] selects the data in the dependent variable column.
2. [sample_dataframe[independent_col]==independent_variable_values[0]] selects all the data where independent_col (Day) is equal to a specific value, independent_variable_values[0], ('Day 1').
3. The final .reset_index(drop) ensures that the selected data does not retain a label showing its index in the original file.
```
dependent_variable_data["Day 1"] = sample_dataframe[dependent_col][sample_dataframe[independent_col]==independent_variable_values[0]].reset_index(drop=True)
dependent_variable_data["Day 2"] = sample_dataframe[dependent_col][sample_dataframe[independent_col]==independent_variable_values[1]].reset_index(drop=True)
```
Just to be clear, the following is equivalent, but less general:
```
dependent_variable_data["Day 1"] = sample_dataframe["Temperature"][sample_dataframe["Day"]=="Day 1"].reset_index(drop=True)
dependent_variable_data["Day 2"] = sample_dataframe["Temperature"][sample_dataframe["Day"]=="Day 2"].reset_index(drop=True)
```
The data now looks like this:
```
dependent_variable_data
```
We can now request a statistical description of each column from our dataset:
```
sample_statistics = dependent_variable_data.describe()
sample_statistics
```
And what we see returned is the sample size, the mean of our sample, the standard deviation (which is not of great use, can you explain why?), the minimum, maximum, and different percentiles. We can access the different information from each column by name or by index:
```
print(sample_statistics["Day 1"]["mean"])
print(sample_statistics["Day 1"][1])
```
We can now move on to the next part of module 1.
## Hypothesis Testing
We have already constructed a set of sample data to test our functions with: we have one independent variable (Day) which takes on two different values (Day 1 and Day 2), and we have the temperature on those days as our dependent variable.
The question we could ask now is, is the mean temperature of the two days statistically different? We can write this as a hypothesis test:
$H_0 : \mu_1 - \mu_2 = 0$\
$H_1 : \mu_1 - \mu_2 \neq 0$
The independent sample t-test can be used to test this hypothesis. But, an underlying assumption of the independent samples t-test is that the two populations being compared have equal variances.
The test for equal variance can be written as another hypothesis test and is commonly called the Levene test:
$H_0 : \sigma^2_1 - \sigma^2_2 = 0$\
$H_1 : \sigma^2_1 - \sigma^2_2 \neq 0$
So let's add to our toolbox again.
This time we are not writing our own function immediately, but first using a function from the stats library. This library includes the function stats.levene which performs the levene test.
The Levene test function takes the data from each group and returns an $F$ and $p$ value. Depending on the desired significance level, we can then accept or reject the Levene test.
```
stats.levene(dependent_variable_data["Day 1"], dependent_variable_data["Day 2"])
```
If our significance level, $\alpha$, is 0.05 (a common value), we can observe that in this case the $p$ value is larger than $\alpha$ and so we fail to reject the null hypothesis: we cannot statistically discount the possibility that the two samples have equal variance at this level of significance.
We now want to address the question of equal means. Let's add the t-test to our toolbox:
```
def t_test(data_group1, data_group2, confidence=0.95):
    """Independent-samples t-test for equality of two group means.

    Runs Levene's test first to decide between the equal-variance and
    Welch variants of the t-test, then tests H0: mu1 == mu2 at
    significance level alpha = 1 - confidence.

    Returns:
        dict: {'t': t-statistic, 'p': p-value, 'Reject H0': "True"/"False"}
        (the decision is a string, matching the original output format).
    """
    alpha = 1 - confidence
    # Levene p-value above alpha -> we cannot reject equal variances.
    equal_variance = stats.levene(data_group1, data_group2)[1] > alpha
    t, p = stats.ttest_ind(data_group1, data_group2, equal_var=equal_variance)
    reject_H0 = "False" if p > alpha else "True"
    return {'t': t, "p": p, "Reject H0": reject_H0}
```
Our function to perform the $t$-test is called "t-test" and it takes three possible inputs:
1. The data of the first column (or group) that would correspond to $\mu_1$ in the hypothesis test,
2. The data of the second column (or group) that would correspond to $\mu_2$ in the hypothesis test, and finally,
3. The desired confidence level, this will default to 95% if not specified.
Inside the function, the confidence level is used to determine the $\alpha$ value which is the significance level for the $t$-test.\
Then, the Levene test (which we discussed previously) is run to determine if the two groups have equal variance or not. This is done because the function that performs the $t$-test, [stats.ttest_ind](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html), needs this as an input; it modifies the calculations based on whether or not the two groups have equal variance.
So, after the Levene test we calculate the $t$ value and $p$ value of the $t$-test. The inputs to "stats.ttest_ind" are the data for the first group, the data for the second group, and the results of the Levene test.
Finally, we check if $p$ is larger than our desired significance level.
Let us illustrate this for our previous temperature dataset:
```
t_test(dependent_variable_data["Day 1"], dependent_variable_data["Day 2"], confidence=0.95)
```
The outputs from our function are the $t$ value, the $p$ value, and whether or not we accept the null hypothesis.
We see that because the $p$ value is larger than our significance level $\alpha$, we fail to reject the null hypothesis.
| github_jupyter |
```
import intake
import xarray as xr
# open ESMCol catalog
cat_url = "https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json"
col = intake.open_esm_datastore(cat_url)
grid_label = 'gn' # for model native grid
#grid_label = 'gr' # for regridded data
# search and display data
cat = col.search(activity_id='CMIP',
experiment_id=['historical','ssp126', 'ssp370','ssp245','ssp585'],
table_id='Amon', variable_id='tas', grid_label=grid_label)
len(cat.df)
dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True})
# these are all of the different models we found
list(dset_dict.keys())
# just operate on one of these
ds = dset_dict['CMIP.BCC.BCC-CSM2-MR.historical.Amon.gn']
ds
# our cities
# cmip data use 0-360 lons
cities = {'New York': (40.7128, 360-74.0060),
'Paris': (48.8566, 2.3522),
'Hong Kong': (22.3193, 114.1694)}
# as xarray dataset
cities_ds = xr.Dataset({'lon': ('city', [v[1] for v in cities.values()]),
'lat': ('city', [v[0] for v in cities.values()])},
coords={'city': list(cities.keys())})
cities_ds
tas_on_cities = ds.tas.interp(lon=cities_ds.lon, lat=cities_ds.lat)
tas_on_cities
# this takes time because the full dataset has to be read
# because the data are chunked contiguously in space
# to speed this up, we could store the "cities" data in its own zarr store on the cloud
%time tas_on_cities.load()
tas_pentad_mean = tas_on_cities.resample(time='5AS').mean()
tas_pentad_mean.plot.line(x='time', col='city')
# now do that for a couple of different models
def load_and_resample_timeseries(ds):
    """Interpolate `tas` onto the city coordinates and reduce to 5-year means."""
    # Some models name their coordinates latitude/longitude instead of lat/lon;
    # normalize so the interpolation below always finds them.
    if 'latitude' in ds.dims:
        ds = ds.rename({'latitude': 'lat', 'longitude': 'lon'})
    city_series = ds.tas.interp(lon=cities_ds.lon, lat=cities_ds.lat)
    return city_series.resample(time='5AS').mean()
from dask.distributed import Client
from dask_kubernetes import KubeCluster
cluster = KubeCluster()
cluster.adapt(minimum=1, maximum=10)
client = Client(cluster)
cluster
import dask
all_results = [load_and_resample_timeseries(ds) for ds in dset_dict.values()]
all_results_computed = dask.compute(*all_results)
from matplotlib import pyplot as plt
for model, ds_model in zip(dset_dict.keys(), all_results_computed):
plt.figure()
ds_model.plot.line(x='time', col='city')
plt.suptitle(model)
# can we concatenate the datasets across the model dimension - NO!
ds_all = xr.concat(all_results_computed, dim='source_id')
ds_all
```
The models are on different time calendars. There is no way to merge them without ignoring fundamental differences in the way they represent time. One way to do this would be to just overwrite the time with an integer year.
```
ds.time.dt.year
fig, ax = plt.subplots()
for model, ds_model in zip(dset_dict.keys(), all_results_computed):
ds_model.coords['year'] = ds_model.time.dt.year
ds_model = ds_model.swap_dims({'time': 'year'})
ds_model.sel(city='New York').mean(dim='member_id').plot(ax=ax, label=model)
plt.legend()
```
| github_jupyter |
Want, the ability to generate various tensors and measure their properties.
Pretty much, want to do unsupervised learning of matrices and tensors.
Properties are defined not by their values but by how they can be composed, transformed, ... Not sure how to make this happen, but !!!.
What are we generating based on?
- Structure/symmetry?
- How it transforms a space?
- Topology of a TN?
- ?
Is it possible to construct a toeplitz matrix with a tensor-network?
***
Toeplitz (can be used to encode convolutions), circulant (?) and hankel (can be used to encode automata!?).
Orthogonal, stochastic, orthonormal,
Block, block-diagonal (used to encode independent systems?), ?
Hermitian,
https://en.wikipedia.org/wiki/List_of_matrices
## Approaches to TN contraction
* Boundary conditions!?!?
* Algebraically contractible tensor networks
*
https://en.wikipedia.org/wiki/Multigraph
https://en.wikipedia.org/wiki/Hypergraph
> As far as hypergraphs are concerned, a hypergraph is equivalent to a bi-partite graph, with two different set of nodes. One represents the regular nodes, the other a set of edges
http://20bits.com/article/graph-theory-part-ii-linear-algebra
http://www.math.utah.edu/~gustafso/s2017/2270/projects-2017/dylanJohnson/Dylan%20Johnson%20Graph%20Theory%20and%20Linear%20Algebra.pdf
```
import numpy as np
import numpy.random as rnd
import networkx as nx
class TreeTN():
    """Tree tensor network stub (see https://arxiv.org/pdf/1710.04833.pdf)."""

    def construct(self):
        # BUGFIX: the original signature `def construct():` omitted `self`,
        # so calling it on an instance raised TypeError. Body is still a
        # placeholder awaiting the real construction algorithm.
        pass
# cool. I can construct one. But how can I make a fast kernel for calculating it...
# that is what i need tensor-comprehension and/or xla and/or hand-written c/cuda.
# need to define matrix vector ops (and others) that can take advantage of the structure.
m = rnd.randint(0, 2, (10,10))
m
np.sum(m, axis=0)
np.sum(m, axis=1)
g = nx.from_numpy_matrix(m)
g = nx.convert_node_labels_to_integers(g)
nx.draw(g, pos=nx.spring_layout(g))
import scipy.linalg as lin
m = lin.circulant([0,1,0,0,0,0,1,1])
print(m)
g = nx.from_numpy_matrix(m)
g = nx.convert_node_labels_to_integers(g)
nx.draw(g, pos=nx.spring_layout(g))
m = lin.toeplitz([0,1,0,0,0,0,0,1], [0,1,0,1,1,1,0,1])
print(m)
g = nx.from_numpy_matrix(m)
g = nx.convert_node_labels_to_integers(g)
nx.draw(g, pos=nx.spring_layout(g))
m = lin.hankel([0,1,0,0,0,0,0,1], [0,1,0,1,1,0,0,1])
print(m)
g = nx.from_numpy_matrix(m)
g = nx.convert_node_labels_to_integers(g)
nx.draw(g, pos=nx.spring_layout(g))
m = np.kron(lin.circulant([0,1,0,1]), np.ones((2,2)), )
print(m)
g = nx.from_numpy_matrix(m)
g = nx.convert_node_labels_to_integers(g)
nx.draw(g, pos=nx.spring_layout(g))
def construct_cores(m, k=2):  # assume all connections are of the same dimension
    """Build one random tensor core per node of adjacency matrix `m`.

    Node i receives a core whose rank equals its degree (column sum of
    `m`), with every bond dimension fixed at 2.
    """
    degrees = np.sum(m, axis=0)
    cores = []
    for degree in degrees:
        cores.append(rnd.random([2] * int(degree)))
    return cores
cores = construct_cores(m)
print([c.shape for c in cores])
G = nx.MultiGraph()
G.add_node(1)
G.add_nodes_from([2,3])
G.add_edge(2,3) #G.add_edge(1,2)
G.add_edge(2,3)
nx.draw(G, pos=nx.spring_layout(G))
```
| github_jupyter |
```
cc.VerificationHandler.close_browser()
VerificationHandler.close_browser()
import numpy as np
import pandas as pd
from bs4.element import NavigableString
% run contactsScraper.py
% run contactChecker.py
ContactSheetOutput.set_output(contactKeys)
VerificationHandler.set_orgRecords(dm.OrgSession(orgRecords))
VerificationHandler.set_contactRecords(cr)
print('Local Contact Checker Ready')
orgsForToday = ['National Association for Multi-Ethnicity In Communications (NAMIC)',
'Association for Women in Science',
'Brain Injury Association of America',
'American Society of Home Inspectors',
'NAADAC, the Association for Addiction Professionals',
'American Public Transportation Association',
'Indiana Soybean Alliance',
'Associated Builders and Contractors (ABC)',
'National Association of Social Workers',
'American Marketing Association (AMA)']
org = orgsForToday[0]
vh = MotherSetVerifier(org)
org = orgsForToday[0]
vh = ContactScraperVerifier(org)
VerificationHandler.orgRecords.orgSessionStatusCheck()
ContactSheetOutput.currentRow
vh.write_contact_pointers()
class ContactScraperVerifier(MotherSetVerifier):
    """Verifier that analyses the shared DOM ancestry of verified contact pointers.

    Computes "grandmother" elements — common ancestors of pairs of pointers —
    to decide which Extender model fits the page layout. Relies on the base
    class (MotherSetVerifier, defined elsewhere) to populate
    self.verifiedPointers; each pointer is assumed to expose
    get_mother_element() and common_parent() — confirm against the pointer
    class.
    """
    def __init__(self, org):
        # `org` is an organisation name resolved by the base class.
        MotherSetVerifier.__init__(self, org)
        self.pointers = self.verifiedPointers
        # Get Grand Mother elements
        self.gmElements, self.grandMotherMatrix = ContactScraperVerifier.getGrandMotherElements(self.pointers)
        self.noGm = len(self.gmElements)
        if self.noGm == 1: ## Single Grandmother Case
            self.gm = self.gmElements[0]
            self.distinct_gm = ContactScraperVerifier.distinct_gm(self.gm, self.pointers)
            # NOTE(review): the Extender model selection below is deliberately
            # commented out — self.extenders is left unset in this branch,
            # unlike the else branch. Confirm whether that is intentional.
            #if ContactScraperVerifier.distinct_gm(self.gm, self.pointers): ## Grandmother is distinct
            ### Extender Model Selection
            #    self.extenders = [Extender(self.gm, vp) for vp in self.pointers]
            #
            #else: ## GrandMother is not distinct
            #    ## Extender Model Selection
            #    self.extenders = [RocketOnlyExtender(self.gm, vp) for vp in self.pointers]
        else:
            ## Extender Model Selection for Multiple GrandMothers
            self.extenders = None
            self.gm = None
    @staticmethod
    def getGrandMotherElements(pointers):
        ## Identify Grandmother elements.
        # Returns (unique grandmother elements, pairwise ancestor matrix).
        # NOTE(review): gmMatrix rows have lengths 0..n-1 (row i holds one
        # entry per j < i), so np.matrix receives a ragged list — verify this
        # behaves as intended on the numpy version in use.
        gmElements = []
        gmMatrix = []
        for i in range(len(pointers)):
            igmElements = []
            for j in range(i):
                ## Check to see if the Any Mother element is a Big Momma or "Bertha" Element
                # (identity check: both pointers share the exact same mother node)
                if pointers[i].get_mother_element() is pointers[j].get_mother_element():
                    gm = pointers[i].get_mother_element()
                else:
                    gm = pointers[i].common_parent(pointers[j])
                # Append Match to Grand Mother Matrix
                igmElements.append(gm)
                # Check to see if this is a new grand mother element,
                # if so append to the gmElements list of unique grandmother elements
                if gm not in gmElements:
                    gmElements.append(gm)
            # Append Matrix Row
            gmMatrix.append(igmElements)
        grandMotherMatrix = np.matrix(gmMatrix)
        return (gmElements, grandMotherMatrix)
    @staticmethod
    def distinct_gm(gm, pts):
        # True iff `gm` is not the mother element of ANY pointer in `pts`
        # (i.e. the grandmother sits strictly above every pointer's mother).
        # Implemented as tail recursion over the pointer list.
        if len(pts) == 0:
            return True
        if gm is pts[0].get_mother_element():
            return False
        else:
            return ContactScraperVerifier.distinct_gm(gm, pts[1:])
class Extender(object):
    """Builds reusable navigation closures ("vehicles") over a BeautifulSoup tree.

    Given a grandmother element *gm* and a verified pointer *pointer* (which
    exposes `.nathan`, `.tom` and `.get_mother_element()` — presumably the
    name node and title node of one contact; confirm against the pointer
    class), this records the path between them by tagging ancestor tags with
    temporary 'nathan' / 'tom' / 'sib' attributes, then builds closures that
    replay the same path from a *different* start node:

    - shuttle: start node -> its child at the same contents position
    - missile: mother-element start -> nathan/tom descendant
    - rocket:  nathan start -> tom (or vice versa), climbing, scanning
      siblings for the tagged "shoulder", then descending

    NOTE(review): the temporary tags are left in the tree unless reset_tree()
    is called (the call in __init__ is commented out) — confirm callers reset.
    """
    def __init__(self, gm, pointer):
        self.gm = gm
        self.vp = pointer
        # Tag the ancestor chains of nathan and tom up to the mother element.
        self.tag_nathans()
        self.tag_toms()
        # Build the six navigation closures while the tags are in place.
        self.tom_missile = self.mother_to_tom_missile()
        self.tom_rocket = self.nathan_to_tom_rocket()
        self.nathan_shuttle = self.nathan_to_element()
        self.nathan_missile = self.mother_to_nathan_missile()
        self.nathan_rocket = self.tom_to_nathan_rocket()
        self.tom_shuttle = self.tom_to_element()
        #self.reset_tree()
    ## Tagging Functions ------------------------------------
    ##
    def tag_nathans(self):
        """Tag nathan's ancestor chain with 'nathan' = distance-from-nathan."""
        # Catch the case where nathan's parent already is the mother element.
        if self.vp.get_mother_element() is self.vp.nathan.parent:
            self.vp.nathan.parent['nathan'] = 0
        else:
            return Extender.parent_cycle_up(self.vp.get_mother_element(), self.vp.nathan.parent, 'nathan', 0)
    def tag_toms(self):
        """Tag tom's ancestor chain with 'tom' = distance-from-tom."""
        # Catch the case where tom's parent already is the mother element.
        if self.vp.get_mother_element() is self.vp.tom.parent:
            self.vp.tom.parent['tom'] = 0
        else:
            return Extender.parent_cycle_up(self.vp.get_mother_element(), self.vp.tom.parent, 'tom', 0)
    def reset_tree(self):
        """Clear all temporary 'tom', 'nathan' and 'sib' attributes under gm."""
        for tomTag in self.gm.find_all(Extender.has_tom):
            del tomTag['tom']
        for nathanTag in self.gm.find_all(Extender.has_nathan):
            del nathanTag['nathan']
        for sibTag in self.gm.find_all(Extender.has_sib):
            del sibTag['sib']
    @staticmethod
    def parent_cycle_up(motherElement, element, atr, num):
        """Tag each ancestor (up THROUGH the mother element) with atr = depth.

        NOTE(review): unlike the later standalone parent_cycle_up, this
        version also tags the mother element itself and never sets 'sib'.
        Recursion does not terminate if *motherElement* is not an ancestor.
        """
        if element is motherElement:
            element[atr] = num
            #element['sib'] = atr
        else:
            element[atr] = num
            return Extender.parent_cycle_up(motherElement, element.parent, atr, num + 1)
    @staticmethod
    def contents_position(elm):
        """Index of *elm* within its parent's contents (count of left siblings)."""
        return Extender.contents_position_loop(elm, 0)
    @staticmethod
    def contents_position_loop(elm, num):
        # Walk left one sibling at a time until none remain, counting steps.
        if len(list(elm.previous_siblings)) == 0:
            return num
        else:
            return Extender.contents_position_loop(elm.previous_sibling, num + 1)
    @staticmethod
    def has_tom(tag):
        """True when *tag* carries the temporary 'tom' marker attribute."""
        try:
            return 'tom' in tag.attrs
        except AttributeError:
            # NavigableStrings (and other non-Tag nodes) have no .attrs.
            return False
    @staticmethod
    def has_nathan(tag):
        """True when *tag* carries the temporary 'nathan' marker attribute."""
        try:
            return 'nathan' in tag.attrs
        except AttributeError:
            return False
    @staticmethod
    def has_sib(tag):
        """True when *tag* carries the temporary 'sib' marker attribute."""
        try:
            return 'sib' in tag.attrs
        except AttributeError:
            return False
    @staticmethod
    def check_siblings(sibs, attFunc):
        """True if any sibling in the iterable *sibs* satisfies *attFunc*."""
        for sib in sibs:
            if attFunc(sib):
                return True
        return False
    ## Shuttle Functions ------------------------------------
    ##
    def nathan_to_element(self):
        """Closure: start -> the child at nathan's recorded contents position."""
        return lambda start: start.contents[Extender.contents_position(self.vp.nathan)]
    def tom_to_element(self):
        """Closure: start -> the child at tom's recorded contents position."""
        return lambda start: start.contents[Extender.contents_position(self.vp.tom)]
    ## Missile Functions ------------------------------------
    ##
    def mother_to_tom_missile(self):
        """Closure: mother-element start -> the node at tom's relative path."""
        motherElement = self.vp.get_mother_element()
        if self.vp.tom is motherElement:
            return lambda start: start
        else:
            return lambda start: Extender.cycle_up_mother(self.vp.tom.parent, motherElement, start).contents[Extender.contents_position(self.vp.tom)]
    def mother_to_nathan_missile(self):
        """Closure: mother-element start -> the node at nathan's relative path."""
        motherElement = self.vp.get_mother_element()
        if self.vp.nathan is motherElement:
            return lambda start: start
        else:
            return lambda start: Extender.cycle_up_mother(self.vp.nathan.parent, motherElement, start).contents[Extender.contents_position(self.vp.nathan)]
    @staticmethod
    def cycle_up_mother(elm, motherElement, start):
        # Mirror elm's ancestor path (relative to motherElement) under *start*:
        # recurse up to the mother, then descend by contents positions.
        if elm is motherElement:
            return start
        else:
            return Extender.cycle_up_mother(elm.parent, motherElement, start).contents[Extender.contents_position(elm)]
    ## Rocket Functions ------------------------------------
    ##
    def nathan_to_tom_rocket(self):
        ## Compute the route from a nathan start node to tom by starting at
        ## tom and recursing up, across and then down, looking for the
        ## nathan = 0 attribute.
        return lambda start: Extender.cycle_up(self.vp.tom, 'nathan', start)
    def tom_to_nathan_rocket(self):
        ## Compute the route from a tom start node to nathan by starting at
        ## nathan and recursing up, across and then down, looking for the
        ## tom = 0 attribute.
        return lambda start: Extender.cycle_up(self.vp.nathan, 'tom', start)
    @staticmethod
    def cycle_up(elm, to, start):
        """Replay, from *start*, the recorded route toward the '<to>'-tagged node."""
        ## Set attribute search function
        attrFunc = Extender.has_tom if to == 'tom' else Extender.has_nathan
        ## FIRST check the element itself
        if attrFunc(elm):  ## Switch direction but don't move
            return Extender.cycle_down(elm, to, start)
        ## SECOND look left for the "shoulder" among previous siblings
        elif Extender.check_siblings(elm.previous_siblings, attrFunc):
            return Extender.cycle_left(elm.previous_sibling, to, start).next_sibling
        ## Then look right
        elif Extender.check_siblings(elm.next_siblings, attrFunc):
            return Extender.cycle_right(elm.next_sibling, to, start).previous_sibling
        ## If nothing here, go up a level and mirror the step under *start*
        else:
            return Extender.cycle_up(elm.parent, to, start).contents[Extender.contents_position(elm)]
    @staticmethod
    def cycle_left(elm, to, start):
        attrFunc = Extender.has_tom if to == 'tom' else Extender.has_nathan
        ## If this is the shoulder, change direction but don't move
        if attrFunc(elm):
            return Extender.cycle_down(elm, to, start)
        ## If not, check the element to the left
        else:
            return Extender.cycle_left(elm.previous_sibling, to, start).next_sibling
    @staticmethod
    def cycle_right(elm, to, start):
        attrFunc = Extender.has_tom if to == 'tom' else Extender.has_nathan
        ## If this is the shoulder, change direction but don't move
        if attrFunc(elm):
            return Extender.cycle_down(elm, to, start)
        ## If not, check the element to the right
        else:
            return Extender.cycle_right(elm.next_sibling, to, start).previous_sibling
    @staticmethod
    def cycle_down(elm, to, start):
        # elm[to] is the recorded distance tagged by parent_cycle_up.
        return Extender.cycle_down_loop(elm[to], start)
    @staticmethod
    def cycle_down_loop(dist, start):
        ## Start node condition: walked the recorded distance back up from start.
        if dist == 0:
            return start
        else:
            return Extender.cycle_down_loop(dist - 1, start).parent
class Extractor(Extender):
    """Extender that runs an extraction pass: pick optimal start nodes,
    replay the recorded routes from each, and collect NewPointer results.
    """
    # Start-type sentinels; these values must match what the StartBlock
    # classes return from get_optimal_starts().
    nathanStartType = '__nathanStarts__'
    tomStartType = '__tomStarts__'
    motherStartType = '__motherStarts__'
    def __init__(self, gm, pointer):
        Extender.__init__(self, gm, pointer)
        # NOTE(review): this uses StartBlockNoMother even though a dedicated
        # ExtractorNoMother subclass exists — possibly StartBlock was
        # intended here (mother starts are otherwise unreachable); confirm.
        self.startBlock = StartBlockNoMother(gm, pointer)
        self.startsType, self.starts = self.get_starts()
        self.resultSet = NewPointerSet()
        self.test_starts()
        # Remove the temporary tree tags once all routes have been replayed.
        self.reset_tree()
    def get_starts(self):
        """Return (startType, startNodes) chosen by the start block."""
        return self.startBlock.get_optimal_starts()
    def test_starts(self):
        """Replay routes from every start node; results accumulate in resultSet."""
        return [self.test_start(self.startsType, st) for st in self.starts]
    def test_start(self, startType, start):
        """Build a NewPointer for one start node, dispatching on start type.

        Returns the NewPointer, or None for an unrecognised start type.
        NOTE(review): the local name `np` shadows the notebook's numpy alias.
        """
        if startType == Extractor.nathanStartType:  ## Nathan start: nathan shuttle, tom rocket
            np = NewPointer(start, self.nathan_shuttle, self.tom_rocket)
            self.resultSet.addPointer(np)
            return np
        if startType == Extractor.tomStartType:  ## Tom start: nathan rocket, tom shuttle
            np = NewPointer(start, self.nathan_rocket, self.tom_shuttle)
            self.resultSet.addPointer(np)
            return np
        if startType == Extractor.motherStartType:  ## Mother start: missiles for both
            np = NewPointer(start, self.nathan_missile, self.tom_missile)
            self.resultSet.addPointer(np)
            return np
    def get_result_set(self):
        """Return the accumulated NewPointerSet."""
        return self.resultSet
    def quick_report(self):
        """Print a one-line-per-pointer summary of the result set."""
        print(str(self.resultSet))
class ExtractorNoMother(Extractor):
    """Extractor variant that never uses mother-element starts.

    NOTE(review): Extractor.__init__ already builds a StartBlockNoMother,
    computes starts, runs test_starts() and resets the tree — this __init__
    then rebuilds the start block and starts and resets again, all
    redundantly (and the class attributes below duplicate Extractor's).
    Kept as-is; confirm before simplifying.
    """
    nathanStartType = '__nathanStarts__'
    tomStartType = '__tomStarts__'
    motherStartType = '__motherStarts__'
    def __init__(self, gm, pointer):
        Extractor.__init__(self, gm, pointer)
        self.startBlock = StartBlockNoMother(gm, pointer)
        self.startsType, self.starts = self.get_starts()
        self.reset_tree()
    def get_starts(self):
        """Return (startType, startNodes); mother starts are never proposed."""
        return self.startBlock.get_optimal_starts()
class NewPointerSet(object):
    """Ordered collection of NewPointer results."""

    def __init__(self):
        self.newPointers = []

    def addPointer(self, newPointer):
        """Append one NewPointer to the set."""
        self.newPointers.append(newPointer)

    def __str__(self):
        # One "nathan as tom" line per pointer, each newline-terminated.
        lines = ['%s as %s\n' % (str(entry.get_nathan()), str(entry.get_tom()))
                 for entry in self.newPointers]
        return ''.join(lines)

    def get_pointers(self):
        """Return the underlying list (not a copy)."""
        return self.newPointers
class MergeSet(NewPointerSet):
    """NewPointerSet that only admits pointers passing quality filters,
    deduplicated against both the set itself and the verified pointers.
    """
    # Word-count ceilings for a plausible name / title.
    nathanWordLimit = 12
    tomWordLimit = 20

    def __init__(self, vps):
        NewPointerSet.__init__(self)
        self.verifiedPointers = vps

    def merge_pointers(self, newPointerSet):
        """Try to add every pointer from *newPointerSet* into this set."""
        # Renamed loop variable: `np` shadowed the notebook's numpy alias.
        for candidate in newPointerSet.get_pointers():
            self.add_pointer(candidate)

    def add_pointer(self, newPointer):
        """Add *newPointer* if it passes all filters.

        Returns False when rejected, True when accepted.
        """
        if MergeSet.has_a_failed_route(newPointer):
            return False
        if MergeSet.is_nothing(newPointer.tom) or MergeSet.is_nothing(newPointer.nathan):
            return False
        if MergeSet.is_an_email(newPointer.tom) or MergeSet.is_an_email(newPointer.nathan):
            return False
        if MergeSet.is_too_long(newPointer):
            return False
        if MergeSet.fails_name_test(newPointer):
            return False
        if self.a_match_to_verified_pointers(newPointer):
            return False
        if self.a_match_in_set(newPointer):
            return False
        # All filters passed: admit into the merge set.
        self.newPointers.append(newPointer)
        return True

    @staticmethod
    def fails_name_test(pointer):
        """True when no usable first name could be extracted from the pointer.

        BUG FIX: the original `return not (firstName, lastName)` negated a
        2-tuple, which is always truthy, so this filter never rejected
        anything. Reject when no first name was found (a missing last name
        alone is acceptable — see NewPointer.get_first_last_name).
        """
        firstName, _lastName = pointer.get_first_last_name()
        return firstName is None

    @staticmethod
    def is_too_long(pointer):
        """True when the name or title exceeds its word-count ceiling."""
        if pointer.no_nathan_words() > MergeSet.nathanWordLimit:
            return True
        if pointer.no_tom_words() > MergeSet.tomWordLimit:
            return True
        return False

    @staticmethod
    def is_an_email(pointerWord):
        """True when the extracted text looks like an e-mail address."""
        if '@' in pointerWord:
            return True
        return False

    @staticmethod
    def is_nothing(pointerWord):
        """True when the extracted text contains no ASCII letters at all."""
        letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        for char in pointerWord:
            if char in letters:
                return False
        return True

    @staticmethod
    def has_a_failed_route(pointer):
        """True when either navigation route produced no result (None/empty)."""
        if not pointer.tom:
            return True
        if not pointer.nathan:
            return True
        return False

    def a_match_in_set(self, pointer):
        """True when this set already holds the same nathan or tom node (identity)."""
        for existing in self.newPointers:
            if pointer.nathan is existing.nathan:
                return True
            if pointer.tom is existing.tom:
                return True
        return False

    def a_match_to_verified_pointers(self, pointer):
        """True when a verified pointer already holds the same nathan or tom node."""
        for vp in self.verifiedPointers:
            if pointer.nathan is vp.nathan:
                return True
            if pointer.tom is vp.tom:
                return True
        return False
class NewPointer(object):
    """One extracted (nathan, tom) candidate, produced by replaying recorded
    routes from a fresh start node.

    After construction, `self.nathan` / `self.tom` hold NavigableString
    results, or None when the corresponding route failed or did not land on
    a NavigableString.
    """
    def __init__(self, start, nathanTest, tomTest):
        self.nathanRoute = nathanTest
        self.tomRoute = tomTest
        self.start = start
        ## Execute both routes immediately; failures are recorded as None.
        self.nathan = self.test_nathan()
        self.tom = self.test_tom()
        self.output = ContactSheetOutput('New Pointer For: %s start' % str(start))

    def test_nathan(self):
        """Run the nathan route from the start node; None on any failure."""
        try:
            result = self.nathanRoute(self.start)
            if type(result) is not NavigableString:
                result = None
        except Exception:
            # Narrowed from a bare `except:`; route replay is deliberately
            # best-effort, but KeyboardInterrupt should not be swallowed.
            result = None
        return result

    def test_tom(self):
        """Run the tom route from the start node; None on any failure."""
        try:
            result = self.tomRoute(self.start)
            if type(result) is not NavigableString:
                result = None
        except Exception:
            result = None
        return result

    def get_tom(self):
        return self.tom

    def get_first_last_name(self):
        """Split self.nathan into (firstName, lastName).

        Honorifics and dotted abbreviations are folded into the first name;
        the last name runs from the next full word to the end. Returns
        (None, None) when no usable word exists, and (firstName, None) when
        only one usable word exists.
        """
        passes = ['Mr', 'MR', 'Miss', 'Mrs', 'MRS', 'MS', 'Ms', 'Dr']
        nameWords = self.nathan.split(' ')
        spaceFiltered = [word for word in nameWords if word]
        ## Find the first two full words that are not honorifics/abbreviations.
        firstNamePt = None
        lastNamePt = None
        for i in range(len(spaceFiltered)):
            if (spaceFiltered[i] not in passes) and ('.' not in spaceFiltered[i]):
                firstNamePt = i
                break
        if firstNamePt is None:
            # BUG FIX: the original fell through with `i` possibly unbound
            # (empty input -> NameError) and relied on a TypeError from
            # None + 1 to reach this result.
            return None, None
        for j in range(firstNamePt + 1, len(spaceFiltered)):
            if (spaceFiltered[j] not in passes) and ('.' not in spaceFiltered[j]):
                lastNamePt = j
                break
        firstName = ' '.join(spaceFiltered[:firstNamePt + 1])
        if lastNamePt is None:
            # BUG FIX: the original sliced spaceFiltered[None:] (the whole
            # list), so a single-word name came back duplicated as both first
            # and last name and the documented "first name only" branch was
            # unreachable.
            return firstName, None
        return firstName, ' '.join(spaceFiltered[lastNamePt:])

    def get_clean_tom(self):
        ## Returns the title with runs of whitespace collapsed.
        tomWords = self.tom.split(' ')
        # Filter out empty strings produced by repeated spaces.
        return ' '.join([word for word in tomWords if word])

    def no_tom_words(self):
        """Number of whitespace-separated words in the title."""
        return NewPointer.word_count(self.tom)

    def no_nathan_words(self):
        """Number of whitespace-separated words in the name."""
        return NewPointer.word_count(self.nathan)

    @staticmethod
    def word_count(field):
        """Count non-empty space-separated words in *field*."""
        fieldWords = field.split(' ')
        # Filter out empty strings produced by repeated spaces.
        return len([word for word in fieldWords if word])

    def get_nathan(self):
        return self.nathan
class StartBlock(object):
    """Chooses the best set of candidate start nodes for an extraction pass.

    Candidates come from three anchors (nathan's parent, tom's parent, the
    mother element). Preference: the anchor with the most CSS classes (most
    specific selector); when no anchor has classes, the anchor whose tag
    name matches the fewest elements under gm (most selective name).
    """

    def __init__(self, gm, pointer):
        self.vp = pointer
        self.gm = gm
        # Tag-name matches under gm for each anchor, plus their counts.
        self.nathanNameStarts = gm.find_all(pointer.nathan.parent.name)
        self.noNathanNameStarts = len(self.nathanNameStarts)
        self.tomNameStarts = gm.find_all(pointer.tom.parent.name)
        self.noTomNameStarts = len(self.tomNameStarts)
        self.motherNameStarts = gm.find_all(pointer.get_mother_element().name)
        self.noMotherNameStarts = len(self.motherNameStarts)
        # CSS-class counts for each anchor (0 when the tag has no classes).
        self.noNathanClasses = self.get_no_nathan_classes()
        self.noTomClasses = self.get_no_tom_classes()
        self.noMotherClasses = self.get_no_mother_classes()

    def get_no_nathan_classes(self):
        """Number of CSS classes on nathan's parent tag (0 when absent)."""
        tag = self.vp.nathan.parent
        return len(tag['class']) if 'class' in tag.attrs else 0

    def get_no_tom_classes(self):
        """Number of CSS classes on tom's parent tag (0 when absent)."""
        tag = self.vp.tom.parent
        return len(tag['class']) if 'class' in tag.attrs else 0

    def get_no_mother_classes(self):
        """Number of CSS classes on the mother element (0 when absent)."""
        tag = self.vp.get_mother_element()
        return len(tag['class']) if 'class' in tag.attrs else 0

    def get_nathan_class_starts(self):
        """Elements under gm sharing nathan's parent's class list."""
        return self.gm.find_all(class_=self.vp.nathan.parent['class'])

    def get_tom_class_starts(self):
        """Elements under gm sharing tom's parent's class list."""
        return self.gm.find_all(class_=self.vp.tom.parent['class'])

    def get_mother_class_starts(self):
        """Elements under gm sharing the mother element's class list."""
        return self.gm.find_all(class_=self.vp.get_mother_element()['class'])

    def get_optimal_starts(self):
        """Return (startType, startNodes) for the most selective anchor.

        Ties break in the order mother, nathan, tom — matching the original
        if/elif chain.
        """
        mostClasses = max(self.noNathanClasses, self.noTomClasses, self.noMotherClasses)
        if mostClasses > 0:
            # Classes are available: pick the anchor with the most of them.
            if self.noMotherClasses == mostClasses:
                return '__motherStarts__', self.get_mother_class_starts()
            if self.noNathanClasses == mostClasses:
                return '__nathanStarts__', self.get_nathan_class_starts()
            return '__tomStarts__', self.get_tom_class_starts()
        # No classes anywhere: fall back to the rarest tag name.
        fewestNames = min(self.noNathanNameStarts, self.noTomNameStarts, self.noMotherNameStarts)
        if self.noMotherNameStarts == fewestNames:
            return '__motherStarts__', self.motherNameStarts
        if self.noNathanNameStarts == fewestNames:
            return '__nathanStarts__', self.nathanNameStarts
        return '__tomStarts__', self.tomNameStarts
class StartBlockNoMother(StartBlock):
    """StartBlock variant that never proposes mother-element starts."""

    def __init__(self, gm, pointer):
        StartBlock.__init__(self, gm, pointer)

    def get_optimal_starts(self):
        """Return (startType, startNodes) considering only nathan and tom.

        Same selection logic as StartBlock.get_optimal_starts, with the
        mother anchor excluded; ties break in favour of nathan.
        """
        mostClasses = max(self.noNathanClasses, self.noTomClasses)
        if mostClasses > 0:
            # Classes are available: pick the anchor with the most of them.
            if self.noNathanClasses == mostClasses:
                return '__nathanStarts__', self.get_nathan_class_starts()
            return '__tomStarts__', self.get_tom_class_starts()
        # No classes: fall back to the rarest tag name.
        if self.noNathanNameStarts == min(self.noNathanNameStarts, self.noTomNameStarts):
            return '__nathanStarts__', self.nathanNameStarts
        return '__tomStarts__', self.tomNameStarts
class Dorito(object):
    """Drives one extraction pass: select scrapable pointers, extract new
    (nathan, tom) candidates from the first one, then merge/filter them.
    """
    def __init__(self, gm, pointers):
        self.grandMotherElement = gm
        self.verifiedPointers = pointers
        self.scrapePointers = Dorito.filterPointers(pointers)
        self.result = None
        self.finalPointers = MergeSet(pointers)

    def extract(self):
        """Run the Extractor on the first scrapable pointer (the template)."""
        try:
            self.x = Extractor(self.grandMotherElement, self.scrapePointers[0])
            self.result = self.x.get_result_set()
        except IndexError:
            # No pointer survived filterPointers.
            print("No Scrapable Matches")

    def merge_result(self):
        """Merge extraction results into the filtered set and report."""
        if self.result:
            self.finalPointers.merge_pointers(self.result)
            if not str(self.finalPointers):
                print("Nothing passed Merge")
        else:
            print("Nothing to Merge")
        self.report()

    def report(self):
        """Print a summary of pointer counts at each pipeline stage."""
        print('Verified Pointers \t\t\t\t\t\t%s' % len(self.verifiedPointers))
        print('Scrape Pointers \t\t\t\t\t\t%s' % len(self.scrapePointers))
        # BUG FIX: the conditional previously wrapped the whole formatted
        # string (printing a bare 0 when there was no result) instead of
        # selecting the count; parenthesized so it chooses the number only.
        print('Start Pointers \t\t\t\t\t\t%s' % (len(self.result.get_pointers()) if self.result else 0))
        # Robustness: self.x is never set when extract() hit IndexError.
        print('Start Type \t\t\t\t%s' % (self.x.startsType if hasattr(self, 'x') else 'n/a'))
        print('Merged (Filtered) Pointers\t\t\t\t\t\t%s' % len(self.finalPointers.get_pointers()))
        print(self.finalPointers)

    @staticmethod
    def filterPointers(vps):
        """Keep only pointers whose mary and nathan checks both pass.

        BUG FIX: the original tested `pointer.nathan_here` without calling
        it; a bound method is always truthy, so the nathan filter never
        applied (usage elsewhere in the notebook shows it is a method).
        """
        return [pointer for pointer in vps if pointer.mary_here() and pointer.nathan_here()]
# --- Notebook scratch: end-to-end driver plus ad-hoc inspection cells. ---
# `orgsForToday` is defined in an earlier (unseen) cell.
org = orgsForToday[0]
vh = ContactScraperVerifier(org)
d = Dorito(vh.gm, vh.pointers)
d.extract()
d.merge_result()
# Second pass scoped to the first pointer's mother's parent instead of gm.
ds = Dorito(vh.pointers[0].get_mother_element().parent, vh.pointers)
ds.extract()
ds.merge_result()
d.finalPointers.get_pointers()[1].no_nathan_words()
"@" in d.finalPointers.get_pointers()[3].nathan
len(d.finalPointers.get_pointers())
print(d.result)
st = d.x.starts
len(st)
st
scrapePointers = Dorito.filterPointers(vh.pointers)
x = ExtractorNoMother(vh.gm, scrapePointers[0])
## No Grand Mother Single Pointer Version
y = ExtractorNoMother(vh.pointers[0].get_mother_element().parent, vh.pointers[0])
y.quick_report()
print(x.resultSet)
vh.pointers[0].nathan_here()
len(x.newPointers)
vh.write_contact_pointers()
len(vh.pointers)
vh.pointers[1].get_mother_element()
vh.noGm
vh.pointers[0].tom
vh.pointers[0].nathan
vh.pointers[0].get_mother_element() is vh.gm
vh.distinct_gm
sb = StartBlockNoMother(vh.gm, vh.pointers[0])
# NOTE(review): StartBlock defines no `noNameStarts` attribute — this line
# most likely raised AttributeError when run; stale API probe.
sb.noNameStarts
sb.get_optimal_starts()
a2 = sb.get_optimal_starts()[1]
vh.pointers[1].tom
a2
sb.get_tom_class_starts()
a = sb.get_nathan_class_starts()
testEx = Extender(vh.gm, vh.pointers[0])
n = 4
testEx.tom_shuttle(a2[n])
testEx.nathan_rocket(a2[n])
list(vh.pointers[0].tom.next_siblings)
x.starts
x.startsType
testEx.reset_tree()
len(a2)
# NOTE(review): ExtractorNoMother defines no `get_new_pointers` — stale API
# call; the current accessor is get_result_set().get_pointers().
x.get_new_pointers()
for pt in x.get_new_pointers():
    print('%s as %s' % (str(pt.get_nathan()), str(pt.get_tom())))
ts = StartBlock(vh.gm, vh.pointers[0])
ts.get_optimal_starts()
len(ts.get_optimal_starts()[1])
ns = StartBlockNoMother(vh.gm, vh.pointers[0])
ns.get_optimal_starts()
len(ns.get_optimal_starts()[1])
vh.extenders[0].reset_tree()
s = ns.get_optimal_starts()[1][0]
str(s)
# NOTE(review): NavigableString is used here but imported only on the next
# line — in the notebook these cells were evidently run out of order.
type(vh.pointers[0].tom) is not NavigableString
from bs4.element import NavigableString
min([1,2,3])
vp = vh.pointers[0]
vp.tom
vp.nathan
vp.nathan.parent.name
vp.nathan.parent['class']
vp.nathan.parent.attrs
vh.extenders[2].reset_tree()
len(vh.gm.find_all(vp.get_mother_element().name))
# NOTE(review): bare `gm` is defined only in a later cell (L2804 region);
# out-of-order notebook execution again.
gm.find_all(class_=vp.nathan.parent['class'][0])
gm.find_all
# --- Notebook scratch: inline prototype of what later became ---
# --- ContactScraperVerifier.getGrandMotherElements.           ---
## Identify Grandmother elements
pointers = vh.verifiedPointers
gmElements = []
gmMatrix = []
for i in range(len(pointers)):
    igmElements = []
    for j in range(i):
        ## Shared mother element means it is the common "Bertha" element;
        ## otherwise fall back to the lowest common parent.
        if pointers[i].get_mother_element() is pointers[j].get_mother_element():
            gm = pointers[i].get_mother_element()
        else:
            gm = pointers[i].common_parent(pointers[j])
        # Append match to the grandmother matrix row
        igmElements.append(gm)
        # Track each distinct grandmother element
        if gm not in gmElements:
            gmElements.append(gm)
    # Append matrix row
    gmMatrix.append(igmElements)
grandMotherMatrix = np.matrix(gmMatrix)
noGm = len(gmElements)
noGm
## With a single grandmother element, set it and test it below.
gm = gmElements[0]
def distinct_gm(gm, pts):
    """Return True when *gm* is not the mother element of any pointer in *pts*.

    Identity (`is`) comparison; an empty list is trivially distinct.
    """
    for candidate in pts:
        if gm is candidate.get_mother_element():
            return False
    return True
## Check whether the chosen grandmother is distinct from every mother element.
distinct = distinct_gm(gm, pointers)
distinct
pointers[1].get_mother_element()
## Tag the tree along the pattern from nathan to tom (standalone prototypes
## of the Extender tagging methods; both delegate to parent_cycle_up below).
def tag_nathans(pt):
    """Tag nathan's ancestor chain with 'nathan' = distance-from-nathan."""
    return parent_cycle_up(pt.get_mother_element(), pt.nathan.parent, 'nathan', 0)
def tag_toms(pt):
    """Tag tom's ancestor chain with 'tom' = distance-from-tom."""
    return parent_cycle_up(pt.get_mother_element(), pt.tom.parent, 'tom', 0)
def parent_cycle_up(motherElement, element, atr, num):
    """Tag *element* and each ancestor with *atr* = distance from the start.

    Stops at the element directly below *motherElement* (the "Bertha" case),
    which additionally gets a 'sib' marker recording which route it shoulders.
    NOTE(review): recursion does not terminate if *motherElement* is not an
    ancestor of *element* — confirm callers guarantee containment.
    """
    ## For the Bertha case we stop at the element right below the mother element.
    if element.parent is motherElement:
        element[atr] = num
        element['sib'] = atr
    else:
        element[atr] = num
        # BUG FIX: the original `return (motherElement, element.parent, atr,
        # num + 1)` returned a tuple instead of recursing, so only the first
        # element was ever tagged.
        return parent_cycle_up(motherElement, element.parent, atr, num + 1)
def reset_tree(pt):
    """Clear all temporary 'tom' and 'nathan' attributes under the mother element.

    NOTE(review): unlike Extender.reset_tree, this version does not clear
    'sib' markers, and it scopes to the pointer's mother element rather
    than the grandmother — confirm that is intended.
    """
    ## Clear all tom and nathan attributes
    gm = pt.get_mother_element()
    for tomTag in gm.find_all(has_tom):
        del tomTag['tom']
    for nathanTag in gm.find_all(has_nathan):
        del nathanTag['nathan']
# --- Notebook scratch: tag one pointer, collect start nodes, and probe ---
# --- hard-coded sibling/child hops before generalising them.           ---
## For each pointer: tag toms and nathans
vp = pointers[0]
tag_nathans(vp)
tag_toms(vp)
vp.tom.parent
## Rocket Extraction: Are you ready to launch?
vp.nathan.parent['class']
vp.tom.parent['class']
vp.get_mother_element()['class']
## Tom starts: elements sharing tom's parent's class list
tomStarts = vh.gm.find_all(class_=vp.tom.parent['class'])
## Nathan starts
nathanStarts = vh.gm.find_all(class_=vp.nathan.parent['class'])
## Mother starts
motherStarts = vh.gm.find_all(class_=vp.get_mother_element()['class'])
vp.tom.parent.previous_sibling
vp.tom.parent.previous_sibling
vp.nathan.parent.next_sibling
# Getting to a nathan from a tom start (hard-coded one-hop probe)
n = 5
tomStarts[n].previous_sibling.string
# Getting to a tom from a nathan start
n = 0
nathanStarts[n].next_sibling.string
# Fixed-shape prototypes of the route closures; these only work for this
# particular page layout (one sibling hop / first child).
tom_rocket = lambda startNode: startNode.next_sibling.string
n = 7
tom_rocket(nathanStarts[n])
nathan_rocket = lambda startNode: startNode.previous_sibling.string
n = 7
nathan_rocket(tomStarts[n])
nathan_missile = lambda startNode: startNode.contents[0].string
n = 7
nathan_missile(motherStarts[n])
tom_missile = lambda startNode: startNode.contents[0].next_sibling.string
n = 7
tom_missile(motherStarts[n])
## begin recursions
## We are tagged and ready
# NOTE(review): the four defs below are abandoned, syntactically invalid
# stubs (empty/dangling bodies); they were superseded by the complete
# definitions later in the notebook. Kept verbatim as notebook history.
def nathan_to_tom_rocket(pt):
    if
def tom_to_nathan_rocket(pt):
def mother_to_tom_missile(pt):
def mother_to_nathan_missile(pt):
vp.get_mother_element()
#chain demo 1
# NOTE(review): broken stub (dangling lambda body); immediately replaced by
# the working lambda on the next statement.
def next_sib_gen(n):
    return lambda n:
next_sib_gen = lambda elm, n: elm if n == 0 else next_sib_gen(elm.next_sibling, n - 1)
next_sib_gen(vp.get_mother_element().contents[0],2)
## USELESS, but good attempt.
next_sib = lambda elm: elm.next_sibling
prev_sib = lambda elm: elm.previous_sibling
parent_up = lambda elm: elm.parent
child_down = lambda elm: elm.contents[0]
string_out = lambda elm: elm.string
string_out(prev_sib(tomStarts[0]))
string_out(prev_sib)
rocketString = ".previous_sibling.string"
## We have a winner! The rocketString eval model
# NOTE(review): eval-on-a-string navigation — discarded in the final design
# in favour of pure closures; eval would be unsafe on untrusted input.
nathan_rocket_lambda = lambda elm: eval('elm' + rocketString)
## This one has promise, look at it!
# NOTE(review): broken stub (missing colon, undefined `right`); abandoned.
def left_right (arg):
    if arg == 'Left'
        return lambda elm: left_right(right)
## This one has promise, look at it! Pure higher order functions no strings
two_right = lambda elm: tratorx(1, elm).string
def tratorx(n, elm):
    if n == 0:
        return elm
    else:
        return tratorx(n-1,elm).next_sibling
## Another Winner!
two_right(nathanStarts[0])
## Time to fly!!
```
## Missile Work
```
## Helpers
def contents_position(elm):
    """Index of *elm* within its parent's contents (count of left siblings)."""
    return contents_position_loop(elm, 0)
def contents_position_loop(elm, num):
    """Walk left one sibling at a time from *elm*, accumulating onto *num*."""
    node, count = elm, num
    while list(node.previous_siblings):
        count += 1
        node = node.previous_sibling
    return count
def has_tom(tag):
    """True when *tag* carries the temporary 'tom' marker attribute."""
    return 'tom' in tag.attrs
def has_nathan(tag):
    """True when *tag* carries the temporary 'nathan' marker attribute."""
    return 'nathan' in tag.attrs
def check_siblings(sibs, attFunc):
    """True if any sibling in the iterable *sibs* satisfies *attFunc*."""
    return any(attFunc(sib) for sib in sibs)
def mother_to_tom_missile(pt):
    """Build a closure mapping a mother-element start node to its tom node.

    The closure replays tom's ancestor path (relative to the mother element)
    under the new start node, then selects the child at tom's contents index.
    """
    motherElement = pt.get_mother_element()
    if pt.tom is motherElement:
        return lambda start: start
    else:
        return lambda start: cycle_up_mother(pt.tom.parent, motherElement, start).contents[contents_position(pt.tom)]
def mother_to_nathan_missile(pt):
    """Build a closure mapping a mother-element start node to its nathan node."""
    motherElement = pt.get_mother_element()
    if pt.nathan is motherElement:
        return lambda start: start
    else:
        return lambda start: cycle_up_mother(pt.nathan.parent, motherElement, start).contents[contents_position(pt.nathan)]
def cycle_up_mother(elm, motherElement, start):
    """Mirror elm's ancestor path (up to *motherElement*) beneath *start*.

    Recurses up to the mother, then descends through *start* by the same
    contents positions on the way back down.
    """
    if elm is motherElement:
        return start
    else:
        return cycle_up_mother(elm.parent, motherElement, start).contents[contents_position(elm)]
# Missile smoke tests against the collected mother starts.
tom_missile = mother_to_tom_missile(vp)
tom_missile
n = 7
tom_missile(motherStarts[n])
nathan_missile = mother_to_nathan_missile(vp)
n = 6
nathan_missile(motherStarts[n])
```
## Rocket Time !!
```
def nathan_to_tom_rocket(pt):
    ## Compute the route from a nathan start node to tom: begin at tom and
    ## recurse up, across and then down, looking for the nathan = 0 attribute.
    return lambda start: cycle_up(pt.tom, 'nathan', start)
def tom_to_nathan_rocket(pt):
    ## Compute the route from a tom start node to nathan: begin at nathan and
    ## recurse up, across and then down, looking for the tom = 0 attribute.
    return lambda start: cycle_up(pt.nathan, 'tom', start)
def cycle_up(elm, to, start):
    """Replay, from *start*, the recorded route toward the '<to>'-tagged node.

    NOTE(review): unlike the later Extender.cycle_up, this version never
    checks *elm* itself for the marker before scanning siblings — confirm
    whether that omission was intentional or fixed later.
    """
    ## Set attribute search function
    attrFunc = has_tom if to == 'tom' else has_nathan
    ## First look left for the "shoulder" among previous siblings
    if check_siblings(elm.previous_siblings, attrFunc):
        return cycle_left(elm.previous_sibling, to, start).next_sibling
    ## Then look right
    elif check_siblings(elm.next_siblings, attrFunc):
        return cycle_right(elm.next_sibling, to, start).previous_sibling
    ## If nothing here, go up and mirror the step under *start*
    else:
        return cycle_up(elm.parent, to, start).contents[contents_position(elm)]
def cycle_left(elm, to, start):
    attrFunc = has_tom if to == 'tom' else has_nathan
    ## If this is the shoulder, change direction but don't move
    if attrFunc(elm):
        return cycle_down(elm, to, start)
    ## If not, check the element to the left
    else:
        return cycle_left(elm.previous_sibling, to, start).next_sibling
def cycle_right(elm, to, start):
    attrFunc = has_tom if to == 'tom' else has_nathan
    ## If this is the shoulder, change direction but don't move
    if attrFunc(elm):
        return cycle_down(elm, to, start)
    ## If not, check the element to the right
    else:
        return cycle_right(elm.next_sibling, to, start).previous_sibling
def cycle_down(elm, to, start):
    # elm[to] is the distance recorded by parent_cycle_up during tagging.
    return cycle_down_loop(elm[to], start)
def cycle_down_loop(dist, start):
    ## Start node condition: walked the recorded distance back up from start.
    if dist == 0:
        return start
    else:
        return cycle_down_loop(dist - 1, start).parent
## Rocket smoke tests against the collected start nodes.
tom_rocket = nathan_to_tom_rocket(vp)
n = 7
tom_rocket(nathanStarts[n])
nathan_rocket = tom_to_nathan_rocket(vp)
## Success
n = 7
nathan_rocket(tomStarts[n])
```
## Vehicle Assembly
#### nathan_to_tom_rocket
#### tom_to_nathan_rocket
#### mother_to_nathan_missile
#### mother_to_tom_missile
#### Are all recursive constructors, and are used to get to targets by adding to the 'rocketString' variable recursively. Once the base case is met, a single lambda is returned which combines the element and the string of operations in the rocket string.
### Here we go, finish strong!
```
# Final ad-hoc probes (eval-based lambda and raw node inspection).
nathan_rocket_lambda(tomStarts[0])
motherStarts[0].contents[0].string
vp.nathan.parent
vp.tom.parent
vp.mary
```
| github_jupyter |
# Introducing CartPole
Cartpole is a classic control problem from OpenAI.
https://gym.openai.com/envs/CartPole-v0/
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center.
## Load libraries
```
import gym
import matplotlib.pyplot as plt
import numpy as np
import random
# Turn warnings off to keep notebook tidy
import warnings
warnings.filterwarnings("ignore")
# Set whether enviornment will be rendered
RENDER = True
```
## Random choice
Our first baseline is random action of pushing the cart left or right.
Note: The CartPole visualisation of this demo may not work on remote servers. If this does not work set `RENDER = False` in cell above to run rest of Notebook with visualisation (re-run the cell after changing the setting).
```
def random_choice(obs):
    """
    Return a random CartPole action: 0 (push left) or 1 (push right).

    `obs` is ignored; it is accepted only so this policy has the same call
    signature as the other policy functions in this notebook.
    """
    return random.randint(0,1)
# Set up environment and run 10 episodes under the random policy,
# collecting the total reward of each episode.
env = gym.make("CartPole-v1")
totals = []
for episode in range(10):
    episode_reward = 0
    obs = env.reset()
    for step in range(200):
        if RENDER:
            env.render()
        action = random_choice(obs)
        obs, reward, done, info = env.step(action)
        episode_reward += reward
        # Pole has fallen over (or cart left bounds) when done is True
        if done:
            break
    totals.append(episode_reward)
env.close()
# Summary statistics over the 10 episode rewards.
print("Average: {0:.1f}".format(np.mean(totals)))
print("Stdev: {0:.1f}".format(np.std(totals)))
# BUG FIX: output label typo "Minumum" corrected.
print("Minimum: {0:.0f}".format(np.min(totals)))
print("Maximum: {0:.0f}".format(np.max(totals)))
```
## A simple policy
Here we use a simple policy that accelerates left when the pole is leaning to the right, and accelerates right when the pole is leaning to the left.
```
def basic_policy(obs):
    """
    A simple policy that accelerates left when the pole is leaning to the
    right, and accelerates right when the pole is leaning to the left.

    CartPole observation layout:
        obs[0] X position (0 = centre)
        obs[1] velocity (+ve = right)
        obs[2] angle (0 = upright)
        obs[3] angular velocity (+ve = clockwise)

    Returns 0 (push left) for a negative angle, otherwise 1 (push right).
    """
    return 0 if obs[2] < 0 else 1
# Set up environment and run 10 episodes under the simple angle policy,
# collecting the total reward of each episode.
env = gym.make("CartPole-v1")
totals = []
for episode in range(10):
    episode_reward = 0
    obs = env.reset()
    for step in range(200):
        if RENDER:
            env.render()
        action = basic_policy(obs)
        obs, reward, done, info = env.step(action)
        episode_reward += reward
        # Pole has fallen over (or cart left bounds) when done is True
        if done:
            break
    totals.append(episode_reward)
env.close()
# Summary statistics over the 10 episode rewards.
print("Average: {0:.1f}".format(np.mean(totals)))
print("Stdev: {0:.1f}".format(np.std(totals)))
# BUG FIX: output label typo "Minumum" corrected.
print("Minimum: {0:.0f}".format(np.min(totals)))
print("Maximum: {0:.0f}".format(np.max(totals)))
```
The next notebook will use a Deep Q Network (Double DQN) to see if we can improve on the simple policy.
| github_jupyter |
<center>
<img src="../../img/ods_stickers.jpg">
## Открытый курс по машинному обучению. Сессия № 2
### <center> Автор материала: Андрей Сухарев (@fremis)
## <center> Индивидуальный проект по анализу данных </center>
**План исследования**
- Описание набора данных и признаков
- Первичный анализ признаков
- Первичный визуальный анализ признаков
- Закономерности, "инсайты", особенности данных
- Предобработка данных
- Создание новых признаков и описание этого процесса
- Кросс-валидация, подбор параметров
- Построение кривых валидации и обучения
- Прогноз для тестовой или отложенной выборки
- Оценка модели с описанием выбранной метрики
- Выводы
Более детальное описание [тут](https://goo.gl/cJbw7V).
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
import librosa
import librosa.display
import glob
import random
# Fix the NumPy RNG seed so notebook runs are reproducible.
np.random.seed(42)
```
### 1. Описание набора данных и признаков
Почти каждый день появляются новости, в которых разные виды нейронных сетей ставят очередную планку качества в решении той или иной задачи. В частности, в последние годы большой прирост качества мы можем наблюдать в решении задачи классификации изображений при помощи искусственных нейронных сетей, а именно свёрточных нейронных сетей (CNN).
Может сложиться впечатление, что нейронные сети могут решить абсолютно любую задачу (и ведь это отчасти так: нейронная сеть является универсальным аппроксиматором при достаточном количестве слоёв), и другие модели машинного обучения вроде логистической регрессии и градиентного бустинга на решающих деревьях больше не нужны.
Однако, если вы посмотрите, что общего между задачами, решаемыми при помощи нейронных сетей, то заметите, что входными данными для них выступают:
* Изображения (классификация, стилизация и т.п.)
* Звуковые сигналы (распознавание речи и т.п.)
* Текст (обработка естественного языка)
Все эти типы данных объединяет одно: <i>локальные свойства</i>. Если в типичной задаче, решаемой xgboost'ом, вам на вход подаётся таблица признаков, порядок которых абсолютно неважен (какая разница, выставите вы сначала "пол", потом "возраст" или наоборот), то в изображениях группы соседних пикселей описывают какой-нибудь признак (например, нос котика). То же самое касается звуковых сигналов: последовательность частот в соседних звуковых интервалах может иметь свой смысл, который можно использовать для решения задачи.
В данном проекте будет рассмотрена задача классификации звуковых сигналов при помощи свёрточной нейронной сети.
Данная задача имеет множество применений, как то:
* Идентификация говорящего
* Определение интонации и настроения говорящего (восклицательная, вопросительная интонации, гнев, радость и т.д.)
* Определение животного перед микрофоном в лесу
* и др.
Но больше интересно не её самостоятельное применение, а в совокупности с другими.
Решаться задача будет на общедоступном наборе данных <a href="https://github.com/karoldvl/ESC-50">ESC-50</a>, созданном польским исследователем Karol J. Piczak на основе звуков с Freesound.
Соответственно, на входе мы имеем звуковой сигнал с заданной частотой дискретизации. Необходимо определить, к какому классу он относится.
Всю необходимую информацию о файле можно найти в его названии и пути:
```
category_id - category_name/fold_number-Freesound_clip_ID-take_letter.ogg
```
### 2. Первичный анализ данных
В данном наборе находится 2000 звуковых файлов, размеченных по 50 классам: лай собаки, плач ребёнка, стук в дверь и т.д. Полное описание всех классов можно найти на странице с набором данных. Набор уже разделён на 5 фолдов. Набор является сбалансированным, т.е. на каждый класс в каждом фолде приходится одинаковое число записей.
Частота дискретизации файлов: 44 кГц, содержатся они в контейнере ogg.
Посмотрим на примеры данных.
```
# Collect every ESC-50 clip path (dataset layout: ESC-50/<category>/<clip>.ogg).
all_recordings = glob.glob('ESC-50/*/*.ogg')
def plot_wave(audio_name):
    """Load one clip with librosa and plot its waveform, titled by path."""
    x, sr = librosa.load(audio_name)
    plt.figure(figsize=(12, 4))
    plt.title(audio_name)
    librosa.display.waveplot(x, sr=sr);
# Three arbitrary sample clips for visual inspection.
plot_wave(all_recordings[42])
plot_wave(all_recordings[300])
plot_wave(all_recordings[1488])
```
Можно увидеть, что звуковые сигналы не всегда имеют настоящую продолжительность в 5 секунд. При разрезании на кусочки данный момент будет учитываться, что описано в разделе с предобработкой.
Также можно увидеть, что амплитуда колебаний по модулю не превосходит 1, значит дополнительная нормализация не требуется.
### 3 и 4. Первичный визуальный анализ данных. Инсайты, найденные зависимости
Визуализируем логарифмированные мел-спектрограмммы звуковых сигналов, которые будут служить основой для нашей модели
```
# Mel-spectrogram parameters: 60 mel bands, 1024-sample FFT window,
# 512-sample hop.
bands = 60
FFT_WINDOW = 1024
HOP = 512
def plot_spec(file_name):
    """Plot the log-scaled mel spectrogram of one audio file.

    librosa.load resamples to 22050 Hz by default, matching the sr passed
    to melspectrogram below.
    """
    X, sr = librosa.load(file_name)
    S = librosa.feature.melspectrogram(X,
                                       n_fft=FFT_WINDOW, hop_length=HOP,
                                       sr=22050, n_mels=bands)
    # dB scale relative to the peak power.
    logS = librosa.power_to_db(S, ref=np.max)
    plt.figure(figsize=(15, 7))
    plt.title(file_name)
    librosa.display.specshow(logS,
                             y_axis='mel',
                             x_axis='time')
    plt.colorbar(format='%+2.0f dB')
    plt.show()
```
Посмотрим на примеры лая собак.
```
dog1_name = os.path.join('ESC-50', '101 - Dog', '1-30226-A.ogg')
dog2_name = os.path.join('ESC-50', '101 - Dog', '1-30344-A.ogg')
dog3_name = os.path.join('ESC-50', '101 - Dog', '1-32318-A.ogg')
plot_spec(dog1_name)
plot_spec(dog2_name)
plot_spec(dog3_name)
```
Можно увидеть, что лай собак имеет чёткие всплески короткой продолжительности практически во всём спектре. Раз это можно заметить глазом, значит и сетка это вполне способна увидеть.
```
laugh1_name = os.path.join('ESC-50', '307 - Laughing', '1-1791-A.ogg')
laugh2_name = os.path.join('ESC-50', '307 - Laughing', '1-30039-A.ogg')
laugh3_name = os.path.join('ESC-50', '307 - Laughing', '1-30043-A.ogg')
plot_spec(laugh1_name)
plot_spec(laugh2_name)
plot_spec(laugh3_name)
```
Как можно увидеть, даже визуальный анализ позволяет нам определить, что за класс у данной записи. У данных видна чёткая локальная структура, а следовательно, почему бы не попытаться использовать методы, которые применяются для анализа изображений?
Более подробный визуальный анализ набора данных ESC-50 вы можете найти в <a href="http://nbviewer.jupyter.org/github/karoldvl/paper-2015-esc-dataset/blob/master/Notebook/ESC-Dataset-for-Environmental-Sound-Classification.ipynb">тетрадке автора</a>. Мог бы продублировать его работу, но не буду.
### 5. Выбор метрики
Набор данных ESC-50 идеально сбалансирован, т.е. для каждого класса существует одинаковое количество файлов длиной до 5 секунд, и они поровну разбиты на 5 фолдов. Однако, после того, как эти файлы будут разбиты на фрагменты, количество станет неравным из-за того, что в каких-то классах содержится больше тишины, а в каких-то меньше, но всё равно будет примерно одинаковым.
Поэтому для оценки качества модели будут использоваться 2 метрики: accuracy (доля правильных ответов), т.к. она адекватно оценивает качество на сбалансированных выборках, а также F1-score, которая позволяет сгладить неравенство классов и взвешенно оценить работу на основе точности и полноты (а также weighted вариант позволяет придать разные веса классам в зависимости от их численности). Будут приведены метрики как для отдельных фрагментов, так и для файлов целиком.
AUC-ROC не получается применить на данном наборе ESC-50, т.к. из-за небольшого количества примеров (целых звуковых файлов) в одном фолде (8 штук) часто получается, что у пары классов и точность, и полнота составляет 0, а AUC-ROC в реализации scikit-learn не умеет работать с такими значениями. Возможно, это какой-то баг, потому что в этом случае выдаётся предупреждение, что количество классов меньше, чем количество лейблов, а в некоторых случаях всё проходит нормально, и при обработке других наборов данных тем же кодом всё работает нормально.
### 6. Выбор модели
Рассматриваемый набор данных находится на грани того, чтобы назвать его маленьким для глубоких сетей. Например, набора ESC-10 уже будет недостаточно, модель быстро переобучится на небольшом наборе данных (его можно искусственно увеличить путём сжатий/растяжений, увеличения/уменьшения тональности и других приёмов).
Поэтому для данной задачи соберём относительно неглубокую сеть, больше всего похожую на <a href="https://en.wikipedia.org/wiki/AlexNet">AlexNet</a>. Более глубокие сети не дают значимого улучшения производительности модели на данном наборе, зато сильно замедляют обучение и требуют более упорной борьбы с переобучением.
```
import os
from collections import Counter
import numpy as np
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.models import load_model
from keras.utils import to_categorical
from sklearn.metrics import f1_score, accuracy_score, classification_report
import shelve
```
Переопределим масштабирование признаков, т.к. стандартный пакет из scikit-learn не умеет в многомерные данные.
```
EPS = 1e-6
class StandardScaler:
    """Feature standardizer that, unlike scikit-learn's, also handles 4-D
    (samples, height, width, channels) arrays by normalizing each channel
    with its own mean/std. Lower-dimensional input uses global statistics.
    """
    def __init__(self):
        # Per-channel statistics for 4-D input
        self.means = []
        self.stds = []
        # Global statistics for any other shape
        self.mean = None
        self.std = None
    def fit(self, X):
        """Compute normalization statistics from X.

        Resets previously fitted per-channel statistics first — the original
        implementation appended on every call, so refitting accumulated stale
        means/stds and transform() used the wrong values.
        """
        self.means = []
        self.stds = []
        if len(X.shape) == 4:
            channels = X.shape[-1]
            for i in range(channels):
                self.means.append(X[:, :, :, i].mean())
                self.stds.append(X[:, :, :, i].std())
        else:
            self.mean = X.mean()
            self.std = X.std()
    def transform(self, X, copy=True):
        """Standardize X with the fitted statistics.

        With copy=False the 4-D path normalizes in place; EPS guards against
        division by zero for constant channels.
        """
        if copy:
            res = np.copy(X)
        else:
            res = X
        if len(X.shape) == 4:
            channels = X.shape[-1]
            for i in range(channels):
                res[:, :, :, i] = (X[:, :, :, i]-self.means[i])/(self.stds[i]+EPS)
        else:
            res = (X-self.mean)/(self.std+EPS)
        return res
    def fit_transform(self, X, copy=True):
        """Convenience: fit on X, then return the transformed X."""
        self.fit(X)
        return self.transform(X, copy)
```
Базовая логика обучения модели
```
class BaseModel:
    """Common training / evaluation / persistence logic for Keras sound classifiers.

    Subclasses implement get_keras_model(). Trained weights, the training
    history and the fitted scaler are cached on disk under
    trained_models/<model name>/<dataset name>/<extraction type>/.
    """
    def __init__(self, name='model', verbose=1):
        self.model = None
        self.history = None
        self.name = name
        self.models_path = 'trained_models'
        self.scaler = StandardScaler()
        self.verbose = verbose
    def get_keras_model(self, input_shape, classes_count):
        """Build and compile the Keras model; must be overridden by subclasses."""
        raise NotImplementedError('model is not specified')
    def get_model_save_dir(self, dataset):
        """Return (creating it if needed) the cache directory for this model/dataset."""
        path = os.sep.join((self.models_path, self.name,
                            dataset.name, dataset.extraction_type))
        if not os.path.exists(path):
            os.makedirs(path)
        return path
    def get_model_path(self, dataset, test_folds):
        """Weights (.h5) file path; the file name encodes the held-out folds."""
        save_dir = self.get_model_save_dir(dataset)
        folds_suff = '_'.join(test_folds)
        model_path = save_dir+os.sep+folds_suff+'.h5'
        return model_path
    def get_meta_path(self, dataset, test_folds):
        """Shelve file path storing the history and scaler for these folds."""
        save_dir = self.get_model_save_dir(dataset)
        folds_suff = '_'.join(test_folds)
        meta_path = save_dir+os.sep+folds_suff+'.shelve'
        return meta_path
    def try_load(self, dataset, test_folds, input_shape=None, classes_count=None):
        """Load a previously trained model plus its meta info.

        Returns True on success, False when no cached weights exist.
        """
        model_path = self.get_model_path(dataset, test_folds)
        meta_path = self.get_meta_path(dataset, test_folds)
        if os.path.exists(model_path):
            if input_shape is None or classes_count is None:
                self.model = load_model(model_path)
            else:
                # This is due to bug in Keras with loading functional models:
                # rebuild the architecture and load the weights only.
                self.model = self.get_keras_model(input_shape, classes_count)
                self.model.load_weights(model_path)
            with shelve.open(meta_path) as meta:
                if self.verbose:
                    print('Loading meta info')
                self.history = meta['history']
                self.scaler = meta['scaler']
            return True
        return False
    def _predict_by_features(self, X, voting='prob'):
        """Whole-file class prediction from the file's fragment batch X.

        'prob' sums fragment probabilities; 'major' takes a majority vote
        over per-fragment class predictions.
        """
        if voting == 'prob':
            probs = self.model.predict(X, verbose=0)
            return np.sum(probs, axis=0).argmax()
        elif voting == 'major':
            classes = self.model.predict_classes(X, verbose=0)
            c = Counter(classes)
            return c.most_common(1)[0][0]
        else:
            raise NotImplementedError('unknown voting:', voting)
    def _predict_proba(self, X):
        """Normalized class-probability vector for a whole file (sum over fragments)."""
        probs = self.model.predict(X, verbose=0)
        res = np.sum(probs, axis=0)
        res /= np.sum(res)
        return res
    def _validate_on_fold(self, X_fold, y_fold,
                          dataset, fold_name, voting='prob'):
        """Compute whole-file predictions on one fold using the per-file
        fragment index ranges recorded in dataset.fold_feat_idx."""
        fold_idx = dataset.fold_feat_idx[fold_name]
        y_true = []
        y_pred = []
        y_pred_proba = []
        for idx_from, idx_to in fold_idx:
            X = X_fold[idx_from:idx_to]
            # All fragments of one file share a label; take the first.
            y = y_fold[idx_from]
            y_pred_s = self._predict_by_features(X, voting)
            y_pred_pr = self._predict_proba(X)
            y_true.append(y)
            y_pred.append(y_pred_s)
            y_pred_proba.append(y_pred_pr)
        y_true_oh = to_categorical(y_true, num_classes=dataset.metadata.classes_count)
        results = TestResults(y_true, y_pred, y_true_oh, y_pred_proba,
                              classes=dataset.metadata.classes)
        return results
    def train_on(self, dataset, train_folds, test_folds, epochs=10,
                 batch_size=256, validate=True, min_lr=0.0002, ignore_trained=False):
        """Train the model (or reuse a cached one) and optionally validate.

        The scaler is fitted on the training folds only. Returns a
        TestResults with whole-file metrics plus fragment-level accuracy
        when validate=True (otherwise returns None).
        """
        X_train, X_test, y_train, y_test = dataset.train_test_split(train_folds, test_folds)
        X_train = self.scaler.fit_transform(X_train, copy=False)
        X_test = self.scaler.transform(X_test, copy=False)
        y_train_oh = to_categorical(y_train, num_classes=dataset.metadata.classes_count)
        y_test_oh = to_categorical(y_test, num_classes=dataset.metadata.classes_count)
        if self.verbose:
            print('Train set statistics:')
            unique, counts = np.unique(y_train, return_counts=True)
            for i in range(len(unique)):
                print('{0} - {1} segments'.format(unique[i], counts[i]))
            print('Test set statistics:')
            unique, counts = np.unique(y_test, return_counts=True)
            for i in range(len(unique)):
                print('{0} - {1} segments'.format(unique[i], counts[i]))
        input_shape = X_train.shape[1:]
        classes_count = dataset.metadata.classes_count
        if not ignore_trained and self.try_load(dataset, test_folds, input_shape, classes_count):
            if self.verbose:
                print('Found already trained model for this dataset and folds')
        else:
            self.model = self.get_keras_model(input_shape, classes_count)
            model_path = self.get_model_path(dataset, test_folds)
            # Keep only the best weights (by validation accuracy) on disk.
            best_saver = ModelCheckpoint(model_path,
                                         monitor='val_acc',
                                         verbose=1, save_best_only=True)
            reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75,
                                          patience=7, min_lr=min_lr, verbose=1)
            early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001,
                                       patience=25, verbose=1)
            callbacks = [best_saver, reduce_lr, early_stop]
            history = self.model.fit(X_train, y_train_oh,
                                     validation_data=(X_test, y_test_oh),
                                     epochs=epochs,
                                     batch_size=batch_size,
                                     callbacks=callbacks)
            self.history = history.history
            meta_path = self.get_meta_path(dataset, test_folds)
            # Saving all meta information about this training
            with shelve.open(meta_path) as meta:
                meta['history'] = self.history
                meta['scaler'] = self.scaler
        if validate:
            if self.verbose:
                print('Reloading best model to validate')
            self.try_load(dataset, test_folds, input_shape, classes_count)
            # NOTE(review): whole-file metrics use the first test fold only —
            # fine here since test_folds always contains one fold.
            results = self._validate_on_fold(X_test, y_test, dataset,
                                             fold_name=test_folds[0], voting='prob')
            frag_acc = self.model.evaluate(X_test, y_test_oh, batch_size=128, verbose=0)[1]
            results.frag_acc = frag_acc
            return results
    def predict_file(self, file_name, dataset, return_probs=False):
        """Predict the class (or probability vector) of a single audio file.

        Assumes the file yields at least one non-silent segment
        (extract_features not None) — TODO confirm for arbitrary input.
        """
        X, sample_rate, _ = dataset.feature_extractor.extract_features(file_name)
        X = self.scaler.transform(X)
        if return_probs:
            return self._predict_proba(X)
        else:
            return self._predict_by_features(X)
```
Определение самой AlexNet-подобной сети. В качестве регуляризации используется <a href="https://en.wikipedia.org/wiki/Dropout_(neural_networks)">Dropout</a>. Обучение производится при помощи модификации стохастического градиентного спуска — <a href="https://arxiv.org/abs/1412.6980">алгоритма Адама</a>. Вместо полносвязных слоёв в конце используется <a href="https://arxiv.org/pdf/1312.4400.pdf">Global average pooling</a>. Модель реализована при помощи библиотеки Keras, в качестве бэкенда к которой используется Tensorflow.
```
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Activation, Input
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import SGD, Adam, Adadelta
def add_conv_relu_pool_block(model, filters, kernel_size, pool_size=(2, 2), padding='same',
                             batch_norm=True, dropout=None, input_shape=None):
    """Append a Conv2D -> ReLU -> MaxPool (-> BatchNorm) (-> Dropout) stack to *model*.

    input_shape is only passed to Conv2D for the first layer of a Sequential
    model; the two previously duplicated branches are collapsed into one call.
    """
    conv_kwargs = dict(kernel_size=kernel_size, padding=padding,
                       kernel_regularizer=l2(0.001))
    if input_shape is not None:
        conv_kwargs['input_shape'] = input_shape
    model.add(Conv2D(filters, **conv_kwargs))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size, padding=padding))
    if batch_norm:
        model.add(BatchNormalization())
    if dropout:
        model.add(Dropout(dropout))
class AlexLikeModel(BaseModel):
    """Shallow AlexNet-style CNN: four conv/pool blocks + global average pooling."""
    def __init__(self, lr=0.001, suff=''):
        # suff distinguishes cached models trained with different hyperparameters.
        super().__init__(name='alex_like'+suff)
        self.lr = lr
        self.suff = suff
    def get_keras_model(self, input_shape, classes_count):
        """Build and compile the Keras model for the given input shape / class count."""
        model = Sequential()
        add_conv_relu_pool_block(model, 32, (7, 7), input_shape=input_shape, dropout=0.3)
        add_conv_relu_pool_block(model, 64, (5, 5), dropout=0.3)
        add_conv_relu_pool_block(model, 128, (3, 3), dropout=0.3)
        add_conv_relu_pool_block(model, 256, (3, 3), dropout=0.3)
        model.add(GlobalAveragePooling2D())
        model.add(Dense(classes_count, activation='softmax'))
        adam = Adam(lr=self.lr, decay=2e-5)
        # NOTE(review): categorical hinge on top of a softmax output is an
        # unusual pairing (crossentropy is the common choice) — presumably a
        # deliberate experiment; confirm before changing.
        model.compile(loss='categorical_hinge',
                      optimizer=adam,
                      metrics=['accuracy'])
        return model
```
### 7. Предобработка данных
Хотя исходные звуковые файлы имеют более-менее фиксированную продолжительность в 5 секунд, в произвольном наборе данных это может быть не так.
Поэтому с целью приведения признаков в единой размерности, а также для увеличения набора данных, разобьём каждый исходный звуковой сигнал на несколько маленьких. При этом соседние фрагменты будут пересекаться между собой. В файлах будут встречаться фрагменты, которые содержат преимущественно тишину (это можно считать за пропуски), поэтому такие фрагменты не добавляются в выборки. Если их не удалять, то во время обучения сеть натыкается на них и сходит с ума: сеть начинает предсказывать что угодно как один единственный класс.
<b>Важно</b>: пересечённые фрагменты не попадают в разные выборки, т.к. иначе будет происходить дикое переобучение (сталкивался с этим в разработке этого решения).
Предсказание класса производится для каждого отдельного фрагмента, после чего вероятности принадлежности классам каждого фрагмента из файла складываются и нормируются, что и является итоговым предсказанием.
```
import numpy as np
import librosa
class SoundFeatureExtractor:
    """Base class that slices raw audio into fixed-length, overlapping segments."""
    def __init__(self, metadata, verbose=1):
        self.metadata = metadata
        # When True, short trailing segments (> window/2) are padded and kept.
        self.extend = False
        # Time-stretch factors for augmentation; None disables augmentation.
        self.stretch_rates = None
        self.sample_rate = 22050
        # Adjacent segments advance by window_size / segment_overlap samples.
        self.segment_overlap = 2
        self.verbose = verbose
    def sound_segments(self, sound_clip, window_size,
                       stretch=True):
        """Yield window_size-sample segments with 50% overlap.

        `stretch` is unused here; it exists for signature compatibility with
        aug_sound_segments.
        """
        start = 0
        while start < len(sound_clip):
            segment = sound_clip[start:start+window_size]
            if len(segment) == window_size:
                yield segment
            elif self.extend and len(segment) > window_size//2:
                # Pad the trailing partial segment up to the full window length.
                ext_segment = librosa.util.fix_length(segment, window_size)
                yield ext_segment
            start += window_size//self.segment_overlap
    def aug_sound_segments(self, sound_clip, window_size,
                           stretch=True):
        """Yield segments of time-stretched copies of the clip (augmentation).

        Yields nothing when stretching is disabled or stretch_rates is None.
        """
        clips = []
        if stretch and self.stretch_rates is not None:
            for rate in self.stretch_rates:
                clips.append(librosa.effects.time_stretch(sound_clip, rate))
        for clip in clips:
            yield from self.sound_segments(clip, window_size, stretch)
    def read_from_file(self, file_path):
        """Load an audio file with librosa, resampling to self.sample_rate."""
        if self.verbose:
            print('Reading file:', file_path)
        X, sample_rate = librosa.load(file_path, sr=self.sample_rate)
        if self.verbose:
            print('Extracted sound info of duration:', librosa.get_duration(X, sr=sample_rate))
        return X, sample_rate
    def extract_wave(self, file_name):
        """Return (samples, sample_rate) for the given file."""
        X, sample_rate = self.read_from_file(file_name)
        return X, sample_rate
```
Поскольку исходный звуковой сигнал представляет собой зависимость амплитуды от времени, а этой информации недостаточно, то воспользуемся стандартной техникой получения частотно-временных характеристик — преобразованием Фурье, а конкретнее оконным преобразованием Фурье. Затем полученный результат приведём к мел-шкале, а потом логарифмируем.
Таким образом, мы получим стандартное представление звукового сигнала для обработки моделями машинного обучения — логарифмированную мелспектрограмму. Ещё одним похожим представлением является MFCC (мел-частотные кепстральные коэффициенты), которое показывает примерно то же самое качество в нейросетевых моделях, и поэтому оно не рассматривается в данном проекте.
Также записи приводятся к более низкой частоте дискретизации 16 кГц, что позволяет убрать избыточные данные и лучше бороться с переобучением.
Итоговые признаки: логарифмированная мел-спектрограмма с дельта-коэффициентами, частота дискретизации 16 кГц, длина окна Фурье: 30 мс с пересечением пополам.
```
BANDS = 60
FRAMES = 41
FFT_WINDOW = 2048
HOP_LENGTH = 512
class LogMelSpecExtractor(SoundFeatureExtractor):
    """Extracts log-scaled mel-spectrogram segments from audio files,
    skipping near-silent segments."""
    def __init__(self, metadata, bands=BANDS, frames=FRAMES,
                 fft_window=FFT_WINDOW, hop_length=HOP_LENGTH,
                 sample_rate=22050, extend=False, stretch_rates=None,
                 verbose=1):
        super().__init__(metadata, verbose)
        self.bands = bands
        self.frames = frames
        self.fft_window = fft_window
        self.hop_length = hop_length
        # Segment length in samples, chosen so that each segment produces
        # `frames` STFT frames (assumes hop == fft_window/2 — TODO confirm).
        self.window_size = fft_window*(frames-1)//self.segment_overlap
        self.extend = extend
        self.sample_rate = sample_rate
        self.stretch_rates = stretch_rates
    def _should_skip_silent_segment(self, logspec):
        """Treat near-constant spectrograms (std < 0.2 dB) as silence and drop them."""
        std = np.std(logspec)
        if std < 0.2:
            if self.verbose:
                print('Skipping segment as it is almost silence. Mean: %.6f, Std: %.6f'%(logspec.mean(), std))
            return True
        return False
    def _extract_feature(self, sound_segment):
        """Return a (bands, frames, 1) log-mel spectrogram, or None for silence."""
        melspec = librosa.feature.melspectrogram(sound_segment, n_fft=self.fft_window,
                                                 hop_length=self.hop_length,
                                                 sr=self.sample_rate, n_mels=self.bands)
        # BUG FIX: librosa.logamplitude was removed from librosa (>= 0.6);
        # power_to_db is its direct replacement and matches the exploratory
        # plotting code earlier in this notebook.
        logspec = librosa.power_to_db(melspec, ref=np.max)
        if self._should_skip_silent_segment(logspec):
            return None
        return logspec.reshape(self.bands, self.frames, 1)
    def extract_features(self, file_name, stretch=True):
        """Return (features, sample_rate, original_segment_count) for a file.

        Original (non-augmented) segments come first in the stacked array,
        followed by augmented ones; returns None if every segment was silent.
        """
        wave, sample_rate = self.extract_wave(file_name)
        features = []
        orig_features_count = 0
        for sound_segment in self.sound_segments(wave, self.window_size,
                                                 stretch=stretch):
            X = self._extract_feature(sound_segment)
            if X is not None:
                features.append(X)
                orig_features_count += 1
        aug_features_count = 0
        for sound_segment in self.aug_sound_segments(wave, self.window_size,
                                                     stretch=stretch):
            X = self._extract_feature(sound_segment)
            if X is not None:
                features.append(X)
                aug_features_count += 1
        if not features:
            if self.verbose:
                print('Extracted no sound segments from %s' % file_name)
            return None
        return np.stack(features), sample_rate, orig_features_count
```
Общая логика для обработки наборов данных, в которой также производится сериализация обработанных результатов:
```
import os
from collections import defaultdict
import pickle
import numpy as np
class Metadata:
    """Base class describing a dataset's label space.

    Subclasses fill `classes` and implement get_class().
    """
    def __init__(self):
        self.classes = []
    @property
    def classes_count(self):
        """Number of known classes."""
        return len(self.classes)
    def get_class(self, file_name):
        """Map a file path to its numeric class label; must be overridden.

        The original stub silently returned None; raising makes a missing
        override fail loudly, consistent with BaseModel.get_keras_model.
        """
        raise NotImplementedError('get_class is not specified')
class Dataset:
    """Holds per-fold feature matrices and labels, plus disk caching and
    train/test splitting helpers.

    Features are cached under features/<name>/<extraction_type>/.
    fold_feat_idx maps a fold name to (start, end) fragment-index ranges,
    one per source file, used to regroup fragment predictions per file.
    """
    def __init__(self, name, metadata, extraction_type, verbose=1):
        self.X_dict = {}
        self.y_dict = {}
        self.name = name
        self.metadata = metadata
        self.fold_list = []
        self.folds_count = 0
        self.fold_feat_idx = defaultdict(list)
        self.verbose = verbose
        self.extraction_type = extraction_type
        # BUG FIX: the second test was a plain `if`, so requesting
        # 'logmelspec_30ms_16khz' always fell through to the `else`
        # branch and raised NotImplementedError. Must be `elif`.
        if extraction_type == 'logmelspec_30ms_16khz':
            self.feature_extractor = LogMelSpecExtractor(self.metadata, fft_window=480, hop_length=240,
                                                         sample_rate=16000)
        elif extraction_type == 'logmelspec_delta_30ms_16khz':
            self.feature_extractor = LogMelSpecDeltaExtractor(self.metadata, fft_window=480, hop_length=240,
                                                              sample_rate=16000)
        else:
            raise NotImplementedError('unknown extraction type: %s'%extraction_type)
    def get_features_path(self):
        """Directory where cached features for this dataset/extraction live."""
        return 'features'+os.sep+self.name+os.sep+self.extraction_type
    def get_save_file_names(self, fold_name):
        """Return the (X, y) .npy cache file names for a fold."""
        features_path = self.get_features_path()
        X_file_name = features_path+os.sep+'_'.join((fold_name, 'X.npy'))
        y_file_name = features_path+os.sep+'_'.join((fold_name, 'y.npy'))
        return X_file_name, y_file_name
    def load(self):
        """Load the fold index metadata; returns False when no cache exists."""
        features_path = self.get_features_path()
        fold_idx_path = features_path+os.sep+'fold_idx.pickle'
        if not os.path.exists(fold_idx_path):
            return False
        with open(fold_idx_path, 'rb') as f:
            self.fold_feat_idx = pickle.load(f)
        if self.verbose:
            print('Loaded fold index')
        return True
    def save(self, overwrite=False):
        """Serialize per-fold features and the fold index to disk.

        Existing files are kept unless overwrite=True.
        """
        print('Saving dataset features')
        features_path = self.get_features_path()
        if not os.path.exists(features_path):
            print('Creating features directory')
            os.makedirs(features_path)
        for fold_name in sorted(self.X_dict):
            X_file_name, y_file_name = self.get_save_file_names(fold_name)
            if not overwrite and os.path.exists(X_file_name) and os.path.exists(y_file_name):
                print('Features for {0} already exist'.format(fold_name))
                continue
            X = self.X_dict[fold_name]
            y = self.y_dict[fold_name]
            print('Saving features for {0} to file'.format(fold_name))
            np.save(X_file_name, X)
            np.save(y_file_name, y)
        fold_idx_path = features_path+os.sep+'fold_idx.pickle'
        if overwrite or not os.path.exists(fold_idx_path):
            with open(fold_idx_path, 'wb') as f:
                pickle.dump(self.fold_feat_idx, f)
            if self.verbose:
                print('Saved fold index metadata')
    def train_test_split(self, train_folds, test_folds):
        """Concatenate the named folds into train/test arrays.

        None defaults to all-but-last folds (train) / the last fold (test).
        """
        if train_folds is None:
            train_folds = self.fold_list[:-1]
        if test_folds is None:
            test_folds = [self.fold_list[-1]]
        Xs = []
        Ys = []
        for fold in train_folds:
            Xs.append(self.X_dict[fold])
            Ys.append(self.y_dict[fold])
        X_train = np.concatenate(Xs)
        y_train = np.hstack(Ys)
        Xts = []
        Yts = []
        for fold in test_folds:
            Xts.append(self.X_dict[fold])
            Yts.append(self.y_dict[fold])
        X_test = np.concatenate(Xts)
        y_test = np.hstack(Yts)
        return X_train, X_test, y_train, y_test
    def kfold(self, limit_folds=None):
        """Yield (X_train, X_test, y_train, y_test, train_folds, test_folds)
        for up to limit_folds leave-one-fold-out splits."""
        if limit_folds is None:
            limit_folds = self.folds_count
        for i in range(min(limit_folds, self.folds_count)):
            train_folds = self.fold_list[:i]+self.fold_list[i+1:]
            test_folds = [self.fold_list[i]]
            X_train, X_test, y_train, y_test = self.train_test_split(train_folds, test_folds)
            yield X_train, X_test, y_train, y_test, train_folds, test_folds
    def kfold_names(self, use_heldout=False):
        """Yield (train_folds, test_folds) name pairs.

        With use_heldout=True the last fold is excluded from every split
        entirely, reserving it as a final hold-out set.
        """
        if use_heldout:
            max_fold = self.folds_count-1
        else:
            max_fold = self.folds_count
        for i in range(max_fold):
            train_folds = self.fold_list[:i]+self.fold_list[i+1:max_fold]
            test_folds = [self.fold_list[i]]
            yield train_folds, test_folds
SOUND_DIR = 'ESC-50'
class ESC50Metadata(Metadata):
    """Label-space description of the ESC-50 dataset (50 classes, folder-per-class)."""
    def __init__(self):
        super().__init__()
        # Class directory names, ordered by their numeric prefix.
        self.classes = ['101 - Dog', '102 - Rooster', '103 - Pig', '104 - Cow', '105 - Frog',
                        '106 - Cat', '107 - Hen', '108 - Insects (flying)', '109 - Sheep',
                        '110 - Crow', '201 - Rain', '202 - Sea waves', '203 - Crackling fire',
                        '204 - Crickets', '205 - Chirping birds', '206 - Water drops',
                        '207 - Wind', '208 - Pouring water', '209 - Toilet flush',
                        '210 - Thunderstorm', '301 - Crying baby', '302 - Sneezing', '303 - Clapping',
                        '304 - Breathing', '305 - Coughing', '306 - Footsteps', '307 - Laughing',
                        '308 - Brushing teeth', '309 - Snoring', '310 - Drinking - sipping',
                        '401 - Door knock', '402 - Mouse click', '403 - Keyboard typing',
                        '404 - Door - wood creaks', '405 - Can opening', '406 - Washing machine',
                        '407 - Vacuum cleaner', '408 - Clock alarm', '409 - Clock tick',
                        '410 - Glass breaking', '501 - Helicopter', '502 - Chainsaw', '503 - Siren',
                        '504 - Car horn', '505 - Engine', '506 - Train', '507 - Church bells',
                        '508 - Airplane', '509 - Fireworks', '510 - Hand saw']
        # Reverse lookup: directory name -> numeric label.
        self.classes_dict = dict(zip(self.classes, range(len(self.classes))))
    def get_class(self, file_name):
        """Map a clip path to its numeric label via the parent directory name."""
        class_dir = file_name.split(os.sep)[-2]
        return self.classes_dict[class_dir]
class ESC50Dataset(Dataset):
    """ESC-50 dataset: 2000 ogg clips in 50 class folders, pre-split into 5 folds."""
    def __init__(self, extraction_type):
        super().__init__('esc50', ESC50Metadata(), extraction_type)
        self.file_ext = '*.ogg'
        self.fold_list = ['fold1', 'fold2', 'fold3', 'fold4', 'fold5']
        self.folds_count = 5
    def _get_file_paths(self, sound_dir):
        """Yield paths of all sound files, grouped by class directory.

        NOTE(review): relies on `glob` being imported in an earlier cell.
        """
        for class_name in self.metadata.classes:
            pattern = os.path.join(sound_dir, class_name, self.file_ext)
            for file_name in glob.iglob(pattern):
                yield file_name
    def _get_file_fold_num(self, file_path):
        """Fold number is the leading integer of the file name (e.g. '1-30226-A.ogg')."""
        return int(file_path.split(os.sep)[-1].split('-')[0])
    def load(self, sound_dir=SOUND_DIR):
        """Load cached per-fold features, or extract them from the raw audio."""
        super().load()
        folds_loaded = 0
        for fold in self.fold_list:
            X_file_name, y_file_name = self.get_save_file_names(fold)
            if os.path.exists(X_file_name) and os.path.exists(y_file_name):
                print('Loading features from files:', X_file_name, y_file_name)
                X_fold = np.load(X_file_name)
                y_fold = np.load(y_file_name)
                print('Loaded features. Shapes:', X_fold.shape, y_fold.shape)
                folds_loaded += 1
                self.X_dict[fold] = X_fold
                self.y_dict[fold] = y_fold
        if folds_loaded == self.folds_count:
            return
        # Extract features from scratch for every file, fold by fold.
        # NOTE(review): a partial cache triggers full re-extraction of all
        # folds — presumably acceptable; confirm before relying on it.
        fold_X = defaultdict(list)
        fold_y = defaultdict(list)
        fold_last_idx = defaultdict(int)
        for file_name in self._get_file_paths(sound_dir):
            file_fold = self._get_file_fold_num(file_name)
            fold_name = 'fold%d' % file_fold
            features = self.feature_extractor.extract_features(file_name)
            if features is not None:
                X, _, org_features_count = features
                y = self.metadata.get_class(file_name)
                features_count = X.shape[0]
                print('Features count: %d, org features count: %d, class: %d' %
                      (features_count, org_features_count, y))
                # Only the original (non-augmented) segments participate in
                # whole-file voting, hence the (start, start+org_count) range.
                cur_feature_idx = fold_last_idx[fold_name]
                self.fold_feat_idx[fold_name].append((cur_feature_idx, cur_feature_idx+org_features_count))
                fold_last_idx[fold_name] += features_count
                fold_X[fold_name].append(X)
                fold_y[fold_name].extend(features_count*[y])
        for fold in self.fold_list:
            X_fold = np.concatenate(fold_X[fold])
            # BUG FIX: np.int was removed from NumPy (1.24); use an explicit
            # fixed-width dtype instead.
            y_fold = np.array(fold_y[fold], dtype=np.int64)
            self.X_dict[fold] = X_fold
            self.y_dict[fold] = y_fold
%%time
dataset = ESC50Dataset(extraction_type='logmelspec_delta_30ms_16khz')
dataset.load()
dataset.save()
```
### 8 и 10. Кросс-валидация и настройка гиперпараметров модели. Построение кривых валидации и обучения
```
class TrainHistory:
    """Thin wrapper holding a Keras training-history mapping."""

    def __init__(self, history):
        # Keep a reference to the raw history dict as-is.
        self.history = history
class TestResults:
    """Ground truth and predictions for one evaluation run, with metric helpers."""

    def __init__(self, y_true, y_pred,
                 y_true_oh=None, y_pred_proba=None, classes=None):
        # Whole-file labels and predictions.
        self.y_true = y_true
        self.y_pred = y_pred
        # Optional one-hot truth and per-file probability vectors.
        self.y_true_oh = y_true_oh
        self.y_pred_proba = y_pred_proba
        # Optional human-readable class names for reports.
        self.classes = classes
        # Fragment-level accuracy is set externally; -1 means "not computed".
        self.frag_acc = -1

    def get_f1_score(self, average='weighted'):
        """F1 score over whole files (sklearn)."""
        return f1_score(self.y_true, self.y_pred, average=average)

    def get_accuracy(self):
        """Accuracy over whole files (sklearn)."""
        return accuracy_score(self.y_true, self.y_pred)

    def print_classification_report(self):
        """Print sklearn's per-class precision/recall/F1 table."""
        report = classification_report(self.y_true, self.y_pred,
                                       target_names=self.classes, digits=4)
        print(report)
def plot_history(history):
    """Plot train/validation accuracy (top) and loss (bottom) from a Keras history dict."""
    ax1 = plt.subplot(2, 1, 1)
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    plt.title('Доля правильных ответов модели')
    plt.ylabel('доля прав. ответов')
    plt.legend(['обучающая', 'контрольная'], loc='best')
    # Hide x labels of the top subplot — it shares the x axis with the bottom one.
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.subplot(2, 1, 2, sharex=ax1)
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    plt.title('Функция потерь модели')
    plt.ylabel('значение функции')
    plt.xlabel('эпохи обучения')
    plt.tight_layout()
    plt.show()
def evaluate(model, dataset, use_heldout=False, epochs=30, min_lr=0.0002):
    """Run leave-one-fold-out cross-validation of *model* on *dataset* and
    print per-fold and aggregate accuracy / weighted F1 statistics."""
    fold_accs = []
    fold_frag_accs = []
    fold_f1s = []
    for train_folds, test_folds in dataset.kfold_names(use_heldout=use_heldout):
        fold_results = model.train_on(dataset, train_folds, test_folds,
                                      epochs=epochs, validate=True, min_lr=min_lr)
        print('Model evaluation results:')
        print('Trained on %s, validated on %s' % (', '.join(train_folds), ', '.join(test_folds)))
        print('Fragment accuracy: %.4f' % fold_results.frag_acc)
        fold_frag_accs.append(fold_results.frag_acc)
        acc = fold_results.get_accuracy()
        fold_accs.append(acc)
        print('Accuracy: %.4f' % acc)
        # Local name `f1` also avoids shadowing sklearn's imported f1_score.
        f1 = fold_results.get_f1_score(average='weighted')
        fold_f1s.append(f1)
        print('F1-Score (weighted): %.4f' % f1)
        fold_results.print_classification_report()
        if model.history is not None:
            plot_history(model.history)
        else:
            print('Learning history is not found')
    print('Final statistics:')
    print('Fragment accuracy. Mean: %.4f, std: %.4f' % (np.mean(fold_frag_accs),
                                                        np.std(fold_frag_accs)))
    print('Accuracy. Mean: %.4f, std: %.4f' % (np.mean(fold_accs), np.std(fold_accs)))
    print('F1-Score. Mean: %.4f, std: %.4f' % (np.mean(fold_f1s), np.std(fold_f1s)))
model = AlexLikeModel(lr=0.0003, suff='_lr0-0003')
evaluate(model, dataset, use_heldout=True, epochs=50)
```
Подбор архитектуры сети (слоёв и их параметров), а также параметров предобработки был тактично опущен, т.к. он осуществлялся мной в том числе в рамках моей выпускной работы. Чтобы повторить все вычисления заново, потребовалось бы несколько суток непрерывной работы компьютера со 100% загрузкой видеокарты, что я сейчас не могу сделать. Прошу не снижать за это баллы :(
Могу отметить, что при увеличении Dropout модель недообучается, а при увеличении learning rate модель сильно колбасит и её итоговое качество непредсказуемо, но обычно ниже той, что достигается при более низких значениях.
По кривым обучения можно видеть, что модель не переобучается, а выходит на плато. Если добавить больше данных и усложнить модель, можно достигнуть более высоких результатов.
### 9. Создание новых признаков и описание этого процесса
Помимо логарифмированных мел-спектрограмм можно использовать дельта-коэффициенты, что и было сделано в экспериментах выше. О том, что это такое, можно прочитать <a href="http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/">здесь</a> и <a href="https://librosa.github.io/librosa/generated/librosa.feature.delta.html">здесь</a>. Если вкратце, то они представляют собой аналог численной производной.
Посчитанные коэффициенты добавляются к исходным признакам в виде "канала" по аналогии с каналами в изображении. После добавления мы получаем уже четырёхмерную матрицу данных. Также в качестве каналов можно добавить дельта-коэффициенты от дельта-коэффициентов, а также MFCC.
Эксперименты за кулисами показали увеличение качества лишь от добавления дельта-коэффициентов на наборе ESC-50.
### 11. Прогноз для тестовой или отложенной выборки
```
def evaluate_validation(model, dataset, epochs=30, min_lr=0.0002):
    """Train on folds 1-4 and report hold-out metrics on fold 5."""
    acc_list = []
    frag_acc_list = []
    f1_list = []
    train_folds = ['fold1', 'fold2', 'fold3', 'fold4']
    test_folds = ['fold5']
    res = model.train_on(dataset, train_folds, test_folds,
                         epochs=epochs, validate=True, min_lr=min_lr)
    print('Model evaluation results:')
    print('Trained on %s, validated on %s' % (', '.join(train_folds), ', '.join(test_folds)))
    print('Fragment accuracy: %.4f' % res.frag_acc)
    frag_acc_list.append(res.frag_acc)
    acc = res.get_accuracy()
    acc_list.append(acc)
    print('Accuracy: %.4f' % acc)
    # Renamed local (was `f1_score`) so sklearn's imported function is not shadowed.
    f1 = res.get_f1_score(average='weighted')
    f1_list.append(f1)
    print('F1-Score (weighted): %.4f' % f1)
    res.print_classification_report()
    if model.history is not None:
        plot_history(model.history)
    else:
        print('Learning history is not found')
model = AlexLikeModel(lr=0.0003, suff='_lr0-0003')
evaluate_validation(model, dataset, epochs=50)
```
Полученные результаты согласуются с данными кросс-валидации (отличие около 1%).
При помощи Tensorflow невозможно корректно зафиксировать зерно генератора псевдослучайных чисел, т.к. библиотека cuDNN, которая задействуется внутри, в некоторых местах использует свой ГПСЧ, на который пока нельзя повлиять. Поэтому ваши результаты могут отличаться от моих, но незначительно.
### 12. Выводы
Данная модель хорошо показала себя при решении задачи классификации звуковых сигналов. Приведу сравнение с результатами автора набора данных (средняя доля правильных ответов на кросс-валидации):
* Мой результат - 73.4%
* k-NN - 32.2%
* Random Forest - 45.1%
* SVM - 39.5%
Я не стал подробно расписывать каждый параметр модели и то, почему именно такой был выбран, т.к. для этого потребовалось бы притащить теории на десятки страниц, потому что в курсе не разбирались нейросетевые модели. Если есть вопросы, могу поделиться своим небольшим опытом в чате ODS, а также выслушать вашу критику и пожелания: @fremis
Сейчас данная тема активно исследуется. На arXiv вы можете увидеть новые статьи по данной тематике чуть ли не каждый день. Также проводится ежегодный конкурс DCASE при спонсорской поддержке Google.
В этом году появился огромный набор данных от Google под названием Audioset, в котором содержится огромное количество звуков из видео на Youtube, разбитых на огромное количество категорий. Для того, чтобы переварить этот набор, потребуются уже более глубокие сети и соответствующие мощности. Также имеется огромное множество неразмеченных звуковых файлов, которые было бы неплохо задействовать при обучении модели (например, есть набор ESC-US от автора ESC-50, в котором содержится довольно много неразмеченных звуковых файлов). Для этого можно попытаться применить подходы, которые используются при работе с изображениями.
| github_jupyter |
```
#Model: CNN
#Word Embedding: Pre-Trained (https://devmount.github.io/GermanWordEmbeddings/)
#Dataset: 3
#based on https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.initializers import Constant
BASE_DIR = ''
GLOVE_DIR = BASE_DIR  # directory holding the pre-trained embedding file 'glovegerman.txt'
TEXT_DATA_DIR = './dataset3/'  # one sub-directory per class label, one text file per sample
MAX_SEQUENCE_LENGTH = 1000  # texts are padded/truncated to this many tokens
MAX_NUM_WORDS = 20000  # vocabulary size cap for the tokenizer / embedding matrix
EMBEDDING_DIM = 300  # dimensionality of the pre-trained word vectors
VALIDATION_SPLIT = 0.2  # fraction of samples held out for validation
print('Indexing word vectors.')
# Build a word -> vector map from the pre-trained German embedding file.
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glovegerman.txt')) as f:
    for line in f:
        # each line is "<word> <EMBEDDING_DIM space-separated floats>"
        word, coefs = line.split(maxsplit=1)
        # np.fromstring in text mode is deprecated (removed in NumPy 2.0);
        # building the array from the split tokens gives an identical result
        embeddings_index[word] = np.array(coefs.split(), dtype='float32')
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')

texts = []  # list of text samples
labels_index = {}  # dictionary mapping label name to numeric id
labels = []  # list of label ids

# Each sub-directory of TEXT_DATA_DIR is one class; every file inside it is
# one sample.  Directory names are mapped to consecutive integer label ids.
for name in sorted(os.listdir(TEXT_DATA_DIR)):
    path = os.path.join(TEXT_DATA_DIR, name)
    if os.path.isdir(path):
        label_id = len(labels_index)
        labels_index[name] = label_id
        for fname in sorted(os.listdir(path)):
            # BUGFIX: the original guarded this body with `if fname==fname:`,
            # a tautology that never filters anything (the upstream Keras
            # example filters `fname.isdigit()`); the dead condition is removed
            fpath = os.path.join(path, fname)
            print(fname)
            # Python 2 open() has no encoding parameter
            args = {} if sys.version_info < (3,) else {'encoding': 'utf-8'}
            with open(fpath, **args) as f:
                t = f.read()
                i = t.find('\n\n')  # skip header
                if 0 < i:
                    t = t[i:]
                texts.append(t)
            labels.append(label_id)
print('Found %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)  # each text -> list of word indices
word_index = tokenizer.word_index  # full word -> index map (NOT capped by num_words)
print('Found %s unique tokens.' % len(word_index))
# pad/truncate every sequence to a fixed length so they batch into one tensor
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))  # one-hot encode the integer class ids
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
np.random.shuffle(indices)  # shuffle samples and labels in unison
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
# BUGFIX: slicing with `[:-num_validation_samples]` yields an EMPTY training
# set when num_validation_samples rounds down to 0 (since a[:-0] == a[:0]);
# computing an explicit split index handles that edge case and is otherwise
# identical.
split_at = data.shape[0] - num_validation_samples
x_train = data[:split_at]
y_train = labels[:split_at]
x_val = data[split_at:]
y_val = labels[split_at:]
print('Preparing embedding matrix.')
# prepare embedding matrix: row i holds the pre-trained vector of the word
# with tokenizer index i; words missing from the embedding file stay all-zero
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)  # +1: index 0 is reserved for padding
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
    if i >= MAX_NUM_WORDS:
        # word_index is not capped by num_words, so skip indices past the cap
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            embeddings_initializer=Constant(embedding_matrix),
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
print('Training model.')
# NOTE: duplicate re-imports of Sequential / Dense / Dropout / Activation /
# Embedding were removed; every name below is still imported exactly once.
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, LSTM, Flatten, Dropout
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.preprocessing import sequence
import keras

# hyper-parameters of the 1-D convnet classifier
# (unused max_features / maxlen / embedding_dims and the misleading
# batch_size=32 — fit() below uses 64 — were removed)
filters = 250      # number of convolution filters
kernel_size = 3    # width of the 1-D convolution window
hidden_dims = 250  # size of the dense layer after pooling

# build the classifier on top of the frozen pre-trained embedding layer
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Dropout(0.2)(embedded_sequences)
x = Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(hidden_dims)(x)
x = Dropout(0.2)(x)
x = Activation('relu')(x)
x = Dense(2)(x)
# NOTE(review): for 2 one-hot classes, softmax + categorical_crossentropy is
# the conventional pairing; sigmoid + binary_crossentropy is kept here to
# preserve the original behaviour.
preds = Activation('sigmoid')(x)

model = Model(sequence_input, preds)
print(model.summary())

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=64,
          epochs=10,
          validation_data=(x_val, y_val))
```
| github_jupyter |
# Example 4#
Tuning the hyper-parameters of LS-SVM regression models using the scikit-learn GridSearchCV function.
The synthetic data used for this purpose is the 1D Sinc function.
```
#Some imports
import matplotlib.pyplot as plt
import numpy as np
import random
import math
import scipy.stats as st
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut, KFold
from LSSVMRegression import LSSVMRegression
#Generating the synthetic data
Nmodels = 25  # number of committee members to train
Ndata = 25    # points drawn for the full dataset
Nrem = 5      # points removed from each member's training subset
# column vector of Ndata uniform feature values in [-1, 10)
x = np.random.uniform(low=-1, high=10, size=Ndata)[:, np.newaxis]
# associated targets, as a 1D array
y = np.sinc(x).ravel()
x2 = x * x
print("mean x²=", np.mean(x2))
#create Nmodels (identically configured) poly-kernel models, each trained on a
#different random subset of the data (bagging-style committee)
models=list()
datasets_x=list()
datasets_y=list()
for i in range(Nmodels):
    clfrbf=LSSVMRegression(
        gamma=1, #set the gamma-hyper parameter equal to 1
        kernel='poly', #use the polynomial kernel (the original comment said "linear", which did not match the code)
        sigma=1.0,
        c=20.0,
        d=3,
            )
    #index=list(range(i,i+Nrem))
    # drop Nrem randomly chosen points so every committee member sees a
    # slightly different training set
    index=random.sample(range(Ndata),Nrem)
    seti_x=np.array([np.delete(x,index)]).T  # back to a column vector after delete flattens
    #print(seti_x.shape,"  ",seti_x[0:5,:].ravel())
    seti_y=np.delete(y,index)
    clfrbf.fit(seti_x,seti_y) # train our model instance, aka solve the set of linear equations
    datasets_x.append(seti_x)
    datasets_y.append(seti_y)
    models.append(clfrbf)
def mean_confidence_interval(data, confidence=0.95):
    """Mean with a symmetric t-based confidence interval and empirical quantiles.

    Parameters
    ----------
    data : array-like of float
        Sample values.
    confidence : float, default 0.95
        Two-sided confidence level used for both the t-interval and the
        empirical quantiles.

    Returns
    -------
    tuple (mean, ci_low, ci_high, q_low, q_high)
        Sample mean, lower/upper t-confidence bounds, and the matching
        lower/upper empirical quantiles (linear interpolation).
    """
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), st.sem(a)
    # half-width of the two-sided t confidence interval with n-1 dof
    h = se * st.t.ppf((1 + confidence) / 2., n-1)
    cf = (1.0 - confidence) * 0.5
    # the `interpolation='linear'` keyword was deprecated in NumPy 1.22 and
    # removed in 2.0; linear IS np.quantile's default, so dropping the kwarg
    # keeps the behaviour identical on every NumPy version
    qm = np.quantile(a, cf)
    qp = np.quantile(a, 1.0 - cf)
    return m, m-h, m+h, qm, qp
#generate a dense mesh of prediction points
xmin=-8
xmax=8
Npts=2001
xPred=np.linspace((xmin,),(xmax,),Npts)  # shape (Npts, 1): column vector, as predict() expects
yExact=np.sinc(xPred).ravel()  # ground-truth targets on the mesh
#yExact=np.sin(xPred).ravel()
yAvg=np.zeros(Npts)    # committee mean prediction per mesh point
CIlow=np.zeros(Npts)   # lower t-confidence bound
CIhigh=np.zeros(Npts)  # upper t-confidence bound
Qlow=np.zeros(Npts)    # lower empirical quantile
Qhigh=np.zeros(Npts)   # upper empirical quantile
# and predict with every committee member
all_yPred=list()
yPred2D=np.zeros((Nmodels,Npts))  # row m holds model m's predictions on the mesh
cnt=-1
for clfrbf in models:
    cnt+=1
    yPred=clfrbf.predict(xPred)
    all_yPred.append(yPred)
    yPred2D[cnt]=yPred
    # The mean squared error (MAE) and The coefficient of determination R²: 1 is perfect prediction
    #print('MAE: %.3f  R²: %.3f' % (mean_squared_error(yExact, yPred), r2_score(yExact, yPred)))
# per-mesh-point statistics across the committee (columns of yPred2D)
for i in range(Npts):
    yAvg[i], CIlow[i], CIhigh[i], Qlow[i], Qhigh[i]= mean_confidence_interval(yPred2D[:,i],confidence=0.9)
    #print(yAvg[i], CIlow[i], CIhigh[i],"  ",yPred2D[1:5,i])
#print("TYPE:" , type(xPred)," shape:",xPred.shape)
#print("TYPE:" , type(xPred)," shape:",xPred.shape)
# Plot outputs: individual members (faint red), confidence band (blue),
# quantile band (green), committee mean (blue), exact curve (black),
# training points (black dots)
plt.figure(figsize=(12,8))
for yPred in all_yPred:
    plt.plot(xPred, yPred, color='red' ,linewidth=1, zorder=-1, alpha=0.25)
plt.fill_between(xPred.ravel(), CIlow, CIhigh, color='blue', zorder=0, alpha=.5)
plt.fill_between(xPred.ravel(), Qlow, Qhigh, color='green', zorder=0, alpha=.25)
plt.plot(xPred, yAvg, color='blue',linewidth=3, zorder=0)
plt.plot(xPred, yExact, color='black',linewidth=2, zorder=0)
plt.scatter(x, y, color='black', zorder=1)
plt.axis([xmin,xmax,-0.75,1.5])
# integer tick positions across the x-range
# (the unused `step` variable was removed; the append loop became a list call)
Xlst = list(np.arange(math.floor(xmin), math.ceil(xmax) + 1, 1.0))
plt.xticks(Xlst,rotation=45,fontsize=18)
#plt.xticks([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])
plt.yticks([-0.75,-0.5,-0.25,0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=18)
plt.xlabel("feature x",fontsize=22,fontweight="bold")
plt.ylabel("target y",fontsize=22,fontweight="bold")
plt.show()
#sklearn gridsearch over one LS-SVM hyper-parameter, repeated for every committee member
#CrossVal=LeaveOneOut()
CrossVal=KFold(n_splits = 5, shuffle = True)
params=dict()
# coarse logarithmic grid, densified below with small integer factors
Gamma=[1.0e-6,1.0e-5,1.0e-4,1.0e-3,1.0e-2,0.1,1.0,10.0,100.0,1.0e3,1.0e4,1.0e5,1.0e6]
#Gamma=[1]
#Factors=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
Factors=np.array([1,2,3,4,5,6,7,8,9])
GammaDens=list()
for g in Gamma:
    GammaDens.extend(Factors*g)  # e.g. 1e-6 -> 1e-6..9e-6
ptune='c'    # which hyper-parameter to tune: gamma(float), c(float) or d (int)
scalename='log'    #"linear", "log", "symlog", "logit",
#ptune='d'    # gamma(float), c(float) or d (int)
#scalename='linear'    #"linear", "log", "symlog", "logit",
params[ptune]=GammaDens
CVmodels=list()
scoreX = GammaDens  # x-axis of the score curves
scoreY=np.zeros((Nmodels,len(GammaDens)))  # row m = CV error curve of committee member m
for i in range(Nmodels):
    print(i,",",end=" ")
    mod=models[i]
    CVmodel = GridSearchCV(
        mod, # our estimator
        param_grid=params, # dictionary with our possible hyper-parameters
        scoring='neg_mean_squared_error', #This gives the same type of scoring as the lasso CV
        n_jobs=4,
        cv=CrossVal,
        error_score=np.nan, # if set to raise it throws a error in case one point dies, now it throws a warning "FitFailedWarning"
        return_train_score=True, # why would we not be interested in the result of the training scores?
        )
    # refit on the member's own training subset
    CVmodel.fit(datasets_x[i],datasets_y[i])
    # flip the sign: neg_mean_squared_error -> plain MSE
    scoreYi = np.array(CVmodel.cv_results_['mean_test_score']*-1.0)
    scoreY[i] = scoreYi
    CVmodels.append(CVmodel)
# Plot the cross-validation error curves of all committee members
xmin=np.min(GammaDens)
xmax=np.max(GammaDens)
ymin=np.min(scoreY)
ymax=np.max(scoreY)
plt.figure(figsize=(12,8))
for yScore in scoreY:
    # blue when the error grows with the parameter value, red otherwise
    col='red'
    if (yScore[0]<yScore[-1]):
        col='blue'
    plt.plot(scoreX, yScore, color=col ,linewidth=1, zorder=-1, alpha=0.5)
#plt.plot(scoreX1, scoreY1, color='black',linewidth=2)
#plt.plot(scoreX2, scoreY2, color='blue',linewidth=2)
plt.xscale("log")#scalename)
plt.yscale("log")
plt.axis([0.001,100000,ymin,ymax])
plt.xticks(rotation=45,fontsize=18)
plt.yticks(fontsize=18)
#plt.xticks([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])
plt.xlabel(ptune,fontsize=22,fontweight="bold")
plt.ylabel("score",fontsize=22,fontweight="bold")
# BUGFIX: the keyword `b=` was deprecated in Matplotlib 3.5 and removed in
# 3.6 (renamed `visible`); passing the flag positionally works on every version
plt.grid(True, which='both', axis='both', color='lightgrey', linestyle='--', linewidth=1)
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
import heapq
import matplotlib.colors
import PIL
import datetime
```
# Algorithm
Custom adaptation of the A* algorithm for a 3D array (x, y, time) with forced forward movement in time
```
def heuristic_function(a, b):
    """Squared Euclidean distance between two 2-D grid points a and b."""
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return dx * dx + dy * dy
def astar_3D(space, origin_xy, destination_xy):
    """A* search through a 3-D occupancy grid (x, y, timeslice).

    Every move advances the time axis by exactly one step; within a step the
    agent may stay put or move one cell up/down/left/right.  Cells equal to 1
    are blocked.  Returns the path as a list of (x, y, z) tuples ordered from
    the destination back to (but excluding) the origin, or the string
    'no solution found :(' when the destination is unreachable.
    """
    # lift the 2-D origin into 3-D at timeslice 0
    start = (origin_xy[0], origin_xy[1], 0)
    # predecessor map used to reconstruct the path
    came_from = {}
    # priority queue of nodes to expand
    open_heap = []
    # legal moves: stay, up, down, right, left -- z always advances by 1
    steps = [(0, 0, 1), (0, 1, 1), (0, -1, 1), (1, 0, 1), (-1, 0, 1)]
    cost_so_far = {start: 0}
    priority = {start: heuristic_function(origin_xy, destination_xy)}
    heapq.heappush(open_heap, (priority[start], start))
    # keep expanding while there are candidate nodes left
    while open_heap:
        current = heapq.heappop(open_heap)[1]
        # reached the destination (any timeslice): walk the predecessor chain
        if (current[0], current[1]) == destination_xy:
            path = []
            node = current
            while node in came_from:
                path.append(node)
                node = came_from[node]
            return path
        for dx, dy, dz in steps:
            nxt = (current[0] + dx, current[1] + dy, current[2] + dz)
            # the candidate must stay inside the grid ...
            inside = (0 <= nxt[0] < space.shape[0]
                      and 0 <= nxt[1] < space.shape[1]
                      and 0 <= nxt[2] < space.shape[2])
            # ... and land on a storm-free cell
            if inside and space[nxt[0], nxt[1], nxt[2]] != 1:
                total = cost_so_far[current] + 1  # every move costs 1
                if nxt not in cost_so_far:
                    cost_so_far[nxt] = total
                    # total cost = path length so far + heuristic to goal
                    priority[nxt] = total + heuristic_function(nxt, destination_xy)
                    heapq.heappush(open_heap, (priority[nxt], nxt))
                    came_from[nxt] = current
    return 'no solution found :('
def convert_forecast(data_cube):
    """Collapse the 5-D forecast cube into a binary 4-D world array.

    The forecast-member axis (axis 2) is reduced with np.min (an optimistic
    combination -- note: NOT a mean), cells with value >= 15 are marked as
    storm (1) and the rest as safe (0), and the axes are reordered so that
    the result is indexed as (x, y, timeslice, day).
    """
    combined = np.min(data_cube, axis=2)
    # binarize: storm (1) vs safe (0), as plain ints
    storm = (combined >= 15).astype(int)
    # the original applied three successive swapaxes calls;
    # a single transpose with axes (2, 3, 1, 0) is equivalent
    return np.transpose(storm, (2, 3, 1, 0))
def plot_timeslice(timeslice, cities, solution):
    """Plot one binary time slice with city markers and a path segment.

    Parameters
    ----------
    timeslice : 2-D array, binary storm map (black = storm).
    cities : DataFrame with cid/xid/yid columns; cid 0 (origin) is red,
        the rest blue.
    solution : either an object with x/y attributes (e.g. a DataFrame) or a
        sequence of (x, y, z) tuples as produced by astar_3D.
    """
    plt.figure(figsize=(5,5))
    # black for storm
    plt.imshow(timeslice[:,:].T, aspect='equal', cmap=plt.get_cmap('binary'))
    for c,x,y in zip(cities.cid, cities.xid, cities.yid):
        if c == 0:
            plt.scatter([x-1], [y-1], c='red')
        else:
            plt.scatter([x-1], [y-1], c='blue')
    # BUGFIX/generalization: plot_series passes a plain list of (x, y, z)
    # tuples, which has no .x attribute and previously raised AttributeError;
    # accept both the attribute form and the tuple form
    if hasattr(solution, 'x'):
        x, y = solution.x, solution.y
    else:
        x, y, _z = zip(*solution)
    plt.plot(list(x), list(y), linestyle='-', color='r')
    plt.show()
def plot_series(world, cities, solution):
    """Plot snapshots of `world` every 30 timesteps, each with the part of the
    path that falls in that window.

    NOTE(review): `solution` is a sequence of (x, y, z) tuples here, while
    plot_timeslice reads .x/.y/.z attributes -- confirm the two agree before
    relying on this function.
    """
    timesteps = list(range(0, 540, 30))
    for t in timesteps:
        print(t)
        timeslice = world[:,:,t]
        # path points whose timeslice falls inside [t, t+30]
        solution_subset = [i for i in solution if t <= i[2] <= t + 30]
        if len(solution_subset) > 0:
            plot_timeslice(timeslice, cities, solution_subset)
def plot_solution(world, cities, solution, day):
    """Plot every balloon's path for one day, one figure per 30-step window.

    Parameters
    ----------
    world : 3-D array (x, y, timeslice), binary storm map for the plotted day.
    cities : DataFrame with cid/xid/yid columns (cid 0 is the origin).
    solution : DataFrame with x/y/z/day/city columns.
    day : int, day number to plot (matched against solution.day).

    Each figure is saved as img_day<day>_timestep_<t>.png and shown.
    """
    timesteps = list(range(0, 540, 30))
    solution = solution.loc[solution.day == day,:]
    # colour map for cities
    cmap = plt.cm.cool
    norm = matplotlib.colors.Normalize(vmin=1, vmax=10)
    # colour map for weather
    cm = matplotlib.colors.LinearSegmentedColormap.from_list('grid', [(1, 1, 1), (0.5, 0.5, 0.5)], N=2)
    for t in timesteps:
        timeslice = world[:,:,t]
        moves_sofar = solution.loc[solution.z <= t,:]
        moves_new = solution.loc[(t <= solution.z) & (solution.z <= t + 30),:]
        # BUGFIX: this previously tested `solution_subset`, a name that only
        # exists inside plot_series, so the call raised NameError at runtime;
        # the intended guard is "does this window contain any new moves"
        if len(moves_new) > 0:
            plt.figure(figsize=(5,5))
            plt.imshow(timeslice[:,:].T, aspect='equal', cmap = cm)
            # plot old moves (completed path segments in black)
            for city in moves_sofar.city.unique():
                moves_sofar_city = moves_sofar.loc[moves_sofar.city == city,:]
                x = moves_sofar_city.x
                y = moves_sofar_city.y
                z = moves_sofar_city.z
                plt.plot(list(x), list(y), linestyle='-', color='black')
            # plot new moves (this window's segments, coloured per city)
            for city in moves_new.city.unique():
                moves_new_city = moves_new.loc[moves_new.city == city,:]
                x = moves_new_city.x
                y = moves_new_city.y
                z = moves_new_city.z
                plt.plot(list(x), list(y), linestyle='-', color=cmap(norm(city)))
            # plot cities
            for city,x,y in zip(cities.cid, cities.xid, cities.yid):
                if city == 0:
                    plt.scatter([x-1], [y-1], c='black')
                else:
                    # balloon still en-route?
                    if city in moves_new.city.unique():
                        plt.scatter([x-1], [y-1], c=cmap(norm(city)))
                    else:
                        plt.scatter([x-1], [y-1], c='black')
            # save and display
            plt.savefig('img_day' + str(day) + '_timestep_' + str(t) + '.png')
            plt.show()
# Toy data: a 10x10 world over 100 timeslices with no storms at all
# (1 >= 15 is never true, so the binarized world is all zeros)
world = (np.ones((10, 10, 100)) >= 15).astype(int)
origin = (0, 0, 0)
# the goal cell (9, 9) at every possible arrival timeslice
destinations = [(9, 9, t) for t in range(0, 100)]
print(world.shape)
```
# Data
Generate map of the world as binary 3D numpy array to find path in
```
# read h5 format back to numpy array
# h5f = h5py.File('../data/METdata.h5', 'r')
# train = h5f['train'][:]
# test = h5f['test'][:]
# h5f.close()
%pwd
# 5-D forecast cube; given the axis shuffle in convert_forecast the ordering
# is presumably (day, timeslice, model, x, y) -- TODO confirm against the data
data_cube = np.load('../data/5D_test.npy')
# convert forecast to world array indexed (x, y, timeslice, day)
arr_world = convert_forecast(data_cube)
print(data_cube.shape)
print(arr_world.shape)
# repeat time slices x30 to expand each forecast step into 30 movement steps
arr_world_big = np.repeat(arr_world, repeats=30, axis=2)
print(arr_world_big.shape)
```
# Run
```
#x = astar_3D(space=arr_world_big[:,:,:,0],
#           origin_xy=origin,
#           destination_xy=destination)
# accumulate one row per (x, y, timeslice) of every balloon's path
solution = pd.DataFrame([], columns=['x','y','z','day','city'])
# NOTE(review): `cities` is not defined anywhere in this notebook chunk --
# presumably a DataFrame with cid/xid/yid columns loaded elsewhere; verify
origin = cities.loc[cities.cid == 0, ['xid', 'yid']]
origin = (origin.iloc[0,0], origin.iloc[0,1])
# iterate over days
for i in range(arr_world.shape[3]):
    # get data for specific day
    arr_day = arr_world_big[:,:,:,i]
    # iterate over destinations
    for j in range(cities.shape[0] - 1):
        print('calculating day: '+str(i+1)+', city: '+str(j+1))
        # find coordinates of target city
        destination = cities.loc[cities.cid == j+1, ['xid', 'yid']]
        destination = (destination.iloc[0,0], destination.iloc[0,1])
        x = astar_3D(space=arr_day,
                     origin_xy=origin,
                     destination_xy=destination)
        # check if solution was found (astar_3D returns a string on failure)
        if type(x) == str:
            # no path: the balloon stays at the origin for all 540 timeslices
            # NOTE(review): this branch records day=i / city=j while the
            # success branch records i+1 / j+1 -- looks like an off-by-one
            # inconsistency; confirm which convention downstream code expects
            out = pd.DataFrame(
                np.array([[i]*540,[j]*540,[origin[0]]*540,[origin[1]]*540,range(540)]).T,
                columns=['day','city','x','y','z'])
        else:
            # the path comes back destination-first without the origin:
            # prepend the origin row and sort by timeslice
            # NOTE(review): DataFrame.append is deprecated/removed in recent
            # pandas versions; switch to pd.concat when upgrading
            out = (pd.DataFrame(zip(*x))
                   .transpose()
                   .rename(index=str, columns={0:'x', 1:'y', 2:'z'})
                   .append(pd.DataFrame({'x':origin[0],'y':origin[1],'z':[0]}))
                   .sort_values(by=['z'])
                   .assign(day=i+1,
                           city=j+1))
        solution = solution.append(out, ignore_index=True)
solution.to_csv('solution.csv')
solution = pd.read_csv('solution.csv', index_col=0)
# convert time slices to timestamps: each slice is 2 s, offset by 180 s;
# the leading hours field is stripped, leaving "MM:SS" strings
solution['time'] = solution['z'].apply(lambda x: ':'.join(str(datetime.timedelta(seconds=x * 2 + 180)).split(':')[1:]))
# re-adjust day number
solution['day'] = solution.day + 5
solution.head()
# column order presumably required by the submission format -- verify
out = solution[['city','day','time','x','y']]
out.head()
out.to_csv('out.csv', index=False, header=False)
# inspect one day's moves
x2 = solution.loc[solution.day == 11,['x','y','z','city']]
x2.head()
plot_solution(arr_world_big[:,:,:,0], cities, solution, 15)
```
| github_jupyter |
# Fully Convolutional Networks for Change Detection
Example code for training the network presented in the paper:
```
Daudt, R.C., Le Saux, B. and Boulch, A., 2018, October. Fully convolutional siamese networks for change detection. In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.
```
Code uses the OSCD dataset:
```
Daudt, R.C., Le Saux, B., Boulch, A. and Gousseau, Y., 2018, July. Urban change detection for multispectral earth observation using convolutional neural networks. In IGARSS 2018-2018 IEEE International Geoscience and Remote Sensing Symposium (pp. 2115-2118). IEEE.
```
FresUNet architecture from paper:
```
Daudt, R.C., Le Saux, B., Boulch, A. and Gousseau, Y., 2019. Multitask learning for large-scale semantic change detection. Computer Vision and Image Understanding, 187, p.102783.
```
Please consider citing all relevant papers if you use this code.
```
# Rodrigo Daudt
# rcdaudt.github.io
# rodrigo.daudt@onera.fr
%%bash
hostname
# Imports
# PyTorch
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torchvision.transforms as tr
# Models imported for .py files in local directory. Hashed out here - models just put in a cell.
#from unet import Unet
#from siamunet_conc import SiamUnet_conc
#from siamunet_diff import SiamUnet_diff
#from fresunet import FresUNet
#from smallunet import SmallUnet
#from smallunet_attempt import Unet
# Other
import os
import numpy as np
import random
from skimage import io
from scipy.ndimage import zoom
import matplotlib.pyplot as plt
%matplotlib inline
from tqdm import tqdm as tqdm
from pandas import read_csv
from math import floor, ceil, sqrt, exp
from IPython import display
import time
from itertools import chain
import warnings
from pprint import pprint
print('IMPORTS OK')
# mount Google Drive so the dataset under /content/drive is reachable (Colab only)
from google.colab import drive
drive.mount('/content/drive')
# Global Variables' Definitions
PATH_TO_DATASET = '/content/drive/MyDrive/onera/'  # OSCD dataset root on the mounted Drive
IS_PROTOTYPE = False
FP_MODIFIER = 1 # Tuning parameter, use 1 if unsure
BATCH_SIZE = 32
PATCH_SIDE = 96  # side length of the square training patches
N_EPOCHS = 50
NORMALISE_IMGS = True  # standardise each image to zero mean / unit std on load
TRAIN_STRIDE = int(PATCH_SIDE/2) - 1  # patch sampling stride (overlapping patches)
TYPE = 1 # 0-RGB | 1-RGBIr | 2-All bands s.t. resolution <= 20m | 3-All bands
LOAD_TRAINED = False  # True: load saved weights instead of training
DATA_AUG = True  # enable random flip/rotation augmentation
print('DEFINITIONS OK')
### This cell defines a load of functions that we will need to train the network e.g. data augmentation functions,
### functions that call the different bands of the sentinel data, etc.
# Functions
def adjust_shape(I, s):
    """Force grayscale image I to shape s: crop any overhang, edge-pad any shortfall."""
    # crop down to at most s in both dimensions
    cropped = I[:s[0], :s[1]]
    rows, cols = cropped.shape
    # pad with edge replication where the image is smaller than requested
    pad_rows = max(0, s[0] - rows)
    pad_cols = max(0, s[1] - cols)
    return np.pad(cropped, ((0, pad_rows), (0, pad_cols)), 'edge')
def read_sentinel_img(path):
    """Read cropped Sentinel-2 image: RGB bands."""
    # all band files share a common prefix; strip the trailing 'BXX.tif'
    prefix = os.listdir(path)[0][:-7]
    bands = [io.imread(path + prefix + band)
             for band in ("B04.tif", "B03.tif", "B02.tif")]  # R, G, B
    img = np.stack(bands, axis=2).astype('float')
    if NORMALISE_IMGS:
        # standardise to zero mean / unit std
        img = (img - img.mean()) / img.std()
    return img
def read_sentinel_img_4(path):
    """Read cropped Sentinel-2 image: RGB and NIR bands."""
    # all band files share a common prefix; strip the trailing 'BXX.tif'
    prefix = os.listdir(path)[0][:-7]
    bands = [io.imread(path + prefix + band)
             for band in ("B04.tif", "B03.tif", "B02.tif", "B08.tif")]  # R, G, B, NIR
    img = np.stack(bands, axis=2).astype('float')
    if NORMALISE_IMGS:
        # standardise to zero mean / unit std
        img = (img - img.mean()) / img.std()
    return img
def read_sentinel_img_leq20(path):
    """Read cropped Sentinel-2 image: bands with resolution less than or equals to 20m.

    The four 10 m bands are read at native size; the six 20 m bands are
    upsampled x2 with scipy.ndimage.zoom and aligned to the 10 m grid via
    adjust_shape.  Returns an (H, W, 10) float array, standardised when
    NORMALISE_IMGS is set.
    """
    im_name = os.listdir(path)[0][:-7]
    r = io.imread(path + im_name + "B04.tif")
    s = r.shape  # reference (10 m) spatial shape for the upsampled bands
    g = io.imread(path + im_name + "B03.tif")
    b = io.imread(path + im_name + "B02.tif")
    nir = io.imread(path + im_name + "B08.tif")
    # 20 m bands: zoom x2, then crop/pad to the reference shape
    ir1 = adjust_shape(zoom(io.imread(path + im_name + "B05.tif"),2),s)
    ir2 = adjust_shape(zoom(io.imread(path + im_name + "B06.tif"),2),s)
    ir3 = adjust_shape(zoom(io.imread(path + im_name + "B07.tif"),2),s)
    nir2 = adjust_shape(zoom(io.imread(path + im_name + "B8A.tif"),2),s)
    swir2 = adjust_shape(zoom(io.imread(path + im_name + "B11.tif"),2),s)
    swir3 = adjust_shape(zoom(io.imread(path + im_name + "B12.tif"),2),s)
    I = np.stack((r,g,b,nir,ir1,ir2,ir3,nir2,swir2,swir3),axis=2).astype('float')
    if NORMALISE_IMGS:
        I = (I - I.mean()) / I.std()
    return I
def read_sentinel_img_leq60(path):
    """Read cropped Sentinel-2 image: all bands.

    10 m bands at native size, 20 m bands upsampled x2 and 60 m bands
    upsampled x6, all aligned to the 10 m grid via adjust_shape.  Returns an
    (H, W, 13) float array, standardised when NORMALISE_IMGS is set.
    """
    im_name = os.listdir(path)[0][:-7]
    r = io.imread(path + im_name + "B04.tif")
    s = r.shape  # reference (10 m) spatial shape for the upsampled bands
    g = io.imread(path + im_name + "B03.tif")
    b = io.imread(path + im_name + "B02.tif")
    nir = io.imread(path + im_name + "B08.tif")
    # 20 m bands: zoom x2, then crop/pad to the reference shape
    ir1 = adjust_shape(zoom(io.imread(path + im_name + "B05.tif"),2),s)
    ir2 = adjust_shape(zoom(io.imread(path + im_name + "B06.tif"),2),s)
    ir3 = adjust_shape(zoom(io.imread(path + im_name + "B07.tif"),2),s)
    nir2 = adjust_shape(zoom(io.imread(path + im_name + "B8A.tif"),2),s)
    swir2 = adjust_shape(zoom(io.imread(path + im_name + "B11.tif"),2),s)
    swir3 = adjust_shape(zoom(io.imread(path + im_name + "B12.tif"),2),s)
    # 60 m bands: zoom x6, then crop/pad to the reference shape
    uv = adjust_shape(zoom(io.imread(path + im_name + "B01.tif"),6),s)
    wv = adjust_shape(zoom(io.imread(path + im_name + "B09.tif"),6),s)
    swirc = adjust_shape(zoom(io.imread(path + im_name + "B10.tif"),6),s)
    I = np.stack((r,g,b,nir,ir1,ir2,ir3,nir2,swir2,swir3,uv,wv,swirc),axis=2).astype('float')
    if NORMALISE_IMGS:
        I = (I - I.mean()) / I.std()
    return I
def read_sentinel_img_trio(path):
    """Read cropped Sentinel-2 image pair and change map.

    Band selection is controlled by the global TYPE flag:
    0 -> RGB, 1 -> RGB+NIR, 2 -> all bands with resolution <= 20 m,
    3 -> all bands.  Returns (I1, I2, cm) where cm is a boolean change map.
    """
    # read images at the band selection requested by TYPE
    if TYPE == 0:
        I1 = read_sentinel_img(path + '/imgs_1/')
        I2 = read_sentinel_img(path + '/imgs_2/')
    elif TYPE == 1:
        I1 = read_sentinel_img_4(path + '/imgs_1/')
        I2 = read_sentinel_img_4(path + '/imgs_2/')
    elif TYPE == 2:
        I1 = read_sentinel_img_leq20(path + '/imgs_1/')
        I2 = read_sentinel_img_leq20(path + '/imgs_2/')
    elif TYPE == 3:
        I1 = read_sentinel_img_leq60(path + '/imgs_1/')
        I2 = read_sentinel_img_leq60(path + '/imgs_2/')
    else:
        # BUGFIX: an unsupported TYPE previously fell through and crashed
        # below with a NameError on I1; fail fast with a clear error instead
        raise ValueError('Unsupported TYPE: {}'.format(TYPE))
    cm = io.imread(path + '/cm/cm.png', as_gray=True) != 0
    # pad I2 up to I1's spatial size so the pair aligns
    # NOTE(review): assumes I1 is at least as large as I2 in both dimensions;
    # np.pad raises on negative pad widths -- confirm this holds for the data
    s1 = I1.shape
    s2 = I2.shape
    I2 = np.pad(I2,((0, s1[0] - s2[0]), (0, s1[1] - s2[1]), (0,0)),'edge')
    return I1, I2, cm
def reshape_for_torch(I):
    """Transpose an (H, W, C) numpy image to a (C, H, W) torch tensor."""
    # channels-last -> channels-first, as PyTorch conv layers expect
    chw = I.transpose((2, 0, 1))
    return torch.from_numpy(chw)
class ChangeDetectionDataset(Dataset):
    """Change Detection dataset class, used for both training and test data.

    Loads every image pair + change map up front, precomputes the grid of
    patch coordinates per image, and serves (I1, I2, label) patch samples.
    """
    def __init__(self, path, train = True, patch_side = 96, stride = None, use_all_bands = False, transform=None):
        """
        Args:
            path (string): Dataset root; must contain train.txt / test.txt
                whose header row lists the image names.
            train (bool): Read train.txt when True, test.txt otherwise.
            patch_side (int): Side length of the square patches served.
            stride (int, optional): Patch sampling stride; defaults to 1.
            use_all_bands (bool): Currently unused -- band selection is
                driven by the global TYPE flag instead.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # basics
        self.transform = transform
        self.path = path
        self.patch_side = patch_side
        if not stride:
            self.stride = 1
        else:
            self.stride = stride
        if train:
            fname = 'train.txt'
        else:
            fname = 'test.txt'
        # print(path + fname)
        # image names come from the csv header row
        self.names = read_csv(path + fname).columns
        self.n_imgs = self.names.shape[0]
        n_pix = 0     # total pixel count across all change maps
        true_pix = 0  # total count of "changed" pixels
        # load images
        self.imgs_1 = {}
        self.imgs_2 = {}
        self.change_maps = {}
        self.n_patches_per_image = {}
        self.n_patches = 0
        self.patch_coords = []
        for im_name in tqdm(self.names):
            # load and store each image
            I1, I2, cm = read_sentinel_img_trio(self.path + im_name)
            self.imgs_1[im_name] = reshape_for_torch(I1)
            self.imgs_2[im_name] = reshape_for_torch(I2)
            self.change_maps[im_name] = cm
            # accumulate class statistics for the loss weights below
            s = cm.shape
            n_pix += np.prod(s)
            true_pix += cm.sum()
            # calculate the number of patches along each spatial axis
            s = self.imgs_1[im_name].shape
            n1 = ceil((s[1] - self.patch_side + 1) / self.stride)
            n2 = ceil((s[2] - self.patch_side + 1) / self.stride)
            n_patches_i = n1 * n2
            self.n_patches_per_image[im_name] = n_patches_i
            self.n_patches += n_patches_i
            # generate patch coordinates
            for i in range(n1):
                for j in range(n2):
                    # coordinates in (x1, x2, y1, y2) plus a "centre"-style
                    # marker (stride*(i+1), stride*(j+1))
                    current_patch_coords = (im_name,
                                    [self.stride*i, self.stride*i + self.patch_side, self.stride*j, self.stride*j + self.patch_side],
                                    [self.stride*(i + 1), self.stride*(j + 1)])
                    self.patch_coords.append(current_patch_coords)
        # inverse-frequency class weights: [weight(change), weight(no-change)]
        self.weights = [ FP_MODIFIER * 2 * true_pix / n_pix, 2 * (n_pix - true_pix) / n_pix]
    def get_img(self, im_name):
        # full (I1, I2, change map) triple for one image, bypassing patching
        return self.imgs_1[im_name], self.imgs_2[im_name], self.change_maps[im_name]
    def __len__(self):
        return self.n_patches
    def __getitem__(self, idx):
        # resolve the flat patch index to (image, crop window)
        current_patch_coords = self.patch_coords[idx]
        im_name = current_patch_coords[0]
        limits = current_patch_coords[1]
        centre = current_patch_coords[2]
        I1 = self.imgs_1[im_name][:, limits[0]:limits[1], limits[2]:limits[3]]
        I2 = self.imgs_2[im_name][:, limits[0]:limits[1], limits[2]:limits[3]]
        label = self.change_maps[im_name][limits[0]:limits[1], limits[2]:limits[3]]
        # boolean map -> float tensor of 0.0/1.0
        label = torch.from_numpy(1*np.array(label)).float()
        sample = {'I1': I1, 'I2': I2, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample
class RandomFlip(object):
    """Randomly mirror both images and the label along the width axis."""

    def __call__(self, sample):
        img_a, img_b, mask = sample['I1'], sample['I2'], sample['label']
        # one coin flip so all three tensors stay spatially aligned
        if random.random() > 0.5:
            # reverse the last (width) axis; .copy() gives torch a
            # contiguous, positively-strided buffer to wrap
            img_a = torch.from_numpy(img_a.numpy()[:, :, ::-1].copy())
            img_b = torch.from_numpy(img_b.numpy()[:, :, ::-1].copy())
            mask = torch.from_numpy(mask.numpy()[:, ::-1].copy())
        return {'I1': img_a, 'I2': img_b, 'label': mask}
class RandomRot(object):
    """Rotate images and label by a random multiple of 90 degrees."""

    def __call__(self, sample):
        img_a, img_b, mask = sample['I1'], sample['I2'], sample['label']
        # 0..3 quarter turns; a single draw keeps all three tensors aligned
        quarter_turns = random.randint(0, 3)
        if quarter_turns:
            # rotate in the spatial plane; .copy() gives torch a contiguous buffer
            img_a = torch.from_numpy(np.rot90(img_a.numpy(), quarter_turns, axes=(1, 2)).copy())
            img_b = torch.from_numpy(np.rot90(img_b.numpy(), quarter_turns, axes=(1, 2)).copy())
            mask = torch.from_numpy(np.rot90(mask.numpy(), quarter_turns, axes=(0, 1)).copy())
        return {'I1': img_a, 'I2': img_b, 'label': mask}
# sentinel print marking that all dataset/augmentation utilities are defined
print('UTILS OK')
### Simple UNet implementation
#import torch
#import torch.nn as nn
#import torch.nn.functional as F
#from torch.nn.modules.padding import ReplicationPad2d
class Unet(nn.Module):
    """EF segmentation network.

    Early-fusion U-Net: the two co-registered input images are concatenated
    along the channel axis inside forward() and processed by one
    encoder/decoder with skip connections.  Output is per-pixel LogSoftmax
    scores over label_nbr classes.

    NOTE(review): forward() uses `F` (torch.nn.functional) and
    `ReplicationPad2d`, which are imported by a later notebook cell -- that
    cell must run before the model is called.
    """
    def __init__(self, input_nbr, label_nbr):
        super(Unet, self).__init__()
        # number of channels of the fused (concatenated) input
        self.input_nbr = input_nbr
        # encoder stage 1: input_nbr -> 16 channels
        self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(16)
        self.do11 = nn.Dropout2d(p=0.2)
        self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(16)
        self.do12 = nn.Dropout2d(p=0.2)
        # encoder stage 2: 16 -> 32 channels
        self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(32)
        self.do21 = nn.Dropout2d(p=0.2)
        self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(32)
        self.do22 = nn.Dropout2d(p=0.2)
        # encoder stage 3: 32 -> 64 channels
        self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(64)
        self.do31 = nn.Dropout2d(p=0.2)
        self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(64)
        self.do32 = nn.Dropout2d(p=0.2)
        self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(64)
        self.do33 = nn.Dropout2d(p=0.2)
        # encoder stage 4: 64 -> 128 channels
        self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(128)
        self.do41 = nn.Dropout2d(p=0.2)
        self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(128)
        self.do42 = nn.Dropout2d(p=0.2)
        self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(128)
        self.do43 = nn.Dropout2d(p=0.2)
        # decoder stage 4d: upsample x2, fuse skip (256 -> 64)
        self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv43d = nn.ConvTranspose2d(256, 128, kernel_size=3, padding=1)
        self.bn43d = nn.BatchNorm2d(128)
        self.do43d = nn.Dropout2d(p=0.2)
        self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(128)
        self.do42d = nn.Dropout2d(p=0.2)
        self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(64)
        self.do41d = nn.Dropout2d(p=0.2)
        # decoder stage 3d: upsample x2, fuse skip (128 -> 32)
        self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv33d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(64)
        self.do33d = nn.Dropout2d(p=0.2)
        self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(64)
        self.do32d = nn.Dropout2d(p=0.2)
        self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(32)
        self.do31d = nn.Dropout2d(p=0.2)
        # decoder stage 2d: upsample x2, fuse skip (64 -> 16)
        self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(32)
        self.do22d = nn.Dropout2d(p=0.2)
        self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(16)
        self.do21d = nn.Dropout2d(p=0.2)
        # decoder stage 1d: upsample x2, fuse skip, project to label_nbr classes
        self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(16)
        self.do12d = nn.Dropout2d(p=0.2)
        self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
        self.sm = nn.LogSoftmax(dim=1)
    def forward(self, x1, x2):
        # early fusion: concatenate the image pair along the channel axis
        x = torch.cat((x1, x2), 1)
        """Forward method."""
        # Stage 1
        x11 = self.do11(F.relu(self.bn11(self.conv11(x))))
        x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))
        x1p = F.max_pool2d(x12, kernel_size=2, stride=2)
        # Stage 2
        x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
        x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))
        x2p = F.max_pool2d(x22, kernel_size=2, stride=2)
        # Stage 3
        x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
        x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
        x33 = self.do33(F.relu(self.bn33(self.conv33(x32))))
        x3p = F.max_pool2d(x33, kernel_size=2, stride=2)
        # Stage 4
        x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
        x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
        x43 = self.do43(F.relu(self.bn43(self.conv43(x42))))
        x4p = F.max_pool2d(x43, kernel_size=2, stride=2)
        # Stage 4d: pad the upsampled map to the skip's size before fusing
        x4d = self.upconv4(x4p)
        pad4 = ReplicationPad2d((0, x43.size(3) - x4d.size(3), 0, x43.size(2) - x4d.size(2)))
        x4d = torch.cat((pad4(x4d), x43), 1)
        x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))
        x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))
        x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))
        # Stage 3d
        x3d = self.upconv3(x41d)
        pad3 = ReplicationPad2d((0, x33.size(3) - x3d.size(3), 0, x33.size(2) - x3d.size(2)))
        x3d = torch.cat((pad3(x3d), x33), 1)
        x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))
        x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))
        x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))
        # Stage 2d
        x2d = self.upconv2(x31d)
        pad2 = ReplicationPad2d((0, x22.size(3) - x2d.size(3), 0, x22.size(2) - x2d.size(2)))
        x2d = torch.cat((pad2(x2d), x22), 1)
        x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
        x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
        # Stage 1d
        x1d = self.upconv1(x21d)
        pad1 = ReplicationPad2d((0, x12.size(3) - x1d.size(3), 0, x12.size(2) - x1d.size(2)))
        x1d = torch.cat((pad1(x1d), x12), 1)
        x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
        x11d = self.conv11d(x12d)
        # per-pixel log-probabilities over the label classes
        return self.sm(x11d)
# Rodrigo Caye Daudt
# https://rcdaudt.github.io/
# Daudt, R. C., Le Saux, B., & Boulch, A. "Fully convolutional siamese networks for change detection". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.padding import ReplicationPad2d
class SiamUnet_diff(nn.Module):
    """FC-Siam-diff change-detection network (Daudt et al., ICIP 2018).

    U-Net-style encoder/decoder. Both input images are passed through the
    *same* (weight-shared) encoder; the decoder upsamples the deepest
    features and, at every scale, concatenates the absolute difference of
    the two branches' skip connections. Output is per-pixel log-softmax
    over `label_nbr` classes.
    """

    def __init__(self, input_nbr, label_nbr):
        """input_nbr: channels per input image; label_nbr: output classes."""
        super(SiamUnet_diff, self).__init__()
        self.input_nbr = input_nbr
        # ---- shared encoder (stages 1-4, depth 16 -> 128) ----
        self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(16)
        self.do11 = nn.Dropout2d(p=0.2)
        self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(16)
        self.do12 = nn.Dropout2d(p=0.2)
        self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(32)
        self.do21 = nn.Dropout2d(p=0.2)
        self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(32)
        self.do22 = nn.Dropout2d(p=0.2)
        self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(64)
        self.do31 = nn.Dropout2d(p=0.2)
        self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(64)
        self.do32 = nn.Dropout2d(p=0.2)
        self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(64)
        self.do33 = nn.Dropout2d(p=0.2)
        self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(128)
        self.do41 = nn.Dropout2d(p=0.2)
        self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(128)
        self.do42 = nn.Dropout2d(p=0.2)
        self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(128)
        self.do43 = nn.Dropout2d(p=0.2)
        # ---- decoder: upsample + fuse |skip1 - skip2| at each scale ----
        self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv43d = nn.ConvTranspose2d(256, 128, kernel_size=3, padding=1)  # 128 up + 128 diff skip
        self.bn43d = nn.BatchNorm2d(128)
        self.do43d = nn.Dropout2d(p=0.2)
        self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(128)
        self.do42d = nn.Dropout2d(p=0.2)
        self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(64)
        self.do41d = nn.Dropout2d(p=0.2)
        self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv33d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(64)
        self.do33d = nn.Dropout2d(p=0.2)
        self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(64)
        self.do32d = nn.Dropout2d(p=0.2)
        self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(32)
        self.do31d = nn.Dropout2d(p=0.2)
        self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(32)
        self.do22d = nn.Dropout2d(p=0.2)
        self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(16)
        self.do21d = nn.Dropout2d(p=0.2)
        self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(16)
        self.do12d = nn.Dropout2d(p=0.2)
        self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
        self.sm = nn.LogSoftmax(dim=1)

    def _encode(self, x):
        """Run one image through the shared encoder.

        Returns the pre-pool activations of stages 1-4 (the skip
        connections) and the final pooled feature map.
        """
        # Stage 1
        x11 = self.do11(F.relu(self.bn11(self.conv11(x))))
        x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))
        x1p = F.max_pool2d(x12, kernel_size=2, stride=2)
        # Stage 2
        x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
        x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))
        x2p = F.max_pool2d(x22, kernel_size=2, stride=2)
        # Stage 3
        x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
        x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
        x33 = self.do33(F.relu(self.bn33(self.conv33(x32))))
        x3p = F.max_pool2d(x33, kernel_size=2, stride=2)
        # Stage 4
        x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
        x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
        x43 = self.do43(F.relu(self.bn43(self.conv43(x42))))
        x4p = F.max_pool2d(x43, kernel_size=2, stride=2)
        return x12, x22, x33, x43, x4p

    def forward(self, x1, x2):
        """Forward method."""
        # Both branches share every encoder weight; the previously duplicated
        # inline encoder code is factored into _encode().
        x12_1, x22_1, x33_1, x43_1, _ = self._encode(x1)
        # As in the original implementation, only the second image's pooled
        # stage-4 features feed the decoder; branch 1 contributes skips only.
        x12_2, x22_2, x33_2, x43_2, x4p = self._encode(x2)
        # Stage 4d: upsample, replication-pad to the skip's size (handles odd
        # input sizes), then fuse the absolute difference of the skips.
        x4d = self.upconv4(x4p)
        pad4 = ReplicationPad2d((0, x43_1.size(3) - x4d.size(3), 0, x43_1.size(2) - x4d.size(2)))
        x4d = torch.cat((pad4(x4d), torch.abs(x43_1 - x43_2)), 1)
        x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))
        x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))
        x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))
        # Stage 3d
        x3d = self.upconv3(x41d)
        pad3 = ReplicationPad2d((0, x33_1.size(3) - x3d.size(3), 0, x33_1.size(2) - x3d.size(2)))
        x3d = torch.cat((pad3(x3d), torch.abs(x33_1 - x33_2)), 1)
        x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))
        x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))
        x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))
        # Stage 2d
        x2d = self.upconv2(x31d)
        pad2 = ReplicationPad2d((0, x22_1.size(3) - x2d.size(3), 0, x22_1.size(2) - x2d.size(2)))
        x2d = torch.cat((pad2(x2d), torch.abs(x22_1 - x22_2)), 1)
        x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
        x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
        # Stage 1d
        x1d = self.upconv1(x21d)
        pad1 = ReplicationPad2d((0, x12_1.size(3) - x1d.size(3), 0, x12_1.size(2) - x1d.size(2)))
        x1d = torch.cat((pad1(x1d), torch.abs(x12_1 - x12_2)), 1)
        x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
        x11d = self.conv11d(x12d)
        return self.sm(x11d)
# Daudt, R. C., Le Saux, B., & Boulch, A. "Fully convolutional siamese networks for change detection". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.
### SiamUNet_conc network. Improvement on simple UNet, as outlined in the paper above. Siamese architectures are pretty nifty.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.padding import ReplicationPad2d
class SiamUnet_conc(nn.Module):
    """FC-Siam-conc change-detection network (Daudt et al., ICIP 2018).

    U-Net-style encoder/decoder. Both input images are passed through the
    *same* (weight-shared) encoder; the decoder upsamples the deepest
    features and, at every scale, concatenates the skip connections of
    BOTH branches (unlike FC-Siam-diff, which fuses their absolute
    difference). Output is per-pixel log-softmax over `label_nbr` classes.
    """

    def __init__(self, input_nbr, label_nbr):
        """input_nbr: channels per input image; label_nbr: output classes."""
        super(SiamUnet_conc, self).__init__()
        self.input_nbr = input_nbr
        # ---- shared encoder (stages 1-4, depth 16 -> 128) ----
        self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(16)
        self.do11 = nn.Dropout2d(p=0.2)
        self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(16)
        self.do12 = nn.Dropout2d(p=0.2)
        self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(32)
        self.do21 = nn.Dropout2d(p=0.2)
        self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(32)
        self.do22 = nn.Dropout2d(p=0.2)
        self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn31 = nn.BatchNorm2d(64)
        self.do31 = nn.Dropout2d(p=0.2)
        self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn32 = nn.BatchNorm2d(64)
        self.do32 = nn.Dropout2d(p=0.2)
        self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn33 = nn.BatchNorm2d(64)
        self.do33 = nn.Dropout2d(p=0.2)
        self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn41 = nn.BatchNorm2d(128)
        self.do41 = nn.Dropout2d(p=0.2)
        self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn42 = nn.BatchNorm2d(128)
        self.do42 = nn.Dropout2d(p=0.2)
        self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn43 = nn.BatchNorm2d(128)
        self.do43 = nn.Dropout2d(p=0.2)
        # ---- decoder: upsample + concatenate BOTH branches' skips ----
        self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv43d = nn.ConvTranspose2d(384, 128, kernel_size=3, padding=1)  # 128 up + 2 x 128 skips
        self.bn43d = nn.BatchNorm2d(128)
        self.do43d = nn.Dropout2d(p=0.2)
        self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1)
        self.bn42d = nn.BatchNorm2d(128)
        self.do42d = nn.Dropout2d(p=0.2)
        self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1)
        self.bn41d = nn.BatchNorm2d(64)
        self.do41d = nn.Dropout2d(p=0.2)
        self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv33d = nn.ConvTranspose2d(192, 64, kernel_size=3, padding=1)
        self.bn33d = nn.BatchNorm2d(64)
        self.do33d = nn.Dropout2d(p=0.2)
        self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1)
        self.bn32d = nn.BatchNorm2d(64)
        self.do32d = nn.Dropout2d(p=0.2)
        self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn31d = nn.BatchNorm2d(32)
        self.do31d = nn.Dropout2d(p=0.2)
        self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv22d = nn.ConvTranspose2d(96, 32, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(32)
        self.do22d = nn.Dropout2d(p=0.2)
        self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(16)
        self.do21d = nn.Dropout2d(p=0.2)
        self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv12d = nn.ConvTranspose2d(48, 16, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(16)
        self.do12d = nn.Dropout2d(p=0.2)
        self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
        self.sm = nn.LogSoftmax(dim=1)

    def _encode(self, x):
        """Run one image through the shared encoder.

        Returns the pre-pool activations of stages 1-4 (the skip
        connections) and the final pooled feature map.
        """
        # Stage 1
        x11 = self.do11(F.relu(self.bn11(self.conv11(x))))
        x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))
        x1p = F.max_pool2d(x12, kernel_size=2, stride=2)
        # Stage 2
        x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
        x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))
        x2p = F.max_pool2d(x22, kernel_size=2, stride=2)
        # Stage 3
        x31 = self.do31(F.relu(self.bn31(self.conv31(x2p))))
        x32 = self.do32(F.relu(self.bn32(self.conv32(x31))))
        x33 = self.do33(F.relu(self.bn33(self.conv33(x32))))
        x3p = F.max_pool2d(x33, kernel_size=2, stride=2)
        # Stage 4
        x41 = self.do41(F.relu(self.bn41(self.conv41(x3p))))
        x42 = self.do42(F.relu(self.bn42(self.conv42(x41))))
        x43 = self.do43(F.relu(self.bn43(self.conv43(x42))))
        x4p = F.max_pool2d(x43, kernel_size=2, stride=2)
        return x12, x22, x33, x43, x4p

    def forward(self, x1, x2):
        """Forward method."""
        # Both branches share every encoder weight; the previously duplicated
        # inline encoder code is factored into _encode().
        x12_1, x22_1, x33_1, x43_1, _ = self._encode(x1)
        # As in the original implementation, only the second image's pooled
        # stage-4 features feed the decoder; branch 1 contributes skips only.
        x12_2, x22_2, x33_2, x43_2, x4p = self._encode(x2)
        # Stage 4d: upsample, replication-pad to the skips' size (handles odd
        # input sizes), then concatenate both branches' skip connections.
        x4d = self.upconv4(x4p)
        pad4 = ReplicationPad2d((0, x43_1.size(3) - x4d.size(3), 0, x43_1.size(2) - x4d.size(2)))
        x4d = torch.cat((pad4(x4d), x43_1, x43_2), 1)
        x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d))))
        x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d))))
        x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d))))
        # Stage 3d
        x3d = self.upconv3(x41d)
        pad3 = ReplicationPad2d((0, x33_1.size(3) - x3d.size(3), 0, x33_1.size(2) - x3d.size(2)))
        x3d = torch.cat((pad3(x3d), x33_1, x33_2), 1)
        x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d))))
        x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d))))
        x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d))))
        # Stage 2d
        x2d = self.upconv2(x31d)
        pad2 = ReplicationPad2d((0, x22_1.size(3) - x2d.size(3), 0, x22_1.size(2) - x2d.size(2)))
        x2d = torch.cat((pad2(x2d), x22_1, x22_2), 1)
        x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
        x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
        # Stage 1d
        x1d = self.upconv1(x21d)
        pad1 = ReplicationPad2d((0, x12_1.size(3) - x1d.size(3), 0, x12_1.size(2) - x1d.size(2)))
        x1d = torch.cat((pad1(x1d), x12_1, x12_2), 1)
        x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
        x11d = self.conv11d(x12d)
        return self.sm(x11d)
# Daudt, R.C., Le Saux, B., Boulch, A. and Gousseau, Y., 2019. Multitask learning for large-scale semantic change detection. Computer Vision and Image Understanding, 187, p.102783.
# FresUNet - comes from the above paper. Still not sure how it improves on UNet tbh. Will find out soon.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.padding import ReplicationPad2d
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 Conv2d with padding=1, so spatial size is preserved
    (for stride 1) or divided by *stride*."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
    )
class BasicBlock_ss(nn.Module):
    """Residual block with optional spatial subsampling.

    Two 3x3 conv/BN/ReLU layers plus a shortcut. If *planes* is omitted,
    the output depth defaults to inplanes * subsamp, so a subsampling block
    halves the resolution while multiplying the depth by *subsamp*. When
    the depth changes, the shortcut is a 1x1 "couple" projection + BN;
    otherwise it is the identity.
    """

    def __init__(self, inplanes, planes = None, subsamp=1):
        super(BasicBlock_ss, self).__init__()
        # Fix: identity comparison with None (`is None`), not `== None`.
        if planes is None:
            planes = inplanes * subsamp
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.subsamp = subsamp
        # A projection shortcut is only needed when the depth changes.
        self.doit = planes != inplanes
        if self.doit:
            self.couple = nn.Conv2d(inplanes, planes, kernel_size=1)
            self.bnc = nn.BatchNorm2d(planes)

    def forward(self, x):
        if self.doit:
            residual = self.couple(x)
            residual = self.bnc(residual)
        else:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        if self.subsamp > 1:
            # Subsample both paths so the residual addition stays aligned.
            out = F.max_pool2d(out, kernel_size=self.subsamp, stride=self.subsamp)
            residual = F.max_pool2d(residual, kernel_size=self.subsamp, stride=self.subsamp)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class BasicBlock_us(nn.Module):
    """Residual block with transposed-conv upsampling.

    Upsamples by *upsamp* while dividing the channel count by *upsamp*;
    the shortcut uses a parallel transposed conv so both paths match.

    Raises:
        ValueError: if inplanes is not divisible by upsamp (the original
        code silently truncated, per its own "fix later" note).
    """

    def __init__(self, inplanes, upsamp=1):
        super(BasicBlock_us, self).__init__()
        if inplanes % upsamp != 0:
            raise ValueError(
                'inplanes (%d) must be divisible by upsamp (%d)' % (inplanes, upsamp))
        planes = inplanes // upsamp
        self.conv1 = nn.ConvTranspose2d(inplanes, planes, kernel_size=3, padding=1, stride=upsamp, output_padding=1)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.upsamp = upsamp
        # Projection shortcut: upsample the input the same way as the main path.
        self.couple = nn.ConvTranspose2d(inplanes, planes, kernel_size=3, padding=1, stride=upsamp, output_padding=1)
        self.bnc = nn.BatchNorm2d(planes)

    def forward(self, x):
        residual = self.couple(x)
        residual = self.bnc(residual)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class FresUNet(nn.Module):
    """FresUNet segmentation network.

    Fully-residual early-fusion U-Net (Daudt et al., CVIU 2019): the two
    input images are concatenated along the channel axis and processed by
    a single encoder/decoder built from residual blocks (BasicBlock_ss /
    BasicBlock_us), with skip connections concatenated at every decoder
    stage. Output is per-pixel log-softmax over `label_nbr` classes.
    """

    def __init__(self, input_nbr, label_nbr):
        """Init FresUNet fields.

        input_nbr: total input channels (both images together);
        label_nbr: number of output classes.
        """
        super(FresUNet, self).__init__()
        self.input_nbr = input_nbr
        # cur_depth tracks the running channel count through the network;
        # d1..d4 record the skip-connection depths re-consumed by the decoder.
        cur_depth = input_nbr
        base_depth = 8
        # Encoding stage 1
        self.encres1_1 = BasicBlock_ss(cur_depth, planes = base_depth)
        cur_depth = base_depth
        d1 = base_depth
        self.encres1_2 = BasicBlock_ss(cur_depth, subsamp=2)  # halves H,W, doubles depth
        cur_depth *= 2
        # Encoding stage 2
        self.encres2_1 = BasicBlock_ss(cur_depth)
        d2 = cur_depth
        self.encres2_2 = BasicBlock_ss(cur_depth, subsamp=2)
        cur_depth *= 2
        # Encoding stage 3
        self.encres3_1 = BasicBlock_ss(cur_depth)
        d3 = cur_depth
        self.encres3_2 = BasicBlock_ss(cur_depth, subsamp=2)
        cur_depth *= 2
        # Encoding stage 4
        self.encres4_1 = BasicBlock_ss(cur_depth)
        d4 = cur_depth
        self.encres4_2 = BasicBlock_ss(cur_depth, subsamp=2)
        cur_depth *= 2
        # Decoding stage 4
        self.decres4_1 = BasicBlock_ss(cur_depth)
        self.decres4_2 = BasicBlock_us(cur_depth, upsamp=2)
        cur_depth = int(cur_depth/2)
        # Decoding stage 3 (input = upsampled features + stage-4 skip)
        self.decres3_1 = BasicBlock_ss(cur_depth + d4, planes = cur_depth)
        self.decres3_2 = BasicBlock_us(cur_depth, upsamp=2)
        cur_depth = int(cur_depth/2)
        # Decoding stage 2
        self.decres2_1 = BasicBlock_ss(cur_depth + d3, planes = cur_depth)
        self.decres2_2 = BasicBlock_us(cur_depth, upsamp=2)
        cur_depth = int(cur_depth/2)
        # Decoding stage 1
        self.decres1_1 = BasicBlock_ss(cur_depth + d2, planes = cur_depth)
        self.decres1_2 = BasicBlock_us(cur_depth, upsamp=2)
        cur_depth = int(cur_depth/2)
        # Output: 1x1 conv to class scores, then per-pixel log-softmax.
        self.coupling = nn.Conv2d(cur_depth + d1, label_nbr, kernel_size=1)
        self.sm = nn.LogSoftmax(dim=1)

    def forward(self, x1, x2):
        """Early-fusion forward pass: concatenate x1 and x2 on the channel
        axis, encode, then decode with concatenated skip connections."""
        x = torch.cat((x1, x2), 1)
        # pad5 = ReplicationPad2d((0, x53.size(3) - x5d.size(3), 0, x53.size(2) - x5d.size(2)))
        # NOTE(review): from here on x1..x4 shadow the input arguments —
        # they hold the encoder skip connections, not the input images.
        s1_1 = x.size()
        x1 = self.encres1_1(x)
        x = self.encres1_2(x1)
        s2_1 = x.size()
        x2 = self.encres2_1(x)
        x = self.encres2_2(x2)
        s3_1 = x.size()
        x3 = self.encres3_1(x)
        x = self.encres3_2(x3)
        s4_1 = x.size()
        x4 = self.encres4_1(x)
        x = self.encres4_2(x4)
        x = self.decres4_1(x)
        x = self.decres4_2(x)
        s4_2 = x.size()
        # Replication-pad to undo rounding from odd-sized subsampling stages
        # so the decoder features align with the recorded skip sizes.
        pad4 = ReplicationPad2d((0, s4_1[3] - s4_2[3], 0, s4_1[2] - s4_2[2]))
        x = pad4(x)
        # x = self.decres3_1(x)
        x = self.decres3_1(torch.cat((x, x4), 1))
        x = self.decres3_2(x)
        s3_2 = x.size()
        pad3 = ReplicationPad2d((0, s3_1[3] - s3_2[3], 0, s3_1[2] - s3_2[2]))
        x = pad3(x)
        x = self.decres2_1(torch.cat((x, x3), 1))
        x = self.decres2_2(x)
        s2_2 = x.size()
        pad2 = ReplicationPad2d((0, s2_1[3] - s2_2[3], 0, s2_1[2] - s2_2[2]))
        x = pad2(x)
        x = self.decres1_1(torch.cat((x, x2), 1))
        x = self.decres1_2(x)
        s1_2 = x.size()
        pad1 = ReplicationPad2d((0, s1_1[3] - s1_2[3], 0, s1_1[2] - s1_2[2]))
        x = pad1(x)
        x = self.coupling(torch.cat((x, x1), 1))
        x = self.sm(x)
        return x
# Dataset
# Build train/test datasets and loaders. DATA_AUG, tr, RandomFlip, RandomRot,
# ChangeDetectionDataset, PATH_TO_DATASET, TRAIN_STRIDE, BATCH_SIZE, TYPE and
# DataLoader all come from earlier notebook cells.
if DATA_AUG:
    data_transform = tr.Compose([RandomFlip(), RandomRot()])
else:
    data_transform = None
train_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train = True, stride = TRAIN_STRIDE, transform=data_transform)
#weights = torch.FloatTensor(train_dataset.weights)
# Class weights (change vs. no-change imbalance), moved to the GPU for NLLLoss.
weights = torch.FloatTensor(train_dataset.weights).cuda()
print(weights)
train_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 4)
test_dataset = ChangeDetectionDataset(PATH_TO_DATASET, train = False, stride = TRAIN_STRIDE)
test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle = True, num_workers = 4)
print('DATASETS OK')
# print(weights)
# TYPE selects which Sentinel-2 band combination is fed to the network:
# 0-RGB | 1-RGBIr | 2-All bands s.t. resolution <= 20m | 3-All bands
# The commented alternatives switch between the four architectures.
if TYPE == 0:
    # net, net_name = Unet(2*3, 2), 'FC-EF'
    net, net_name = SiamUnet_conc(3, 2), 'FC-Siam-conc'
    # net, net_name = SiamUnet_diff(3, 2), 'FC-Siam-diff'
    # net, net_name = FresUNet(2*3, 2), 'FresUNet'
elif TYPE == 1:
    # net, net_name = Unet(2*4, 2), 'FC-EF'
    net, net_name = SiamUnet_conc(4, 2), 'FC-Siam-conc'
    # net, net_name = SiamUnet_diff(4, 2), 'FC-Siam-diff'
    # net, net_name = FresUNet(2*4, 2), 'FresUNet'
elif TYPE == 2:
    # net, net_name = Unet(2*10, 2), 'FC-EF'
    net, net_name = SiamUnet_conc(10, 2), 'FC-Siam-conc'
    # net, net_name = SiamUnet_diff(10, 2), 'FC-Siam-diff'
    # net, net_name = FresUNet(2*10, 2), 'FresUNet'
elif TYPE == 3:
    # net, net_name = Unet(2*13, 2), 'FC-EF'
    net, net_name = SiamUnet_conc(13, 2), 'FC-Siam-conc'
    # net, net_name = SiamUnet_diff(13, 2), 'FC-Siam-diff'
    # net, net_name = FresUNet(2*13, 2), 'FresUNet'
net.cuda()
criterion = nn.NLLLoss(weight=weights) # to be used with logsoftmax output - need to think about tweaking this too.
print('NETWORK OK')
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
# Report the size of the selected architecture.
print('Number of trainable parameters:', count_parameters(net))
### This cell gives the procedure to train the model on the training dataset, and output
### graphs that show the progress of the training through the epochs e.g. loss, recall etc.
### Uses the Adam optimiser.
### There are lots of things we could tweak here - optimiser, learning rate, weight decay (regularisation),
### no. of epochs, as well as tweaking the fundamental structure of the ConvNet models used.
# net.load_state_dict(torch.load('net-best_epoch-1_fm-0.7394933126157746.pth.tar'))
def train(n_epochs = N_EPOCHS, save = True):
    """Train the global `net` for *n_epochs* epochs with live metric plots.

    Relies on notebook globals: net, criterion, train_loader, train_dataset,
    test_dataset, test(), net_name, plt, display, tqdm, N_EPOCHS.
    After every epoch both datasets are fully re-evaluated via test(), four
    matplotlib figures are refreshed in place, and the weights are
    checkpointed whenever the *train* F-measure improves or the *train*
    loss reaches a new minimum. If *save* is True the figures are also
    written to image files at the end. Returns a dict of final-epoch
    metrics.
    """
    # One slot per epoch for every tracked metric; t doubles as the x-axis.
    t = np.linspace(1, n_epochs, n_epochs)
    epoch_train_loss = 0 * t
    epoch_train_accuracy = 0 * t
    epoch_train_change_accuracy = 0 * t
    epoch_train_nochange_accuracy = 0 * t
    epoch_train_precision = 0 * t
    epoch_train_recall = 0 * t
    epoch_train_Fmeasure = 0 * t
    epoch_test_loss = 0 * t
    epoch_test_accuracy = 0 * t
    epoch_test_change_accuracy = 0 * t
    epoch_test_nochange_accuracy = 0 * t
    epoch_test_precision = 0 * t
    epoch_test_recall = 0 * t
    epoch_test_Fmeasure = 0 * t
    # mean_acc = 0
    # best_mean_acc = 0
    # Best-so-far trackers for checkpointing (based on TRAIN metrics —
    # NOTE(review): selecting on train F-measure/loss rather than test
    # metrics may favour overfitted checkpoints; confirm this is intended.
    fm = 0
    best_fm = 0
    lss = 1000
    best_lss = 1000
    plt.figure(num=1)
    plt.figure(num=2)
    plt.figure(num=3)
    optimizer = torch.optim.Adam(net.parameters(), weight_decay=1e-4)
    # optimizer = torch.optim.Adam(net.parameters(), lr=0.0005)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
    for epoch_index in tqdm(range(n_epochs)):
        net.train()
        print('Epoch: ' + str(epoch_index + 1) + ' of ' + str(N_EPOCHS))
        # These accumulators are not used below — per-epoch metrics are
        # recomputed by test() instead; kept as-is.
        tot_count = 0
        tot_loss = 0
        tot_accurate = 0
        class_correct = list(0. for i in range(2))
        class_total = list(0. for i in range(2))
        # for batch_index, batch in enumerate(tqdm(data_loader)):
        for batch in train_loader:
            I1 = Variable(batch['I1'].float().cuda())
            I2 = Variable(batch['I2'].float().cuda())
            label = torch.squeeze(Variable(batch['label'].cuda()))
            #I1 = Variable(batch['I1'].float())
            #I2 = Variable(batch['I2'].float())
            #label = torch.squeeze(Variable(batch['label']))
            optimizer.zero_grad()
            output = net(I1, I2)
            loss = criterion(output, label.long())
            loss.backward()
            optimizer.step()
            # NOTE(review): scheduler.step() runs once per *batch*, so the
            # 0.95 exponential LR decay is applied per batch, not per epoch —
            # confirm this aggressive decay is intended.
            scheduler.step()
        # Full re-evaluation on both datasets with the current weights.
        epoch_train_loss[epoch_index], epoch_train_accuracy[epoch_index], cl_acc, pr_rec = test(train_dataset)
        epoch_train_nochange_accuracy[epoch_index] = cl_acc[0]
        epoch_train_change_accuracy[epoch_index] = cl_acc[1]
        epoch_train_precision[epoch_index] = pr_rec[0]
        epoch_train_recall[epoch_index] = pr_rec[1]
        epoch_train_Fmeasure[epoch_index] = pr_rec[2]
        # epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index], cl_acc, pr_rec = test(test_dataset)
        epoch_test_loss[epoch_index], epoch_test_accuracy[epoch_index], cl_acc, pr_rec = test(test_dataset)
        epoch_test_nochange_accuracy[epoch_index] = cl_acc[0]
        epoch_test_change_accuracy[epoch_index] = cl_acc[1]
        epoch_test_precision[epoch_index] = pr_rec[0]
        epoch_test_recall[epoch_index] = pr_rec[1]
        epoch_test_Fmeasure[epoch_index] = pr_rec[2]
        # Figure 1: train/test loss curves, redrawn in place each epoch.
        plt.figure(num=1)
        plt.clf()
        l1_1, = plt.plot(t[:epoch_index + 1], epoch_train_loss[:epoch_index + 1], label='Train loss')
        l1_2, = plt.plot(t[:epoch_index + 1], epoch_test_loss[:epoch_index + 1], label='Test loss')
        plt.legend(handles=[l1_1, l1_2])
        plt.grid()
        # plt.gcf().gca().set_ylim(bottom = 0)
        plt.gcf().gca().set_xlim(left = 0)
        plt.title('Loss')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # Figure 2: overall accuracy.
        plt.figure(num=2)
        plt.clf()
        l2_1, = plt.plot(t[:epoch_index + 1], epoch_train_accuracy[:epoch_index + 1], label='Train accuracy')
        l2_2, = plt.plot(t[:epoch_index + 1], epoch_test_accuracy[:epoch_index + 1], label='Test accuracy')
        plt.legend(handles=[l2_1, l2_2])
        plt.grid()
        plt.gcf().gca().set_ylim(0, 100)
        # plt.gcf().gca().set_ylim(bottom = 0)
        # plt.gcf().gca().set_xlim(left = 0)
        plt.title('Accuracy')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # Figure 3: per-class (change / no-change) accuracy.
        plt.figure(num=3)
        plt.clf()
        l3_1, = plt.plot(t[:epoch_index + 1], epoch_train_nochange_accuracy[:epoch_index + 1], label='Train accuracy: no change')
        l3_2, = plt.plot(t[:epoch_index + 1], epoch_train_change_accuracy[:epoch_index + 1], label='Train accuracy: change')
        l3_3, = plt.plot(t[:epoch_index + 1], epoch_test_nochange_accuracy[:epoch_index + 1], label='Test accuracy: no change')
        l3_4, = plt.plot(t[:epoch_index + 1], epoch_test_change_accuracy[:epoch_index + 1], label='Test accuracy: change')
        plt.legend(handles=[l3_1, l3_2, l3_3, l3_4])
        plt.grid()
        plt.gcf().gca().set_ylim(0, 100)
        # plt.gcf().gca().set_ylim(bottom = 0)
        # plt.gcf().gca().set_xlim(left = 0)
        plt.title('Accuracy per class')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # Figure 4: precision / recall / F-measure.
        plt.figure(num=4)
        plt.clf()
        l4_1, = plt.plot(t[:epoch_index + 1], epoch_train_precision[:epoch_index + 1], label='Train precision')
        l4_2, = plt.plot(t[:epoch_index + 1], epoch_train_recall[:epoch_index + 1], label='Train recall')
        l4_3, = plt.plot(t[:epoch_index + 1], epoch_train_Fmeasure[:epoch_index + 1], label='Train Dice/F1')
        l4_4, = plt.plot(t[:epoch_index + 1], epoch_test_precision[:epoch_index + 1], label='Test precision')
        l4_5, = plt.plot(t[:epoch_index + 1], epoch_test_recall[:epoch_index + 1], label='Test recall')
        l4_6, = plt.plot(t[:epoch_index + 1], epoch_test_Fmeasure[:epoch_index + 1], label='Test Dice/F1')
        plt.legend(handles=[l4_1, l4_2, l4_3, l4_4, l4_5, l4_6])
        plt.grid()
        plt.gcf().gca().set_ylim(0, 1)
        # plt.gcf().gca().set_ylim(bottom = 0)
        # plt.gcf().gca().set_xlim(left = 0)
        plt.title('Precision, Recall and F-measure')
        display.clear_output(wait=True)
        display.display(plt.gcf())
        # mean_acc = (epoch_test_nochange_accuracy[epoch_index] + epoch_test_change_accuracy[epoch_index])/2
        # if mean_acc > best_mean_acc:
        #     best_mean_acc = mean_acc
        #     save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_acc-' + str(mean_acc) + '.pth.tar'
        #     torch.save(net.state_dict(), save_str)
        # fm = pr_rec[2]
        # Checkpoint on new best train F-measure and on new best train loss.
        fm = epoch_train_Fmeasure[epoch_index]
        if fm > best_fm:
            best_fm = fm
            save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_fm-' + str(fm) + '.pth.tar'
            torch.save(net.state_dict(), save_str)
        lss = epoch_train_loss[epoch_index]
        if lss < best_lss:
            best_lss = lss
            save_str = 'net-best_epoch-' + str(epoch_index + 1) + '_loss-' + str(lss) + '.pth.tar'
            torch.save(net.state_dict(), save_str)
        # print('Epoch loss: ' + str(tot_loss/tot_count))
    if save:
        # Persist the four metric figures after the last epoch.
        im_format = 'png'
        # im_format = 'eps'
        plt.figure(num=1)
        plt.savefig(net_name + '-01-loss.' + im_format)
        plt.figure(num=2)
        plt.savefig(net_name + '-02-accuracy.' + im_format)
        plt.figure(num=3)
        plt.savefig(net_name + '-03-accuracy-per-class.' + im_format)
        plt.figure(num=4)
        plt.savefig(net_name + '-04-prec-rec-fmeas.' + im_format)
    # Final-epoch ([-1]) metrics; pr_rec leaks from the last loop iteration.
    out = {'train_loss': epoch_train_loss[-1],
           'train_accuracy': epoch_train_accuracy[-1],
           'train_nochange_accuracy': epoch_train_nochange_accuracy[-1],
           'train_change_accuracy': epoch_train_change_accuracy[-1],
           'test_loss': epoch_test_loss[-1],
           'test_accuracy': epoch_test_accuracy[-1],
           'test_nochange_accuracy': epoch_test_nochange_accuracy[-1],
           'test_change_accuracy': epoch_test_change_accuracy[-1]}
    print('pr_c, rec_c, f_meas, pr_nc, rec_nc')
    print(pr_rec)
    return out
L = 1024  # maximum tile side length (used by the later, L-based test() below)
N = 2  # each image is evaluated as an N x N grid of tiles in this test()
def test(dset):
    """Evaluate the global `net` on every image of *dset*, tile by tile.

    Each image is split into an N x N grid of tiles to fit in GPU memory.
    Returns (net_loss, net_accuracy, class_accuracy, pr_rec) where pr_rec =
    [precision, recall, F1, precision_nc, recall_nc] for the change class
    ("_nc" = no-change class). Uses notebook globals net, criterion, N.
    """
    net.eval()
    tot_loss = 0
    tot_count = 0
    tot_accurate = 0
    n = 2  # number of classes (no-change, change)
    class_correct = list(0. for i in range(n))
    class_total = list(0. for i in range(n))
    class_accuracy = list(0. for i in range(n))
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    for img_index in dset.names:
        I1_full, I2_full, cm_full = dset.get_img(img_index)
        s = cm_full.shape
        # Tile start offsets along rows (steps0) and columns (steps1).
        steps0 = np.arange(0, s[0], ceil(s[0] / N))
        steps1 = np.arange(0, s[1], ceil(s[1] / N))
        for ii in range(N):
            for jj in range(N):
                xmin = steps0[ii]
                if ii == N - 1:
                    xmax = s[0]
                else:
                    xmax = steps0[ii + 1]
                # BUG FIX: ymin was `jj` (the tile index, i.e. 0 or 1) instead
                # of the tile's starting column, so tiles overlapped and
                # pixels were counted multiple times.
                ymin = steps1[jj]
                if jj == N - 1:
                    ymax = s[1]
                else:
                    ymax = steps1[jj + 1]
                I1 = I1_full[:, xmin:xmax, ymin:ymax]
                I2 = I2_full[:, xmin:xmax, ymin:ymax]
                cm = cm_full[xmin:xmax, ymin:ymax]
                I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
                I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
                cm = Variable(torch.unsqueeze(torch.from_numpy(1.0 * cm), 0).float()).cuda()
                output = net(I1, I2)
                loss = criterion(output, cm.long())
                # print(loss)
                # Weight the per-tile mean loss by the tile's pixel count.
                tot_loss += loss.data * np.prod(cm.size())
                tot_count += np.prod(cm.size())
                _, predicted = torch.max(output.data, 1)
                c = (predicted.int() == cm.data.int())
                # Per-class pixel accuracy bookkeeping.
                for i in range(c.size(1)):
                    for j in range(c.size(2)):
                        l = int(cm.data[0, i, j])
                        class_correct[l] += c[0, i, j]
                        class_total[l] += 1
                # Confusion-matrix counts for the binary change problem.
                pr = (predicted.int() > 0).cpu().numpy()
                gt = (cm.data.int() > 0).cpu().numpy()
                tp += np.logical_and(pr, gt).sum()
                tn += np.logical_and(np.logical_not(pr), np.logical_not(gt)).sum()
                fp += np.logical_and(pr, np.logical_not(gt)).sum()
                fn += np.logical_and(np.logical_not(pr), gt).sum()
    net_loss = tot_loss / tot_count
    net_accuracy = 100 * (tp + tn) / tot_count
    for i in range(n):
        class_accuracy[i] = 100 * class_correct[i] / max(class_total[i], 0.00001)
    # NOTE(review): these ratios raise ZeroDivisionError in the degenerate
    # case of no predicted (or no actual) change pixels — consider guarding.
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    f_meas = 2 * prec * rec / (prec + rec)
    prec_nc = tn / (tn + fn)
    rec_nc = tn / (tn + fp)
    pr_rec = [prec, rec, f_meas, prec_nc, rec_nc]
    return net_loss, net_accuracy, class_accuracy, pr_rec
### This cell either loads trained weights, or it begins the training process of a network by calling train().
if LOAD_TRAINED:
    # Reuse previously saved weights instead of retraining.
    net.load_state_dict(torch.load('net_final.pth.tar'))
    print('LOAD OK')
else:
    # Train from scratch and report final metrics plus wall-clock time.
    t_start = time.time()
    out_dic = train()
    t_end = time.time()
    print(out_dic)
    print('Elapsed time:')
    print(t_end - t_start)
### This cell saves the weights of the trained network for future use.
if not LOAD_TRAINED:
    torch.save(net.state_dict(), 'siamunet_conc_net_final.pth.tar')
    print('SAVE OK')
### This cell outputs the results of the trained network when applied to the test set.
### Results come in the form of png files showing the network's predictions of change.
def save_test_results(dset):
    """Save a colour-coded change-prediction PNG for every image in *dset*.

    Output channels: R = ground truth, G = network prediction, B = ground
    truth — agreement renders white, missed change magenta, false alarms
    green. Uses notebook globals net, net_name, io, tqdm, warnings.
    """
    for name in tqdm(dset.names):
        with warnings.catch_warnings():
            # Fix: catch_warnings() alone only snapshots/restores the filter
            # state; actually silence warnings (e.g. skimage's "low contrast
            # image" from imsave) within this scope.
            warnings.simplefilter('ignore')
            I1, I2, cm = dset.get_img(name)
            I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
            I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
            out = net(I1, I2)
            _, predicted = torch.max(out.data, 1)
            I = np.stack((255*cm,255*np.squeeze(predicted.cpu().numpy()),255*cm),2)
            io.imsave(f'{net_name}-{name}.png',I)
# Render and save prediction maps for the whole test set, timing the run.
t_start = time.time()
# save_test_results(train_dataset)
save_test_results(test_dataset)
t_end = time.time()
print('Elapsed time: {}'.format(t_end - t_start))
### This cell returns various metrics that relate to the performance of the network.
### It does this by testing the trained network on the test set (called by test) and then
### computing the various metrics e.g. accuracy, precision, recall.
L = 1024  # tile side length for the L-based test() defined below
def kappa(tp, tn, fp, fn):
    """Cohen's kappa computed from binary confusion-matrix counts."""
    total = tp + tn + fp + fn
    observed = (tp + tn) / total
    expected = ((tp + fp) * (tp + fn) + (tn + fp) * (tn + fn)) / (total * total)
    return (observed - expected) / (1 - expected)
def test(dset):
    """Evaluate the global `net` on every image of *dset* in L x L tiles.

    Redefines the earlier test(): tiles each image into L-pixel squares,
    accumulates loss and confusion-matrix counts, and returns a dict of
    metrics (loss, accuracy, per-class accuracy, precision, recall, dice,
    kappa). Uses notebook globals net, criterion, L, tqdm.
    """
    net.eval()
    tot_loss = 0
    tot_count = 0
    tot_accurate = 0
    n = 2  # number of classes (no-change, change)
    class_correct = list(0. for i in range(n))
    class_total = list(0. for i in range(n))
    class_accuracy = list(0. for i in range(n))
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    for img_index in tqdm(dset.names):
        I1_full, I2_full, cm_full = dset.get_img(img_index)
        s = cm_full.shape
        for ii in range(ceil(s[0] / L)):
            for jj in range(ceil(s[1] / L)):
                xmin = L * ii
                # BUG FIX: xmax was clamped with s[1] (image width) instead of
                # s[0] (image height); non-square images were tiled wrongly.
                xmax = min(L * (ii + 1), s[0])
                ymin = L * jj
                ymax = min(L * (jj + 1), s[1])
                I1 = I1_full[:, xmin:xmax, ymin:ymax]
                I2 = I2_full[:, xmin:xmax, ymin:ymax]
                cm = cm_full[xmin:xmax, ymin:ymax]
                I1 = Variable(torch.unsqueeze(I1, 0).float()).cuda()
                I2 = Variable(torch.unsqueeze(I2, 0).float()).cuda()
                cm = Variable(torch.unsqueeze(torch.from_numpy(1.0 * cm), 0).float()).cuda()
                output = net(I1, I2)
                loss = criterion(output, cm.long())
                # Weight the per-tile mean loss by the tile's pixel count.
                tot_loss += loss.data * np.prod(cm.size())
                tot_count += np.prod(cm.size())
                _, predicted = torch.max(output.data, 1)
                c = (predicted.int() == cm.data.int())
                # Per-class pixel accuracy bookkeeping.
                for i in range(c.size(1)):
                    for j in range(c.size(2)):
                        l = int(cm.data[0, i, j])
                        class_correct[l] += c[0, i, j]
                        class_total[l] += 1
                # Confusion-matrix counts for the binary change problem.
                pr = (predicted.int() > 0).cpu().numpy()
                gt = (cm.data.int() > 0).cpu().numpy()
                tp += np.logical_and(pr, gt).sum()
                tn += np.logical_and(np.logical_not(pr), np.logical_not(gt)).sum()
                fp += np.logical_and(pr, np.logical_not(gt)).sum()
                fn += np.logical_and(np.logical_not(pr), gt).sum()
    net_loss = tot_loss / tot_count
    net_loss = float(net_loss.cpu().numpy())
    net_accuracy = 100 * (tp + tn) / tot_count
    for i in range(n):
        class_accuracy[i] = 100 * class_correct[i] / max(class_total[i], 0.00001)
        # NOTE(review): assumes class_correct[i] is a tensor (i.e. at least
        # one pixel of class i was seen); a class absent from the whole set
        # would leave a plain float here and .cpu() would fail.
        class_accuracy[i] = float(class_accuracy[i].cpu().numpy())
    # NOTE(review): these ratios raise ZeroDivisionError in the degenerate
    # case of no predicted (or no actual) change pixels — consider guarding.
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    dice = 2 * prec * rec / (prec + rec)
    prec_nc = tn / (tn + fn)
    rec_nc = tn / (tn + fp)
    pr_rec = [prec, rec, dice, prec_nc, rec_nc]
    k = kappa(tp, tn, fp, fn)
    return {'net_loss': net_loss,
            'net_accuracy': net_accuracy,
            'class_accuracy': class_accuracy,
            'precision': prec,
            'recall': rec,
            'dice': dice,
            'kappa': k}
# Final evaluation of the trained network on the held-out test set.
results = test(test_dataset)
pprint(results)
```
| github_jupyter |
# Poetry Generation
Generate your own AI poetry based on the work of female, non-binary and trans artists using RNNs.
We are using part of the [Poetry Foundation dataset from Kaggle](https://www.kaggle.com/johnhallman/complete-poetryfoundationorg-dataset), which was tagged by gender; works by male poets were filtered out.
The Neural Network creates a new poem based on patterns it recognizes in the training poetry. The user can also choose the length of the poem and its starting word from a list of keywords.
## Get the Data
The data is already provided for you in `./training_data_per_tag/FILE.txt`. The folder contains a list of `.TXT files` which are named according to the categories of poems in the dataset
>* As a first step, we'll load in this data and look at some samples.
* Then, you'll be tasked with defining and training an RNN to generate a new poem!
```
# load in data (helper is a project-local module providing load_data())
import helper
data_dir = '../data/training_data_per_tag/Race & Ethnicity.txt'
# Other training files available in the same folder — swap data_dir to one of:
#   Arts & Sciences.txt, Cities & Urban Life.txt, Death.txt, Desire.txt,
#   Fairy-tales & Legends.txt, Friends & Enemies.txt, Gender & Sexuality.txt,
#   Humor & Satire.txt, Landscapes & Pastorals.txt, LGBTQ.txt, Love.txt,
#   Parenthood.txt, Pets.txt, Race & Ethnicity.txt,
#   Realistic & Complicated.txt, Sorrow & Grieving.txt, The Mind.txt
text = helper.load_data(data_dir)
```
## Explore the Data
Play around with `view_sentence_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of poetry is separated by a newline character `\n`.
```
# range of lines to preview at the end of this cell (start, stop)
view_sentence_range = (30, 50)

import numpy as np

print('Dataset Stats')
# a dict comprehension de-duplicates the whitespace-split tokens
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))

# individual poems ("scenes") are separated by a '~~~' marker line
scenes = text.split('\n~~~\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))

sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))

print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
```
---
## Implement Pre-processing Functions
The first thing to do to any dataset is pre-processing. Implement the following pre-processing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)`
```
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for the vocabulary.

    Words are assigned ids by descending frequency, so id 0 is the most
    common word in the corpus.

    :param text: The text of the corpus split into words (list of tokens)
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # count occurrences, then order the vocabulary from most to least common
    word_counts = Counter(text)
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)

    # BUG FIX: removed leftover debug prints that dumped the input sample
    # and both complete dictionaries to stdout on every call.
    int_to_vocab = {i: word for i, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: i for i, word in int_to_vocab.items()}

    return vocab_to_int, int_to_vocab
tests.test_create_lookup_tables(create_lookup_tables)
```
### Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids.
Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( **.** )
- Comma ( **,** )
- Quotation Mark ( **"** )
- Semicolon ( **;** )
- Exclamation mark ( **!** )
- Question mark ( **?** )
- Left Parentheses ( **(** )
- Right Parentheses ( **)** )
- Dash ( **-** )
- Return ( **\n** )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||".
```
def token_lookup():
    """
    Build the punctuation-to-token mapping used before word splitting.

    Each punctuation symbol maps to a unique ``||token||`` placeholder so
    that, after substitution, symbols become separate "words" and e.g.
    "bye" and "bye!" end up sharing a single vocabulary id.

    :return: Dict where the key is the punctuation and the value is its token
    """
    return {
        '.': '||period||',
        ',': '||comma||',
        '"': '||quotation_mark||',
        ';': '||semicolon||',
        '!': '||exclam_mark||',
        '?': '||question_mark||',
        '(': '||left_par||',
        ')': '||right_par||',
        '-': '||dash||',
        '\n': '||return||',
    }
tests.test_tokenize(token_lookup)
```
## Pre-process all the data and save it
Running the code cell below will pre-process all the data and save it to file. You're encouraged to look at the code for `preprocess_and_save_data` in the `helper.py` file to see what it's doing in detail, but you do not need to change this code.
```
# pre-process training data: tokenize punctuation, build the lookup tables,
# and save everything to disk for the checkpoint below
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
```
# Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
```
import helper
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
```
## Build the Neural Network
In this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions.
### Check Access to GPU
```
import torch
# Check for a GPU; `train_on_gpu` is read by the model and training code below
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('No GPU found. Please use a GPU to train your neural network.')
```
## Input
Let's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions.
You can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual.
```
data = TensorDataset(feature_tensors, target_tensors)
data_loader = torch.utils.data.DataLoader(data,
batch_size=batch_size)
```
### Batching
Implement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes.
>You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`.
For example, say we have these as input:
```
words = [1, 2, 3, 4, 5, 6, 7]
sequence_length = 4
```
Your first `feature_tensor` should contain the values:
```
[1, 2, 3, 4]
```
And the corresponding `target_tensor` should just be the next "word"/tokenized word value:
```
5
```
This should continue with the second `feature_tensor`, `target_tensor` being:
```
[2, 3, 4, 5] # features
6 # target
```
```
from torch.utils.data import TensorDataset, DataLoader
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader.

    Builds sliding windows of `sequence_length` word ids as features, with
    the word immediately following each window as its target.

    :param words: The word ids of the corpus
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    # keep only as many words as fit into whole batches
    full_batches = len(words) // batch_size
    words = words[:full_batches * batch_size]

    n_windows = len(words) - sequence_length
    windows = [words[start:start + sequence_length] for start in range(n_windows)]
    next_words = [words[start + sequence_length] for start in range(n_windows)]

    dataset = TensorDataset(torch.from_numpy(np.asarray(windows)),
                            torch.from_numpy(np.asarray(next_words)))
    return torch.utils.data.DataLoader(dataset, shuffle=False, batch_size=batch_size)
```
### Test your dataloader
You'll have to modify this code to test a batching function, but it should look fairly similar.
Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader.
Your code should return something like the following (likely in a different order, if you shuffled your data):
```
torch.Size([10, 5])
tensor([[ 28, 29, 30, 31, 32],
[ 21, 22, 23, 24, 25],
[ 17, 18, 19, 20, 21],
[ 34, 35, 36, 37, 38],
[ 11, 12, 13, 14, 15],
[ 23, 24, 25, 26, 27],
[ 6, 7, 8, 9, 10],
[ 38, 39, 40, 41, 42],
[ 25, 26, 27, 28, 29],
[ 7, 8, 9, 10, 11]])
torch.Size([10])
tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12])
```
### Sizes
Your sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10).
### Values
You should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`.
```
# test dataloader
test_text = range(50)
t_loader = batch_data(test_text, sequence_length=5, batch_size=10)

data_iter = iter(t_loader)
# BUG FIX: DataLoader iterators no longer expose a `.next()` method in
# current PyTorch (removed in favor of the builtin iterator protocol);
# use next(data_iter) instead.
sample_x, sample_y = next(data_iter)

print(sample_x.shape)
print(sample_x)
print()
print(sample_y.shape)
print(sample_y)
```
---
## Build the Neural Network
Implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class:
- `__init__` - The initialize function.
- `init_hidden` - The initialization function for an LSTM/GRU hidden state
- `forward` - Forward propagation function.
The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state.
**The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word.
### Hints
1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)`
2. You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so:
```
# reshape into (batch_size, seq_length, output_size)
output = output.view(batch_size, -1, self.output_size)
# get last batch
out = output[:, -1]
```
```
import torch.nn as nn
class RNN(nn.Module):
    """Word-level LSTM language model: embedding -> LSTM stack -> linear head."""

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        Initialize the PyTorch RNN Module.

        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param n_layers: The number of stacked LSTM layers
        :param dropout: dropout to add in between LSTM/GRU layers
        """
        super(RNN, self).__init__()
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=dropout, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network.

        :param nn_input: Batch of word-id sequences, shape (batch, seq_len)
        :param hidden: The hidden state (h, c) tuple
        :return: Word scores for the last timestep, shape (batch, output_size),
                 and the latest hidden state
        """
        batch_size = nn_input.size(0)

        x = self.embedding(nn_input)
        x, hidden = self.lstm(x, hidden)

        # flatten timesteps so the linear head scores every position at once
        x = x.contiguous().view(-1, self.hidden_dim)
        scores = self.fc(x)

        # regroup per sequence and keep only the final timestep's scores
        scores = scores.view(batch_size, -1, self.output_size)
        return scores[:, -1], hidden

    def init_hidden(self, batch_size):
        """
        Initialize the hidden state of the LSTM to zeros.

        :param batch_size: The batch_size of the hidden state
        :return: (h, c) tuple, each of shape (n_layers, batch_size, hidden_dim)
        """
        # new tensors created from an existing parameter share its dtype/device
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.hidden_dim)
        h0 = weight.new(*shape).zero_()
        c0 = weight.new(*shape).zero_()
        if train_on_gpu:
            return (h0.cuda(), c0.cuda())
        return (h0, c0)
tests.test_rnn(RNN, train_on_gpu)
```
### Define forward and backpropagation
Use the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows:
```
loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target)
```
And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that you can get this loss by computing it, as usual, and calling `loss.item()`.
**If a GPU is available, you should move your data to that GPU device, here.**
```
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """
    Run one forward/backward pass and an optimizer step on a single batch.

    :param rnn: The PyTorch Module that holds the neural network
    :param optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :param hidden: The hidden state carried over from the previous batch
    :return: The loss (float) and the latest hidden state Tensor
    """
    # move model and data to GPU, if available
    if train_on_gpu:
        rnn.cuda()
        inp = inp.cuda()
        target = target.cuda()

    # Detach the hidden state from its history so backprop stops at this
    # batch; `detach()` is the documented API for this (rather than `.data`,
    # which bypasses autograd's correctness checks).
    h = tuple(el.detach() for el in hidden)

    rnn.zero_grad()
    output, h = rnn(inp, h)

    loss = criterion(output, target)
    loss.backward()
    # clip gradients to guard against the exploding-gradient problem in RNNs
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()

    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), h
# Note that these tests aren't completely extensive.
# they are here to act as general checks on the expected outputs of your functions
tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu)
```
## Neural Network Training
With the structure of the network complete and data ready to be fed in the neural network, it's time to train it.
### Train Loop
The training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section.
```
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    """
    Train `rnn` over the global `train_loader` for `n_epochs` epochs.

    :param rnn: The model to train (modified in place)
    :param batch_size: Batch size used to size the initial hidden state
    :param optimizer: Optimizer updating the model's parameters
    :param criterion: Loss function
    :param n_epochs: Number of passes over the training data
    :param show_every_n_batches: How often (in batches) to print the running loss
    :return: The trained rnn
    """
    batch_losses = []
    rnn.train()

    # number of completely full batches; loop-invariant, so compute it once
    # instead of on every batch as before
    n_batches = len(train_loader.dataset)//batch_size

    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        # initialize hidden state once per epoch
        hidden = rnn.init_hidden(batch_size)

        for batch_i, (inputs, labels) in enumerate(train_loader, 1):
            # make sure you iterate over completely full batches, only
            if(batch_i > n_batches):
                break

            # forward, back prop
            loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
            # record loss
            batch_losses.append(loss)

            # printing loss stats
            if batch_i % show_every_n_batches == 0:
                print('Epoch: {:>4}/{:<4}    Loss: {}\n'.format(
                    epoch_i, n_epochs, np.average(batch_losses)))
                batch_losses = []

    # returns a trained rnn
    return rnn
```
### Hyperparameters
Set and train the neural network with the following parameters:
- Set `sequence_length` to the length of a sequence.
- Set `batch_size` to the batch size.
- Set `num_epochs` to the number of epochs to train for.
- Set `learning_rate` to the learning rate for an Adam optimizer.
- Set `vocab_size` to the number of unique tokens in our vocabulary.
- Set `output_size` to the desired size of the output.
- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size.
- Set `hidden_dim` to the hidden dimension of your RNN.
- Set `n_layers` to the number of layers/cells in your RNN.
- Set `show_every_n_batches` to the number of batches at which the neural network should print progress.
If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class.
```
# Data params
# Sequence Length
sequence_length = 8 # of words in a sequence
# Batch Size
batch_size = 128
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 10
# Learning Rate
learning_rate = 0.001
# Model parameters
# Vocab size (number of distinct tokens after pre-processing)
vocab_size = len(vocab_to_int)
# Output size: one score per vocabulary word, so it equals vocab_size
output_size = vocab_size
# Embedding Dimension (kept much smaller than vocab_size)
embedding_dim = 400
# Hidden Dimension
hidden_dim = 256
# Number of RNN Layers
n_layers = 2
# Show stats for every n number of batches
show_every_n_batches = 3000
```
### Train
In the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train.
> **You should aim for a loss less than 3.5.**
You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn.
```
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
    rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# CrossEntropyLoss takes the raw (unnormalized) scores plus integer targets
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./save/trained_rnn', trained_rnn)
print('Model Trained and Saved')
```
---
# Checkpoint
After running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name!
```
import torch
import helper
import problem_unittests as tests
# restore the vocabulary dictionaries and the trained model from disk
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./save/trained_rnn')
```
## Generate Poetry
With the network trained and saved, you'll use it to generate new, "fake" poetry in this section.
### Generate Text
To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores!
```
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
    """
    Generate text using the trained neural network.

    :param rnn: The PyTorch Module that holds the trained neural network
    :param prime_id: The word id to start the first prediction
    :param int_to_vocab: Dict of word id keys to word values
    :param token_dict: Dict of punctuation token keys to punctuation values
    :param pad_value: The value used to pad a sequence
    :param predict_len: The length of text to generate
    :return: The generated text
    """
    rnn.eval()

    # create a sequence (batch_size=1) with the prime_id
    # NOTE(review): relies on the notebook-level `sequence_length` variable
    current_seq = np.full((1, sequence_length), pad_value)
    current_seq[-1][-1] = prime_id
    predicted = [int_to_vocab[prime_id]]

    for _ in range(predict_len):
        if train_on_gpu:
            current_seq = torch.LongTensor(current_seq).cuda()
        else:
            current_seq = torch.LongTensor(current_seq)

        # initialize the hidden state
        hidden = rnn.init_hidden(current_seq.size(0))

        # get the output of the rnn
        output, _ = rnn(current_seq, hidden)

        # get the next word probabilities
        p = F.softmax(output, dim=1).data
        if train_on_gpu:
            p = p.cpu()  # move to cpu

        # use top_k sampling to get the index of the next word
        top_k = 5
        p, top_i = p.topk(top_k)
        top_i = top_i.numpy().squeeze()

        # select the likely next word index with some element of randomness
        p = p.numpy().squeeze()
        word_i = np.random.choice(top_i, p=p/p.sum())

        # retrieve that word from the dictionary
        word = int_to_vocab[word_i]
        predicted.append(word)

        # BUG FIX: np.roll cannot operate on a CUDA tensor ("can't convert
        # cuda:0 device type tensor to numpy"); convert the sequence back to
        # a NumPy array first, then shift it left and append the new word id.
        current_seq = np.roll(current_seq.cpu().numpy(), -1, 1)
        current_seq[-1][-1] = word_i

    gen_sentences = ' '.join(predicted)

    # Replace punctuation tokens with the original symbols
    # (removed the unused `ending` variable the original computed here)
    for key, token in token_dict.items():
        gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
    gen_sentences = gen_sentences.replace('\n ', '\n')
    gen_sentences = gen_sentences.replace('( ', '(')

    # return all the sentences
    return gen_sentences
```
### Generate a New Script
It's time to generate the text. Set `gen_length` to the length of the poem you want to generate and set `prime_word` to one of the suggested keywords to start the prediction:
- soul, dream, first, water, sky, body, beauty
You can set the prime word to _any word_ in our dictionary, but it's best to start with one of the suggested keywords. (You can also start with any other words you find in the original text file!)
```
# run the cell multiple times to get different results!
gen_length = 120 # modify the length to your preference
prime_word = 'body' # name for starting the script
# other suggested starting keywords:
# soul dream first water sky body beauty
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script)
```
#### Save your favorite scripts
Once you have a script that you like (or find interesting), save it to a text file!
```
# save script to a text file
# use a context manager so the file is closed even if the write raises
with open("race_soul.txt", "w") as f:
    f.write(generated_script)
```
### Explore Network and corner cases
### Based on
https://github.com/HectorBudielE/udacity-tv-script-generation/
https://github.com/aosama16/Udacity-Deep-Learning-Nanodegree/tree/master/Project-3/TV-Script-Generation
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Klasifikasi Dasar: Klasifikasi gambar pakaian
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Lihat di TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/id/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Jalankan di Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/id/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Lihat source di GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/id/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Unduh notebook</a>
</td>
</table>
Panduan ini bertujuan untuk membangun sebuah model neural network yang dapat mengklasifikasikan gambar pakaian, seperti sneaker dan baju. Tidak masalah apabila saat ini Anda belum memahami seluruh detail yang ada; Ini merupakan ringkasan singkat dari sebuah program Tensorflow lengkap dengan penjelasan detail dari setiap langkah-langkahnya.
Panduan ini menggunakan [tf.keras](https://www.tensorflow.org/guide/keras), sebuah API tingkat tinggi untuk membangun dan melakukan training model di TensorFlow.
```
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## Import dataset Fashion MNIST
Panduan ini menggunakan dataset [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) yang memiliki 70,000 gambar hitam putih dalam 10 kategori pakaian. Tiap gambar menunjukan gambar dari satu tipe pakaian dalam resolusi rendah (28 x 28 pixel), seperti yang dapat dilihat disini:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Gambar 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Sampel gambar dari dataset Fashion-MNIST</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Dataset Fashion MNIST dibuat untuk menggantikan dataset [MNIST](http://yann.lecun.com/exdb/mnist/) klasik - yang seringkali dijadikan sebagai "Hello, World" dari program machine learning untuk computer vision. Dataset MNIST terdiri atas gambar angka tulisan tangan (0, 1, 2, dsb) dalam format yang identik dengan gambar pakaian yang akan digunakan dalam dataset Fashion MNIST.
Panduan ini menggunakan Fashion MNIST agar lebih bervariasi, data ini juga sedikit lebih menantang dibandingkan dengan MNIST biasa. Kedua dataset berukuran kecil dan biasa digunakan untuk melakukan verifikasi apakah sebuah algoritma bekerja sesuai dengan yang diinginkan. Kedua data ini merupakan titik awal yang bagus untuk melakukan test dan debug terhadap kode.
60,000 gambar digunakan sebagai data train dari model neural network dan 10,000 gambar digunakan untuk mengevaluasi seberapa akurat model dapat mengklasifikasikan gambar. Anda dapat mengakses dataset Fashion MNIST langsung dari TensorFlow. Import dan muat data dari Fashion MNIST langsung dari TensorFlow:
```
# Download Fashion-MNIST via the Keras datasets API (70k 28x28 grayscale images)
fashion_mnist = keras.datasets.fashion_mnist

(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Dataset yang telah di muat akan mengembalikan empat nilai NumPy arrays:
* Array `train_images` dan `train_labels` merupakan data yang digunakan oleh model untuk mempelajari *pattern*
* Model diuji menggunakan *test set*, yaitu array `test_images`, dan `test_labels`.
Tiap gambar merupakan array berukuran 28x28, dengan nilai pixel yang berkisar antara 0 sampai dengan 255. Label dari data merupakan array bertipe bilangan integer, yang memiliki rentang nilai dari 0 sampai dengan 9. Nilai ini memiliki korespondensi dengan kelas pakaian sebagai berikut:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Setiap gambar memiliki satu label saja. Nama kelas tidak tersedia dalam dataset, simpan nama kelas tersebut disini. Hal ini akan berguna ketika kita akan melakukan plotting dari gambar tersebut:
```
# Human-readable class names; index i is the name for integer label i
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Eksplorasi Data
Mari kita lakukan eksplorasi dari format dataset sebelum kita menggunakan data tersebut untuk membangun model. Kode berikut menunjukan bahwa terdapat 60,000 gambar di dalam training set, setiap gambar berukuran 28 x 28 pixel.
```
train_images.shape
```
Selain terdapat 60,000 gambar, terdapat juga 60,000 label di dalam training set:
```
len(train_labels)
```
Setiap label adalah bilangan integer antara 0 dan 9:
```
train_labels
```
Terdapat 10,000 gambar di dalam test set. Gambar ini juga direpresentasikan sebagai 28 x 28 pixel:
```
test_images.shape
```
Dan test set mengandung 10,000 label gambar:
```
len(test_labels)
```
## Melakukan preprocessing terhadap data
Data harus diolah terlebih dahulu sebelum digunakan untuk membangun model neural network. Jika Anda mengamati gambar pertama dalam training set, Anda akan melihat bahwa nilai pixel dari gambar memiliki rentang antara 0 sampai dengan 255:
```
# Show the first training image; the colorbar reveals pixel values in [0, 255]
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
Nilai pixel ini akan diubah menjadi rentang 0 sampai dengan 1 sebelum menggunakannya sebagai input dari model neural network. Untuk melakukan hal tersebut, nilai tersebut harus dibagi dengan 255. Perlu diperhatikan bahwa training set dan testing set harus diolah dengan cara yang sama:
```
# Scale pixel values to [0, 1]; train and test must be scaled identically
train_images = train_images / 255.0

test_images = test_images / 255.0
```
Untuk memastikan bahwa data dalam format yang benar dan Anda siap untuk membangun dan melatih model neural network, mari kita tampilkan 25 gambar pertama dari training set dan menampilkan nama kelas di bawah dari tiap gambar.
```
# Sanity check: show the first 25 training images with their class names
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()
```
## Membangun model
Untuk dapat membuat sebuah model neural network, perlu dilakukan konfigurasi layer untuk model tersebut, sehingga proses kompilasi pada model dapat dilakukan.
### Pengaturan layer
Komponen utama dari neural network adalah *layer*. Layer mengekstraksi representasi dari data yang masuk ke dalam layer-layer tersebut. Harapannya, representasi tersebut berguna untuk mengatasi permasalahan yang ingin diselesaikan.
Kebanyakan deep learning terdiri atas penggabungan layer-layer sederhana. Kebanyakan layer, seperti `tf.keras.layers.Dense`, memiliki parameter-parameter yang dipelajari ketika proses training dijalankan
```
# Flatten 28x28 images to a 784-vector, one hidden ReLU layer, then a
# softmax layer producing a probability for each of the 10 classes
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
```
Layer pertama dalam model neural network ini, `tf.keras.layers.Flatten`, melakukan transformasi dari format gambar yang mulanya array dua dimensi (28 x 28 pixel) menjadi array satu dimensi (28 * 28 = 784 pixel). Bayangkan layer ini sebagai layar yang membuat pixel-pixel dalam gambar menjadi satu baris. Layer ini tidak memiliki parameter untuk dipelajari; layer ini hanya mengubah format dari data.
Setelah pixel diubah menjadi satu baris, model yang akan dibuat terdiri dari dua buah layer `tf.keras.layers.Dense`. Kedua layer ini terhubung secara penuh. Layer `Dense` yang pertama memiliki 128 node (atau neuron). Layer yang kedua (dan terakhir) memiliki 10 node *softmax* yang mengembalikan sebuah array dari 10 nilai probabilitas yang apabila dijumlahkan hasilnya adalah 1. Setiap node memiliki score yang mengindikasikan probabilitas bahwa gambar yang sedang diproses merupakan 1 dari 10 kelas label.
### Proses kompilasi model
Sebelum model siap untuk di training, model ini memerlukan beberapa pengaturan tambahan. Pengaturan ini ditambahkan ketika proses kompilasi:
* *loss function* —Fungsi ini menghitung seberapa akurat model ketika proses training dilakukan. Anda ingin meminimalisir nilai kerugian dari fungsi ini untuk "mengarahkan" model ke arah yang benar.
* *Optimizer* —Optimizer mendefinisikan cara model untuk memperbarui modelnya berdasarkan data yang dilihat dan berdasarkan loss function-nya.
* *Metrics* —Bagian ini digunakan untuk memantau langkah-langkah dalam proses training dan testing. Dalam contoh ini, digunakan *akurasi*, perbandingan gambar yang diklasifikasikan dengan tepat oleh model.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## Proses training dari model
Melakukan training terhadap model neural network memerlukan beberapa langkah sebagai berikut:
1. Gunakan training data sebagai inputan untuk model. Dalam kasus ini, training data terdapat dalam array `train_images` dan `train_labels`.
2. Model akan mempelajari untuk mengasosiasikan antara gambar dan label.
3. Anda menyuruh model untuk membuat prediksi terhadap test set, dalam kasus ini, array `test_images`. Pastikan bahwa prediksi cocok dengan label yang terdapat di array `test_labels`.
Untuk memulai proses training, panggil method `model.fit`—dinamakan method fit karena method ini "membuat" model berdasarkan data training:
```
model.fit(train_images, train_labels, epochs=10)
```
Ketika proses training model sedang dilakukan, *loss metrics* dan *accuracy metrics* ditampilkan. Model ini mencapai akurasi sekitar 0.88 (atau 88%) terhadap data training.
## Evaluasi nilai dari akurasi
Selanjutnya, bandingkan bagaimana performansi dari model terhadap data test:
```
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
```
Hasilnya adalah akurasi dari data test sedikit lebih kecil dibandingkan dengan akurasi model terhadap data training. Perbedaan antara akurasi dari data training dan data test memperlihatkan *overfitting*. Overfitting terjadi ketika performansi dari model machine learning lebih buruk untuk data baru yang belum pernah dilihat sebelumnya dibandingkan dengan data training.
## Membuat prediksi
Dengan model yang telah dilatih menggunakan data training, Anda dapat menggunakan model tersebut untuk memprediksi berbagai gambar.
```
predictions = model.predict(test_images)
```
Disini, model kita telah memprediksi label dari tiap gambar yang terdapat di data test. Mari lihat prediksi pertama dari model:
```
predictions[0]
```
Prediksi dari gambar pertama dalam data test adalah array dengan 10 angka. Prediksi ini memperlihatkan "*confidence*" dari model terhadap 10 jenis pakaian. Kita dapat melihat label mana yang memiliki nilai *confidence* yang tinggi:
```
np.argmax(predictions[0])
```
Jadi, model memprediksi bahwa gambar tersebut adalah ankle boot, atau `class_names[9]`. Dengan meninjau label test, dapat dilihat bahwa klasifikasi ini benar:
```
test_labels[0]
```
Gambar berikut digunakan untuk melihat seluruh set dari prediksi 10 kelas.
```
def plot_image(i, predictions_array, true_label, img):
    """Show test image *i* with its predicted and true class labels.

    Args:
        i: index of the sample to display.
        predictions_array: per-class probabilities for this sample
            (callers pass `predictions[i]`, i.e. already indexed).
        true_label: array of integer ground-truth labels (indexed by i here).
        img: array of images (indexed by i here).
    """
    # Only the label and image arrays are indexed; the prediction vector
    # arrives pre-indexed from the caller.
    true_label, img = true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    # Blue label = correct prediction, red = incorrect.
    color = 'blue' if predicted_label == true_label else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100*np.max(predictions_array),
                                         class_names[true_label]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Plot the 10-class probability vector of sample *i* as a bar chart.

    Args:
        i: index of the sample (used only to pick the true label).
        predictions_array: per-class probabilities for this sample
            (callers pass `predictions[i]`, i.e. already indexed).
        true_label: array of integer ground-truth labels.
    """
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    # Predicted bar is painted red first; the true-label bar is painted blue
    # afterwards, so a correct prediction shows a single blue bar.
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
```
Mari lihat gambar ke-0, prediksi, dan array prediksi. Label prediksi yang tepat berwarna biru dan label prediksi yang salah akan berwarna merah. Bilangan yang ada memberikan persentase (dari 100) untuk label yang diprediksi.
```
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
```
Mari buat plot beberapa gambar dengan prediksinya. Perhatikan bahwa model dapat salah meskipun model sangat percaya diri dengan prediksinya.
```
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
```
Akhirnya, gunakan model yang telah dibuat untuk membuat prediksi terhadap sebuah gambar.
```
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
```
model-model `tf.keras` dioptimalisasi untuk membuat prediksi dalam sebuah *batch*, atau koleksi, dari contoh-contoh sekaligus. Sehingga, meskipun Anda menggunakan satu gambar, Anda harus menambahkan gambar tersebut ke dalam list:
```
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
```
Sekarang prediksi label yang tepat untuk gambar ini:
```
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
```
`model.predict` mengembalikan list dari list—satu list untuk setiap gambar di dalam batch dari data. Lakukan prediksi untuk gambar dalam batch ini:
```
np.argmax(predictions_single[0])
```
Dan model memprediksi label sesuai yang diharapkan.
| github_jupyter |
# `Probability Distributions`
```
%matplotlib inline
# for inline plots in jupyter
import matplotlib.pyplot as plt# import matplotlib
import seaborn as sns
import warnings
warnings.simplefilter("ignore")
from ipywidgets import interact
styles = ['seaborn-notebook', 'seaborn', 'seaborn-darkgrid', 'classic',
'_classic_test', 'seaborn-poster', 'tableau-colorblind10', 'grayscale',
'fivethirtyeight', 'seaborn-ticks', 'seaborn-dark',
'dark_background', 'seaborn-pastel',
'fast', 'Solarize_Light2', 'seaborn-colorblind', 'seaborn-white',
'seaborn-dark-palette',
'bmh', 'seaborn-talk', 'seaborn-paper', 'seaborn-deep', 'seaborn-bright',
'seaborn-muted',
'seaborn-whitegrid', 'ggplot']
```
## `5. Poisson Distribution`
1. The Poisson distribution is used to model the number of events occurring within a given time interval.

```
# import poisson module from scipy.stats to generate poisson random variables.
from scipy.stats import poisson
# Poisson random variable is typically used to model the number of times an event
# happened in a time interval. For example, number of users visited your website in an interval
# can be thought of a Poisson process. Poisson distribution is described in terms of the rate (mu) at which
# the events happen. We can generate Poisson random variables in Python using poisson.rvs.
# NOTE(review): the assignment below rebinds the name `poisson` from the
# scipy.stats module to a plain sample array, shadowing the module for the
# rest of the notebook; a fresh `from scipy.stats import poisson` would be
# needed before calling poisson.rvs again.
poisson = poisson.rvs(mu=3, size=10000)
# Let us generate 10000 random numbers from Poisson random variable with mu = 3 and plot them.
ax = sns.distplot(poisson,
kde=False,
hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Poisson', ylabel='Frequency')
def PoissonDistribution(palette="dark", kde=False, style="ggplot"):
    """Interactively re-plot the global `poisson` sample.

    Args:
        palette: seaborn color-palette name.
        kde: whether to overlay a kernel-density estimate.
        style: matplotlib style-sheet name.
    """
    plt.figure(figsize=(13, 10))
    plt.style.use(style)
    sns.set_palette(palette)
    # `poisson` here is the global sample array drawn above, not the module.
    ax = sns.distplot(poisson,
                      kde=kde,
                      hist_kws={"linewidth": 15, 'alpha': 1})
    ax.set(xlabel='Poisson', ylabel='Frequency')
    plt.show()


# Expose palette/kde/style dropdowns via ipywidgets.
interact(PoissonDistribution,
         palette=["deep", "muted", "pastel", "bright",
                  "dark", "colorblind", "Set3", "Set2"],
         kde=[True, False],
         style=styles);
```
## `6. Beta Distribution`
1. We can understand Beta distribution as a distribution for probabilities. Beta distribution is a continuous distribution taking values from 0 to 1. It is defined by two parameters alpha and beta, depending on the values of alpha and beta they can assume very different distributions.
```
# Let us generate 10000, random numbers from Beta distribution
# with alpha = 1 and beta = 1. The histogram of Beta(1,1) is a uniform distribution.
from scipy.stats import beta
# NOTE(review): this rebinds the name `beta` from the scipy.stats module to a
# sample array; the re-import further below is what makes the later .rvs
# calls work again.
beta = beta.rvs(1, 1, size=10000)
ax = sns.distplot(beta,
kde=False,
bins=100,
hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Beta(1,1)', ylabel='Frequency')
# Let us generate 10000, random numbers from Beta distribution with alpha = 10 and beta = 1.
# The histogram of Beta(10,1) is skewed towards right.
# Re-import restores `beta` as the scipy.stats module after the shadowing above.
from scipy.stats import beta
beta_right = beta.rvs(10, 1, size=10000)
# NOTE(review): the return value of this distplot call is not captured, so the
# ax.set below reuses the `ax` handle from the earlier plot — presumably the
# same in-cell axes; verify the intended label placement.
sns.distplot(beta_right,
kde=False,
bins=50,
hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Beta(10,1)', ylabel='Frequency')
# Let us generate 10000, random numbers from Beta distribution with alpha = 1 and beta = 10.
# The histogram of Beta(1,10) is skewed towards left.
beta_left = beta.rvs(1, 10, size=10000)
ax = sns.distplot(beta_left,
kde=False,
bins=100,
hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Beta(1,10)', ylabel='Frequency')
# Let us generate 10000, random numbers from Beta distribution with alpha = 10 and beta = 10.
# The histogram of Beta(10,10) is symmetric and looks like a normal distribution
beta_symmetric = beta.rvs(10, 10, size=10000)
ax = sns.distplot(beta_symmetric,
kde=False,
bins=100,
hist_kws={"linewidth": 15,'alpha':1})
ax.set(xlabel='Beta(10,10)', ylabel='Frequency')
def BetaDistribution(palette="dark", kde=False, style="ggplot", kind="left"):
    """Interactively re-plot one of the pre-sampled Beta distributions.

    Args:
        palette: seaborn color-palette name.
        kde: whether to overlay a kernel-density estimate.
        style: matplotlib style-sheet name.
        kind: which global sample to show -- "left" (Beta(1,10)),
            "right" (Beta(10,1)) or "symmetric" (Beta(10,10)).
    """
    plt.figure(figsize=(13, 10))
    plt.style.use(style)
    sns.set_palette(palette)
    # (sample array, histogram bin count, x-axis label) per kind.
    options = {
        "left": (beta_left, 100, 'Beta(1,10)'),
        "right": (beta_right, 50, 'Beta(10,1)'),
        "symmetric": (beta_symmetric, 100, 'Beta(10,10)'),
    }
    if kind in options:
        sample, n_bins, label = options[kind]
        ax = sns.distplot(sample,
                          kde=kde,
                          bins=n_bins,
                          hist_kws={"linewidth": 15, 'alpha': 1})
        ax.set(xlabel=label, ylabel='Frequency')
    plt.show()


interact(BetaDistribution,
         palette=["deep", "muted", "pastel", "bright",
                  "dark", "colorblind", "Set3", "Set2"],
         kde=[True, False],
         style=styles,
         kind=["left", "right", "symmetric"]);
```
| github_jupyter |
---
### *The 11th Computational Neuroscience Winter School*
# Tutorial II: Neuronal Dynamics - Point Neuron Model
---
__Date:__ Jan. 21, 2022
__Content Creator:__ Songting Li, Kai Chen, Ziling Wang
## Using tutorial notebook
Please download the latest version of [Jupyter Notebook](https://jupyter.org) if you want to try run all tutorials on your own laptop.
**Gray cells are executable code cells. To execute, click on a cell and press `Shift + Enter`.**
**To modify text in a cell, click on the cell and type.**
Note that most function stubs are suggestions. Feel free to code up whatever solves the task.
If something behaves weird, it always is worth a try to restart the kernel.
---
# Tutorial Objectives
In this notebook, we will learn how to use python to solve differential equations that describe the dynamics of neurons. We will cover single-compartment neurons and simplest spatial neuron models (ball and stick) in next notebook.
In the course of this notebook, we will learn how to simulate the single point neuron model, including LIF and reduced HH neuron (Fitzhugh-Nagumo model):
1. Initialize model and variable state;
1. Record output, such as membrane potentials and spikes
1. Drive the neurons by external input
In doing so we will practice
- To make use of modular programming
- To write testing functions to check and optimize code
## Setup
```
# @title Import most modules and functions needed
import time
import numpy as np
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['legend.fontsize'] = 14
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 18
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
# General coding remarks
In your very own interest (but also the interest of everyone who ever has to work with your code) it really helps, if you always:
- structure your code clearly, such that you can quickly grasp where loops begin and end, etc.
- choose sensible names for your functions, e.g. call a routine that stores spikes `store_spikes`, one that plots membrane potentials `plot_pots`, or something similar.
- choose sensible names for your variables, e.g. call a variable that encodes the membrane potential `V` or `Vm`, a space variable `x`, a time variable `t`, and so on. You get the gist :-)
- add comments about what individual lines/sections of code do, for (stupid) example:
```python
>>> v=v+c # this adds the constant c to v
```
- add doc strings to your functions: that is the text in between ''' in the beginning of each function definition (see examples below). Doc strings should describe what the function does and what the input variables and parameters are.
```python
>>> def func():
>>> '''
>>> This is a function. It does things.
>>> '''
>>>
>>> # You can print this info by typing 'print(func.__doc__)'
>>> print(func.__doc__)
This is a function. It does things.
```
Also: **SAVE REGULARLY!!!** You don't want to lose the most brilliant few lines of code you ever produced, nor more regular tedious work to [insert random thing that WILL happen and kill your running program]
Though `Jupyter Notebook` helps by regularly creating checkpoints, you'd better mannully save your modifications.
### Modular Programming
It is extremely helpful to structure your code into seperate independent functions or modules.
For example, instead of having one huge simulation function which initializes all dynamical variables, updates the dynamical states, checks for state transitions (e.g. threshold crossings), and writes output to file, you should split it into basic functions that do one thing, and one thing only [UNIX philosophy:)].
This has several advantages:
- it increases readability of complex code;
- each function can be more easily tested and maintained;
- each function can be easily changed or replaced without affecting the behavior of other functions;
- each function can be easily reused, in the best case scenario saving you work at a later point in time;
## Numpy Broadcasting
If you want to multiply each element of `a` by `2.0`, you may implement a loop operation as follows.
```python
>>> import numpy as np
>>> a = np.array([1.0,2.0,3.0])
>>> for i in range(len(a)):
>>> a[i] = a[i] * 2.0
>>> print(a)
[2., 4., 6.]
```
Numpy's broadcasting rule allows you to implement element-wise operations by directly interact numpy.array with scalars, shown as the following examples.
```python
>>> import numpy as np
>>> a = np.array([1.0,2.0,3.0])
>>> a = a * 2.0
>>> print(a)
[2., 4., 6.]
```

As the sketch and figure above, broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. We may use `time.time()` to verify it.
```
import numpy as np
import time

# Python looping: each element is multiplied inside an interpreted loop.
a = np.arange(1000000, dtype=float)
t0 = time.time()
for i in range(len(a)):
    a[i] *= 2.0
print(f'Python looping takes {time.time()-t0:.3f}s')
print('Last 5 elements in a: ', a[-5:])

# Numpy broadcasting: the same element-wise multiply runs in C.
a = np.arange(1000000, dtype=float)
t0 = time.time()
a = a * 2.0
# Label fixed: this branch times broadcasting, not the Python loop.
print(f'Numpy broadcasting takes {time.time()-t0:.3f}s')
print('Last 5 elements in a: ', a[-5:])
```
**More broadcasting, less loops!**
### Testing functions
It is critical to test each function before putting all of them together. Writing a proper test of a function will give you confidence that a function is working properly. The test should be simple enough, yet still contain typical situation(s) to be met when all functions are put together.
E.g. if you have a function that detects a threshold crossing and resets the dynamical state as a consequence, you want to make sure that it correctly identifies the crossing and correctly resets to the desired value. To do so, you could give an increasing time series that contains the threshold value as an input and measure the output for expected behavior.
# Point neuron models
## Fitzhugh-Nagumo model
Most of you will in one way or another have encountered the famous Hodgkin-Huxley (HH) model for the dynamics of neuronal membranes. It is a powerful biophysical model to explain many aspects of membrane excitability, such as action potential generation and the role of ion channels, in particular sodium and potassium, in it.
The Fitzhugh-Nagumo model is a model that reduces the complexity of the HH model to two dimensions, while still capturing the key aspects of the dynamics. Most of all, the reduction makes it amenable to dynamical system analysis techniques.
The membrane potential equation for the Fitzhugh-Nagumo model is given by
\begin{eqnarray}
\frac{d}{dt} v(t) &=& v - \frac{v^3}{3} - w + I\quad (1.1)\\
\frac{d}{dt}w(t) &=& \epsilon\,(b_0+b_1 v - w)\quad (1.2)
%\tau \frac{d}{dt}w(t) &=& v + a -bw
\end{eqnarray}
$I$ is a driving current, $v(t)$ is a fast variable (membrane potential), $w(t)$ is a slow recovery variable.
## Step 1: Initialization and Output monitor functions
### Example: membrane potential recorder
To write membrane potential recording and plotting functions we want three different functions:
- `init_rec_pot()`: should initialize the object which you will store potentials in
- `store_pot()`: should record all potential values at a given time point
- `plot_pot()`: should plot the membrane potential as a function of time
We can print the time taken by the whole session by using `time.time()`. This is helpful for optimizing code.
```
# if you want to initialize all traces to already have the full length=number of sample points=n_timesteps
def init_rec_pot(n_neuron, n_timesteps):
    '''
    Initializes the membrane-potential recording buffer.

    Args:
        n_neuron    : number of neurons whose potential will be stored
        n_timesteps : number of time steps to record

    Returns:
        rec_pot : 2-D numpy array of zeros with shape (n_neuron, n_timesteps);
                  row i holds the full trace of neuron i, column j holds the
                  potentials of all neurons at time step j
                  (note: an array, not a dictionary as the original
                  docstring claimed)
    '''
    return np.zeros((n_neuron, n_timesteps))
def store_pot(rec_pot, V, it):
    '''
    Writes the current potentials of all neurons into the recording buffer.

    Args:
        rec_pot : 2-D array of potential traces (neurons x time steps)
        V       : vector of current potential values, one per neuron
        it      : index of the current time step

    Returns:
        None; rec_pot is updated in place (column `it` is overwritten)
    '''
    rec_pot[:, it] = V
def plot_pot(rec_pot, time=None):
    '''
    Plots the potential traces stored in the recording array.

    Args:
        rec_pot : 2-D array of potential traces (one row per neuron)
        time    : optional time ticks for the x-axis; if None, traces are
                  plotted against the sample index

    Returns:
        None; draws one gray-scale line per neuron into the current figure.
    '''
    for cnt, trace in enumerate(rec_pot):
        # Spread gray values between black and white so that each
        # neuron's trace is distinguishable.
        gray = '%s' % ((1. + cnt) / (len(rec_pot) + 1))
        if time is None:
            plt.plot(trace, '-', color=gray, lw=2, label=str(cnt + 1))
        else:
            plt.plot(time, trace, '-', color=gray, lw=2, label=str(cnt + 1))
```
We will now use some mock potential data to see if the three functions init_rec_pot(), store_pot() and plot_pot() do what they should.
```
T, dt = 10, 0.1 # total time T [ms], time increment dt [ms]
n_neuron = 3 # number of neurons
ts = np.arange(0,T,dt) # time vector
rec_pot = init_rec_pot(n_neuron,len(ts)) # create rec_pot object
starttime = time.time() # you can clock how long your code needs, useful during optimization
#print rec_pot # how does it look?
### run "pseudo-dynamics"
# first, create an update function for the potential dynamics (here, simply creating Gaussian random numbers)
def update(n_neuron):
    '''
    Mock potential update used to test the recorder: returns one
    standard-normal sample per neuron.

    Args:
        n_neuron : number of neurons

    Returns:
        1-D numpy array of length n_neuron with Gaussian random values
    '''
    return np.random.randn(n_neuron)
for it in range(len(ts)):
vs = update(n_neuron) # for each time step update potential values
store_pot(rec_pot,vs,it) # hand to store_pot function
#print rec_pot # how does it look?
# plot potential traces: should give n_neuron random traces in varying gray tones
plot_pot(rec_pot)
endtime = time.time()
print(f'Running time is {endtime-starttime:0.5f} s')
```
## Step 2: Numerical ODE Solver
---
### Euler integration
Consider a differential equation of the type
$$
\tau \frac{d}{dt} x(t) = -x(t) +I\,,\quad(i)
$$
where $I$ is some constant input that drives the dynamics of the variable $x(t)$, and $\tau$ is some time constant that tunes how fast $x$ reacts to the input $I$.
For this type of equation it is straightforward to write down the solution, i.e.,
$$
x(t) = I\,(1-e^{-t/\tau}) + x_0\,e^{-t/\tau}\,,\quad (ii)
$$
where $x_0=x(t_0)$ is the initial condition. Indeed, if we compute the temporal derivative of $(ii)$ we get
\begin{eqnarray}
\frac{d}{dt} x(t) \quad &=& \quad \frac{d}{dt} \big(I\,(1-e^{-t/\tau}) + x_0\,e^{-t/\tau}\big) \quad =\quad \frac{1}{\tau} \left(I\,e^{-t/\tau} -x_0 \,e^{-t/\tau}\right) \\
\Leftrightarrow\quad \tau \frac{d}{dt} x(t) \quad &=& \quad I\,e^{-t/\tau} - x_0 \,e^{-t/\tau} + (I-I) \,\, \,\quad =\quad -x(t) +I\,.
\end{eqnarray}
Not every differential equation is so easy to solve, so it is useful to know how to numerically compute such problems.
The most straight-forward (though not necessarily always best or optimal) way to do that is by *Euler integration*.
### Discretization
We start from $(i)$ by substituting
$$dt \rightarrow \Delta t= t_{i+1}-t_i, \quad dx \rightarrow \Delta x = x(t_{i+1})-x(t_i)=: x_{i+1}-x_i$$
The time increment $\Delta t$ is usually a fixed number that should be considerably smaller than the smallest other time constant in the system in order to obtain a good approximation (here $\tau$). You also don't want it to be unnecessarily small, because simulation time will increase the smaller $\Delta t$. If you are unsure, whether you chose a good $\Delta t$, try varying it and observe how much the result varies.
### Initialize
Whenever we want to solve differential equations, we have to define an initial condition. So we will have to have an initialization step for $x$:
$$x(0)=x_0$$
### Update
The discretized version of $(i)$ is given by
\begin{aligned}
\tau\, \frac{\Delta x}{\Delta t} \quad &= \quad -x_i + I\\
\Leftrightarrow\quad \Delta x \quad = \quad x_{i+1} - x_i \quad &= \quad \Delta t \left[ \frac{1}{\tau}\,\big( -x_i +I \big)\right]\\
\Leftrightarrow\quad\quad \quad\quad\quad\quad\quad x_{i+1}\quad &= \quad x_i + \Delta t \, \left[ \frac{1}{\tau}\,\big( -x_i +I \big)\right]
\end{aligned}
The Euler integration update step always has the same form, i.e.,
$$
x_{i+1} = x_i + \Delta t f(x) \quad (1)
$$
When we write code, we want to save as much work as possible, not only by letting the computer do all the tedious calculations, but also by reducing the amount of typing work in the future. So we should design our code such that it is flexible and reusable. These are benefits that *modularization* gives us.
We can write a function called euler_step($x$,f,$\Delta t$) that just does Eqn.(1), regardless of what f does.
Then, for what is specific to the dynamics at hand, we will have a function called, here e.g., f = f_x($x,
I,\tau$) that computes $\left[ \frac{1}{\tau}\,\big( -x_i +I \big)\right]$.
This way, whenever we want to solve a dynamics with Euler integration again, we can just use euler_step() again and simply exchange $f$.
So let's do that in python code!
```
## function for Euler integration step
def euler_step(x, f, dt):
    '''
    Performs one forward-Euler integration step.

    Expects:
        x  : current state variable
        f  : time derivative of x at the current state
        dt : integration time step

    Returns:
        the state advanced by one step, x + dt * f
    '''
    increment = dt * f
    return x + increment
## specific functional increment
def f_x(x, I, tau):
    '''
    Computes the increment dx/dt of the state variable for Eqn.(i),
    the low-pass filter dynamics tau * dx/dt = -x + I.

    Expects:
        x   : current state
        I   : constant drive
        tau : time constant

    Returns:
        state variable increment (-x + I) / tau
    '''
    return (-x + I) / tau
```
We have all necessary ingredients to run the full dynamics Eqn.(i), we only have to put it together into one function.
```
def run_dynamics(x0, I, tau, T, dt):
    '''
    Runs the low-pass filter dynamics Eqn.(i) with Euler integration.

    Expects:
        x0  : initial condition
        I   : constant drive
        tau : time constant
        T   : total run time
        dt  : time increment

    Returns:
        rec_x : recording array of shape (1, int(T/dt)) holding the
                x trajectory
    '''
    n_steps = int(T / dt)
    rec_x = init_rec_pot(1, n_steps)  # initialize the x-recorder
    x = x0                            # initialize the state
    for it in range(n_steps):
        f = f_x(x, I, tau)            # derivative at the current state
        x = euler_step(x, f, dt)      # Euler update of x
        store_pot(rec_x, x, it)       # record the new state
    return rec_x
```
The code below plots the result of our Euler integration in comparison to the analytical solution. Play with dt to see what effect it has.
```
## define params
x0 = 0.
I = 1.
tau = 1.
T,dt = 20.,0.1
## run dynamics
rx = run_dynamics(x0,I,tau,T,dt)
ts = np.arange(0,T,dt)
## plot the trace
plot_pot(rx)
## to check if solved correctly, add analytical solution Eqn.(ii)
def x_sol(t, x0, I, tau):
    '''Analytical solution Eqn.(ii) of the low-pass dynamics Eqn.(i).'''
    decay = np.exp(-t / tau)
    return I * (1.0 - decay) + x0 * decay
## plot analytical trace
plt.plot(x_sol(ts+dt,x0,I,tau),'r--')
```
### Now that we successfully set up devices to record and plot traces, and learned how to solve differential equations numerically, let's cook up our Fitzhugh Nagumo models.
---
## Exercise: Euler integration for the Fitzhugh-Nagumo model
In order to solve these coupled equations numerically, we again interpret the differential dynamics in discrete time:
$$dt \rightarrow \Delta t= t_{i+1}-t_i, \quad dv \rightarrow \Delta v = v_{i+1}-v_i, \quad dw \rightarrow \Delta w =w_{i+1}- w_i$$
### Initialize
We will need an initialization step for $v,w$:
$$v(0)=v_0, \quad w(0)=w_0$$
### Update
The next step is to update the dynamics from $t_i$ to $t_{i+1}$. We will use Euler integration to do this.
Take good care of the fact that Eqns.(1.1) and (1.2) are coupled. If we want to update one first, the second would already receive updated information, so in order to maintain causality, use some auxiliary names in the update
$$
hv = v_{i+1}\\
hw = w_{i+1}
$$
The discretized dynamics thus becomes:
\begin{eqnarray}
\Delta v = v_{i+1}-v_i &=& \Delta t \left (v_i - \frac{v^3_i}{3} - w_i + I(t_i) \right)\\
\\
\Leftrightarrow v_{i+1}=:hv &=& v_i \,+ \,\Delta t \left (v_i - \frac{v^3_i}{3} - w_i + I(t_i) \right)\quad (2.1)\\
\\ \\
\Delta w = w_{i+1}-w_i &=& \Delta t\, \epsilon \, \big( b_0 + b_1 v_i -w_i\big)\\ \\
\Leftrightarrow w_{i+1}=: hw &=& w_i+\Delta t\, \epsilon \, \big( b_0 + b_1 v_i -w_i\big) \quad \quad (2.2)
\end{eqnarray}
We thus arrive at
\begin{eqnarray}
hv &=& v \,+ \,\Delta t \left (v- \frac{v^3}{3} - w + I(t_i) \right)\quad (3.1)\\
hw &=& w \,+\,\Delta t\, \epsilon \, \big( b_0 + b_1 v -w\big) \quad (3.2)
\end{eqnarray}
Looking at Eqns.(3.1),(3.2), we recognize the identical Euler integration update structure in both cases ($x=\{v,w\}$):
$$
hx = x + \Delta t f(x) \quad (4)
$$
We can thus simply recycle our euler_step function from before and adapt the state variable increments on the RHS in Eqns.(3.1),(3.2): $f=$fitzhugh_nagumo($v,w,p$), where p contains all relevant parameters ($b_0,b_1,\epsilon$).
After the update is complete, we can set the new $v=hv, w=hw$ and reiterate.
### To do:
Write
- init functions
- functions to compute v,w increments
- run-function, where you recycle euler_step for the update (remember to update both with the information of the current time step, see $hv,hw$ above)
Then, store and plot the resulting traces as we have done in the example on top!
Try to come up with good tests to see if all functions have desired behavior!
Play with the individual parameters and see how the dynamics changes.
```
def init_v(v0):
    '''
    Returns the initial value of the fast variable v.
    '''
    return v0
def init_w(w0):
    '''
    Returns the initial value of the slow recovery variable w.
    '''
    return w0
def f_vw(v, w, eps, b0, b1, I):
    '''
    Computes the v,w increments for the Fitzhugh-Nagumo model,
    Eqns.(1.1) and (1.2). (Reference solution for the exercise stub.)

    Expects:
        v,w       : current values of v,w
        eps,b0,b1 : coupling parameters
        I         : drive

    Returns:
        fv,fw : increments dv/dt, dw/dt for Euler integration
    '''
    fv = v - v**3 / 3.0 - w + I       # Eqn.(1.1)
    fw = eps * (b0 + b1 * v - w)      # Eqn.(1.2)
    return fv, fw
def run_fitzhugh_nagumo(v0, w0, eps, b0, b1, I, T, dt):
    '''
    Runs the Fitzhugh-Nagumo dynamics with Euler integration.
    (Reference solution for the exercise stub.)

    Expects:
        v0, w0    : initial values
        eps,b0,b1 : coupling parameters
        I         : drive
        T         : total runtime
        dt        : time increment

    Returns:
        rec_v, rec_w : recording arrays (1 x int(T/dt)) of the v,w traces
    '''
    n_steps = int(T / dt)
    rec_v = init_rec_pot(1, n_steps)
    rec_w = init_rec_pot(1, n_steps)
    v = init_v(v0)
    w = init_w(w0)  # was init_v(w0) in the original stub; same behavior
    for it in range(n_steps):
        # Compute both increments from the *current* state before updating
        # either variable, so the coupled update stays causal (hv, hw).
        fv, fw = f_vw(v, w, eps, b0, b1, I)
        hv = euler_step(v, fv, dt)
        hw = euler_step(w, fw, dt)
        v, w = hv, hw
        store_pot(rec_v, v, it)
        store_pot(rec_w, w, it)
    return rec_v, rec_w
epsilon = 0.1
b0 = 0.
b1 = 0.2
I = -0.5
T,dt = 100,0.1
v0,w0 = 0.5,0.5
rv,rw = run_fitzhugh_nagumo(v0,w0,epsilon,b0,b1,I,T,dt)
## params to try:
## I = [-0.5,-0.4,-0.3,0.2,0.5]
## b1 = [0.2,0.5]
## also vary initial conditions
plot_pot(rv, time = np.arange(0, T, dt))
plt.xlabel('Time (ms)')
plt.ylabel(r'$v(t)$')
plot_pot(rw, time = np.arange(0, T, dt))
plt.xlabel('Time (ms)')
plt.ylabel(r'$w(t)$')
```
# >>> Extra: Leaky integrate-and-fire (LIF) neuron <<<
### If you managed to go through all the material so far, you can start working on the LIF model. We will work on this in more detail in the Spiking Network Tutorial
The membrane potential equation for the LIF is given by
\begin{eqnarray}
C_m\frac{dV}{dt} = -g_L(V-V_L) + I
\end{eqnarray}
$I$ is a driving current. $C_m$ is the membrane capacitance of the neuron. We also assume the membrane only has a leak conductance $g_L$, trying to bring the voltage to leak potential $V_L$.
Dividing both sides of the equation by $g_L$ we get,
\begin{align}
\tau_m\frac{dV}{dt} = -(V-V_L) + \frac{I}{g_L}\,,\quad (1)
\end{align}
where $\tau_m=C/g_L$.
If $I$ is sufficiently strong such that $V$ reaches some threshold value $V_{th}$, $V$ will be reset to a reset potential $V_{reset}<V_{th}$, i.e.,
\begin{eqnarray}
\mathrm{if}\quad V(t)\geq V_{th}&:& V(t^+)=V_{reset}
\end{eqnarray}
To numerically solve the LIF membrane potential dynamics, we will again use Euler integration to approximate the continuous dynamics by a piecewise integration on a time-grid of stepsize $\Delta t$.
Note once more, that in order to have a good approximation, $\Delta t$ needs to be small compared to all other time constants in the system (here $\tau_m$). The smaller $\Delta t$, the better the approximation, however, also the total computation time will be longer.
On a grid the dynamics translates to
\begin{aligned}
\tau_m \Delta V = \tau_m (V_{i+1}-V_i) &= \left(-(V_i-V_L) +\frac{I_{syn}}{g_L}\right)dt \\
\Leftrightarrow \qquad\qquad\qquad\quad V_{i+1} &= V_i+ \left(-(V_i-V_L) +\frac{I_{syn}}{g_L}\right)\frac{dt}{\tau_m} \quad (2)
\end{aligned}
### Simulating LIF dynamics
We will now translate the dynamics Eqn.(2) into code. We start by creating a dictionary `pm` that contains all parameters.
Then we will write several small modular functions that perform the individual operations to solve the dynamics.
We will likely need several functions to
- initialize a dictionary that represents membrane potentials
- initialize a dictionary that represents constant input currents **(Amplitude of constant current is generated by Gaussian distribution with mean $\mu$ and variance $\sigma^2$)**
- an update function that updates the membrane potential in each time step corresponding to the Euler integration scheme above
- a threshold handling function that resets the membrane potential in case of a crossing and registers a spike event
- a main function that combines all of the above in a sensible way to solve the LIF dynamics
```
# Some typical neuron parameters, stored in a dictionary
pm={"V_th" : -55., # spike threshold [mV]
"V_reset" : -75., # reset potential [mV]
"tau_m" : 20., # membrane time constant [ms]
"g_L" : 20., # leak conductance [nS]
"V_init" : -65, # initial potential
"V_L" : -65} # leak reversal potential
def init_V(n_neuron,pm):
    '''
    Initialize the array of membrane potentials.

    Expects:
      n_neuron : number of neurons
      pm       : parameter dictionary (reads pm["V_init"])

    Returns:
      vs : array of length n_neuron with every membrane potential (scalar)
           set to V_init [mV]
    '''
    # ADD CODE HERE
# test init_V()
# ADD CODE HERE
def init_I_syn(n_neuron,mu,sig):
    '''
    Initialize the array of direct input currents, each amplitude drawn from a
    Gaussian distribution with mean mu and standard deviation sig.

    Expects:
      n_neuron : number of neurons
      mu, sig  : mean and standard deviation of the Gaussian from which the
                 constant driving-current amplitudes are drawn [pA]

    Returns:
      isyns : array of length n_neuron with the driving currents
    '''
    # ADD CODE HERE
# test init_I_syn()
# ADD CODE HERE
def threshold(V,n_neuron,pm):
    '''
    Handle the nonlinear spike threshold:
    wherever potential >= V_th, reset it to V_reset and register a spike event.

    Expects:
      V        : membrane potentials [mV]
      n_neuron : number of neurons
      pm       : parameter dictionary (reads pm["V_th"], pm["V_reset"])

    Returns:
      V       : updated membrane potential values [mV]
      spiking : boolean array of length n_neuron with entries True at position i
                if neuron i spiked in the current time step, False otherwise
    '''
    # ADD CODE HERE
# test threshold()
# ADD CODE HERE
### For the update make use of Euler integration
def update(V,n_neuron,I_syn,dt,pm):
    '''
    Advance all n_neuron membrane potentials by one Euler step of Eqn. (2)
    (assuming all neurons share the same parameters pm).

    Expects:
      V        : membrane potentials [mV]
      n_neuron : number of neurons
      I_syn    : injected current drive [pA]
      dt       : time increment [ms]
      pm       : parameter dictionary (reads pm["V_L"], pm["g_L"], pm["tau_m"])

    Returns:
      V       : updated membrane potentials [mV]
      spiking : boolean array indicating which neurons fired in this step
    '''
    # ADD CODE HERE
# test update()
# ADD CODE HERE
def run_LIF(n_neuron,mu,sig,T,dt,pm):
    '''
    Simulate the LIF dynamics of n_neuron uncoupled neurons.

    Combines init_V, init_I_syn, update and threshold to integrate the
    membrane potentials on a time grid of step dt over a total time T.

    Expects:
      n_neuron : number of neurons
      mu,sig   : mean and standard deviation of the constant input current
                 drives [pA]
      T        : total run time [ms]
      dt       : time increment [ms]
      pm       : parameter dictionary

    Returns:
      rec_spikes : dictionary mapping neuron index to its list of spike times
    '''
    # ADD CODE HERE
def plot_raster(spks:dict)->None:
    '''
    Draw a spike raster: one horizontal row of tick marks per neuron.

    Expects:
      spks : dictionary mapping neuron index -> array/list of spike times

    Returns:
      None
    '''
    for neuron_id, spike_times in spks.items():
        # '|k' draws black vertical ticks, one per spike, at height neuron_id
        plt.plot(spike_times, neuron_id * np.ones(len(spike_times)), '|k')
# test LIF_run()
# if all worked, uncomment codes below to run LIF dynamics for n_neuron uncoupled neurons
# n_neuron = 100
# mu, sig = 240,100
# T,dt = 100,0.1
# rec_spikes = run_LIF(n_neuron,mu,sig,T,dt,pm)
# plot_raster(rec_spikes)
```
### >>> EXTRA: LIF driven by Gaussian white noise and Ornstein-Uhlenbeck current <<<
So far the neurons were driven by constant currents, with amplitudes drawn randomly from a Gaussian distribution.
1) Set up a version where the input current is (approximately) Gaussian white noise $\xi(t)$ with mean $E[\xi(t)]=\mu=0$ and autocovariance $E[\xi(t)\xi(t+\tau)]=\sigma_\xi^2 \delta(\tau)$.
Hint: In order to have correct statistics of the GWN approximation on a time grid with increment $dt$ the noise amplitude $\sigma_\xi$ needs to be scaled by $1/\sqrt{dt}$.
Make sure to test that the mean and variance of the current input your function produces has the right mean and variance.
2) Set up a version where the input current is an Ornstein-Uhlenbeck process $\eta(t)$, i.e., low-pass filtered GWN with a time constant $\tau_{\eta}$:
$$\tau_\eta \frac{d}{dt}\eta(t) = -\eta + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$
Make sure to test that the mean and variance of the current input your function produces has the right mean and variance.
Hint: An OUP as defined above has $E[\eta(t)]=0$ and autocovariance $E[\eta(t)\eta(t+\tau)]=\sigma_\eta^2 e^{-|\tau|/\tau_\eta}$.
| github_jupyter |
# 4️⃣ Zero-Shot Cross-Lingual Transfer using Adapters
Beyond AdapterFusion, which we trained in [the previous notebook](https://github.com/Adapter-Hub/adapter-transformers/blob/master/notebooks/04_Cross_Lingual_Transfer.ipynb), we can compose adapters for zero-shot cross-lingual transfer between tasks. We will use the stacked adapter setup presented in **MAD-X** ([Pfeiffer et al., 2020](https://arxiv.org/pdf/2005.00052.pdf)) for this purpose.
In this example, the base model is a pre-trained multilingual **XLM-R** (`xlm-roberta-base`) ([Conneau et al., 2019](https://arxiv.org/pdf/1911.02116.pdf)) model. Additionally, two types of adapters, language adapters and task adapters, are used. Here's how the MAD-X process works in detail:
1. Train language adapters for the source and target language on a language modeling task. In this notebook, we won't train them ourselves but use [pre-trained language adapters from the Hub](https://adapterhub.ml/explore/text_lang/).
2. Train a task adapter on the target task dataset. This task adapter is **stacked** upon the previously trained language adapter. During this step, only the weights of the task adapter are updated.
3. Perform zero-shot cross-lingual transfer. In this last step, we simply replace the source language adapter with the target language adapter while keeping the stacked task adapter.
Now to our concrete example: we select **XCOPA** ([Ponti et al., 2020](https://ducdauge.github.io/files/xcopa.pdf)), a multilingual extension of the **COPA** commonsense reasoning dataset ([Roemmele et al., 2011](https://people.ict.usc.edu/~gordon/publications/AAAI-SPRING11A.PDF)) as our target task. The setup is trained on the original **English** dataset and then transferred to **Chinese**.
## Installation
Besides `adapter-transformers`, we use HuggingFace's `datasets` library for loading the data. So let's install both first:
```
!pip install -U adapter-transformers
!pip install -U datasets
```
## Dataset Preprocessing
We need the English COPA dataset for training our task adapter. It is part of the SuperGLUE benchmark and can be loaded via `datasets` using one line of code:
```
from datasets import load_dataset
from transformers.adapters.composition import Stack
dataset_en = load_dataset("super_glue", "copa")
dataset_en.num_rows
```
Every dataset sample has a premise, a question and two possible answer choices:
```
dataset_en['train'].features
```
In this example, we model COPA as a multiple-choice task with two choices. Thus, we encode the premise and question as well as both choices as one input to our `xlm-roberta-base` model. Using `dataset.map()`, we can pass the full dataset through the tokenizer in batches:
```
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
def encode_batch(examples):
    """Encodes a batch of input data using the model tokenizer.

    Each example is encoded as a pair of input sequences per answer choice,
    as required by the multiple-choice prediction head.
    """
    input_ids, attention_masks = [], []
    columns = (examples["premise"], examples["question"],
               examples["choice1"], examples["choice2"])
    for premise, question, choice1, choice2 in zip(*columns):
        # The premise + question context is repeated once per answer choice
        context = premise + " " + question
        first_sentences = [context, context]
        second_sentences = [choice1, choice2]
        encoded = tokenizer(
            first_sentences,
            second_sentences,
            max_length=60,
            truncation=True,
            padding="max_length",
        )
        input_ids.append(encoded["input_ids"])
        attention_masks.append(encoded["attention_mask"])
    return {"input_ids": input_ids, "attention_mask": attention_masks}
def preprocess_dataset(dataset):
    """Tokenize a COPA-style dataset and prepare it for the Trainer.

    Expects a `datasets` dataset (or DatasetDict); returns it tokenized,
    with the target column renamed to "labels" and formatted for pytorch.
    """
    # Encode the input data
    dataset = dataset.map(encode_batch, batched=True)
    # The transformers model expects the target class column to be named "labels".
    # The in-place `rename_column_` is deprecated and was removed in datasets>=2.0;
    # use the functional `rename_column`, which returns the renamed dataset.
    dataset = dataset.rename_column("label", "labels")
    # Transform to pytorch tensors and only output the required columns.
    # NOTE(review): no `type=` is passed here, so columns are restricted but not
    # converted to tensors; the Trainer's default collator handles conversion —
    # confirm this matches the intent of the comment above.
    dataset.set_format(columns=["input_ids", "attention_mask", "labels"])
    return dataset
dataset_en = preprocess_dataset(dataset_en)
```
## Task Adapter Training
In this section, we will train the task adapter on the English COPA dataset. We use a pre-trained XLM-R model from HuggingFace and instantiate our model using `AutoModelWithHeads`.
```
from transformers import AutoConfig, AutoModelWithHeads
config = AutoConfig.from_pretrained(
"xlm-roberta-base",
)
model = AutoModelWithHeads.from_pretrained(
"xlm-roberta-base",
config=config,
)
```
Now we only need to set up the adapters. As described, we need two language adapters (which are assumed to be pre-trained in this example) and a task adapter (which will be trained in a few moments).
First, we load both the language adapters for our source language English (`"en"`) and our target language Chinese (`"zh"`) from the Hub. Then we add a new task adapter (`"copa"`) for our target task.
Finally, we add a multiple-choice head with the same name as our task adapter on top.
```
from transformers import AdapterConfig
# Load the language adapters
lang_adapter_config = AdapterConfig.load("pfeiffer", reduction_factor=2)
model.load_adapter("en/wiki@ukp", config=lang_adapter_config)
model.load_adapter("zh/wiki@ukp", config=lang_adapter_config)
# Add a new task adapter
model.add_adapter("copa")
# Add a classification head for our target task
model.add_multiple_choice_head("copa", num_choices=2)
```
We want the task adapter to be stacked on top of the language adapter, so we have to tell our model to use this setup via the `active_adapters` property.
A stack of adapters is represented by the `Stack` class, which takes the names of the adapters to be stacked as arguments.
Of course, there are various other possibilities to compose adapters beyond stacking. Learn more about those [in our documentation](https://docs.adapterhub.ml/adapter_composition.html).
```
# Unfreeze and activate stack setup
model.active_adapters = Stack("en", "copa")
```
Great! Now, the input will be passed through the English language adapter first and the COPA task adapter second in every forward pass.
Just one final step to make: Using `train_adapter()`, we tell our model to only train the task adapter in the following. This call will freeze the weights of the pre-trained model and the weights of the language adapters to prevent them from further finetuning.
```
model.train_adapter(["copa"])
```
For training, we make use of the `Trainer` class built-in into `transformers`. We configure the training process using a `TrainingArguments` object.
As the dataset splits of English COPA in the SuperGLUE are slightly different, we train on both the train and validation split of the dataset. Later, we will evaluate on the test split of XCOPA.
```
from transformers import TrainingArguments, Trainer
from datasets import concatenate_datasets
training_args = TrainingArguments(
learning_rate=1e-4,
num_train_epochs=8,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
logging_steps=100,
output_dir="./training_output",
overwrite_output_dir=True,
# The next line is important to ensure the dataset labels are properly passed to the model
remove_unused_columns=False,
)
train_dataset = concatenate_datasets([dataset_en["train"], dataset_en["validation"]])
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
```
Start the training 🚀 (this will take a while)
```
trainer.train()
```
## Cross-lingual transfer
With the model and all adapters trained and ready, we can come to the cross-lingual transfer step here. We will evaluate our setup on the Chinese split of the XCOPA dataset.
Therefore, we'll first download the data and preprocess it using the same method as the English dataset:
```
dataset_zh = load_dataset("xcopa", "zh", ignore_verifications=True)
dataset_zh = preprocess_dataset(dataset_zh)
print(dataset_zh["test"][0])
```
Next, let's adapt our setup to the new language. We simply replace the English language adapter with the Chinese language adapter we already loaded previously. The task adapter we just trained is kept. Again, we set this architecture using `active_adapters`:
```
model.active_adapters = Stack("zh", "copa")
```
Finally, let's see how well our adapter setup performs on the new language. We measure the zero-shot accuracy on the test split of the target language dataset. Evaluation is also performed using the built-in `Trainer` class.
```
import numpy as np
from transformers import EvalPrediction
def compute_accuracy(p: EvalPrediction):
    """Return the fraction of examples whose argmax prediction matches the label."""
    predicted_labels = p.predictions.argmax(axis=1)
    correct = predicted_labels == p.label_ids
    return {"acc": correct.mean()}
eval_trainer = Trainer(
model=model,
args=TrainingArguments(output_dir="./eval_output", remove_unused_columns=False,),
eval_dataset=dataset_zh["test"],
compute_metrics=compute_accuracy,
)
eval_trainer.evaluate()
```
You should get an overall accuracy of about 56 which is on-par with full finetuning on COPA only but below the state-of-the-art which is sequentially finetuned on an additional dataset before finetuning on COPA.
For results on different languages and a sequential finetuning setup which yields better results, make sure to check out [the MAD-X paper](https://arxiv.org/pdf/2005.00052.pdf).
| github_jupyter |
# Time Series Analysis with Pastas
*Developed by Mark Bakker, TU Delft*
Required files to run this notebook (all available from the `data` subdirectory):
* Head files: `head_nb1.csv`, `B58C0698001_1.csv`, `B50H0026001_1.csv`, `B22C0090001_1.csv`, `headwell.csv`
* Precipitation files: `rain_nb1.csv`, `neerslaggeg_HEIBLOEM-L_967.txt`, `neerslaggeg_ESBEEK_831.txt`, `neerslaggeg_VILSTEREN_342.txt`, `rainwell.csv`
* Evaporation files: `evap_nb1.csv`, `etmgeg_380.txt`, `etmgeg_260.txt`, `evapwell.csv`
* Well files: `well1.csv`, `well2.csv`
* Figure: `b58c0698_dino.png`
### Pastas
Pastas is a computer program for hydrological time series analysis and is available from the [Pastas Github](https://github.com/pastas/pastas) . Pastas makes heavy use of `pandas` `timeseries`. An introduction to `pandas` `timeseries` can be found, for example, [here](http://nbviewer.jupyter.org/github/mbakker7/exploratory_computing_with_python/blob/master/notebook8_pandas/py_exploratory_comp_8_sol.ipynb). The Pastas documentation is available [here](http://pastas.readthedocs.io).
```
import pandas as pd
import pastas as ps
import matplotlib.pyplot as plt
ps.set_log_level("ERROR")
ps.show_versions()
```
### Load the head observations
The first step in time series analysis is to load a time series of head observations. The time series needs to be stored as a `pandas.Series` object where the index is the date (and time, if desired). `pandas` provides many options to load time series data, depending on the format of the file that contains the time series. In this example, measured heads are stored in the csv file `head_nb1.csv`.
The heads are read from a csv file with the `read_csv` function of `pandas` and are then squeezed to create a `pandas Series` object. To check if you have the correct data type, use the `type` command as shown below.
```
ho = pd.read_csv('../data/head_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the oseries is:', type(ho))
```
The variable `ho` is now a `pandas Series` object. To see the first five lines, type `ho.head()`.
```
ho.head()
```
The series can be plotted as follows
```
ho.plot(style='.', figsize=(12, 4))
plt.ylabel('Head [m]');
plt.xlabel('Time [years]');
```
### Load the stresses
The head variation shown above is believed to be caused by two stresses: rainfall and evaporation. Measured rainfall is stored in the file `rain_nb1.csv` and measured potential evaporation is stored in the file `evap_nb1.csv`.
The rainfall and potential evaporation are loaded and plotted.
```
rain = pd.read_csv('../data/rain_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the rain series is:', type(rain))
evap = pd.read_csv('../data/evap_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True)
print('The data type of the evap series is', type(evap))
plt.figure(figsize=(12, 4))
rain.plot(label='rain')
evap.plot(label='evap')
plt.xlabel('Time [years]')
plt.ylabel('Rainfall/Evaporation (m/d)')
plt.legend(loc='best');
```
### Recharge
As a first simple model, the recharge is approximated as the measured rainfall minus the measured potential evaporation.
```
recharge = rain - evap
plt.figure(figsize=(12, 4))
recharge.plot()
plt.xlabel('Time [years]')
plt.ylabel('Recharge (m/d)');
```
### First time series model
Once the time series are read from the data files, a time series model can be constructed by going through the following three steps:
1. Create a `Model` object by passing it the observed head series. Store your model in a variable so that you can use it later on.
2. Add the stresses that are expected to cause the observed head variation to the model. In this example, this is only the recharge series. For each stress, a `StressModel` object needs to be created. Each `StressModel` object needs three input arguments: the time series of the stress, the response function that is used to simulate the effect of the stress, and a name. In addition, it is recommended to specify the `kind` of series, which is used to perform a number of checks on the series and fix problems when needed. This checking and fixing of problems (for example, what to substitute for a missing value) depends on the kind of series. In this case, the time series of the stress is stored in the variable `recharge`, the Gamma function is used to simulate the response, the series will be called `'recharge'`, and the kind is `prec` which stands for precipitation. One of the other keyword arguments of the `StressModel` class is `up`, which means that a positive stress results in an increase (up) of the head. The default value is `True`, which we use in this case as a positive recharge will result in the heads going up. Each `StressModel` object needs to be stored in a variable, after which it can be added to the model.
3. When everything is added, the model can be solved. The default option is to minimize the sum of the squares of the errors between the observed and modeled heads.
```
ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.solve(tmin='1985', tmax='2010')
```
The `solve` function has a number of default options that can be specified with keyword arguments. One of these options is that by default a fit report is printed to the screen. The fit report includes a summary of the fitting procedure, the optimal values obtained by the fitting routine, and some basic statistics. The model contains five parameters: the parameters $A$, $n$, and $a$ of the Gamma function used as the response function for the recharge, the parameter $d$, which is a constant base level, and the parameter $\alpha$ of the noise model, which will be explained a little later on in this notebook.
The results of the model are plotted below.
```
ml.plot(figsize=(12, 4));
ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.solve(tmin='1985', tmax='2010', solver=ps.LeastSquares)
ml = ps.Model(ho)
sm1 = ps.StressModel(recharge, ps.Gamma, name='recharge', settings='prec')
ml.add_stressmodel(sm1)
ml.set_parameter('recharge_n', vary=False)
ml.solve(tmin='1985', tmax='2010', solver=ps.LeastSquares)
ml.plot(figsize=(10, 4));
```
| github_jupyter |
# Day 19 - regular expressions
* https://adventofcode.com/2020/day/19
The problem description amounts to a [regular expression](https://www.regular-expressions.info/); by traversing the graph of rules you can combine the string literals into a regex pattern that the Python [`re` module](https://docs.python.org/3/library/re.html) can compile into a pattern. Using the [`Pattern.fullmatch()` method](https://docs.python.org/3/library/re.html#re.Pattern.fullmatch) you can then check each message for validity.
Having just used the `tokenize` module the [day before](./Day%2018.ipynb), I found it very helpful to parse the rules, as well.
```
import re
from collections import deque
from collections.abc import Iterable, Mapping, MutableMapping
from io import StringIO
from itertools import islice
from tokenize import generate_tokens, NUMBER, STRING, TokenInfo
from typing import Callable, Dict, Tuple
def parse_rules(lines: Iterable[str], make_regex: Callable[[str], re.Pattern[str]]) -> re.Pattern[str]:
    """Resolve the rule graph into a single compiled regex for rule "0".

    Expects:
      lines      : rule definitions, one "id: body" rule per line
      make_regex : compiler for the final pattern string (re.compile or regex.compile)
    """
    def tokenize_rule(rule: str) -> Tuple[str, Iterable[TokenInfo]]:
        token_stream = generate_tokens(StringIO(rule).readline)
        # The first token is the rule id; the COLON after it is skipped.
        rule_id = next(token_stream).string
        return rule_id, list(islice(token_stream, 1, None))

    pending = dict(tokenize_rule(line) for line in lines)
    resolved: MutableMapping[str, str] = {}
    # NUMBER tokens reference other rules; STRING tokens are quoted literals
    # (quotes stripped); all other tokens (| ( ) + ?) pass through unchanged.
    render: Mapping[int, Callable[[str], str]] = {
        NUMBER: resolved.__getitem__,
        STRING: lambda lit: lit[1:-1],
    }
    todo = deque(['0'])
    while todo:
        toks = pending[todo[-1]]
        unresolved = {t.string for t in toks if t.type == NUMBER and t.string not in resolved}
        if unresolved:
            # Defer this rule until every rule it references has been resolved.
            todo.extend(unresolved)
            continue
        # All references available: substitute and wrap in a non-capturing group.
        body = "".join(render.get(t.type, str)(t.string) for t in toks)
        resolved[todo.pop()] = f"(?:{body})"
    return make_regex(resolved["0"])


def validate_messages(data: str, make_regex: Callable[[str], re.Pattern[str]] = re.compile) -> int:
    """Count how many messages fully match rule 0 of the rule section."""
    rule_section, message_section = data.split("\n\n")
    pattern = parse_rules(rule_section.splitlines(), make_regex)
    return sum(pattern.fullmatch(message) is not None
               for message in message_section.splitlines())
assert validate_messages("""\
0: 4 1 5
1: 2 3 | 3 2
2: 4 4 | 5 5
3: 4 5 | 5 4
4: "a"
5: "b"
ababbb
bababa
abbbab
aaabbb
aaaabbb
""") == 2
import aocd
data = aocd.get_data(day=19, year=2020)
print("Part 1:", validate_messages(data))
```
## Part 2 - recursive regex
Part two introduces _recursion_; patterns `8` and `11` add self-references.
For rule 8, that just means that the contained rule `42` just matches 1 or more times (`"42 | 42 8"` will match `"42"`, `"42 42"`, `"42 42 42"`, etc), so can be simplified using the [`+` repetition operator](https://www.regular-expressions.info/repeat.html), to `"8: 42 +"` which my tokenizer-based parser will happily assemble.
But the change for rule 11, `"42 31 | 42 11 31"` is not so easily simplified. The rule matches for any number of repetitions of `"42"` and `"31"` **provided they repeat an equal number of times**. To check for such patterns using regular expressions, you need a regex engine that supports either [balancing groups](https://www.regular-expressions.info/balancing.html) or [recursion](https://www.regular-expressions.info/recurse.html). .NET's regex engine would let you use balancing groups (the pattern, with spaces around the pattern IDs, would be `(?'g42' 42 )+ (?'-g42' 31 )+ (?(g42)(?!))`), and Perl, Ruby and any regex engine based on PCRE would let you use recursion.
Lucky for me, the [`regex` package](https://pypi.org/project/regex/) _does_ support recursion. The package may one day be ready to replace the standard-library `re` module, but that day has not yet arrived. In the meantime, if you have advanced regex needs, do keep the existence of that package in mind! As for the recursion syntax: given a named group `(?P<groupname>...)`, the expression `(?&groupname)` will match everything within the named pattern, and `(?&groupname)?` will do so 0 or more times. So, we can replace `"42 31 | 42 11 31"` with `"(?P<rule_11> 42 (?&rule_11)? 31 )"` to get the desired regex validation pattern.
```
import regex
def validate_corrected_rules(data: str) -> int:
    """Validate messages after patching rules 8 and 11 for recursion (part 2)."""
    # Rule 8 (42 | 42 8) collapses to one-or-more repetitions of rule 42.
    patched = data.replace("8: 42\n", "8: 42 +\n")
    # Rule 11 (42 31 | 42 11 31) needs balanced repetition: express it as a
    # named group that recursively matches itself between 42 and 31.
    patched = patched.replace("11: 42 31\n", "11: (?P<rule_11> 42 (?&rule_11)? 31 )\n")
    # The stdlib re engine has no recursion support, so compile with `regex`.
    return validate_messages(patched, regex.compile)
assert validate_corrected_rules("""\
42: 9 14 | 10 1
9: 14 27 | 1 26
10: 23 14 | 28 1
1: "a"
11: 42 31
5: 1 14 | 15 1
19: 14 1 | 14 14
12: 24 14 | 19 1
16: 15 1 | 14 14
31: 14 17 | 1 13
6: 14 14 | 1 14
2: 1 24 | 14 4
0: 8 11
13: 14 3 | 1 12
15: 1 | 14
17: 14 2 | 1 7
23: 25 1 | 22 14
28: 16 1
4: 1 1
20: 14 14 | 1 15
3: 5 14 | 16 1
27: 1 6 | 14 18
14: "b"
21: 14 1 | 1 14
25: 1 1 | 1 14
22: 14 14
8: 42
26: 14 22 | 1 20
18: 15 15
7: 14 5 | 1 21
24: 14 1
abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa
bbabbbbaabaabba
babbbbaabbbbbabbbbbbaabaaabaaa
aaabbbbbbaaaabaababaabababbabaaabbababababaaa
bbbbbbbaaaabbbbaaabbabaaa
bbbababbbbaaaaaaaabbababaaababaabab
ababaaaaaabaaab
ababaaaaabbbaba
baabbaaaabbaaaababbaababb
abbbbabbbbaaaababbbbbbaaaababb
aaaaabbaabaaaaababaa
aaaabbaaaabbaaa
aaaabbaabbaaaaaaabbbabbbaaabbaabaaa
babaaabbbaaabaababbaabababaaab
aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba
""") == 12
print("Part 2:", validate_corrected_rules(data))
```
| github_jupyter |
# Kriging Example1
- Author: Mohit S. Chauhan
- Date: Jan 08, 2019
In this example, Kriging is used to generate a surrogate model for a given data. In this data, sample points are generated using STS class and functional value at sample points are estimated using a model defined in a python script ('python_model_function.py').
Import the necessary libraries. Here we import standard libraries such as numpy and matplotlib, but also need to import the STS, RunModel and Krig class from UQpy.
```
from UQpy.Surrogates import Kriging
from UQpy.SampleMethods import RectangularStrata, RectangularSTS
from UQpy.RunModel import RunModel
from UQpy.Distributions import Gamma
import numpy as np
import matplotlib.pyplot as plt
```
Create a distribution object.
```
marginals = [Gamma(a= 2., loc=1., scale=3.)]
```
Create a strata object.
```
strata = RectangularStrata(nstrata=[20])
```
Run stratified sampling
```
x = RectangularSTS(dist_object=marginals, strata_object=strata, nsamples_per_stratum=1, random_state=2)
```
RunModel is used to evaluate function values at sample points. Model is defined as a function in python file 'python_model_function.py'.
```
rmodel = RunModel(model_script='python_model_1Dfunction.py', delete_files=True)
rmodel.run(samples=x.samples)
K = Kriging(reg_model='Linear', corr_model='Gaussian', nopt=20, corr_model_params=[1], random_state=2)
K.fit(samples=x.samples, values=rmodel.qoi_list)
print(K.corr_model_params)
```
Kriging surrogate is used to compute the response surface and its gradient.
```
num = 1000
x1 = np.linspace(min(x.samples), max(x.samples), num)
y, y_sd = K.predict(x1.reshape([num, 1]), return_std=True)
y_grad = K.jacobian(x1.reshape([num, 1]))
```
Actual model is evaluated at all points to compare it with kriging surrogate.
```
rmodel.run(samples=x1, append_samples=False)
```
This plot shows the input data as blue dot, blue curve is actual function and orange curve represents response curve. This plot also shows the gradient and 95% confidence interval of the kriging surrogate.
```
fig = plt.figure()
ax = plt.subplot(111)
plt.plot(x1, rmodel.qoi_list, label='Sine')
plt.plot(x1, y, label='Surrogate')
plt.plot(x1, y_grad, label='Gradient')
plt.scatter(K.samples, K.values, label='Data')
plt.fill(np.concatenate([x1, x1[::-1]]), np.concatenate([y - 1.9600 * y_sd,
(y + 1.9600 * y_sd)[::-1]]),
alpha=.5, fc='y', ec='None', label='95% CI')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
```
| github_jupyter |
# sbpy.activity: dust
[sbpy.activity](https://sbpy.readthedocs.io/en/latest/sbpy/activity.html) has classes and functions for models of cometary dust comae. Comet brightness can be estimated for observations of scattered light or themral emission.
## Light scattered by dust via Afρ
Light scattered by coma dust can be estimated via the cometary parameter Afρ. For a circular aperture, the Αfρ quantity is the product of albedo, filling factor of dust in the aperture, and aperture radius. It has units of length, is aperture-size independent for an idealized coma, and is proportional to mass loss rate under certain assumptions (see [A'Hearn et al. 1984 for details](https://ui.adsabs.harvard.edu/#abs/1984AJ.....89..579A/abstract)):
$$ Af\rho = \frac{(2 r_h \Delta)^2}{\rho}\frac{F_\lambda}{S_\lambda} $$
where $r_h$ is the heliocentric distance in au, $\Delta$ is the observer-comet distance in the same units as $\rho$, $F_\lambda$ is the flux density of the comet continuum, and $S_\lambda$ is the flux density of sunlight at 1 au in the same units as $F_\lambda$.
`sbpy` has a class that represents this parameter: `Afrho`. With this quantity, we can estimate the brightness of a comet, under the assumptions of the Afρ model (isotropic coma in free expansion).
### Afρ is a quantity
`Afrho` is an `astropy` `Quantity` that has units of length.
```
import astropy.units as u
from sbpy.activity import Afrho
afrho = Afrho(100 * u.cm)
afrho = Afrho('100 cm') # alternate initialization
afrho = Afrho(100, unit='cm') # alternate initialization
print(afrho)
# Arithmetic works as usual:
print(afrho * 2)
# unit conversion, too
print(afrho.to('m'))
```
### Afρ from observed continuum flux density
`Afrho` can also be initialized from continuum flux densities. Let's work with some photometry of comet 46P/Wirtanen by [Farnham & Schleicher (1998)](https://ui.adsabs.harvard.edu/#abs/1998A&A...335L..50F/abstract). The following observations were taken through the IHW blue continuum filter (λ=4845 Å).
```
import numpy as np
# comet ephemeris as a dictionary:
eph = {
'rh': [1.12, 1.14, 1.07, 1.49, 1.72] * u.au,
'delta': [1.41, 1.60, 1.54, 1.96, 2.31] * u.au,
'phase': [45, 38, 40, 31, 24] * u.deg
}
# observational data:
wave = 4845 * u.AA
rho = 10**np.array((4.29, 4.64, 4.49, 5.03, 4.96)) * u.km
fluxd = 10**np.array((-13.74, -13.84, -13.48, -14.11, -14.30)) * u.erg / (u.cm**2 * u.s * u.AA)
afrho = Afrho.from_fluxd(wave, fluxd, rho, eph)
print(afrho)
```
Compare with the Farnham et al. values:
```
f98 = 10**np.array((2.05, 1.71, 2.14, 1.47, 1.61)) * u.cm
print('Mean percent difference: {:.1%}'.format(np.mean(np.abs(afrho - f98) / f98)))
```
### Afρ and sbpy's solar spectrum
The few percent difference is due to the assumed solar flux density. We asked for the conversion at a specific wavelength, but the observation was through a filter with a specific width. With the `spectroscopy` module we can show the solar flux density used:
```
from sbpy.spectroscopy.sun import default_sun
sun = default_sun.get()
print(sun(wave, unit=fluxd.unit))
```
But rather than having `sbpy` compute the solar flux density at 1 au, we can pass the precomputed value for this filter from [A'Hearn et al. 1995](https://ui.adsabs.harvard.edu/#abs/1995Icar..118..223A/abstract):
```
S = 189.7 * u.erg / (u.cm**2 * u.s * u.AA)
afrho = Afrho.from_fluxd(None, fluxd, rho, eph, S=S)
print('Mean percent difference: {:.1%}'.format(np.mean(np.abs(afrho - f98) / f98)))
```
In the future, we will be able to provide the filter transmission and have `sbpy` automatically compute the mean solar flux density in the filter.
## Dust thermal emission via εfρ
Dust can also be modeled with εfρ, a thermal emission corollary to Afρ ([Kelley et al. 2013](https://ui.adsabs.harvard.edu/#abs/2013Icar..225..475K/abstract)). Albedo is replaced with emissivity, and the solar spectrum with the Planck function. It is an approximation to the thermal emission from a comet that can be useful for observation planning and making comet-to-comet comparisons:
$$ \epsilon f \rho = \frac{F_\lambda}{\pi \rho B_\lambda(T_c)} $$
where $B_\lambda(T_c)$ is the Planck function evaluated at the apparent continuum temperature $T_c$. The parameter has units of length and is included in sbpy as the `Efrho` class.
The continuum temperature is parameterized with respect to the temperature of an isothermal blackbody sphere in LTE:
$$ T_{scale} = \frac{T_c}{278\,r_h^{-0.5}} $$
Plot a model spectrum of comet 46P/Wirtanen in December 2018 from 0.3 to 30 μm, varying the contintuum temperature:
```
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from sbpy.activity import Afrho, Efrho
from sbpy.spectroscopy.sun import default_sun
%matplotlib notebook
afrho = Afrho(100 * u.cm)
efrho = Efrho(afrho * 3.5)
# comet 46P/Wirtanen on 13 Dec 2018 as observed by Earth
eph = {
'rh': 1.055 * u.au,
'delta': 0.080 * u.au,
'phase': 27 * u.deg
}
wave = np.logspace(-0.5, 1.5, 1000) * u.um
rho = 1 * u.arcsec
fsca = afrho.fluxd(wave, rho, eph)
fth = efrho.fluxd(wave, rho, eph, Tscale=1.1)
ftot = fsca + fth
fig = plt.figure(1)
fig.clear()
ax = fig.add_subplot(111)
for Tscale in [1.0, 1.1, 1.2]:
fth = efrho.fluxd(wave, rho, eph, Tscale=Tscale)
T = Tscale * 278 * u.K / np.sqrt(eph['rh'] / u.au)
ax.plot(wave, wave * fth, label="{:.0f}".format(T))
ax.plot(wave, wave * fsca)
ax.plot(wave, wave * ftot, color='k')
plt.setp(ax, xlabel='Wavelength (μm)', xscale='log',
ylabel='$\lambda F_\lambda$ (W/m$^2$)', ylim=[1e-15, 1e-12], yscale='log')
ax.legend()
plt.tight_layout()
```
| github_jupyter |
```
# Environment setup: make the repo's modeling utilities importable and load
# the county-level and UCSF hospitalization data.
# Fixed: `import matplotlib as plt` was wrong — later cells call plt.plot /
# plt.subplots, which only exist on matplotlib.pyplot. Duplicate imports
# (numpy, pandas, os, sys, oj, percentileofscore were imported twice) removed.
import datetime
import inspect
import os
import sys
from os.path import join as oj

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import percentileofscore
from sklearn.metrics import mean_absolute_error

# Add the repository root and its modeling/ directory to the import path so
# the project-local modules below resolve regardless of working directory.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.append(parentdir + '/modeling')
import load_data
from fit_and_predict import add_preds
from functions import merge_data
from viz import viz_interactive
# CHANGE THIS
from exponential_modeling import *
from fit_and_predict import *
#from pmdl_weights import *
from shared_models import SharedModel

# Load in the Data
print('loading data...')
NUM_DAYS_LIST = [1, 2, 3, 4, 5, 6, 7]
df_county = load_data.load_county_level(data_dir=oj(parentdir, 'data'))
#df_county = add_preds(df_county, NUM_DAYS_LIST=NUM_DAYS_LIST,
#                      cached_dir=oj(parentdir, 'data'))
# Currently selecting confirmed cases, can look at probable later
df_ucsf = pd.read_csv("../../covid-19-private-data/ucsf_hospitalization_data.csv", sep="\t")
df_ucsf = df_ucsf[["Date", "ZSFG COVID-19+ (total admitted)"]]
df_ucsf = df_ucsf.rename(columns={"ZSFG COVID-19+ (total admitted)": "hospitalizations"})
df_ucsf_hosp = df_ucsf["hospitalizations"]
# 1-D array of daily hospitalization counts; all later cells index into this.
UCSF_data = np.array(df_ucsf_hosp.to_numpy())
UCSF_data
```
## Defining Start Day and k
```
# Backtest configuration.
start_day = 22  # index of the first cutoff day we predict from (must be >= 1)
k = 5           # forecast horizon: number of days ahead to predict
```
## Linear, Separate Exponential, and Shared Exponential
```
# Rolling-origin backtest: for each cutoff i, fit on days [0, i) and predict
# the value k days ahead with three baselines (linear, per-series exponential,
# shared exponential), then score each against the observed series.
lin_future_predictions = []
exp_future_predictions = []
shared_exp_future_predictions = []
for i in range(start_day,UCSF_data.shape[0]-k + 1):
    tmp = UCSF_data[:i] # Should there be an i+1 for predict_future? No
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    shared_future_pred = fit_and_predict_shared_exponential(df_shared_UCSF,mode = "predict_future",outcome = "hospitalizations",demographic_vars=[],target_day=np.array([k]), verbose=False)
    shared_exp_future_predictions.append(shared_future_pred[0][0])
    # linear_fit / exponential_fit take a (1, T) array, hence the reshape.
    tmp = tmp.reshape(1,tmp.shape[0])
    lin_pred_future = linear_fit(tmp,'predict_future',target_day=np.array([k]))
    exp_pred_future = exponential_fit(tmp,'predict_future',target_day=np.array([k]))
    lin_future_predictions.append(lin_pred_future[0][0])
    exp_future_predictions.append(exp_pred_future[0][0])
lin_future_predictions
# Mean Absolute Error (MAE) Calculations: prediction for cutoff i targets day
# i+k-1, so the series is aligned starting at start_day+k-1.
lin_mae = mean_absolute_error(lin_future_predictions, UCSF_data[start_day+k-1:])
print("Linear MAE: "+str(lin_mae))
sepexp_mae = mean_absolute_error(exp_future_predictions, UCSF_data[start_day+k-1:])
print("Separate Exp MAE: "+str(sepexp_mae))
sharexp_mae = mean_absolute_error(shared_exp_future_predictions, UCSF_data[start_day+k-1:])
print("Shared Exp MAE: "+str(sharexp_mae))
# Log Scale Mean Absolute Error (LMAE) Calculations: MAE on log-transformed values.
lin_lmae = mean_absolute_error(np.log(lin_future_predictions), np.log(UCSF_data[start_day+k-1:]))
print("Linear log MAE: "+str(lin_lmae))
sepexp_lmae = mean_absolute_error(np.log(exp_future_predictions), np.log(UCSF_data[start_day+k-1:]))
print("Separate Exp log MAE: "+str(sepexp_lmae))
sharexp_lmae = mean_absolute_error(np.log(shared_exp_future_predictions), np.log(UCSF_data[start_day+k-1:]))
print("Shared Exp log MAE: "+str(sharexp_lmae))
```
## Ensemble
```
# Same rolling backtest using the project's CLEP ensemble of shared
# exponential + linear predictors, then score it.
ensemble = []
for i in range(start_day,UCSF_data.shape[0]-k+1):
    tmp = UCSF_data[:i]
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    # The ensemble returns a frame with one prediction column per target day.
    ensemble_future_prediction = fit_and_predict_ensemble(df_shared_UCSF,target_day = np.array([k]),outcome = 'hospitalizations',
        methods = [shared_exponential,linear],mode = 'predict_future', verbose = False)['predicted_hospitalizations_ensemble_'+str(k)].values[0][0]
    ensemble.append(ensemble_future_prediction)
# Mean Absolute Error (MAE) Calculations
ensemble_mae = mean_absolute_error(ensemble, UCSF_data[start_day+k-1:])
print("CLEP MAE: "+str(ensemble_mae))
ensemble_lmae = mean_absolute_error(np.log(ensemble), np.log(UCSF_data[start_day+k-1:]))
print("CLEP log MAE: "+str(ensemble_lmae))
```
## 4 Plots
```
# 2x2 grid comparing truth to each predictor's k-day-ahead forecasts.
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
axs[0, 0].plot(UCSF_data[start_day+k-1:],'r',label = "True")
axs[0, 0].plot(lin_future_predictions,'b',label= "Preds")
axs[0, 0].set_title('Linear')
axs[0, 0].set_ylim([0, 75])
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
axs[0, 0].legend(prop=fontP)  # shared legend shown on the first panel only
axs[0, 1].plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[0, 1].plot(exp_future_predictions,'b',label= "Separate exp predictions")
axs[0, 1].set_title('Separate Exp')
axs[1, 0].plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[1, 0].plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
axs[1, 0].set_title('Shared Exp')
axs[1, 1].plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[1, 1].plot(ensemble,'b',label= "CLEP predictions")
axs[1, 1].set_title('CLEP')
for ax in axs.flat:
    ax.set(xlabel='# days since 4/3/20', ylabel='# hospitalizations')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()
# NOTE(review): suptitle says "shared exponential predictor" but the figure shows all four models.
fig.suptitle(str(k)+"-day ahead prediction for shared exponential predictor")
plt.savefig("UCSF_Hospitalizations_plots/combinedplot_"+str(k)+".jpg", dpi=150)
```
## Imputed Deaths
```
# Impute San Francisco death predictions: at each cutoff, truncate every
# CA/WA county's time series, run the county-level ensemble, and keep the
# SF k-day-ahead death prediction as an auxiliary signal.
shifted_date = 50 # 50 days between 1/22 and 3/12
cen = 4 # Number of days to censor on left + 1
# Calculate df_CA
df_CA = df_county[df_county['StateName'].isin(['CA', "WA"])]
# Compute the Imputed Deaths
# lin_impu_future_predictions = []
# exp_impu_future_predictions = []
# shared_impu_exp_future_predictions = []
## Censoring the UCSF_data
UCSF_cen = UCSF_data[cen:]
imputed_deaths = []
## Q: Should beginning of the range be 0 or the start_day?
## Q: Should we include this k?
## A: I hate time series...
for i in range(1,UCSF_cen.shape[0]-k + 1):
    df_CA_test = df_CA.copy(deep=True)
    # Calculate the Imputed Deaths
    for county in range(len(df_CA_test)):
        # do we need to remove columns and truncate counts??
        # truncate list counts: deaths, cases, neighbor_deaths, neighbor_cases
        for col in ['deaths', 'cases', 'neighbor_deaths', 'neighbor_cases']:
            # adding 1 to predict t
            df_CA_test[col].iloc[county] = df_CA_test[col].iloc[county][:shifted_date + i]
    tmp = fit_and_predict_ensemble(df_CA_test, target_day=np.array([k]))
    sf_dop = tmp[tmp["CountyName"] == "San Francisco"]
    imputed_deaths.append(np.array(sf_dop["predicted_deaths_ensemble_" + str(k)])[0][0])
imputed_deaths = np.array(imputed_deaths)
# Backtest the shared model with imputed deaths as an auxiliary time feature,
# then score it against the censored UCSF series.
preds = []
for i in range(start_day,UCSF_cen.shape[0]-k + 1):
    # Calculate the Imputed Deaths
    tmp = UCSF_cen[:i] # day t-1
    deaths = imputed_deaths[:i] # day t
    assert len(tmp) == len(deaths)
    d = {'Name':['UCSF'],'hospitalizations':[tmp], "imputed_deaths": [deaths]}
    df_shared_UCSF = pd.DataFrame(data = d)
    # Feature Transforms: model on log(1 + x) scale.
    feat_transforms = {}
    feat_transforms["hospitalizations"] = [lambda x: np.log(x+1)]
    feat_transforms["imputed_deaths"] = [lambda x: np.log(x + 1)]
    # Auxiliary Time Variables
    aux_vars = ["imputed_deaths"]
    shared_model = SharedModel(df_shared_UCSF,mode = "predict_future",
                               outcome = "hospitalizations",
                               demographic_variables=[],
                               auxiliary_time_features=aux_vars,
                               target_days=np.array([k]),
                               feat_transforms=feat_transforms,
                               # NOTE(review): defaultdict is not imported in any
                               # visible cell — confirm `from collections import
                               # defaultdict` exists upstream.
                               time_series_default_values = defaultdict(lambda: 0),
                               scale=False)
    shared_model.create_dataset()
    shared_model.fit_model()
    shared_model.predict()
    preds.append(shared_model.predictions[0][0])
imputed_mae = np.mean(abs(np.array(preds) - UCSF_cen[start_day+k-1:]))
print("Imputed MAE: "+str(imputed_mae))
imputed_lmae = np.mean(abs(np.log(np.array(preds)) - np.log(UCSF_cen[start_day+k-1:])))
print("Imputed Exp log MAE: "+str(imputed_lmae))
# Individual truth-vs-prediction plots, one model per figure, saved to disk.
plt.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
plt.plot(lin_future_predictions,'b',label= "Linear predictions")
plt.legend()
plt.xlabel("# days since 3/28/20")
plt.ylabel("# people hospitalized at UCSF")
plt.title(str(k)+"-day ahead prediction for linear predictor")
plt.savefig("UCSF_Hospitalizations_plots/linear_"+str(k)+".jpg")
plt.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
plt.plot(exp_future_predictions,'b',label= "Separate exp predictions")
plt.legend()
plt.xlabel("# days since 3/28/20")
plt.ylabel("# people hospitalized at UCSF")
plt.title(str(k)+"-day ahead prediction for separate exponential predictor")
plt.savefig("UCSF_Hospitalizations_plots/sepexp_"+str(k)+".jpg")
plt.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
plt.plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
plt.legend()
plt.xlabel("# days since 3/28/20")
plt.ylabel("# people hospitalized at UCSF")
plt.title(str(k)+"-day ahead prediction for shared exponential predictor")
plt.savefig("UCSF_Hospitalizations_plots/sharexp_"+str(k)+".jpg")
# 2x2 grid: the three plain models plus the imputed-deaths shared model.
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
axs[0, 0].plot(UCSF_data[start_day+k-1:],'r',label = "True")
axs[0, 0].plot(lin_future_predictions,'b',label= "Preds")
axs[0, 0].set_title('Linear')
axs[0, 0].set_ylim([0, 50])
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
axs[0, 0].legend(prop=fontP)
axs[0, 1].plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[0, 1].plot(exp_future_predictions,'b',label= "Separate exp predictions")
axs[0, 1].set_title('Separate Exp')
axs[1, 0].plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[1, 0].plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
axs[1, 0].set_title('Shared Exp')
# x offset by cen=4 so the censored series lines up with the uncensored axis.
axs[1, 1].plot(np.arange(4, len(UCSF_data[start_day+k-1:])), UCSF_cen[start_day+k-1:],'r',label = "UCSF hospitalization data")
axs[1, 1].plot(np.arange(4, len(UCSF_data[start_day+k-1:])), np.array(preds),'b',label= "Imputed deaths shared exp predictions")
axs[1, 1].set_title('Imputed deaths')
for ax in axs.flat:
    ax.set(xlabel='# days since 3/28/20', ylabel='# hospitalizations')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()
fig.suptitle(str(k)+"-day ahead prediction for shared exponential predictor")
plt.savefig("UCSF_Hospitalizations_plots/combinedplot_"+str(k)+".jpg", dpi=150)
# Vertically stacked scratch comparison (same series repeated on each axis).
fig, (ax1, ax2, ax3) = plt.subplots(3)
fig.suptitle('Vertically stacked subplots')
ax1.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
ax1.plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
ax2.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
ax2.plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
ax3.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
ax3.plot(shared_exp_future_predictions,'b',label= "Shared exp predictions")
# Scratch notes / commented-out exploration of the county death data.
#df_CA = df_county[df_county['StateName']=='CA']
# df_CA.head()
#len(df_CA['deaths'].iloc[0])
# County data start date: 1/22/20
# print(df_county.columns.values)
# UCSF data start date: 3/8/20
# UCSF first date with 3+ hospitalizations: 3/17/20, 10 days after UCSF start
# print(df_ucsf)
# Start predicting from 3/8/20 onwards
# 46 days from the 1/22/20 to 3/8/20, not including the end date
# UCSF first prediction is: 3/12/20
# death_preds = []
# for day in range(#edit date range):
# df_CA_test = df_CA.copy(deep=True)
# #append preds to death_preds
# fit_and_predict_ensemble(df_CA_test)['predicted_deaths_ensemble_1']
# for county in range(len(df_CA)):
# # do we need to remove columns and truncate counts??
# # truncate list counts: deaths, cases, neighbor_deaths, neighbor_cases
# for col in ['deaths', 'cases', 'neighbor_deaths', 'neighbor_cases']:
# df_CA_test[col].iloc[county] = df_CA_test[col].iloc[county][:day]
# Why are we starting from 56?
# print(df_CA[df_CA['CountyName']=='San Francisco']['deaths'].values[0][56:])
# df_CA['deaths']
```
## Imputing Deaths
$hospitalizations_{t} = \beta_0 hospitalizations_{t-1} + \beta_1 imputed\ deaths_{t} + \beta_2 imputed\ deaths_{t-1}$
```
# Impute SF death predictions for each horizon in [1, 3, 5, 7]: same
# truncate-and-predict procedure as above, once per horizon, collected
# into all_imputed (one array per horizon).
shifted_date = 50 # 50 days between 1/22 and 3/12
cen = 4 # Number of days to censor on left + 1
# Calculate df_CA
df_CA = df_county[df_county['StateName'].isin(['CA', "WA"])]
# Compute the Imputed Deaths
# lin_impu_future_predictions = []
# exp_impu_future_predictions = []
# shared_impu_exp_future_predictions = []
## Censoring the UCSF_data
UCSF_cen = UCSF_data[cen:]
all_imputed = []
# NOTE: this loop rebinds the global k; later cells relying on k see 7 afterwards.
for k in [1, 3, 5, 7]:
    imputed_deaths = []
    ## Q: Should beginning of the range be 0 or the start_day?
    ## Q: Should we include this k?
    ## A: I hate time series...
    for i in range(1,UCSF_cen.shape[0]-k + 1):
        df_CA_test = df_CA.copy(deep=True)
        # Calculate the Imputed Deaths
        for county in range(len(df_CA_test)):
            # do we need to remove columns and truncate counts??
            # truncate list counts: deaths, cases, neighbor_deaths, neighbor_cases
            for col in ['deaths', 'cases', 'neighbor_deaths', 'neighbor_cases']:
                # adding 1 to predict t
                df_CA_test[col].iloc[county] = df_CA_test[col].iloc[county][:shifted_date + i]
        tmp = fit_and_predict_ensemble(df_CA_test, target_day=np.array([k]))
        sf_dop = tmp[tmp["CountyName"] == "San Francisco"]
        imputed_deaths.append(np.array(sf_dop["predicted_deaths_ensemble_" + str(k)])[0][0])
    imputed_deaths = np.array(imputed_deaths)
    all_imputed.append(imputed_deaths)
# For each horizon in ks, backtest the shared model using the matching
# imputed-death series (all_imputed[j]) and start day, collecting one
# prediction series per horizon into all_preds.
ks=[1, 3, 5, 7]
startdays = [26, 24, 22, 20]
all_preds = []
for j in range(4):
    preds = []
    for i in range(startdays[j],UCSF_cen.shape[0]-ks[j] + 1):
        # Hospitalizations and imputed deaths observed up to cutoff i.
        tmp = UCSF_cen[:i] # day t-1
        deaths = all_imputed[j][:i] # day t
        assert len(tmp) == len(deaths)
        d = {'Name':['UCSF'],'hospitalizations':[tmp], "imputed_deaths": [deaths]}
        df_shared_UCSF = pd.DataFrame(data = d)
        # Feature Transforms: model on log(1 + x) scale.
        feat_transforms = {}
        feat_transforms["hospitalizations"] = [lambda x: np.log(x+1)]
        feat_transforms["imputed_deaths"] = [lambda x: np.log(x + 1)]
        # Auxiliary Time Variables
        aux_vars = ["imputed_deaths"]
        shared_model = SharedModel(df_shared_UCSF,mode = "predict_future",
                                   outcome = "hospitalizations",
                                   demographic_variables=[],
                                   auxiliary_time_features=aux_vars,
                                   # Fixed: was np.array([k]) — k is a leftover
                                   # from the previous `for k in [1, 3, 5, 7]`
                                   # loop (always 7 here), not this horizon.
                                   target_days=np.array([ks[j]]),
                                   feat_transforms=feat_transforms,
                                   time_series_default_values = defaultdict(lambda: 0),
                                   scale=False)
        shared_model.create_dataset()
        shared_model.fit_model()
        shared_model.predict()
        preds.append(shared_model.predictions[0][0])
    all_preds.append(np.array(preds))
len(all_preds[0]) , len(all_preds[1]) , len(all_preds[2]) , len(all_preds[3])
# 2x2 grid of truth vs. imputed-deaths model predictions, one panel per horizon,
# followed by per-horizon MAE / log-MAE printouts.
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
ks=[1, 3, 5, 7]
axes = [axs[0, 0], axs[0, 1], axs[1, 0], axs[1, 1]]
plotnames=["1 day ahead", "3 days ahead", "5 days ahead", "7 days ahead"]
for i in range(4):
    axes[i].plot(UCSF_cen[startdays[i]+ks[i]-1:],'r',label = "True")
    axes[i].plot(all_preds[i],'b',label= "Preds")
    axes[i].set_title(plotnames[i])
    axes[i].set_ylim([0, 60])
# NOTE(review): k is 7 here (leftover from the earlier `for k in [1, 3, 5, 7]`
# loop), so this legend block never runs — was `if i == 0` or no guard intended?
if k == 1:
    from matplotlib.font_manager import FontProperties
    fontP = FontProperties()
    fontP.set_size('small')
    axs[0, 0].legend(prop=fontP)
for ax in axs.flat:
    ax.set(xlabel='# days', ylabel='# hospitalizations')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()
fig.suptitle(str(k)+"-day ahead prediction for shared exponential predictor")
plt.savefig("UCSF_Hospitalizations_plots/imputed_"+str(k)+".jpg", dpi=150)
# Per-horizon error metrics.
for i in range(4):
    print(ks[i])
    print("MAE: "+str(mean_absolute_error(all_preds[i], UCSF_cen[startdays[i]+ks[i]-1:])))
    print("LMAE: "+str(mean_absolute_error(np.log(all_preds[i]),
        np.log(UCSF_cen[startdays[i]+ks[i]-1:]))))
# Recompute the single-horizon imputed SF death series for the current k
# (same procedure as the earlier imputation cell).
# Calculate df_CA
df_CA = df_county[df_county['StateName'].isin(['CA', "WA"])]
# Compute the Imputed Deaths
# lin_impu_future_predictions = []
# exp_impu_future_predictions = []
# shared_impu_exp_future_predictions = []
## Censoring the UCSF_data
UCSF_cen = UCSF_data[cen:]
imputed_deaths = []
## Q: Should beginning of the range be 0 or the start_day?
## Q: Should we include this k?
## A: I hate time series...
for i in range(1,UCSF_cen.shape[0]-k + 1):
    df_CA_test = df_CA.copy(deep=True)
    # Calculate the Imputed Deaths
    for county in range(len(df_CA_test)):
        # do we need to remove columns and truncate counts??
        # truncate list counts: deaths, cases, neighbor_deaths, neighbor_cases
        for col in ['deaths', 'cases', 'neighbor_deaths', 'neighbor_cases']:
            # adding 1 to predict t
            df_CA_test[col].iloc[county] = df_CA_test[col].iloc[county][:shifted_date + i]
    tmp = fit_and_predict_ensemble(df_CA_test, target_day=np.array([k]))
    sf_dop = tmp[tmp["CountyName"] == "San Francisco"]
    imputed_deaths.append(np.array(sf_dop["predicted_deaths_ensemble_" + str(k)])[0][0])
imputed_deaths = np.array(imputed_deaths)
# Should be a difference of k
print(len(imputed_deaths), UCSF_cen.shape)
# Backtest the shared model with imputed deaths as an auxiliary feature and
# compare it against the plain shared-exponential backtest.
preds = []
for i in range(start_day,UCSF_cen.shape[0]-k + 1):
    # One-row frame: hospitalizations up to cutoff i plus the imputed SF
    # death predictions over the same window.
    tmp = UCSF_cen[:i] # day t-1
    deaths = imputed_deaths[:i] # day t
    assert len(tmp) == len(deaths)
    d = {'Name':['UCSF'],'hospitalizations':[tmp], "imputed_deaths": [deaths]}
    df_shared_UCSF = pd.DataFrame(data = d)
    # Feature Transforms: model on log(1 + x) scale.
    feat_transforms = {}
    feat_transforms["hospitalizations"] = [lambda x: np.log(x+1)]
    feat_transforms["imputed_deaths"] = [lambda x: np.log(x + 1)]
    # Auxiliary Time Variables
    aux_vars = ["imputed_deaths"]
    shared_model = SharedModel(df_shared_UCSF,mode = "predict_future",
                               outcome = "hospitalizations",
                               demographic_variables=[],
                               auxiliary_time_features=aux_vars,
                               target_days=np.array([k]),
                               feat_transforms=feat_transforms,
                               time_series_default_values = defaultdict(lambda: 0),
                               scale=False)
    shared_model.create_dataset()
    shared_model.fit_model()
    shared_model.predict()
    preds.append(shared_model.predictions[0][0])
imputed_mae = np.mean(abs(np.array(preds) - UCSF_cen[start_day+k-1:]))
print("Imputed MAE: "+str(imputed_mae))
imputed_lmae = np.mean(abs(np.log(np.array(preds)) - np.log(UCSF_cen[start_day+k-1:])))
# Fixed: previously printed sharexp_lmae (the plain shared-exp score) here,
# so the imputed model's log MAE was never reported.
print("Imputed Exp log MAE: "+str(imputed_lmae))
# Imputed-deaths model vs. truth...
plt.plot(UCSF_cen[start_day+k-1:],'r',label = "UCSF hospitalisation data")
plt.plot(np.array(preds),'b',label= "Shared Exponential w/ Imputed Deaths")
# plt.plot(shared_exp_future_predictions[cen:],'orange',label= "future shared exponential predictions")
plt.legend()
plt.title(str(k)+"-day ahead prediction for shared exponential model w/ Imputed Deaths ")
# ...and the plain shared-exponential model for comparison.
plt.plot(UCSF_cen[start_day+k-1:],'r',label = "UCSF hospitalisation data")
plt.plot(shared_exp_future_predictions[cen:],'orange',label= "future shared exponential predictions")
plt.title(str(k)+"-day ahead prediction for shared exponential model without Imputed Deaths ")
plt.legend()
```
## Ensemble
```
# Rolling ensemble (shared exponential + linear) backtest; same as the CLEP
# cell earlier but without the error metrics.
ensemble = []
for i in range(start_day,UCSF_data.shape[0]-k+1):
    tmp = UCSF_data[:i]
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    ensemble_future_prediction = fit_and_predict_ensemble(df_shared_UCSF,target_day = np.array([k]),outcome = 'hospitalizations',
        methods = [shared_exponential,linear],mode = 'predict_future', verbose = False)['predicted_hospitalizations_ensemble_'+str(k)].values[0][0]
    ensemble.append(ensemble_future_prediction)
```
## Cumulative Sum
```
# Backtest the three baselines on the cumulative sum of the series instead of
# the raw daily counts.
# This is a cumulative sum
UCSF_transf = np.cumsum(UCSF_data)
lin_cum_future_predictions = []
exp_cum_future_predictions = []
shared_cum_exp_future_predictions = []
for i in range(start_day,UCSF_transf.shape[0]-k + 1):
    tmp = UCSF_transf[:i] # Should there be an i+1 for predict_future? No
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    shared_cum_future_pred = fit_and_predict_shared_exponential(df_shared_UCSF,mode = "predict_future",outcome = "hospitalizations",demographic_vars=[],target_day=np.array([k]), verbose=False)
    # Fixed: previously appended shared_future_pred — a stale variable left over
    # from the non-cumulative backtest — so the cumulative shared-exp series
    # just repeated that old value every iteration.
    shared_cum_exp_future_predictions.append(shared_cum_future_pred[0][0])
    # linear_fit / exponential_fit take a (1, T) array, hence the reshape.
    tmp = tmp.reshape(1,tmp.shape[0])
    lin_cum_pred_future = linear_fit(tmp,'predict_future',target_day=np.array([k]))
    exp_cum_pred_future = exponential_fit(tmp,'predict_future',target_day=np.array([k]))
    lin_cum_future_predictions.append(lin_cum_pred_future[0][0])
    exp_cum_future_predictions.append(exp_cum_pred_future[0][0])
# Ensemble backtest on the cumulative series, then first-difference the
# predictions back to daily values and score them.
cum_ensemble = []
for i in range(start_day,UCSF_data.shape[0]-k+1):
    tmp = UCSF_transf[:i]
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    ensemble_future_prediction = fit_and_predict_ensemble(df_shared_UCSF,target_day = np.array([k]),outcome = 'hospitalizations',
        methods = [shared_exponential,linear],mode = 'predict_future', verbose = False)['predicted_hospitalizations_ensemble_'+str(k)].values[0][0]
    cum_ensemble.append(ensemble_future_prediction)
plt.plot(UCSF_transf[start_day+k-1:],'r',label = "UCSF_hospitalisation_data")
plt.plot(cum_ensemble,'orange',label= "Exp Predictor for Cumulative sum")
plt.legend()
plt.title(str(k)+"-day ahead prediction for exp predictor for cumulative sum")
#x = np.array(exp_cum_future_predictions)
x = np.array(cum_ensemble)
x[1:] -= x[:-1].copy()  # first-difference to undo the cumulative sum
plt.plot(UCSF_data[start_day+k:],'r',label = "UCSF hospitalisation data")
plt.plot(x[1:],'orange',label= "Exp predictior based on cumulative sum")
plt.legend()
plt.title(str(k)+"-day ahead prediction for exp predictor based on cumulative sum")
# NOTE(review): the first print below is a plain MAE despite the "log MAE" label.
cumexp_mae = mean_absolute_error(x, UCSF_data[start_day+k-1:])
print("Cumulative Shared Exp + log MAE: "+str(cumexp_mae))
cumexp_lmae = mean_absolute_error(np.log(x), np.log(UCSF_data[start_day+k-1:]))
print("Cumulative Shared Exp log MAE: "+str(cumexp_lmae))
# Compare the linear predictor run on the cumulative series (then differenced
# back to daily values) against the linear predictor run directly on the
# daily series.
plt.plot(UCSF_transf[start_day+k-1:],'r',label = "True")
plt.plot(lin_cum_future_predictions,'orange',label= "Linear preds cum sum")
plt.legend()
plt.title(str(k)+"-day ahead prediction for linear predictor for cumulative sum")
plt.savefig("UCSF_Hospitalizations_plots/linear_fullcum_"+str(k)+".jpg", dpi=150)
# Undo the cumulative sum by first-differencing the predictions (two ways:
# in-place on the array x, and explicitly into lin_preds_undone).
x = np.array(lin_cum_future_predictions)
x[1:] -= x[:-1].copy()
lin_cum_future_predictions
lin_preds_undone = [lin_cum_future_predictions[0]]
for i in range(1, len(lin_cum_future_predictions)):
    lin_preds_undone.append(lin_cum_future_predictions[i]-lin_cum_future_predictions[i-1])
lin_preds_undone
# Sanity checks on series lengths.
len(UCSF_data[start_day+k:])
len(x[1:])
len(lin_future_predictions)
len(x)  # fixed: was `lenx`, a NameError
plt.plot(UCSF_data[start_day+k-1:],'r',label = "True")
plt.plot(lin_preds_undone,'orange',label= "Linear preds cum sum")
plt.plot(lin_future_predictions,'b',label= "Linear preds")
plt.legend()
plt.title(str(k)+"-day ahead prediction for linear predictor based on cumulative sum")
plt.savefig("UCSF_Hospitalizations_plots/linear_cum_"+str(k)+".jpg", dpi=150)
plt.plot(UCSF_data[start_day+k-1+1:],'r',label = "True")
plt.plot(lin_preds_undone[1:],'orange',label= "Linear preds cum sum")
plt.plot(lin_future_predictions[1:],'b',label= "Linear preds")
plt.legend()
plt.title(str(k)+"-day ahead prediction for linear predictor based on cumulative sum")
plt.savefig("UCSF_Hospitalizations_plots/linear_cumcut_"+str(k)+".jpg", dpi=150)
# Error metrics. Labels fixed: the first of each pair is a plain MAE — they
# all previously claimed "log MAE".
cumlin_mae = np.mean(abs(x - UCSF_data[start_day+k-1:]))
print("Shared Lin MAE: "+str(cumlin_mae))
cumlin_lmae = np.mean(abs(np.log(x) - np.log(UCSF_data[start_day+k-1:])))
print("Shared Lin log MAE: "+str(cumlin_lmae))
cumlin_mae = np.mean(abs(lin_future_predictions[1:] - UCSF_data[start_day+k-1+1:]))
print("Shared Lin MAE: "+str(cumlin_mae))
cumlin_lmae = np.mean(abs(np.log(lin_future_predictions[1:]) - np.log(UCSF_data[start_day+k-1+1:])))
print("Shared Lin log MAE: "+str(cumlin_lmae))
cumlin_mae = np.mean(abs(x[1:] - UCSF_data[start_day+k-1+1:]))
print("Shared Lin MAE: "+str(cumlin_mae))
cumlin_lmae = np.mean(abs(np.log(x[1:]) - np.log(UCSF_data[start_day+k-1+1:])))
print("Shared Lin log MAE: "+str(cumlin_lmae))
# Side-by-side: differenced cumulative linear predictions vs. direct linear
# predictions.
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle('Horizontally stacked subplots')
ax1.plot(UCSF_data[start_day+k:],'r',label = "UCSF hospitalisation data")
ax1.plot(x[1:],'b',label= "Linear predictior based on cumulative sum")
# plt.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
# plt.plot(lin_future_predictions,'b',label= "Linear predictions")
# plt.legend()
# plt.xlabel("# days since 3/28/20")
# plt.ylabel("# people hospitalized at UCSF")
# plt.title(str(k)+"-day ahead prediction for linear predictor")
# plt.savefig("UCSF_Hospitalizations_plots/linear_"+str(k)+".jpg")
ax2.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalization data")
ax2.plot(lin_future_predictions,'b',label= "Linear predictions")
# Overlay on the current figure; note the [1:] shifts predictions by one day.
plt.plot(UCSF_data[start_day+k-1:],'r',label = "UCSF hospitalisation data")
plt.plot(lin_future_predictions[1:],'b',label= "Linear predictior")
plt.legend()
plt.title(str(k)+"-day ahead prediction for linear predictor")
```
## Ensemble Modeling
```
# Rolling ensemble backtest (separate exponential + linear).
ensemble = []
for i in range(start_day,UCSF_data.shape[0]-k+1):
    tmp = UCSF_data[:i]
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    # Fixed: the prediction-column suffix must match target_day (k); it was
    # hard-coded to '_1', which only works when k == 1. Earlier cells use
    # the '_'+str(k) form.
    ensemble_prediction = fit_and_predict_ensemble(df_shared_UCSF,target_day = np.array([k]),outcome = 'hospitalizations',
        methods = [exponential,linear],mode = 'predict_future', verbose = False)['predicted_hospitalizations_ensemble_'+str(k)].values[0][0]
    ensemble.append(ensemble_prediction)
print(ensemble)
#ensemble_prediction.head()
ensemble_prediction
```
## MEPI
```
# MEPI scratch cell: build a small frame from the UCSF series and rerun the
# rolling ensemble backtest.
cut_data = UCSF_data
d = {'HospitalName':['UCSF'],'deaths':[cut_data], 'cases':[cut_data]}
tmp_df = pd.DataFrame(data = d)
ensemble_prediction
ensemble = []
for i in range(start_day,UCSF_data.shape[0]-k+1):
    tmp = UCSF_data[:i]
    d = {'Name':['UCSF'],'hospitalizations':[tmp]}
    df_shared_UCSF = pd.DataFrame(data = d)
    # Fixed: the prediction-column suffix must match target_day (k); it was
    # hard-coded to '_3', which only works when k == 3.
    ensemble_prediction = fit_and_predict_ensemble(df_shared_UCSF,target_day = np.array([k]),outcome = 'hospitalizations',
        methods = [exponential,linear],mode = 'predict_future', verbose = False)['predicted_hospitalizations_ensemble_'+str(k)].values[0][0]
    ensemble.append(ensemble_prediction)
print(ensemble)
#ensemble_prediction.head()
```
| github_jupyter |
```
import os
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Root directory of the per-person image dataset.
folder = "dataset/"
# OpenCV's pre-trained Haar cascade files (must exist in the working directory).
haarcascade_frontalface_default_path = "haarcascade_frontalface_default.xml"
haarcascade_eye_path = "haarcascade_eye.xml"
# Viola-Jones detectors for faces and eyes.
face_cascade = cv2.CascadeClassifier(haarcascade_frontalface_default_path)
eye_cascade = cv2.CascadeClassifier(haarcascade_eye_path)
```
# Créer un nouveau dataset
Plan :
- Créer le répertoire si la personne n'existe pas dans les datasets. Sinon, compter le nombre de fichiers pour ne pas écraser de photos déjà présentes.
- Récupérer la vidéo de la caméra.
- Sauvegarder une image sur 5 pour éviter d'avoir des images trop similaires.
```
# Capture face crops from the webcam into dataset/<person_name>/, saving one
# frame out of every five until the user presses Esc.
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")
person_name = "test"
path = folder + "/" + person_name
if not os.path.exists(path):
    os.makedirs(path)
    image_label = 0
else:
    # Resume numbering after the existing .jpg files so nothing is overwritten.
    image_label = len(
        [
            f for f in os.listdir(path)
            if f.endswith('.jpg')
            and os.path.isfile(os.path.join(path, f))
        ]
    ) + 1
ret = True
current_iteration = 0
while ret == True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect face in the picture in grayscale
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for x, y, w, h in faces:
        roi_color = img[y:y+h, x:x+w]
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Save 1 frame for 5 taken. This will constitute the dataset
        if current_iteration%5 == 0:
            export_image = cv2.resize(roi_color, (250, 250))
            filename = "{}/{}/{}_{}.jpg".format(
                folder, person_name, person_name, image_label
            )
            cv2.imwrite(filename, export_image)
            image_label += 1
        current_iteration += 1
    cv2.imshow('Save new dataset', img)
    if cv2.waitKey(1) == 27:  # Esc key exits the capture loop
        ret = False
        break
cap.release()
cv2.destroyAllWindows()
```
# Entraînement
Plan :
- Charger le dataset
- Entraîner le modèle d'OpenCv : face.LBPHFaceRecognizer
- Récupérer la vidéo de la webcam
- Faire coïncider les noms des personnes et des visages.
```
# Loading datasets: build parallel lists of grayscale face images and integer
# labels, where each subdirectory of `folder` is one person/label.
faces = []
labels = []
labels_string = []
current_label = 0
# Load every image in the dataset folder. For each picture, its parent
# directory name is the person's label.
# NOTE(review): labels_string gets one entry PER PICTURE, while labels holds
# integer ids — the recognition cell later indexes labels_string by label id,
# which only maps correctly if each person has exactly one picture; a
# {label: name} dict would be safer. Confirm intent.
for element in os.listdir(folder):
    path = folder + element + "/"
    if os.path.isdir(path):
        for picture in os.listdir(path):
            labels.append(current_label)
            faces.append(
                cv2.imread(path + picture, cv2.IMREAD_GRAYSCALE)
            )
            labels_string.append(element)
        current_label += 1
```
### Entraînement du modèle d'OpenCv
```
# Train OpenCV's Local Binary Patterns Histograms (LBPH) recognizer on the
# loaded faces; requires the opencv-contrib `cv2.face` module.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(labels))
```
### Récupérer la vidéo et mettre des noms sur des visages
```
# Live recognition: detect faces in the webcam feed, predict each identity
# with the trained LBPH model, and draw the name above the face rectangle.
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")
ret = True
while ret == True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Face detection
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for x, y, w, h in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        # Shrunk by 4px on each side so the eye boxes stay inside the face rectangle.
        roi_color = img[y+4:y+h-4, x+4:x+w-4]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for ex, ey, ew, eh in eyes:
            cv2.rectangle(
                roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2
            )
        # Prediction: predict() returns (label_id, confidence).
        label = face_recognizer.predict(roi_gray)
        # The label is currently a number. Here's the match between numbers and string names
        # NOTE(review): labels_string is indexed per picture, not per label id;
        # with more than one picture per person this maps ids to the wrong
        # names — consider a {label_id: name} dict built during loading.
        label = labels_string[label[0]]
        # Put the label on the picture. Origin is the top left corner of the face rectangle.
        img = cv2.putText(img, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0))
    cv2.imshow('Affichage Resultats', img)
    c = cv2.waitKey(1)
    if c == 27:  # Esc key exits
        ret = False
        break
cap.release()
cv2.destroyAllWindows()
```
| github_jupyter |
# <center>Big Data for Engineers – Exercises</center>
## <center>Spring 2019 – Week 9 – ETH Zurich</center>
## <center>Spark + MongoDB</center>
# 1. Spark DataFrames + SQL
## 1.1 Setup the Spark cluster on Azure
### Create a cluster
- Sign into the azure portal (portal.azure.com).
- Search for "HDInsight clusters" using the search box at the top.
- Click on "+ Add".
- Give the cluster a unique name.
- In the "Select Cluster Type" choose **Spark** and a standard Cluster Tier (Finish with pressing "select").
- In step 2, the container name will be filled in for you automatically. If you want to do the exercise sheet in several sittings, change it to something you can remember or write it down.
- Set up a Spark cluster with default configuration. It should cost something around 3.68 sFR/h.
- Wait for 20 mins so that your cluster is ready.
**Important**
Remember to **delete** the cluster once you are done. If you want to stop doing the exercises at any point, delete it and recreate it using the same container name as you used the first time, so that the resources are still there.

### Access your cluster
Make sure you can access your cluster (the NameNode) via SSH:
```
$ ssh <ssh_user_name>@<cluster_name>-ssh.azurehdinsight.net
```
If you are using Linux or MacOSX, you can use your standard terminal.
If you are using Windows you can use:
- Putty SSH Client and PSCP tool (get them at [here](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html)).
- This Notebook server terminal (Click on the Jupyter logo and the goto New -> Terminal).
- Azure Cloud Terminal (see the HBase exercise sheet for details)
The cluster has its own Jupyter server. We will use it. You can access it through the following link:
```
https://<cluster_name>.azurehdinsight.net/jupyter
```
You can access cluster's YARN in your browser
```
https://<cluster_name>.azurehdinsight.net/yarnui/hn/cluster
```
The Spark UI can be accessed via Azure Portal, see [Spark job debugging](https://docs.microsoft.com/en-us/azure/hdinsight/hdinsight-apache-spark-job-debugging)
# <span style="color:red">You need to upload this notebook to your cluster's Jupyter in order to execute Python code blocks.</span>
To do this, just open the Jupyter through the link given above and use the "Upload" button.

### 1.2. The Great Language Game
This week you will be using again the [language confusion dataset](http://lars.yencken.org/datasets/languagegame/). You will write queries with Spark DataFrames and SQL. You will have to submit the results of this exercise to Moodle to obtain the weekly bonus. You will need three things:
- The query you wrote
- Something related to its output (which you will be graded on)
- The time it took you to write it
- The time it took you to run it
As you might have observed in the sample queries above, the time a job took to run is displayed on the rightmost column of its output. If it consists of several stages, however, you will need the sum of them. The easiest thing is if you just take the execution time of the whole query:

Of course, you will not be evaluated on the time it took you to write the queries (nor on the time it took them to run), but this is useful to us in order to measure the increase in performance when using Sparksoniq. There is a cell that outputs the time you started working before every query. Use this if you find it useful.
***For this exercise, we strongly suggest that you use the Azure cluster as described above.***
Log in to your cluster using SSH as explained above and run the following commands:
```
wget http://data.greatlanguagegame.com.s3.amazonaws.com/confusion-2014-03-02.tbz2
tar -jxvf confusion-2014-03-02.tbz2 -C /tmp
hdfs dfs -copyFromLocal /tmp/confusion-2014-03-02/confusion-2014-03-02.json /confusion.json
```
This downloads the archive file to the cluster, decompresses it, and uploads it to HDFS. Now, create an RDD from the file containing the entries:
```
data = sc.textFile('wasb:///confusion.json')
```
Last week you loaded the json data with the following snippet:
```
import json
entries = data.map(json.loads)
type(entries)
```
This week you will use DataFrames:
```
entries_df = spark.read.json(data).cache()
type(entries_df)
```
You can check the schema by executing the following code:
```
entries_df.printSchema()
```
You can place the data to a *temporary* table with the following code:
```
entries_df.registerTempTable("entries")
```
Now, you can use normal SQL, with sql magic (%%sql), to perform queries on the table **entries**. For example:
```
%%sql
SELECT *
FROM entries
WHERE country == "CH"
```
Good! Let's get to work. A few last things:
- This week, you should not have issues with the output being too long, since sql magic limits its size automatically.
- Remember to delete the cluster if you want to stop working! You can recreate it using the same container name and your resources will still be there.
And now to the actual queries:
1\. Find all games such that the guessed language is correct (=target), and such that this language is Russian.
```
from datetime import datetime
# Started working:
print(datetime.now().time())
%%sql
```
2\. List all chosen answers to games where the guessed language is correct (=target).
```
# Started working:
print(datetime.now().time())
%%sql
```
3\. Find all distinct values of languages (the target field).
```
# Started working:
print(datetime.now().time())
%%sql
```
4\. Return the top three games where the guessed language is correct (=target) ordered by language (ascending), then country (ascending), then date (ascending).
```
# Started working:
print(datetime.now().time())
%%sql
```
5\. Aggregate all games by country and target language, counting the number of guesses for each pair (country, target).
```
# Started working:
print(datetime.now().time())
%%sql
```
6\. Find the overall percentage of correct guesses when the first answer (amongst the array of possible answers) was the correct one.
```
# Started working:
print(datetime.now().time())
%%sql
```
7\. Sort the languages by increasing overall percentage of correct guesses.
```
# Started working:
print(datetime.now().time())
%%sql
```
8\. Group the games by the index of the correct answer in the choices array and output all counts.
The following code snippet will create a user-defined SQL function, which you can use in your SQL queries.
You may call it in your queries as `array_position(x, y)`, where `x` is an array (for example an entry for the column `choices`) and `y` is some data that the position/index of which you want to find in the array.
```
spark.udf.register("array_position", lambda x,y: x.index(y))
# Started working:
print(datetime.now().time())
%%sql
```
9\. What is the language of the sample that has the highest successful guess rate?
```
# Started working:
print(datetime.now().time())
%%sql
```
10\. Return all games played on the latest day.
```
# Started working:
print(datetime.now().time())
%%sql
```
## 2. Document stores
A record in a document store is a *document*. Document encoding schemes include XML, YAML, JSON, and BSON, as well as binary forms like PDF and Microsoft Office documents (MS Word, Excel, and so on). MongoDB documents are similar to JSON objects. Documents are composed of field-value pairs and have the following structure:

The values of fields may include other documents, arrays, and arrays of documents. Data in MongoDB has a flexible schema in the same collection. All documents do not need to have the same set of fields or structure, and common fields in a collection's documents may hold different types of data.
### 2.1 General Questions
1. What are advantages of document stores over relational databases?
2. Can the data in document stores be normalized?
3. How does denormalization affect performance?
### 2.2 True/False Questions
Say if the following statements are *true* or *false*.
1. Document stores expose only a key-value interface.
2. Different relationships between data can be represented by references and embedded documents.
3. MongoDB does not support schema validation.
4. MongoDB stores documents in the XML format.
5. In document stores, you must determine and declare a table's schema before inserting data.
6. MongoDB performance degrades when the number of documents increases.
7. Document stores are column stores with flexible schema.
8. There are no joins in MongoDB.
## 3. MongoDB
### 3.1 Setup MongoDB
- Navigate to https://www.mongodb.com/download-center and register:

- Create a free-tier cluster. You are free to use any cloud provider and region of your preference. For example, you could choose AWS and Frankfurt:

- Create a database user:

- Whitelist your IP address (or all IP addresses if using the Azure notebook service):

- Create a `test` database with a `restaurants` collection:


- Install pymongo and dnspython:
```
!pip install pymongo[tls]
!pip install dnspython
```
- To connect to the database, copy the connection string for your version of Python by navigating on the MongoDB website, as shown in the following pictures:




```
import pymongo
import dns
from pprint import pprint
import urllib
import json
import dateutil
from datetime import datetime, timezone, timedelta
```
If you get an error when importing one of the modules above, then try:
```!pip install <module_name>```
```
client = pymongo.MongoClient("mongodb+srv://<username>:<password>@<cluster_address>/test?retryWrites=true")
db = client.test
```
- Import the `restaurants` dataset:
```
file = urllib.request.urlopen("https://raw.githubusercontent.com/mongodb/docs-assets/primer-dataset/primer-dataset.json")
file_data = []
for line in file:
record = json.loads(line)
grades = record['grades']
new_grades = []
for g in grades:
new_g = {}
for k, v in g.items():
if k == 'date':
new_g['date'] = datetime(1970, 1, 1, tzinfo=timezone.utc) + timedelta(milliseconds=v['$date'])
else:
new_g[k] = v
new_grades.append(new_g)
record['grades'] = new_grades
file_data.append(record)
db.restaurants.insert_many(file_data)
```
- Try to insert a document into the ```restaurants``` collection. In addition, you can see the structure of the documents in the collection:
```
db.restaurants.insert_one(
{
"address" : {
"street" : "2 Avenue",
"zipcode" : "10075",
"building" : "1480",
"coord" : [ -73.9557413, 40.7720266 ]
},
"borough" : "Manhattan",
"cuisine" : "Italian",
"grades" : [
{
"date" : dateutil.parser.parse("2014-10-01T00:00:00Z"),
"grade" : "A",
"score" : 11
},
{
"date" : dateutil.parser.parse("2014-01-16T00:00:00Z"),
"grade" : "A",
"score" : 17
}
],
"name" : "Vella",
"restaurant_id" : "41704620"
}
)
```
Query all documents in a collection:
```json
db.restaurants.find()
```
Query one document in a collection:
```json
db.restaurants.find_one()
```
To format the result, you can use ```pprint```, as in the following:
```json
for doc in db.restaurants.find().limit(3):
pprint(doc)
```
### Query Documents
For the ```db.collection.find()``` method, you can specify the following optional fields:
- a query filter to specify which documents to return,
- a query projection that specifies which fields from the matching documents to return (the projection limits the amount of data that MongoDB returns to the client over the network),
- optionally, a cursor modifier to impose limits, skips, and sort orders.
### 3.4 Questions
Write queries in MongoDB that return the following:
1. All restaurants in borough (a town) "Brooklyn" and cuisine (a style of cooking) "Hamburgers".
2. The number of restaurants in the borough "Brooklyn" and cuisine "Hamburgers".
3. All restaurants with zipcode 11225.
4. Names of restaurants with zipcode 11225 that have at least one grade "C".
5. Names of restaurants with zipcode 11225 that have as first grade "C" and as second grade "A".
6. Names and streets of restaurants that don't have an "A" grade.
7. All restaurants with a grade C and a score greater than 50.
8. All restaurants with a grade C or a score greater than 50.
9. All restaurants that have only A grades.
You can read more about MongoDB here:
https://docs.mongodb.com/getting-started/shell/query/
## 4. Indexing in MongoDB
Indexes support the efficient resolution of queries. Without indexes, MongoDB must scan every document of a collection to select those documents that match the query statement. Scans can be highly inefficient and require MongoDB to process a large volume of data.
Indexes are special data structures that store a small portion of the data set in an easy-to-traverse form. The index stores the value of a specific field or set of fields, ordered by the value of the field as specified in the index.
MongoDB supports indexes that contain either a single field or multiple fields depending on the operations that this index type supports.
By default, MongoDB creates the ```_id``` index, which is an ascending unique index on the ```_id``` field, for all collections when the collection is created. You cannot remove the index on the ```_id``` field.
### Managing indexes in MongoDB
An ```explain()``` operator provides information on the query plan. It returns a document that describes the process and indexes used to return the query. This may provide useful insight when attempting to optimize a query.
```json
db.restaurants.find({"borough" : "Brooklyn"}).explain()
```
You can create an index by calling the ```createIndex()``` method.
```json
db.restaurants.create_index([("borough", 1)])
```
Now, you retrieve a new query plan for indexed data.
```json
db.restaurants.find({"borough" : "Brooklyn"}).explain()
```
The value of the field in the index specification describes the kind of index for that field. For example, a value of 1 specifies an index that orders items in ascending order. A value of -1 specifies an index that orders items in descending order.
To remove all indexes, you can use ```db.collection.drop_indexes()```. To remove a specific index you can use ```db.collection.drop_index()```, such as ```db.restaurants.drop_index([("borough", 1)])```.
### 4.1 Questions
**Please answer questions 1 and 2 in Moodle**
1) Which queries will use the following index:
```json
db.restaurants.create_index([("borough", 1)])
```
A. ```db.restaurants.find({"addresses.city" : "Boston"})```
B. ```db.restaurants.find({}, {"borough" : 1})```
C. ```db.restaurants.find().sort([("borough", 1)])```
D. ```db.restaurants.find({"cuisine" : "Italian" }, {"borough" : 1})```
2) Which queries will use the following index:
```json
db.restaurants.create_index([("address", -1)])
```
A. ```db.restaurants.find({"address.zipcode" : "11225"})```
B. ```db.restaurants.find({"addresses.city" : "Boston"})```
C. ```db.restaurants.find({"addresses.city" : "Boston"}, {"address" : 1 })```
D. ```db.restaurants.find({"address" : 1 })```
3) Write a command for creating an index on the "zipcode" field.
4) Write an index to speed up the following query:
```json
db.restaurants.find({"grades.grade" : { "$ne" : "A"}}, {"name" : 1 , "address.street": 1})
```
5) Write an index to speed up the following query:
```json
db.restaurants.find({"grades.score" : {"$gt" : 50}, "grades.grade" : "C"})
```
| github_jupyter |
# L05 - Bonus Notebook: Working with Heterogenous Datasets
- Instructor: Dalcimar Casanova (dalcimar@gmail.com)
- Course website: https://www.dalcimar.com/disciplinas/aprendizado-de-maquina
- Bibliography: based on lectures of Dr. Sebastian Raschka
- Course website: http://pages.stat.wisc.edu/~sraschka/teaching/
```
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
```
- Suppose you have a dataset that has both numerical and categorical features as follows:
```
from google.colab import drive
drive.mount('/content/drive')
df = pd.read_csv('/content/drive/My Drive/Disciplinas/Aprendizado de Máquina/Public/L05_preprocessing-and-sklearn/code/data/iris_mod.csv', index_col='Id')
df.head()
```
- As usual, we first transform the class labels into an integer format:
```
X = df.drop('Species', axis=1)
y = df['Species']
label_dict = {'Iris-setosa': 0,
'Iris-versicolor': 1,
'Iris-virginica': 2}
y = y.map(label_dict)
```
- Next, we are going to set up a `Pipeline` that performs certain preprocessing steps only on the numerical features:
```
numeric_features = ['SepalLength[cm]', 'SepalWidth[cm]', 'PetalLength[cm]', 'PetalWidth[cm]']
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler()),
('feature_extraction', PCA(n_components=2))])
```
- Above, we weren't interested in performing these preprocessing steps on the categorical feature(s); instead, we apply **different** preprocessing steps to the categorical variable like so:
```
categorical_features = ['Color_IMadeThisUp']
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder())])
```
- Scikit-learn's `ColumnTransformer` now allows us to merge these two separate preprocessing pipelines, which operate on different feature sets in our dataset:
```
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
```
- As a result, we get a 5 dimensional feature array (design matrix) if we apply this preprocessor. What are these 5 columns?
```
temp = preprocessor.fit_transform(X)
temp.shape
temp[:5]
```
- The preprocessor can now also be conveniently used in a Scikit-learn pipeline as shown below:
```
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=0)
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', KNeighborsClassifier(p=3))])
clf.fit(X_train, y_train)
print(f'Test accuracy: {clf.score(X_test, y_test)*100}%')
```
| github_jupyter |
```
import matplotlib.pyplot
import numpy.random
import torch.utils.data
import torchvision
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
```
# Tutorial 1a. Logistic Regression
In the first tutorial, we are going to train a logistic regressor on the MNIST dataset of handwritten digits. Next, we will turn this logistic regressor into a non-linear convolutional network.
```
if torch.cuda.is_available():
DEVICE = "cuda:0"
else:
DEVICE = "cpu"
```
## Loading Datasets
The following code will load the MNIST dataset. Run it and inspect some of the images and their labels to confirm they are correct.
```
train_dataloader = DataLoader(
dataset=MNIST(
root="/tmp/mnist",
train=True,
transform=ToTensor(),
download=True,
),
batch_size=64,
shuffle=True,
)
test_dataloader = DataLoader(
dataset=MNIST(
root="/tmp/mnist",
train=False,
transform=ToTensor(),
download=True,
),
batch_size=64,
shuffle=False,
)
image, target = [*test_dataloader][0]
random_index = numpy.random.randint(0, 64)
image, target = image[random_index, 0], target[random_index]
matplotlib.pyplot.imshow(
image,
cmap="gray",
interpolation="nearest",
)
matplotlib.pyplot.title(f"target = {target}")
matplotlib.pyplot.axis("off")
```
Next, we create a PyTorch dataloader for the MNIST dataset.
Next, implement a logistic regression model in PyTorch. Note that a logistic regressor uses a linear transformation of the input.
## Exercise 1. Logistic Regression Module
```
# class LogisticRegression(Module):
# def __init__(self, input_size: int, num_classes: int):
# super().__init__()
#
# ###########################################################################
# # TODO: Instantiate the layer here. #
# ###########################################################################
#
# def forward(self, x: Tensor) -> Tensor:
# ###########################################################################
# # TODO: Apply the layer to the input. #
# ###########################################################################
# pass
module = LogisticRegression(28 * 28, 10)
module = module.to(device=DEVICE)
###########################################################################
# TODO: Create criterion and optimizer here. #
###########################################################################
criterion =
optimizer =
```
## Training
We will use the following generic training loop for a PyTorch model.
```
# Train the model. If everything is correct, the loss should go below 0.45.
EPOCHS = 5

# Exponential moving average of the loss (smooths the noisy per-batch loss):
ema = None

for epoch in range(EPOCHS):
    for batch_index, (train_images, train_targets) in enumerate(train_dataloader):
        # Flatten each 28x28 image into a 784-vector and move it to the device.
        train_images = train_images.view(-1, 28 * 28).requires_grad_().to(device=DEVICE)
        # BUG FIX: this was previously assigned to `test_targets`, leaving
        # `train_targets` on the CPU; the loss computation below would then
        # raise a device-mismatch error whenever DEVICE is "cuda:0".
        train_targets = train_targets.to(device=DEVICE)

        # Clear gradients w.r.t. parameters
        optimizer.zero_grad()

        # Forward pass to get output/logits
        outputs = module(train_images)

        # Calculate Loss: softmax --> cross entropy loss
        loss = criterion(outputs, train_targets)

        # Getting gradients w.r.t. parameters
        loss.backward()

        # Updates parameters:
        optimizer.step()

        # NOTE: It is important to call .item() on the loss before summing,
        # otherwise the whole autograd graph would be kept alive.
        if ema is None:
            ema = loss.item()
        else:
            ema += (loss.item() - ema) * 0.01

        if batch_index % 500 == 0:
            print(
                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch,
                    batch_index * len(train_images),
                    len(train_dataloader.dataset),
                    100.0 * batch_index / len(train_dataloader),
                    ema,
                ),
            )
```
**Question:** For the model you are currently using, is there any difference between using the model in `train` mode or using it in `eval` mode?
Create an SGD optimizer and use it to train the logistic regressor on the MNIST training data for a few epochs. What loss function do you need to use?
### Embeddings
Visualize the weights of the trained model. What do you see? Why?
```
assert module.y.weight.shape == (10, 28 * 28)
matplotlib.pyplot.imshow(
numpy.transpose(
torchvision.utils.make_grid(
module.y.weight.view(10, 1, 28, 28),
normalize=True,
nrow=5,
),
(1, 2, 0),
),
interpolation="nearest",
)
matplotlib.pyplot.grid(False)
matplotlib.pyplot.gca().axis("off")
```
## Evaluation
Use the following function to measure the test accuracy of your trained model.
```
# Accuracy evaluation: count correct predictions over the whole test set.
# NOTE(review): the model is not switched to eval mode here; for a purely
# linear model this makes no difference (no dropout/batch-norm layers).
correct_predictions = 0
predictions = 0
# Iterate through test dataset
for test_images, test_targets in test_dataloader:
# Flatten images to 784-vectors; gradient tracking is not needed for eval.
test_images = test_images.view(-1, 28 * 28).to(DEVICE)
# Forward pass only to get logits/output
outputs = module(test_images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# size(0) is the batch size, so this accumulates the total sample count.
predictions += test_targets.size(0)
if torch.cuda.is_available():
# Predictions live on the GPU; move both sides to the CPU to compare.
correct_predictions += (predicted.cpu() == test_targets.cpu()).sum()
else:
correct_predictions += (predicted == test_targets).sum()
# Final cell expression: overall test accuracy (fraction correct).
correct_predictions.item() / predictions
```
| github_jupyter |
# Visualizing results
Here, we will show some typical examples of visualizations that are used often to show results in ML studies in materials science.
We will use the open-source [`ML_figures` package](https://github.com/kaaiian/ML_figures) and the example data provided by the package to generate these figures.
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format='retina'
# Import the ML_figures package and the figure-plotting functions
from ML_figures.figures import act_pred
from ML_figures.figures import residual, residual_hist
from ML_figures.figures import loss_curve
from ML_figures.figures import element_prevalence
```
## Predicted vs. actual value plots
These plots, you have already seen before in the previous notebooks.
```
# Read in example act vs. pred data
df_act_pred = pd.read_csv('ML_figures/example_data/act_pred.csv')
y_act, y_pred = df_act_pred.iloc[:, 1], df_act_pred.iloc[:, 2]
act_pred(y_act, y_pred,
reg_line=True,
save_dir='ML_figures/example_figures')
act_pred(y_act, y_pred,
name='example_no_hist',
x_hist=False, y_hist=False,
reg_line=True,
save_dir='ML_figures/example_figures')
```
## Residual error plots
Residual error plots show how far your model's predictions deviate from the actual values.
They are using the same data used in the predicted vs. actual plots; however, instead of plotting predicted vs. actual values, residual error plots plot (predicted - actual) vs. actual values.
This lets you visually analyze your model's prediction error on a straight horizontal line.
Alternatively, you can plot the residual errors on a histogram, and optionally with a kernel density estimation (kde).
```
residual(y_act, y_pred,
save_dir='ML_figures/example_figures')
residual_hist(y_act, y_pred,
save_dir='ML_figures/example_figures')
```
## Loss curves
Loss curves show the loss of a neural network model vs. epoch throughout the training process.
It is typically evaluated using the training and validation dataset at the end of each epoch (or every $n$ epochs, where $n$ is a small number, if evaluating every epoch takes too many resources).
Typically, loss curves plot the model performance (such as $r^2$ score) or loss (such as $\textrm{MAE}$) against epoch.
```
# Read in loss curve data
df_lc = pd.read_csv('ML_figures/example_data/training_progress.csv')
epoch = df_lc['epoch']
train_err, val_err = df_lc['mae_train'], df_lc['mae_val']
loss_curve(epoch, train_err, val_err,
save_dir='ML_figures/example_figures')
```
## Visualizing elemental prevalence
Depending on your dataset, what you are studying, and how the compounds/constituent elements of the compounds in the dataset are distributed, it may be useful to visualize the elemental prevalence in your dataset.
These figures let you visualize the relative amount of certain elements vs. other elements present in your dataset, and can help you in identifying dataset biases, imbalanced datasets, or other issues.
```
# Visualize element prevalence
formula = df_act_pred.iloc[:, 0]
element_prevalence(formula,
save_dir='ML_figures/example_figures',
log_scale=False)
element_prevalence(formula,
save_dir='ML_figures/example_figures',
name='example_log',
log_scale=True)
plt.rcParams.update({'font.size': 12})
element_prevalence(formula,
save_dir='ML_figures/example_figures',
ptable_fig=False,
log_scale=False)
element_prevalence(formula,
save_dir='ML_figures/example_figures',
name='example_log',
ptable_fig=False,
log_scale=True)
```
| github_jupyter |
# Transfer Learning
In this notebook, you will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras.
## Imports
```
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
```
## Parameters
- Define the batch size
- Define the class (category) names
```
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
```
Define some functions that will help you to create some visualizations. (These will be used later)
```
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
# Shows 10 randomly chosen images with the class name predicted for each.
# NOTE(review): relies on module-level `classes`, `np`, and `plt` names.
def display_images(digits, predictions, labels, title):
n = 10
# Pick 10 random positions (sampled with replacement) to display.
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
# Reshape to (10,) so each entry indexes a single class id below.
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
# Label each subplot with the human-readable class name.
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
# Plots the training (blue) and validation (green) curves for one metric.
# NOTE(review): reads the global `history` object returned by model.fit —
# it must exist before this function is called.
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
# Clamp the y-axis so early loss spikes do not dominate the plot.
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
```
## Loading and Preprocessing Data
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
```
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
```
### Visualize Dataset
Use the `display_image` to view some of the images and their class labels.
```
display_images(training_images, training_labels, training_labels, "Training Data" )
# BUG FIX: the second call plots the validation set, so title it accordingly
# (it previously said "Training Data" for both plots).
display_images(validation_images, validation_labels, validation_labels, "Validation Data" )
```
### Preprocess Dataset
Here, you'll perform normalization on images in training and validation set.
- You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
```
def preprocess_image_input(input_images):
    """Cast images to float32 and apply ResNet50's ImageNet preprocessing.

    `preprocess_input` performs the channel-wise normalization that the
    pretrained ResNet50 weights expect, so it must be applied to every
    image before feeding it to the network.
    """
    as_floats = input_images.astype('float32')
    return tf.keras.applications.resnet50.preprocess_input(as_floats)
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
```
## Define the Network
You will be performing transfer learning on **ResNet50** available in Keras.
- You'll load pre-trained **imagenet weights** to the model.
- You'll retain the pretrained convolutional feature-extraction layers of **ResNet50** (loaded with `include_top=False`) and attach your own final classification layers on top.
```
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
# Build ResNet50 with pretrained ImageNet weights and apply it to `inputs`.
# include_top=False drops ResNet's own 1000-class head; the custom
# classification layers are attached separately by `classifier` below.
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
# Collapse the spatial feature map to one feature vector per image.
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
# NOTE(review): Flatten looks redundant right after global pooling
# (the tensor should already be 2-D) — confirm before removing.
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
# 10-way softmax output, one unit per CIFAR-10 class.
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
# Upsample 32x32 CIFAR images by a factor of 7 to the 224x224 input size
# that the pretrained ResNet50 was configured with above.
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
# Sparse categorical cross-entropy matches the integer (non-one-hot)
# CIFAR-10 labels loaded from tf.keras.datasets.
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
```
## Train the model
```
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
```
## Evaluate the Model
Calculate the loss and accuracy metrics using the model's `.evaluate` function.
```
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
```
### Plot Loss and Accuracy Curves
Plot the loss (in blue) and validation loss (in green).
```
plot_metrics("loss", "Loss")
```
Plot the training accuracy (blue) as well as the validation accuracy (green).
```
plot_metrics("accuracy", "Accuracy")
```
### Visualize predictions
You can take a look at the predictions on the validation set.
```
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
```
| github_jupyter |
## Aula 1
```
import pandas as pd
dados_portugues = pd.read_csv("data_set/stackoverflow_portugues.csv")
dados_portugues.head()
questao_portugues = dados_portugues.Questão[5]
print(questao_portugues)
```
## Aula 2
```
dados_ingles = pd.read_csv("data_set/stackoverflow_ingles.csv")
dados_ingles.head()
questao_ingles = dados_ingles.Questão[0]
print(questao_ingles)
dados_espanhol = pd.read_csv("data_set/stackoverflow_espanhol.csv")
dados_espanhol.head()
questao_espanhol = dados_espanhol.Questão[0]
print(questao_espanhol)
import re
re.findall(r"<.*?>",questao_portugues)
print(questao_portugues)
texto_teste = re.sub(r"<.*?>"," T----E----S----T----E ",questao_portugues)
print(texto_teste)
re.search(r"70","18728736187263817628631872638716283670")
regex = re.compile(r"70")
regex.search("18728736187263817628631872638716283670")
from timeit import timeit
setup = """import re"""
timeit("""re.search(r"70","18728736187263817628631872638716283670")""",
setup)
setup = """import re
regex = re.compile(r"70")"""
timeit("""regex.search("18728736187263817628631872638716283670")""",
setup)
```
## Aula 3
```
def remover(textos, regex):
    """Strip every occurrence of the compiled pattern *regex*.

    A single string comes back as a cleaned string; a list of strings
    comes back as a list of cleaned strings (input shape is preserved).
    """
    apagar = regex.sub  # bind once; always called as apagar("", texto)
    if type(textos) == str:
        return apagar("", textos)
    return [apagar("", texto) for texto in textos]
regex_html = re.compile(r"<.*?>")
questao_sem_tag = remover(questao_ingles, regex_html)
print(questao_sem_tag)
print(questao_ingles)
def substituir_codigo(textos, regex):
    """Replace every match of *regex* with the placeholder token "CODE".

    Works on a single string or on a list of strings, mirroring the input
    shape in the return value.
    """
    def trocar(texto):
        return regex.sub("CODE", texto)

    if type(textos) == str:
        return trocar(textos)
    return [trocar(texto) for texto in textos]
questao_ingles
regex_codigo = re.compile(r"<code>(.|(\n))*?</code>")
questao_sem_code = substituir_codigo(questao_espanhol,regex_codigo)
print(questao_sem_code)
dados_portugues.head()
questoes_port_sem_code = substituir_codigo(dados_portugues.Questão,
regex_codigo)
questoes_port_sem_code_tag = remover(questoes_port_sem_code, regex_html)
dados_portugues["sem_code_tag"] = questoes_port_sem_code_tag
dados_portugues.head()
questoes_ing_sem_code = substituir_codigo(dados_ingles.Questão,
regex_codigo)
questoes_ing_sem_code_tag = remover(questoes_ing_sem_code, regex_html)
dados_ingles["sem_code_tag"] = questoes_ing_sem_code_tag
dados_ingles.head()
questoes_esp_sem_code = substituir_codigo(dados_espanhol.Questão,
regex_codigo)
questoes_esp_sem_code_tag = remover(questoes_esp_sem_code, regex_html)
dados_espanhol["sem_code_tag"] = questoes_esp_sem_code_tag
dados_espanhol.head()
print(dados_espanhol.sem_code_tag[0])
```
## Aula4
```
regex_pontuacao = re.compile(r"[^\w\s]")
print(remover(questoes_esp_sem_code_tag[0], regex_pontuacao))
def minusculo(textos):
    """Lower-case a single string, or each string in a list of strings."""
    if type(textos) == str:
        return textos.lower()
    return list(map(str.lower, textos))
print(minusculo(questoes_esp_sem_code_tag[0]))
regex_digitos = re.compile(r"\d+")
print(remover("Alura \n 1234 Caelum 1234", regex_digitos))
regex_espaco = re.compile(r" +")
regex_quebra_linha = re.compile(r"(\n)")
def substituir_por_espaco(textos, regex):
    """Replace every match of *regex* with a single space.

    Used to normalise line breaks and runs of whitespace.

    Args:
        textos: a single string or an iterable of strings.
        regex: a compiled ``re`` pattern marking the spans to replace.

    Returns:
        The normalised string, or a list of normalised strings.
    """
    # isinstance() is the idiomatic type check (and also accepts str subclasses).
    if isinstance(textos, str):
        return regex.sub(" ", textos)
    return [regex.sub(" ", texto) for texto in textos]
print(substituir_por_espaco("Alura \n \n Caleum", regex_quebra_linha))
questoes_port_sem_pont = remover(dados_portugues.sem_code_tag,
regex_pontuacao)
questoes_port_sem_pont_minus = minusculo(questoes_port_sem_pont)
questoes_port_sem_pont_minus_dig = remover(questoes_port_sem_pont_minus,
regex_digitos)
questoes_port_sem_quebra_linha = substituir_por_espaco(questoes_port_sem_pont_minus_dig,
regex_quebra_linha)
questoes_port_sem_espaco_duplicado = substituir_por_espaco(questoes_port_sem_quebra_linha,
regex_espaco)
dados_portugues["questoes_tratadas"] = questoes_port_sem_espaco_duplicado
dados_portugues.head()
dados_portugues.questoes_tratadas[0]
questoes_ing_sem_pont = remover(dados_ingles.sem_code_tag,
regex_pontuacao)
questoes_ing_sem_pont_minus = minusculo(questoes_ing_sem_pont)
questoes_ing_sem_pont_minus_dig = remover(questoes_ing_sem_pont_minus,
regex_digitos)
questoes_ing_sem_quebra_linha = substituir_por_espaco(questoes_ing_sem_pont_minus_dig,
regex_quebra_linha)
questoes_ing_sem_espaco_duplicado = substituir_por_espaco(questoes_ing_sem_quebra_linha,
regex_espaco)
dados_ingles["questoes_tratadas"] = questoes_ing_sem_espaco_duplicado
dados_ingles.head()
print(dados_ingles.questoes_tratadas[0])
questoes_esp_sem_pont = remover(dados_espanhol.sem_code_tag,
regex_pontuacao)
questoes_esp_sem_pont_minus = minusculo(questoes_esp_sem_pont)
questoes_esp_sem_pont_minus_dig = remover(questoes_esp_sem_pont_minus,
regex_digitos)
questoes_esp_sem_quebra_linha = substituir_por_espaco(questoes_esp_sem_pont_minus_dig,
regex_quebra_linha)
questoes_esp_sem_espaco_duplicado = substituir_por_espaco(questoes_esp_sem_quebra_linha,
regex_espaco)
dados_espanhol["questoes_tratadas"] = questoes_esp_sem_espaco_duplicado
dados_espanhol.head()
print(dados_espanhol.questoes_tratadas[0])
```
## Aula 5
```
from nltk.util import bigrams
texto_teste = "alura"
print(list(bigrams(texto_teste)))
from nltk.lm.preprocessing import pad_both_ends
print(list(bigrams(pad_both_ends(texto_teste, n = 2))))
```
## Aula 6
```
dados_portugues["idioma"] = "port"
dados_espanhol["idioma"] = "esp"
dados_ingles["idioma"] = "ing"
dados_portugues.head()
len(dados_espanhol)
from sklearn.model_selection import train_test_split
port_treino, port_teste = train_test_split(dados_portugues.questoes_tratadas,
test_size = 0.2,
random_state = 123)
from sklearn.model_selection import train_test_split
esp_treino, esp_teste = train_test_split(dados_espanhol.questoes_tratadas,
test_size = 0.2,
random_state = 123)
from sklearn.model_selection import train_test_split
ing_treino, ing_teste = train_test_split(dados_ingles.questoes_tratadas,
test_size = 0.2,
random_state = 123)
todas_questoes_port = ' '.join(port_treino)
from nltk.tokenize import WhitespaceTokenizer
todas_palavras_port = WhitespaceTokenizer().tokenize(todas_questoes_port)
print(todas_palavras_port)
from nltk.lm.preprocessing import padded_everygram_pipeline
port_treino_bigram, vocab_port = padded_everygram_pipeline(2,
todas_palavras_port)
from nltk.lm.preprocessing import padded_everygram_pipeline
port_treino_bigram, vocab_port = padded_everygram_pipeline(2,
todas_palavras_port)
from nltk.lm import MLE
modelo_port = MLE(2)
modelo_port.fit(port_treino_bigram, vocab_port)
modelo_port.generate(num_words=6)
from nltk.lm import NgramCounter
modelo_port.counts[['m']].items()
texto = "good morning"
palavras = WhitespaceTokenizer().tokenize(texto)
palavras_fakechar = [list(pad_both_ends(palavra, n = 2)) for palavra in palavras]
palavras_bigramns = [list(bigrams(palavra)) for palavra in palavras_fakechar]
print(palavras_bigramns)
print(palavras_bigramns[0])
print(modelo_port.perplexity(palavras_bigramns[0]))
print(modelo_port.perplexity(palavras_bigramns[1]))
```
| github_jupyter |
# 3. Train_NN
**Tensorboard**
- Input at command: tensorboard --logdir=./log
- Input at browser: http://127.0.0.1:6006
```
import time
import os
import pandas as pd
project_name = 'SceneClassification'
step_name = 'Train_NN'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = project_name + '_' + step_name + '_' + time_str
print('run_name: ' + run_name)
cwd = os.getcwd()
model_path = os.path.join(cwd, 'model')
feature_pca_file = os.path.join(model_path, 'feature_pca_SceneClassification_Dim_reduction_20171202_235958.h5')
print('model_path: ' + model_path)
print('feature_pca_file: ' + feature_pca_file)
test_images = os.listdir(os.path.join(cwd, 'input', 'data_test_a', 'test'))
print(test_images[0:10])
import h5py
import numpy as np
with h5py.File(feature_pca_file, 'r') as h:
x_train = np.array(h['train'])
y_train = np.array(h['train_label'])
x_val = np.array(h['val'])
y_val = np.array(h['val_label'])
x_test = np.array(h['test_b'])
print(x_train.shape)
print(len(y_train))
print(x_val.shape)
print(len(y_val))
print(x_test.shape)
from sklearn.utils import shuffle
x_train, y_train = shuffle(x_train, y_train)
print(x_train.shape)
print(y_train.shape)
from keras.utils.np_utils import to_categorical
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print(y_train.shape)
print(y_val.shape)
from keras.models import *
from keras.layers import *
from keras.optimizers import Adam
model = Sequential()
model.add(Dense(1024, input_shape=x_train.shape[1:]))
model.add(Dropout(0.5))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(80, activation='softmax'))
model.compile(optimizer=Adam(lr=1e-4),
loss='categorical_crossentropy',
metrics=['accuracy'])
from keras.callbacks import TensorBoard
# Per-run TensorBoard log directory under the model folder.
log_path = os.path.join(model_path, run_name)
print('log_dir:' + log_path)
from keras.callbacks import TensorBoard, LearningRateScheduler
# annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)  # NOTE: training diverged when this annealer was enabled (cause unknown), so it is left disabled
tensorBoard = TensorBoard(log_dir=log_path)
hist = model.fit(x_train, y_train,
                 batch_size=1024,
                 epochs=200, #Increase this when not on Kaggle kernel
                 verbose=1, #1 for ETA, 0 for silent
                 validation_data=(x_val, y_val),
                 callbacks=[tensorBoard])
final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
val_preds = model.predict(x_val)
print(val_preds.shape)
print(y_val.shape)
from sklearn.metrics import log_loss, accuracy_score
print('Val log_loss: {}'.format(log_loss(y_val, val_preds)))
val_proba_limited = np.clip(val_preds, 0.005, 0.995)
print('Val limited log_loss: {}'.format(log_loss(y_val, val_proba_limited)))
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(hist.history['loss'], color='b')
plt.plot(hist.history['val_loss'], color='r')
plt.show()
plt.plot(hist.history['acc'], color='b')
plt.plot(hist.history['val_acc'], color='r')
plt.show()
```
## Output
```
run_name0 = run_name + '_' + str(int(final_acc*10000))
print('run_name: ' + run_name0)
def saveModel(model, run_name):
    """Persist a trained model to ``<cwd>/model/<run_name>.h5``.

    Args:
        model: any object exposing ``save(filepath)`` (e.g. a Keras model).
        run_name: base file name (without extension) for the saved weights.
    """
    cwd = os.getcwd()
    modelPath = os.path.join(cwd, 'model')
    # makedirs(exist_ok=True) is race-free, unlike the isdir()+mkdir() pair.
    os.makedirs(modelPath, exist_ok=True)
    weightsFile = os.path.join(modelPath, run_name + '.h5')
    model.save(weightsFile)
saveModel(model, run_name0)
print(run_name0)
print('Done !')
```
| github_jupyter |
# *Circuitos Elétricos I*
## Aula 1
### Problema 1
A tensão e a corrente nos terminais de um elemento ideal de dois terminais são nulas para $t < 0$. Para $t ≥ 0$, são dadas por: $v(t) = 400e^{−100t}$ V, $i(t) = 5e^{−100t}$ A. Considera-se o sentido da corrente como sendo o mesmo da queda da tensão entre os terminais.
a) Determine a potência absorvida pelo elemento em $t = 10$ ms.
b) Determine a energia total ($w_{total}=\int_0^\infty p(t)dt$) fornecida ao elemento.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
print("Respostas:")
# a.
t = 10e-3;
v = 400*np.exp(-100*t) # tensão
i = 5*np.exp(-100*t) # corrente
p = v*i # potência
print("a. Potência em t = 10 ms: ", round(p,2), "W") # valor arredondado em duas casas decimais
# b.
t = np.linspace(0, 0.1, num = 1000) # tempo
v = 400*np.exp(-100*t) # tensão
i = 5*np.exp(-100*t) # corrente
p = v*i # potência
plt.plot(t, p)
plt.xlim(0, 0.1)
plt.grid()
plt.xlabel('tempo (s)')
plt.ylabel('potência (W)')
energiaTotal = integrate.trapz(p, t) # cálculo da potência total entregue ao elemento
print("b. Energia total = ", round(energiaTotal,2), "J") # valor da integral arredondado em duas casas decimais
```
### Problema 2
A tensão e a corrente nos terminais de um elemento ideal de dois terminais são nulas para $t < 0$. Para $t ≥ 0$, são dadas por: $v(t) = 400e^{−100t}\sin(200t)$ V, $i(t) = 5e^{−100t}\sin(200t)$ A. Considera-se o sentido da corrente como sendo o mesmo da queda da tensão entre os terminais.
a) Determine a potência absorvida pelo elemento em $t = 10$ ms.
b) Determine a energia total ($w_{total}=\int_0^\infty p(t)dt$) fornecida ao elemento.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
print("Respostas:\n")
# a.
t = 10e-3;
v = 400*np.exp(-100*t)*np.sin(200*t) # tensão
i = 5*np.exp(-100*t)*np.sin(200*t) # corrente
p = v*i # potência
print("a. Potência em t = 10 ms: ", round(p,2), "W\n") # valor arredondado em duas casas decimais
# b.
t = np.linspace(0, 0.1, num = 1000) # tempo
v = 400*np.exp(-100*t)*np.sin(200*t) # tensão
i = 5*np.exp(-100*t)*np.sin(200*t) # corrente
p = v*i # potência
plt.plot(t, p)
plt.xlim(0, 0.1)
plt.grid()
plt.xlabel('tempo (s)')
plt.ylabel('potência (W)')
energiaTotal = integrate.trapz(p, t) # cálculo da potência total entregue ao elemento
print("b. Energia total = ", round(energiaTotal,2), "J") # valor da integral arredondado em duas casas decimais
```
## Aula 2
Exemplo do livro texto:
a. Use as leis de Kirchhoff e lei de Ohm para determinar a corrente $i_0$ no circuito abaixo.
b. Teste a solução para $i_0$ verificando se a potência total gerada é igual à potência total dissipada.
```
from IPython.display import Image
Image("figuras/A2C1.png", width=500)
```
Antes de aplicar as leis de Kirchhoff para resolver o circuito, precisamos atribuir variáveis às correntes e tensões que são desconhecidas em cada um dos elementos bipolares (de dois terminais). A atribuição é feita de forma arbitrária. Uma das possíveis configurações escolhidas está mostrada na figura abaixo:
```
Image("figuras/A2C2.png", width=500)
```
Aplicando a LKT à malha simples que contém a fonte de 120 V, temos:
\begin{equation*}
-120+v_{10\Omega}+v_{50\Omega}=0
\label{eq:eq1} \tag{1}
\end{equation*}
Aplicando a lei de Ohm aos dois resistores e **observando a convenção passiva**, temos: $v_{10\Omega}=10i_0$ e $v_{50\Omega}=50i_1$. Substituindo na equação $\eqref{eq:eq1}$:
\begin{equation*}
10i_0+50i_1=120
\label{eq:eq2} \tag{2}
\end{equation*}
Aplicando a LKC ao nó interligando os dois resistores e a fonte de corrente, temos $i_0+6=i_1$, ou seja:
\begin{equation*}
i_0-i_1=-6
\label{eq:eq3} \tag{3}
\end{equation*}
Resolvendo o sistema formado pelas equações $\eqref{eq:eq2}$ e $\eqref{eq:eq3}$, obtemos as correntes $i_0$ e $i_1$. A seguir, temos um trecho de código que resolve o sistema linear e calcula as potências desenvolvidas por cada elemento do circuito.
```
import numpy as np
print("Respostas:\n")
#a.
# Fontes independentes
i2 = 6 #A
V1 = 120 #V
# Resistores
R1 = 10 #Ohms
R2 = 50 #Ohms
# Equações de resolução do circuito: LKT: (I) 10*i0 + 50*i1 = V1, LKC: (II) i0 - i1 = -i2
# Define o problema em termos de um sistema A*x = b, em que x = [i0, i1] é o vetor de incógnitas
A = np.array([[R1, R2], [1, -1]])
b = np.array([V1, -i2])
x = np.linalg.solve(A, b) # resolve o sistema de equações lineares
i0 = x[0]
i1 = x[1]
print("a. Solução do sistema: i0 =", round(x[0],2), "A, i1 =", round(x[1],2), "A \n") # Solução do sistema
#b. Cálculo das potências observando a convenção passiva:
V = R2*i1;
p_120V = -V1*i0
p_6A = -V*i2
p_R1 = R1*i0**2
p_R2 = R2*i1**2
print( "b. Potências: p120V = ", round(p_120V,2), "W, p6A = ", round(p_6A,2), "W, pR1 = ", round(p_R1,2), "W, pR2 = ", round(p_R2,2), "W")
print('\n Somatório das potências:', round(p_120V+p_6A+p_R1+p_R2))
```
## Aula 3
Problema (Nilsson, 2.31): para o circuito mostrado a seguir:
```
Image("figuras/A3C2.png", width=800)
```
a. Determine $i_{\Delta}$ e $v_0$.
b. Mostre que a potência fornecida equivale à potência consumida no circuito.
Antes de aplicar as leis de Kirchhoff para resolver o circuito, precisamos atribuir variáveis às correntes e tensões que são desconhecidas em cada um dos elementos bipolares (de dois terminais). A atribuição é feita de forma arbitrária. Uma das possíveis configurações escolhidas está mostrada na figura abaixo:
```
Image("figuras/A3C3.png", width=800)
```
Para facilitar a manipulação do código, vamos definir: $V_{S1}= 50 V$, $V_{S2}= 20 V$, $R_1= 18 \Omega$, $R_2= 40 \Omega$, $k_1=20$, $k_2=5$, $k_3=8$. Desse modo, aplicando LKT às malhas (I) e (II) do circuito, obtemos o seguinte sistema de equações na forma matricial:
$$\begin{bmatrix} k_1 & -R_1 \\ R_2+k_2 & -R_1 \end{bmatrix}\begin{bmatrix} i_{\sigma} \\ i_{\Delta} \end{bmatrix}=\begin{bmatrix} -V_{S1} \\ 0 \end{bmatrix}$$
```
import numpy as np
print("Respostas:\n")
#a.
# Fontes independentes
Vs1 = 50 #V
Vs2 = 20 #V
# Constantes atribuídas as fontes dependentes nas malhas (I), (II) e (III), respectivamente
k1 = 20
k2 = 5
k3 = 8
# Resistores
R1 = 18 #Ohms
R2 = 40 #Ohms
# Equações de resolução do circuito aplicando LKT:
#
# malha (I) -Vs1 -k1*i_sigma + R1*i_delta = 0 -> k1*i_sigma-R1*i_delta = -Vs1
# malha (II) -R1*i_delta + k2*i_sigma + R2*isigma = 0 -> (R2+k2)*i_sigma-R1*i_delta = 0
#
# Define o problema em termos de um sistema A*x = b, em que x = [isigma, idelta] é o vetor de incógnitas
A = np.array([[k1, -R1], [(R2+k2), -R1]])
b = np.array([-Vs1, 0])
x = np.linalg.solve(A, b) # resolve o sistema de equações lineares
isigma = x[0]
idelta = x[1]
# Determinando as correntes restantes via LKC:
i1 = idelta + isigma + k3*idelta
i2 = i1 - idelta
v0 = R2*isigma # tensão sobre R2
v1 = v0-Vs2 # tensão sobre a fonte dependente de corrente
print("a. Solução do sistema: idelta =", round(idelta,2), "A, v0 =", round(v0,2), "V \n") # Solução do sistema
#b. Cálculo das potências observando a convenção passiva:
p_50V = -Vs1*i1
p_20V = Vs2*(k3*idelta)
p_20is = -(k1*isigma)*i1
p_5is = (k2*isigma)*i2
p_8id = v1*(k3*idelta)
p_R1 = R1*idelta**2
p_R2 = R2*isigma**2
print( "b. Potências:\t p_50V = ", round(p_50V,2), "W, p_20V = ", round(p_20V,2), "W, pR1 = ", round(p_R1,2), "W, pR2 = ", round(p_R2,2), "W")
print( "\t\t p_20is = ", round(p_20is,2), "W, p_5is = ", round(p_5is,2), "W, p_8id = ", round(p_8id,2), "W")
print('\n Somatório das potências:', round(p_50V+p_20V+p_20is+p_5is+p_8id+p_R1+p_R2,2))
```
| github_jupyter |
# Retroceso de Fase (Phase Kickback)
En esta página, cubriremos un comportamiento de compuertas cuánticas controladas conocido como "retroceso de fase" (phase kickback). Este interesante efecto cuántico es un bloque de construcción en muchos algoritmos cuánticos famosos, incluido el algoritmo de factorización de Shor y el algoritmo de búsqueda de Grover.
## Vectores propios
Ya deberías estar familiarizado con los vectores propios y los valores propios, pero si no, puedes leer una buena introducción [aquí](https://www.khanacademy.org/math/linear-algebra/alternate-bases/eigen-everything/v/linear-algebra-introduction-to-eigenvalues-and-eigenvectors) . Si *estás* familiarizado, entonces deberías reconocer la ecuación de vector propio:
$$ \class{_matrix-A}{A}\class{_eig-vec-A}{|x\rangle} = \class{_eig-val-A}{\lambda}\class{_eig-vec-A}{|x\rangle} $$
Esto es aún más simple en la computación cuántica. Dado que todos nuestros vectores de estado tienen una magnitud de 1, nuestros valores propios también deben tener una magnitud de 1, es decir, $\lambda = e^{2\pi i \theta}$. Entonces, para una compuerta cuántica $U$ y su estado propio $|x\rangle$, tenemos:
$$ \class{_matrix-U}{U}\class{_eig-vec-U}{|x\rangle} = \class{_eig-val-U}{e^{2\pi i \theta}}\class{_eig-vec-U}{|x\rangle} $$
Para resumir: si una compuerta gira (y solo gira) todas las amplitudes de un vector de estado en la misma cantidad, entonces ese estado es un *estado* propio de esa compuerta.
<!-- ::: q-block -->
### Explorando los vectores propios
Usa el widget a continuación para ver cómo una compuerta de un solo qubit transforma un estado de un solo qubit. ¿Puedes averiguar qué estados son propios y de qué compuertas?
```
q-eigenvector-widget
```
<!-- ::: -->
## Compuertas controladas y estados propios
Una vez que te sientas cómodo con el concepto de estados propios, podemos comenzar a pensar en lo que sucede cuando controlamos estos circuitos en el estado de otro qubit. Por ejemplo, sabemos que la compuerta Z que actúa sobre el estado $|1\rangle$ introduce una fase global negativa ($\theta = 0.5$), averigüemos qué sucede cuando controlamos esta operación.
<!-- ::: q-block.tabs -->
### La compuerta Z controlada
<!-- ::: tab -->
### |10〉
<!-- ::: column(width=200) -->
Si el qubit de control es $|0\rangle$, entonces el comportamiento es trivial, no pasa nada.
<!-- ::: column(width=400) -->

<!-- ::: -->
<!-- ::: tab -->
### |11〉
<!-- ::: column(width=200) -->
Si el qubit de control es $|1\rangle$, la compuerta introduce una fase global (observa el signo menos en la imagen de la derecha), pero los estados del qubit no cambian.
<!-- ::: column(width=400) -->

<!-- ::: -->
<!-- ::: tab -->
### |1+〉
<!-- ::: column(width=200) -->
La compuerta Z controlada no hace nada cuando el control es $|0\rangle$, e introduce una fase negativa cuando el control es $|1\rangle$. Cuando el qubit de control está en superposición, la compuerta cambia la fase *relativa* entre los estados $|0\rangle$ y $|1\rangle$ del qubit de control.
<!-- ::: column(width=400) -->

<!-- ::: -->
<!-- ::: -->
<!-- ::: -->
Cuando el control es $|{+}\rangle$, y el objetivo es $|1\rangle$, la compuerta Z controlada cambia el estado del qubit de *control* , pero deja el qubit objetivo sin cambios. Este efecto se denomina "retroceso de fase" (phase kickback), ya que el valor propio regresa al estado del qubit de control.
En términos más generales, si tenemos una compuerta cuántica $U$ y su estado propio $|x\rangle$, entonces $U$ actuando sobre $|x\rangle$ agregará una fase global $\theta$ como vimos anteriormente.
$$ \class{_matrix-U}{U}\class{_eig-vec-U}{|x\rangle} = \class{_eig-val-U}{e^{2\pi i \theta}}\class{_eig-vec-U}{|x\rangle} $$
Si controlamos la operación $U|x\rangle$ por otro qubit en una superposición de $|0\rangle$ y $|1\rangle$, entonces esto tendrá el efecto de rotar el qubit de control alrededor del eje Z por un ángulo $\theta$. Es decir:
$$ \class{_matrix-CU}{CU}\class{_eig-vec-U}{|x\rangle}\class{_control-qubit-pre}{(\alpha|0\rangle + \beta|1\rangle)} = \class{_eig-vec-U}{|x\rangle}\class{_control-qubit-post}{(\alpha|0\rangle + \beta e^{2\pi i \theta}|1\rangle)} $$
En el ejemplo anterior, vemos que el 'control' de la compuerta Z controlada en realidad está haciendo una rotación en Z; algo que solo debería haber estado observando el qubit que ha cambiado. Por esta razón, a menudo verás la compuerta Z controlada dibujada como dos controles.
```
from qiskit import QuantumCircuit
qc = QuantumCircuit(2)
qc.cz(0,1)
qc.draw()
```
## La Compuerta CNOT
Veamos el efecto de retroceso de fase con una compuerta diferente de dos qubits. Dado que el estado $|{-}\rangle$ es un estado propio de la compuerta X, con valor propio $-1$, obtenemos:
$$ \class{_matrix-CX}{CX}\class{_eig-vec-X}{|{-}\rangle}\class{_control-qubit-pre}{(\alpha|0\rangle + \beta|1\rangle)} = \class{_eig-vec-X}{|{-}\rangle}\class{_control-qubit-post}{(\alpha|0\rangle - \beta |1\rangle)} $$

Nuevamente, en este caso el cambio de fase es $\theta = 0.5$, por lo que nuestro qubit de control se voltea alrededor del eje Z.
<!-- ::: q-block -->
### Ejemplo resuelto
<details>
<summary>Retroceso con la compuerta CNOT (haz clic para expander)</summary>
q-carousel
div.md
<img src="images/kickback/carousel/cnot/carousel-img-0.svg"><br>Por definición, la compuerta X controlada (CNOT) cambia el estado del qubit objetivo (en nuestro ejemplo, el bit más a la derecha es el objetivo) si el qubit de control es $|1\rangle$. En la imagen de arriba, mostramos la compuerta CNOT transformando un vector de estado a otro.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-1.svg"><br>Por ejemplo, si el qubit de control está en el estado $|0\rangle$, siempre obtenemos exactamente el mismo vector de estado. En la imagen de arriba, las amplitudes de los estados donde el control es $|1\rangle$ son cero, por lo que no vemos ningún efecto cuando los intercambiamos.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-2.svg"><br>Otro ejemplo: si el qubit de control está en el estado $|1\rangle$, entonces usamos una compuerta X en el qubit objetivo. En este caso, el qubit objetivo está en un estado propio de la compuerta X, por lo que obtenemos el mismo vector de estado, hasta la fase global. Este cambio es una fase global, ya que la fase relativa entre las amplitudes $|0\rangle$ y $|1\rangle$ del qubit objetivo permaneció igual.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-3.svg"><br>Veamos el vector de estado cuando el control está en el estado $|{+}\rangle$ (es decir, $\tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$), y el objetivo en el estado $|{-}\rangle$ (es decir, $\tfrac{1}{\sqrt{2}}(|0\rangle - |1\rangle)$). Podemos ver que estos dos qubits son separables, ya que medir uno no afecta el estado del otro.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-4.svg"><br> Por ejemplo, independientemente de si medimos el qubit de control como $|0\rangle$ o $|1\rangle$, las amplitudes para medir el objetivo como $|0\rangle$ o $|1\rangle$ permanecen iguales.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-5.svg"><br>Con todo esto en mente, veamos qué sucede cuando aplicamos el CNOT a este estado $|{+}{-}\rangle$.
div.md
<img src="images/kickback/carousel/cnot/carousel-img-6.svg"><br>Después del CNOT, estos qubits siguen siendo separables, ya que el estado de uno no cambia cuando medimos el otro. Sin embargo, la fase relativa entre las amplitudes $|0\rangle$ y $|1\rangle$ del qubit de control ha cambiado.
</details>
<!-- ::: -->
Cuando recordamos que la compuerta H hace la transformación $|0\rangle \rightarrow |{+}\rangle$ y $|1\rangle \rightarrow |{-}\rangle$ (y viceversa), obtenemos lo siguiente identidad:

## El problema de Deutsch
Acabamos de ver que condicionar una acción en el estado de un qubit en realidad puede cambiar el estado del qubit de control. Este es un efecto 'cuántico', es decir, algo que no vemos que suceda con los bits clásicos.
En la computación cuántica, queremos crear algoritmos que las computadoras clásicas *no puedan* ejecutar, por lo que un buen lugar para comenzar es tratar de replantear este efecto como un problema a resolver. De esta manera, podemos probar que las computadoras cuánticas son al menos un poco mejores en algo que las computadoras clásicas.
El problema de Deutsch hace exactamente esto. Este es un problema de 'caja negra'; un problema artificial en el que se nos permite aplicar una función a nuestros bits, pero no podemos ver cómo opera la función. El desafío es descubrir algunas propiedades de la caja probando diferentes entradas y salidas.
El problema de Deutsch es el siguiente: tenemos una función reversible clásica (a la que llamaremos $f$ de forma abreviada), que actúa sobre dos bits, $ a $ y $ b $. La función dejará el bit $ a $ solo, pero puede o no cambiar el bit $ b $. El problema de Deutsch nos pide que averigüemos si $f$ se comporta de manera diferente dependiendo del valor de $ a $ (lo llamaremos comportamiento "balanceado"), o si ignora a $ a $ y siempre hace lo mismo con $ b $ (comportamiento "constante"). El desafío es hacer esto aplicando $f$ la menor cantidad de veces posible.

El mejor algoritmo clásico para este problema aplica $f$ dos veces con diferentes valores de $ a $, luego observa si $f$ se comportó de manera diferente.
## Algoritmo de Deutsch
Como habrás adivinado, podemos usar el retroceso de fase (phase kickback) para crear un algoritmo cuántico que funciona incluso mejor que el algoritmo clásico. Si ponemos el qubit $ a $ en el estado $|{+}\rangle$ y el qubit $ b $ en el estado $|{-}\rangle$, entonces cualquier cambio condicionado a $ a $ devolverá una fase negativa relativa, volteando al qubit $ a $ de $|{+}\rangle$ a $|{-}\rangle$. Luego podemos aplicar una compuerta H a $ a $ para ver si se produjo un retroceso de fase o no.

<!-- ::: q-block.reminder -->
### Más información
<details><summary>Dentro de la caja negra (haz clic para ampliar)</summary> Si esto todavía parece mágico, puede ayudar pensar en todas las posibles funciones de Deutsch y los circuitos cuánticos que las implementan. Hay cuatro posibles funciones de Deutsch: dos constantes y dos balanceadas.</details>
Si es constante, la función puede no hacer nada o voltear el qubit $ b $. Si está balanceada, la función puede voltear a $ b $ solo cuando $ a $ es $|1\rangle$, o voltear a $ b $ solo cuando $ a $ es $|0\rangle$. Puedes ver los cuatro escenarios en la imagen a continuación.
<img src="images/kickback/deutsch-oracles.svg">
Con ambas funciones constantes, el qubit superior permanecerá sin cambios (ya que no le estamos haciendo nada), y con las funciones equilibradas, el efecto de retroceso cambia el qubit superior de $|{+}\rangle$ a $|{- }\rangle$.
<!-- ::: -->
Este no es el ejemplo más impresionante de aceleración cuántica; es muy específico y no encontramos problemas de caja negra en la naturaleza. En cambio, el problema de Deutsch nos da un resultado alentador y algunos efectos interesantes para ser explorados. En el resto de este curso, ampliaremos este sencillo experimento para resolver problemas aún más impresionantes, incluida la factorización.
<!-- ::: q-block.exercise -->
### Ejercicio
Haz una función, `deutsch()` que tome una función Deutsch como `QuantumCircuit` y use el algoritmo Deutsch para resolverlo en un simulador cuántico. Tu función debe devolver `True` si la función Deutsch está balanceada y `False` si es constante.
Puedes usar la función `deutsch_problem()` para crear un `QuantumCircuit` que puedes usar como entrada para tu función `deutsch()`.
<!-- ::: -->
```
from qiskit import QuantumCircuit
import numpy as np
def deutsch_problem(seed=None):
    """Return a 2-qubit circuit implementing a randomly chosen Deutsch function.

    Args:
        seed (int): if set, the returned circuit is always
            the same for the same seed.

    Returns: QuantumCircuit
    """
    np.random.seed(seed)
    problem = QuantumCircuit(2)
    if np.random.randint(2):
        print("La función es balanceada.")
        problem.cx(0, 1)  # balanced: flip qubit 1 only when qubit 0 is |1>
    else:
        print("La función es constante.")
        if np.random.randint(2):
            problem.x(1)  # constant: may flip qubit 1 unconditionally
    return problem
def deutsch(function):
    """Implement Deutsch's algorithm.

    Args:
        function (QuantumCircuit): Deutsch function to be solved.
            Must be a 2-qubit circuit that is either balanced
            or constant.

    Returns:
        bool: True if the circuit is balanced,
        otherwise False.
    """
    # your code here (exercise stub left unimplemented on purpose)
```
## Resumen
En esta página revisamos:
- recapitulación del concepto de valores y vectores propios
- exploración del efecto de retroceso de fase (phase kickback) y revisión de algunos ejemplos específicos
- se introdujo el problema de Deutsch como un escenario donde las computadoras cuánticas tienen una ventaja sobre las computadoras clásicas
Si olvidas todo lo demás de esta página, lo más importante que debes recordar y sentirte cómodo es este resumen de retroceso de fase a continuación:
<!-- ::: q-block.reminder -->
### Recordatorio: Retroceso de fase (Phase kickback)
Si tenemos una compuerta cuántica $U$ y su estado propio $|x\rangle$, entonces $U$ actuando sobre $|x\rangle$ agregará una fase global $\theta$. Es decir:
$$ \class{_matrix-U}{U}\class{_eig-vec-U}{|x\rangle} = \class{_eig-val-U}{e^{2\pi i \theta}}\class{_eig-vec-U}{|x\rangle} $$
Si controlamos la operación $U|x\rangle$ por otro qubit en una superposición de $|0\rangle$ y $|1\rangle$, entonces esto tendrá el efecto de rotar el qubit de control alrededor del eje Z por un ángulo $\theta$. Es decir:
$$ \class{_matrix-CU}{CU}\class{_eig-vec-U}{|x\rangle}\class{_control-qubit-pre}{(\alpha|0\rangle + \beta|1\rangle)} = \class{_eig-vec-U}{|x\rangle}\class{_control-qubit-post}{(\alpha|0\rangle + \beta e^{2\pi i \theta}|1\rangle)} $$
<!-- ::: -->
| github_jupyter |
```
import pandas as pd
import numpy as np
```
## Load review dataset
```
products = pd.read_csv('amazon_baby_subset.csv')
```
### 1. listing the name of the first 10 products in the dataset.
```
products['name'][:10]
```
### 2. counting the number of positive and negative reviews.
```
# Parenthesised print() works on both Python 2 and 3; the original
# `print (expr).sum()` form raises AttributeError under Python 3
# because it parses as `print(expr).sum()`.
print((products['sentiment'] == 1).sum())   # number of positive reviews
print((products['sentiment'] == -1).sum())  # number of negative reviews
print((products['sentiment']).count())      # total number of labelled reviews
```
## Apply text cleaning on the review data
### 3. load the features
```
import json
# Load the curated vocabulary used as model features.
with open('important_words.json') as important_words_file:
    important_words = json.load(important_words_file)
# print() call form is valid on both Python 2 and 3 (the bare
# `print x` statement is a syntax error under Python 3).
print(important_words[:3])
```
### 4. data transformations:
- fill n/a values in the review column with empty strings
- Remove punctuation
- Compute word counts (only for important_words)
```
products = products.fillna({'review':''}) # fill in N/A's in the review column
def remove_punctuation(text):
    """Strip all ASCII punctuation characters from *text*."""
    import string
    # NOTE(review): the two-argument str.translate is Python 2 only; under
    # Python 3 this would be text.translate(str.maketrans('', '', string.punctuation))
    return text.translate(None, string.punctuation)
products['review_clean'] = products['review'].apply(remove_punctuation)
products.head(3)
```
### 5. compute a count for the number of times the word occurs in the review
```
for word in important_words:
products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
products.head(1)
```
### 7. compute the number of product reviews that contain the word perfect.
```
# Flag reviews mentioning "perfect" at least once.
products['contains_perfect'] = products['perfect'] >= 1
# print() call form is valid on both Python 2 and 3 (the bare
# `print x` statement is a syntax error under Python 3).
print(products['contains_perfect'].sum())
```
## 1. Quiz Question.
How many reviews contain the word perfect?
## Answer
2955
## Convert data frame to multi-dimensional array
### 8. convert our data frame to a multi-dimensional array.
The function should accept three parameters:
- dataframe: a data frame to be converted
- features: a list of string, containing the names of the columns that are used as features.
- label: a string, containing the name of the single column that is used as class labels.
The function should return two values:
- one 2D array for features
- one 1D array for class labels
The function should do the following:
- Prepend a new column constant to dataframe and fill it with 1's. This column takes account of the intercept term. Make sure that the constant column appears first in the data frame.
- Prepend a string 'constant' to the list features. Make sure the string 'constant' appears first in the list.
- Extract columns in dataframe whose names appear in the list features.
- Convert the extracted columns into a 2D array using a function in the data frame library. If you are using Pandas, you would use as_matrix() function.
- Extract the single column in dataframe whose name corresponds to the string label.
- Convert the column into a 1D array.
- Return the 2D array and the 1D array.
```
def get_numpy_data(dataframe, features, label):
    """Convert a data frame into a (feature matrix, label vector) pair.

    Args:
        dataframe: pandas DataFrame holding features and labels.
            A 'constant' column of 1's is added in place (intercept term).
        features: list of column names to use as features.
        label: name of the single column holding class labels.

    Returns:
        tuple: (2D numpy array of features with the intercept column first,
                1D numpy array of labels).
    """
    dataframe['constant'] = 1            # intercept column, all 1's
    features = ['constant'] + features   # intercept must come first
    # .values works on both old and new pandas; as_matrix() was
    # deprecated in 0.23 and removed in 1.0.
    feature_matrix = dataframe[features].values
    label_array = dataframe[label].values
    return (feature_matrix, label_array)
```
### 9. extract two arrays feature_matrix and sentiment
```
feature_matrix, sentiment = get_numpy_data(products, important_words, 'sentiment')
```
## 2. Quiz Question:
How many features are there in the feature_matrix?
```
print feature_matrix.shape
```
## 2. Answer:
194
## 3. Quiz Question:
Assuming that the intercept is present, how does the number of features in feature_matrix relate to the number of features in the logistic regression model?
## Estimating conditional probability with link function
### 10. Compute predictions given by the link function.
- Take two parameters: feature_matrix and coefficients.
- First compute the dot product of feature_matrix and coefficients.
- Then compute the link function P(y=+1|x,w).
- Return the predictions given by the link function.
```
'''
feature_matrix: N * D
coefficients: D * 1
predictions: N * 1
produces probablistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
    """Estimate P(y_i = +1 | x_i, w) through the logistic link function.

    feature_matrix: N x D array of features.
    coefficients: D x 1 array of model weights.
    Returns an N x 1 array of probabilities, each strictly between 0 and 1.
    """
    # Linear scores, one per observation (N x 1).
    scores = feature_matrix.dot(coefficients)
    # Sigmoid link maps each score onto (0, 1).
    return 1.0 / (1.0 + np.exp(-scores))
```
## Compute derivative of log likelihood with respect to a single coefficient
### 11. computes the derivative of log likelihood with respect to a single coefficient w_j
The function should do the following:
- Take two parameters errors and feature.
- Compute the dot product of errors and feature.
- Return the dot product. This is the derivative with respect to a single coefficient w_j.
```
"""
errors: N * 1
feature: N * 1
derivative: 1
"""
def feature_derivative(errors, feature):
    """Derivative of the log likelihood with respect to one coefficient w_j.

    errors: N x 1 array of (indicator - prediction) residuals.
    feature: length-N column of the design matrix for coefficient j.
    Returns the dot product of the two, a length-1 array holding d/dw_j.
    """
    # errors.T is (1, N); dotting with the (N,) feature column gives the
    # derivative wrapped in a shape-(1,) array.
    return np.dot(errors.T, feature)
```
### 12. Write a function compute_log_likelihood
```
def compute_log_likelihood(feature_matrix, sentiment, coefficients):
    """Log likelihood of the observed labels under the current coefficients.

    feature_matrix: N x D array of features.
    sentiment: length-N array of labels in {+1, -1}.
    coefficients: D x 1 array of weights.
    Returns the scalar sum over observations of
    (1[y_i = +1] - 1) * score_i - log(1 + exp(-score_i)).
    """
    indicator = (sentiment == +1)
    scores = np.dot(feature_matrix, coefficients)  # N x 1
    # Reshape the length-N boolean indicator to an N x 1 column so it
    # broadcasts element-wise against the N x 1 scores.
    indicator_col = np.asarray(indicator).reshape(-1, 1)
    # np.logaddexp(0, -s) == log(1 + exp(-s)) computed without overflowing
    # for large |s|, unlike the naive np.log(1. + np.exp(-s)).
    lp = np.sum((indicator_col - 1) * scores - np.logaddexp(0.0, -scores))
    return lp
```
## Taking gradient steps
### 13. Write a function logistic_regression to fit a logistic regression model using gradient ascent.
The function accepts the following parameters:
- feature_matrix: 2D array of features
- sentiment: 1D array of class labels
- initial_coefficients: 1D array containing initial values of coefficients
- step_size: a parameter controlling the size of the gradient steps
- max_iter: number of iterations to run gradient ascent
- The function returns the last set of coefficients after performing gradient ascent.
The function carries out the following steps:
1. Initialize vector coefficients to initial_coefficients.
1. Predict the class probability P(yi=+1|xi,w) using your predict_probability function and save it to variable predictions.
1. Compute indicator value for (yi=+1) by comparing sentiment against +1. Save it to variable indicator.
1. Compute the errors as difference between indicator and predictions. Save the errors to variable errors.
1. For each j-th coefficient, compute the per-coefficient derivative by calling feature_derivative with the j-th column of feature_matrix. Then increment the j-th coefficient by (step_size*derivative).
1. Once in a while, insert code to print out the log likelihood.
1. Repeat steps 2-6 for max_iter times.
```
# coefficients: D * 1
from math import sqrt
def logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):
    """Fit logistic regression by gradient ascent on the log likelihood.

    feature_matrix: N x D array of features.
    sentiment: length-N array of class labels in {+1, -1}.
    initial_coefficients: D x 1 array of starting weights (copied, not mutated).
    step_size: gradient-ascent learning rate.
    max_iter: number of ascent steps to perform.
    Returns the D x 1 coefficient array after max_iter steps.
    """
    coefficients = np.array(initial_coefficients)  # defensive copy as ndarray
    for itr in range(max_iter):
        # Predict P(y_i = +1 | x_i, w) with the current coefficients.
        predictions = predict_probability(feature_matrix, coefficients)
        # Indicator for (y_i = +1), as an N x 1 column matching predictions.
        indicator = np.asarray(sentiment == +1).reshape(-1, 1)
        # Residuals between the indicator and the predicted probabilities.
        errors = indicator - predictions
        # Vectorised ascent step: feature_matrix.T dot errors is the full
        # D x 1 gradient in one matrix product. This replaces the original
        # per-coefficient Python loop and gives the identical result, since
        # errors is fixed within an iteration.
        coefficients += step_size * np.dot(np.transpose(feature_matrix), errors)
        # Periodically report the log likelihood so progress can be monitored.
        if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
                or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
            lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)
            print('iteration %*d: log likelihood of observed labels = %.8f' %
                  (int(np.ceil(np.log10(max_iter))), itr, lp))
    return coefficients
```
### 14. run the logistic regression solver
- feature_matrix = feature_matrix extracted in #9
- sentiment = sentiment extracted in #9
- initial_coefficients = a 194-dimensional vector filled with zeros
- step_size = 1e-7
- max_iter = 301
```
initial_coefficients = np.zeros((194, 1))
step_size = 1e-7
max_iter = 301
# Run gradient ascent once. The original cell called logistic_regression
# twice with identical arguments; since the function copies
# initial_coefficients, the second call just repeated the same work and
# produced the same result.
coefficients = logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter)
```
## 5. Quiz question :
As each iteration of gradient ascent passes, does the log likelihood increase or decrease?
### Answer
increase
### 15. compute class predictions
- First compute the scores using feature_matrix and coefficients using a dot product.
- Then apply threshold 0 on the scores to compute the class predictions. Refer to the formula above.
```
"""
feature_matrix: N * D
coefficients: D * 1
predictions: N * 1
"""
predictions = predict_probability(feature_matrix, coefficients)
NumPositive = (predictions > 0.5).sum()
print NumPositive
score = np.dot(feature_matrix, coefficients) # N * 1
print (score > 0).sum()
```
### 6. Quiz question:
How many reviews were predicted to have positive sentiment?
### Answer:
25126
## Measuring accuracy
```
print 0 in products['sentiment']
print -1 in products['sentiment']
print np.transpose(predictions.flatten()).shape
print (products['sentiment']).shape
print (np.transpose(predictions.flatten()))[:5]
correct_num = np.sum((np.transpose(predictions.flatten())> 0.5) == np.array(products['sentiment']>0))
total_num = len(products['sentiment'])
print "correct_num: {}, total_num: {}".format(correct_num, total_num)
accuracy = correct_num * 1./ total_num
print accuracy
np.transpose(predictions.flatten())> 0.5
np.array(products['sentiment']>0)
correct_num = np.sum((np.transpose(score.flatten())> 0) == np.array(products['sentiment']>0))
total_num = len(products['sentiment'])
print "correct_num: {}, total_num: {}".format(correct_num, total_num)
accuracy = correct_num * 1./ total_num
print accuracy
```
### 7. Quiz question:
What is the accuracy of the model on predictions made above? (round to 2 digits of accuracy)
### Answer:
0.75
## Which words contribute most to positive & negative sentiments
### 17.compute the "most positive words"
- Treat each coefficient as a tuple, i.e. (word, coefficient_value). The intercept has no corresponding word, so throw it out.
- Sort all the (word, coefficient_value) tuples by coefficient_value in descending order. Save the sorted list of tuples to word_coefficient_tuples.
```
# Drop the intercept weight: it has no associated word.
coefficients = list(coefficients[1:])
# Pair each word with its learned weight, then order by weight, largest first.
word_coefficient_tuples = sorted(zip(important_words, coefficients),
                                 key=lambda pair: pair[1], reverse=True)
```
## Ten "most positive" words
### 18. Compute the 10 most positive words
```
word_coefficient_tuples[:10]
```
### 8. Quiz question:
Which word is not present in the top 10 "most positive" words?
## Ten "most negative" words
```
word_coefficient_tuples[-10:]
```
### 9. Quiz question:
Which word is not present in the top 10 "most negative" words?
```
print np.array([1,2,3])==np.array([1,3,2])
```
| github_jupyter |
# Plotting and Visualization
```
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
%matplotlib inline
%pwd
```
## A brief matplotlib API primer
### Figures and Subplots
```
fig = plt.figure()
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
fig
from numpy.random import randn
ax3.plot(randn(50).cumsum(), 'k--')
'''sort of random walk'''
_ = ax1.hist(randn(100), bins=20, color='k', alpha=0.3)
ax2.scatter(np.arange(30), np.arange(30) + 3 * randn(30))
fig
plt.close('all')
fig, axes = plt.subplots(2, 3)
axes
```
#### Adjusting the spacing around subplots
```
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
for i in range(2):
for j in range(2):
axes[i, j].hist(randn(500), bins=50, color='k', alpha=0.5)
plt.subplots_adjust(wspace=0, hspace=0)
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
for i in range(2):
for j in range(2):
#two for loop are designed for plot four figures secsussionally
axes[i, j].hist(randn(500), bins=50, color='k', alpha=0.5)
plt.subplots_adjust(wspace=0, hspace=0)
```
### Colors, markers, and line styles
```
plt.plot(randn(30).cumsum(), 'ko--')
#this is so-called random walking
data = randn(30).cumsum()
plt.plot(data, 'k--', label='Default')
plt.plot(data, 'k-', drawstyle='steps-post', label='steps-post')
plt.legend(loc='best')
```
### Ticks, labels, and legends
#### Setting the title, axis labels, ticks, and ticklabels
```
fig = plt.figure(); ax = fig.add_subplot(1, 1, 1)
ax.plot(randn(1000).cumsum())
ticks = ax.set_xticks([0, 250, 500, 750, 1000])
labels = ax.set_xticklabels(['one', 'two', 'three', 'four', 'five'],
rotation=30, fontsize='small')
ax.set_title('My first matplotlib plot')
ax.set_xlabel('Stages')
```
#### Adding legends
```
#Random Walk for 3 times
fig = plt.figure(); ax = fig.add_subplot(1, 1, 1)
ax.plot(randn(1000).cumsum(), 'k', label='one')
ax.plot(randn(1000).cumsum(), 'k--', label='two')
ax.plot(randn(1000).cumsum(), 'k.', label='three')
ax.legend(loc='best') # choose the best location for the legend location
```
### Annotations and drawing on a subplot
```
'''for the error, it is because that the lack of local data file'''
from datetime import datetime
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
data = pd.read_csv('ch08/spx.csv', index_col=0, parse_dates=True)
spx = data['SPX']
spx.plot(ax=ax, style='k-')
crisis_data = [
(datetime(2007, 10, 11), 'Peak of bull market'),
(datetime(2008, 3, 12), 'Bear Stearns Fails'),
(datetime(2008, 9, 15), 'Lehman Bankruptcy')
]
for date, label in crisis_data:
ax.annotate(label, xy=(date, spx.asof(date) + 50),
xytext=(date, spx.asof(date) + 200),
arrowprops=dict(facecolor='black'),
horizontalalignment='left', verticalalignment='top')
# Zoom in on 2007-2010
ax.set_xlim(['1/1/2007', '1/1/2011'])
ax.set_ylim([600, 1800])
ax.set_title('Important dates in 2008-2009 financial crisis')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
rect = plt.Rectangle((0.2, 0.75), 0.4, 0.15, color='k', alpha=0.3)
circ = plt.Circle((0.7, 0.2), 0.15, color='b', alpha=0.3)
pgon = plt.Polygon([[0.15, 0.15], [0.35, 0.4], [0.2, 0.6]],
color='g', alpha=0.5)
ax.add_patch(rect)
ax.add_patch(circ)
ax.add_patch(pgon)
```
### Saving plots to file
```
from io import BytesIO
buffer = BytesIO()
plt.savefig(buffer)
plot_data = buffer.getvalue()
```
### matplotlib configuration
```
plt.rc('figure', figsize=(10, 10))
```
## Plotting functions in pandas
### Line plots
```
plt.close('all')
s = Series(np.random.randn(10).cumsum(), index=np.arange(0, 100, 10))
s.plot()
df = DataFrame(np.random.randn(10, 4).cumsum(0),
columns=['A', 'B', 'C', 'D'],
index=np.arange(0, 100, 10))
df.plot()
```
### Bar plots
```
fig, axes = plt.subplots(2, 1)
data = Series(np.random.rand(16), index=list('abcdefghijklmnop'))
data.plot(kind='bar', ax=axes[0], color='k', alpha=0.7)
data.plot(kind='barh', ax=axes[1], color='k', alpha=0.7)
df = DataFrame(np.random.rand(6, 4),
index=['one', 'two', 'three', 'four', 'five', 'six'],
columns=pd.Index(['A', 'B', 'C', 'D'], name='Genus'))
df
df.plot(kind='bar')
#try a different plot kind, and with stacked
df.plot(kind='barh', stacked=True, alpha=0.5)
```
### Histograms and density plots
```
comp1 = np.random.normal(0, 1, size=200)   # N(0, 1)
comp2 = np.random.normal(10, 2, size=200)  # N(10, 4)
values = Series(np.concatenate([comp1, comp2]))
# Fix: hist's 'normed' keyword was deprecated in matplotlib 2.1 and removed
# in 3.1; 'density' is the equivalent normalisation flag.
values.hist(bins=100, alpha=0.3, color='k', density=True)
values.plot(kind='kde', style='k--')
```
## Plotting Maps: Visualizing Haiti Earthquake Crisis data
```
def to_cat_list(catstr):
    """Split a comma-separated category string into stripped, non-empty tokens."""
    tokens = (piece.strip() for piece in catstr.split(','))
    return [token for token in tokens if token]
def get_all_categories(cat_series):
    """Return the sorted union of all categories appearing in *cat_series*."""
    cat_sets = (set(to_cat_list(entry)) for entry in cat_series)
    # Fix: set().union(*...) tolerates an empty series, whereas the original
    # unbound set.union(*cat_sets) raised TypeError when given no arguments.
    return sorted(set().union(*cat_sets))
def get_english(cat):
    """Split a 'code. name' category into (code, name), keeping only English.

    Bilingual entries look like 'french | english'; only the English half of
    the name is returned. Assumes exactly one '.' separates code from name.
    """
    code, names = cat.split('.')
    if '|' in names:
        names = names.split(' | ')[1]
    return code, names.strip()
get_english('2. Urgences logistiques | Vital Lines')
```
**The rest of the code is left blank here, because it requires loading local data files, but the basic idea is similar to the previous part.**
| github_jupyter |
```
#Importing openCV
import cv2
#Displaying image
image = cv2.imread('test_image.jpg')
cv2.imshow('input_image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
### Converting the image to grayscale
```
import cv2
import numpy as np
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
gray_conversion= cv2.cvtColor(lanelines_image, cv2.COLOR_RGB2GRAY)
#Displaying grayscale image
cv2.imshow('input_image', gray_conversion)
cv2.waitKey(0)
cv2.destroyAllWindows()
#Smoothing the image
import cv2
import numpy as np
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
gray_conversion= cv2.cvtColor(lanelines_image, cv2.COLOR_RGB2GRAY)
blur_conversion = cv2.GaussianBlur(gray_conversion, (5,5),0)
cv2.imshow('input_image', blur_conversion)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
### Canny edge detection
```
import cv2
import numpy as np
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
gray_conversion= cv2.cvtColor(lanelines_image, cv2.COLOR_RGB2GRAY)
blur_conversion = cv2.GaussianBlur(gray_conversion, (5,5),0)
canny_conversion = cv2.Canny(blur_conversion, 50,155)
cv2.imshow('input_image', canny_conversion)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
### Masking the region of interest
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny_edge(image):
    """Return the Canny edge map of *image*: grayscale, 5x5 Gaussian blur, then Canny with thresholds 50/150."""
    gray_conversion= cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur_conversion = cv2.GaussianBlur(gray_conversion, (5,5),0)
    canny_conversion = cv2.Canny(blur_conversion, 50,150)
    return canny_conversion
def reg_of_interest(image):
    """Return a filled triangular mask shaped like *image*.

    Note: this first version returns only the mask for display; the later
    versions apply it with bitwise_and.
    """
    Image_height = image.shape[0]
    # Triangle roughly covering the lane area; the vertex values assume a
    # frame of about 1280x720 -- TODO confirm against the test image size.
    polygons = np.array([[(200, Image_height), (1100, Image_height), (550, 250)]])
    image_mask = np.zeros_like(image)
    cv2.fillPoly(image_mask, polygons, 255)
    return image_mask
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
canny_conversion = canny_edge(lanelines_image)
cv2.imshow('result', reg_of_interest(canny_conversion))
cv2.waitKey(0)
cv2.destroyAllWindows()
# Applying bitwise_and
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny_edge(image):
gray_conversion= cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blur_conversion = cv2.GaussianBlur(gray_conversion, (5,5),0)
canny_conversion = cv2.Canny(blur_conversion, 50,150)
return canny_conversion
def reg_of_interest(image):
image_height = image.shape[0]
polygons = np.array([[(200, image_height), (1100, image_height), (551, 250)]])
image_mask = np.zeros_like(image)
cv2.fillPoly(image_mask, polygons, 255)
masking_image = cv2.bitwise_and(image,image_mask)
return masking_image
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
canny_conversion = canny_edge(lanelines_image)
cropped_image = reg_of_interest(canny_conversion)
cv2.imshow('result', cropped_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#Applying the Hough transform
import cv2
import numpy as np
import matplotlib.pyplot as plt
def canny_edge(image):
    """Return the Canny edge map: grayscale -> 5x5 Gaussian blur -> Canny(50, 150).

    Fix: this definition was originally misspelled 'canny_egde'; the pipeline
    below calls canny_edge, so the misspelled definition was never used and
    the call only worked because an earlier cell had defined canny_edge with
    the same body.
    """
    gray_conversion = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur_conversion = cv2.GaussianBlur(gray_conversion, (5, 5), 0)
    canny_conversion = cv2.Canny(blur_conversion, 50, 150)
    return canny_conversion
def reg_of_interest(image):
    """Return *image* with everything outside a triangular lane-shaped region blacked out."""
    image_height = image.shape[0]
    # Triangle vertices assume roughly 1280x720 frames -- TODO confirm
    # against the test image resolution.
    polygons = np.array([[(200, image_height), (1100, image_height), (551, 250)]])
    image_mask = np.zeros_like(image)
    cv2.fillPoly(image_mask, polygons, 255)
    # Keep only the pixels inside the filled triangle.
    masking_image = cv2.bitwise_and(image,image_mask)
    return masking_image
def show_lines(image, lines):
    """Draw each detected [X1, Y1, X2, Y2] segment, 10 px thick in (255,0,0), on a black canvas shaped like *image*."""
    lines_image = np.zeros_like(image)
    # HoughLinesP returns None when no segments are found; draw nothing then.
    if lines is not None:
        for line in lines:
            X1, Y1, X2, Y2 = line.reshape(4)
            cv2.line(lines_image, (X1, Y1), (X2, Y2), (255,0,0), 10)
    return lines_image
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
canny_conv = canny_edge(lanelines_image)
cropped_image = reg_of_interest(canny_conv)
lane_lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength= 40, maxLineGap=5)
linelines_image = show_lines(lanelines_image, lane_lines)
cv2.imshow('result', linelines_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#Combining with actual image
image = cv2.imread('test_image.jpg')
lane_image = np.copy(image)
canny = canny_edge(lane_image)
cropped_image = reg_of_interest(canny)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength= 40, maxLineGap=5)
line_image = show_lines(lane_image, lines)
combine_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
cv2.imshow('result', combine_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
### Detect road marking in images
```
#Optimization the detected road markings
import cv2
import numpy as np
import matplotlib.pyplot as plt
def make_coordinates(image, line_parameters):
    """Convert a (slope, intercept) pair into pixel endpoints [x1, y1, x2, y2].

    The segment runs from the bottom of the image (y1 = height) up to 3/5 of
    the height (y2), with x positions solved from x = (y - intercept) / slope.
    """
    try:
        slope, intercept = line_parameters
    except TypeError:
        # Fix: when one lane side has no detected segments, np.average over
        # the empty fit list yields a bare NaN float that cannot be unpacked;
        # fall back to a near-vertical dummy line instead of crashing (same
        # guard as the video-pipeline version of this function).
        slope, intercept = 0.001, 0
    y1 = image.shape[0]
    y2 = int(y1 * (3 / 5))
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])
def average_slope_intercept(image, lines):
    """Average all Hough segments into one left and one right lane line.

    Segments with negative slope are assigned to the left lane, the rest to
    the right (image y grows downward, so the left lane slopes negatively).
    Returns a 2 x 4 array of [x1, y1, x2, y2] endpoint rows.
    """
    left_fit = []
    right_fit = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        # Degree-1 polyfit through the two endpoints gives (slope, intercept).
        parameter = np.polyfit((x1, x2), (y1, y2), 1)
        slope = parameter[0]
        intercept = parameter[1]
        if slope < 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    # NOTE(review): if either fit list is empty, np.average returns NaN and
    # make_coordinates cannot unpack it -- confirm the caller guarantees
    # segments on both lane sides.
    left_fit_average =np.average(left_fit, axis=0)
    right_fit_average = np.average(right_fit, axis =0)
    left_line =make_coordinates(image, left_fit_average)
    right_line = make_coordinates(image, right_fit_average)
    return np.array([left_line, right_line])
def canny_edge(image):
    """Return the Canny edge map: grayscale -> 5x5 Gaussian blur -> Canny(50, 150).

    Fix: the original assigned to 'gray_coversion' (typo) but then blurred
    'gray_conversion', which only avoided a NameError if a global of that
    name already existed from an earlier cell.
    """
    gray_conversion = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur_conversion = cv2.GaussianBlur(gray_conversion, (5, 5), 0)
    canny_conversion = cv2.Canny(blur_conversion, 50, 150)
    return canny_conversion
def show_lines(image, lines):
lanelines_image = np.zeros_like(image)
if lines is not None:
for line in lines:
X1, Y1, X2, Y2 = line.reshape(4)
cv2.line(lanelines_image, (X1, Y1), (X2, Y2), (255,0,0), 10)
return lanelines_image
def reg_of_interest(image):
image_height = image.shape[0]
polygons = np.array([[(200, image_height), (1100, image_height), (551, 250)]])
image_mask = np.zeros_like(image)
cv2.fillPoly(image_mask, polygons, 255)
masking_image = cv2.bitwise_and(image,image_mask)
return masking_image
image = cv2.imread('test_image.jpg')
lanelines_image = np.copy(image)
canny_image = canny_edge(lanelines_image)
cropped_image = reg_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength= 40, maxLineGap=5)
averaged_lines = average_slope_intercept(lanelines_image, lines)
line_image = show_lines(lanelines_image, averaged_lines)
combine_image = cv2.addWeighted(lanelines_image, 0.8, line_image, 1, 1)
cv2.imshow('result', combine_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
### Detecting road markings in video
```
#Detecting road markings in video
import cv2
import numpy as np
import matplotlib.pyplot as plt
def make_coordinates(image, line_parameters):
    """Convert (slope, intercept) into pixel endpoints [x1, y1, x2, y2] spanning the bottom 2/5 of the frame."""
    try:
        slope, intercept = line_parameters
    except TypeError:
        # np.average of an empty fit list yields a bare NaN float, which
        # cannot be unpacked into two values; substitute a near-vertical
        # dummy line so the video loop keeps running.
        slope, intercept = 0.001,0
    #slope, intercept = line_parameters
    y1 = image.shape[0]
    y2 = int(y1*(3/5))
    x1 = int((y1- intercept)/slope)
    x2 = int((y2 - intercept)/slope)
    return np.array([x1, y1, x2, y2])
def average_slope_intercept(image, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
parameter = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameter[0]
intercept = parameter[1]
if slope < 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
left_fit_average =np.average(left_fit, axis=0)
right_fit_average = np.average(right_fit, axis =0)
left_line =make_coordinates(image, left_fit_average)
right_line = make_coordinates(image, right_fit_average)
return np.array([left_line, right_line])
def canny_edge(image):
    """Return the Canny edge map of *image*: grayscale, 5x5 Gaussian blur, then Canny with thresholds 50/150."""
    gray_conversion= cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blur_conversion = cv2.GaussianBlur(gray_conversion, (5,5),0)
    canny_conversion = cv2.Canny(blur_conversion, 50,150)
    return canny_conversion
def show_lines(image, lines):
    """Draw each [x1, y1, x2, y2] segment, 10 px thick in (255,0,0), on a black canvas shaped like *image*."""
    line_image = np.zeros_like(image)
    # HoughLinesP returns None when no segments are found; draw nothing then.
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            cv2.line(line_image, (x1, y1), (x2, y2), (255,0,0), 10)
    return line_image
def reg_of_interest(image):
    """Return *image* masked to a triangular lane-shaped region of interest."""
    image_height = image.shape[0]
    # Triangle vertices assume roughly 1280x720 frames -- TODO confirm
    # against the input video resolution.
    polygons = np.array([[(200, image_height), (1100, image_height), (550, 250)]])
    image_mask = np.zeros_like(image)
    cv2.fillPoly(image_mask, polygons, 255)
    # Keep only the pixels inside the filled triangle.
    masking_image = cv2.bitwise_and(image,image_mask)
    return masking_image
cap = cv2.VideoCapture("test2.mp4")
while(cap.isOpened()):
_, frame = cap.read()
canny_image = canny_edge(frame)
cropped_canny = reg_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_canny, 2, np.pi/180, 100, np.array([]), minLineLength=40,maxLineGap=5)
averaged_lines = average_slope_intercept(frame, lines)
line_image = show_lines(frame, averaged_lines)
combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
cv2.imshow("result", combo_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
```
| github_jupyter |
## Linear Regression
Linear regression may be a good model for some of the data but there is a good chance that it will not model the spatial data (X, Y) well. This would require something such as decision trees or a neural network. Regardless, we will see how the linear regression goes. First we will import the relevant libraries and data.
```
### importing libraries ###
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import cross_val_score
### importing data ###
# features
features_train = pd.read_csv('data/features_train.csv', index_col = 0)
features_test = pd.read_csv('data/features_test.csv', index_col = 0)
# target
target_train = pd.read_csv('data/target_train.csv', index_col = 0)
target_test = pd.read_csv('data/target_test.csv', index_col = 0)
```
Let's first check to see how a linear regression model performs without any tuning.
```
### linear regression function ###
def run_model():
    """Fit LinearRegression on the module-level train split and print r2/RMSE for train and test sets."""
    model = LinearRegression().fit(features_train, target_train)
    pred_train = model.predict(features_train)
    pred_test = model.predict(features_test)
    # NOTE(review): mean_squared_error(..., squared=False) yields RMSE, but
    # the 'squared' keyword was deprecated in scikit-learn 1.4 and removed in
    # 1.6 (root_mean_squared_error replaces it) -- confirm the pinned version.
    print('For the train set the r2 is:', r2_score(target_train, pred_train),
          'and the RMSE is:', mean_squared_error(target_train, pred_train, squared = False))
    print('For the test set the r2 is:', r2_score(target_test, pred_test),
          'and the RMSE is:', mean_squared_error(target_test, pred_test, squared = False))
run_model()
```
The r2 and RMSE values are quite poor for the base dataset. Let's see if we can improve this somehow. First let's look at correlations.
```
### checking correlations ###
# correlation matrix function
def create_corr():
    """Plot a lower-triangle heat map of correlations between the training features and the target."""
    # creating correlation matrix from the joined train features + target
    train_joined = features_train.join(target_train)
    corr_mat = train_joined.corr()
    # Boolean mask hiding the redundant upper triangle of the symmetric matrix.
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the supported spelling.
    mask = np.zeros_like(corr_mat, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # plotting heat map
    plt.figure(figsize=(30, 15))
    sns.heatmap(corr_mat, cmap='coolwarm', center=0, annot=True, linewidths=3, mask=mask)
    # NOTE(review): plt.ylim receives three positional arguments; the third is
    # silently consumed as set_ylim's 'emit' flag -- probably only the first
    # two were intended.
    plt.ylim(len(train_joined.columns), -0.5, -0.5)
    plt.show()
```
We see that area does not have a strong correlation with any variable. We do see potential multicollinearity however with X and Y, DC and both DMC and is_summer, etc. We will deal with multicollinearity later but for now, let's see if we can improve correlations with area. First, we will visualize the relationships between the features and target to see if there is a nonlinear relationship.
```
### scatterplots ###
# subplots
fig, ax = plt.subplots(12, figsize = (10, 30))
# initial index
index = 0
# scatterplots
for column in features_test.columns:
ax[index].scatter(x = features_test[column], y = target_test)
ax[index].title.set_text(column)
index += 1
plt.tight_layout()
```
From what we can see, it appears that wind may have a quadratic relationship. Rain seems to have only 4 values that are different from the majority. We will transform wind data to try to better capture its relationship with area.
```
### accounting for quadratic relationship ###
features_train['wind'] = features_train['wind'].apply(lambda x: (x + 3) ** 0.5)
features_test['wind'] = features_test['wind'].apply(lambda x: (x + 3) ** 0.5)
create_corr()
```
We see that the correlation between wind and area went from 0.065 to 0.068 which is insignificant but better. Let's check to see how the model does (even though the difference is so small that it shouldn't make a noticeable difference).
```
### testing new model ###
run_model()
```
The model metrics are slightly worse but likely to be within error. Let's now account for multicollinearity by checking variance inflation factors (VIF).
```
# variation inflation factor
# function for VIF
def check_vif():
    """Print the variance inflation factor (VIF) for every training feature.

    Rule of thumb used in this notebook: VIF > 5 signals that a feature is
    collinear with the others.
    """
    vif = pd.Series([variance_inflation_factor(features_train.values, i)
                     for i in range(features_train.shape[1])],
                    index = features_train.columns)
    print(vif)
check_vif()
```
The rule of thumb is that if the VIF of a feature is >5, it suffers from multicollinearity. However, all values are <5 so we should keep them with regards to multicollinearity. Now, in order to improve the metrics for the model, we will start dropping columns that are below a certain threshold of correlation. Initially, we will set the minimum correlation to 0.05.
```
### dropping columns ###
# columns we will keep
model_columns = ['X', 'is_weekend', 'FFMC', 'DMC', 'ISI', 'wind']
# saving old data just incase
features_train_save = features_train.copy()
features_test_save = features_test.copy()
# revamping
features_train = features_train.loc[:, model_columns]
features_test = features_test.loc[: , model_columns]
### checking new model ###
run_model()
```
The metrics have improved slightly. Let's increase the threshold again to 0.7 this time.
```
### checking correlations ###
create_corr()
### dropping columns ###
# columns we will keep
model_columns = ['X', 'is_weekend', 'FFMC']
# saving old data just incase
features_train_save = features_train.copy()
features_test_save = features_test.copy()
# revamping
features_train = features_train.loc[:, model_columns]
features_test = features_test.loc[: , model_columns]
### checking new model ###
run_model()
```
Dropping the columns did not improve the model. Our best model performance was the one that kept X, is_weekend, FFMC, DMC, ISI, and wind. We will revert to the previous model and perform a k-fold cross validation on it to make sure our model is an accurate fit.
```
### using best model
# recovering features
features_train = features_train_save
features_test = features_test_save
# creating model
model = LinearRegression().fit(features_train, target_train)
#5 fold cross validation
cross_val = cross_val_score(model, features_test, target_test, cv = 5)
print('The cross val mean is:', np.mean(cross_val), 'and variance is:', np.std(cross_val))
```
The variance is quite high compared to the accuracy. Either way, the model having a negative r2 score means that it is worse than a horizontal line for predictions, so we will leave it here. We don't believe that we can significantly improve the linear regression model, so we will not be checking assumptions such as homoscedasticity.
## Conclusion
Linear regression is not a good model for predicting forest fire area burnt. We will need to use a different model.
* r2 = -0.0239
* rmse = 1.0119
| github_jupyter |
# _MiSTree Tutorial 2_ - Minimum Spanning Trees
## (1) _Basic Usage_
To construct the minimum spanning tree (MST) from a data set we will usually
interact with the ``get_mst`` class. Unless you need to do something more sophisticated
with the MST you will not need to use the internal functions that are used by the class.
To initiate the class we will run:
```
from __future__ import print_function
import numpy as np
import matplotlib.pylab as plt
import mistree as mist
```
### (1.1) _Initialising_
#### _In 2D_
```
x = np.random.random_sample(1000)
y = np.random.random_sample(1000)
mst = mist.GetMST(x=x, y=y)
```
#### _In 3D_
```
x = np.random.random_sample(1000)
y = np.random.random_sample(1000)
z = np.random.random_sample(1000)
mst = mist.GetMST(x=x, y=y, z=z)
```
#### _In Tomographic Coordinates_
We generate a uniform random distribution on the sphere.
```
phi = 360.*np.random.random_sample(1000)
theta = np.rad2deg(np.arccos(1.-2.*np.random.random_sample(1000)))
mst = mist.GetMST(phi=phi, theta=theta)
```
#### _In Tomographic Celestial Coordinates_
Once again using a uniform random distribution on the sphere.
```
ra = 360.*np.random.random_sample(1000)
dec = np.rad2deg(np.arccos(1.-2.*np.random.random_sample(1000))) - 90.
mst = mist.GetMST(ra=ra, dec=dec)
```
#### _In Spherical Polar Coordinates_
This generates a uniform distribution of points with a sphere of radius 10.
```
phi = 360.*np.random.random_sample(1000)
theta = np.rad2deg(np.arccos(1.-2.*np.random.random_sample(1000)))
r = 10.*(np.random.random_sample(1000))**(1./3.)
mst = mist.GetMST(phi=phi, theta=theta, r=r)
```
#### _In Spherical Celestial Coordinates_
This generates a uniform distribution of points with a sphere of radius 10.
```
ra = 360.*np.random.random_sample(1000)
dec = np.rad2deg(np.arccos(1.-2.*np.random.random_sample(1000))) - 90.
r = 10.*np.random.random_sample(1000)**(1./3.)
mst = mist.GetMST(ra=ra, dec=dec, r=r)
```
### (1.2) _Measure MST statistics_
And to construct the MST and output the MST statistics: degree (d), edge length (l),
branch length (b) and branch shape (s):
```
x = np.random.random_sample(1000)
y = np.random.random_sample(1000)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
```
If you would also like the edge (``l_index``) and branch index (``b_index``),
this can be done in two ways:
```
d, l, b, s, l_index, b_index = mst.get_stats(include_index=True)
# alternatively:
d, l, b, s = mst.get_stats()
l_index = mst.edge_index
b_index = mst.branch_index
```
The edge index (``l_index``) is a 2 dimensional array, indicating the pair of nodes
that make up each edge. The branch index is a list of the member edges in each branch.
### (1.3) _Plotting the MST_
#### _Plotting Edges_
To plot the MST, i.e. the nodes and edges you can use the following piece of python code
where we plot a set of 2D random points:
```
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s, l_index, b_index = mst.get_stats(include_index=True)
plt.figure(figsize=(7., 7.))
# plotting nodes:
plt.scatter(x, y, s=10, color='r')
# plotting MST edges:
plt.plot([x[l_index[0]], x[l_index[1]]],
[y[l_index[0]], y[l_index[1]]],
color='k')
plt.xlim(0., 1.)
plt.ylim(0., 1.)
plt.xlabel(r'$X$', size=16)
plt.ylabel(r'$Y$', size=16)
plt.tight_layout()
plt.show()
```
#### _Plotting Branches_
If you would also like to plot branches then you can use the following piece of python code:
```
plt.figure(figsize=(7., 7.))
# plotting nodes:
plt.scatter(x, y, s=10, color='r')
# plotting branches:
for i in range(0, len(b_index)):
plt.plot([x[l_index[0][b_index[i][0]]], x[l_index[1][b_index[i][0]]]],
[y[l_index[0][b_index[i][0]]], y[l_index[1][b_index[i][0]]]],
color='C0', linestyle=':')
plt.plot([x[l_index[0][b_index[i][1:-1]]], x[l_index[1][b_index[i][1:-1]]]],
[y[l_index[0][b_index[i][1:-1]]], y[l_index[1][b_index[i][1:-1]]]],
color='C0')
plt.plot([x[l_index[0][b_index[i][-1]]], x[l_index[1][b_index[i][-1]]]],
[y[l_index[0][b_index[i][-1]]], y[l_index[1][b_index[i][-1]]]],
color='C0', linestyle=':')
# ploting MST edges:
plt.plot([x[l_index[0]], x[l_index[1]]],
[y[l_index[0]], y[l_index[1]]],
color='grey', linewidth=2, alpha=0.25)
plt.plot([], [], color='C0', label=r'$Branch$ $Mid$')
plt.plot([], [], color='C0', label=r'$Branch$ $End$', linestyle=':')
plt.plot([], [], color='grey', alpha=0.25, label=r'$MST$ $Edges$')
plt.xlim(0., 1.)
plt.ylim(0., 1.)
plt.xlabel(r'$X$', size=16)
plt.ylabel(r'$Y$', size=16)
plt.legend(loc='best')
plt.tight_layout()
plt.show()
```
## (2) _Binning and Plotting_
### (2.1) _Quick Bin and Plot_
A very simple plot of the MST summary statistics can be generated using:
```
x = np.random.random_sample(1000)
y = np.random.random_sample(1000)
z = np.random.random_sample(1000)
mst = mist.GetMST(x=x, y=y, z=z)
d, l, b, s = mst.get_stats()
# begins by binning the data and storing this in a dictionary.
hmst = mist.HistMST()
hmst.setup()
mst_dict = hmst.get_hist(d, l, b, s)
# plotting which takes as input the dictionary created before.
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot()
```
The first ``HistMST`` class bins the data and stores it as a dictionary and the
``PlotHistMST`` class is used to make the plot.
### (2.1) _Binning_
Once we have created the data set we need to bin the data. This is done by first
initialising the ``HistMST`` class and then setting it up. The most simple case
(using the default settings) is shown below.
```
hmst = mist.HistMST()
hmst.setup()
```
We can make the following changes:
```
# to bin in log_10(l) and log_10(b) rather than just l and b:
hmst.setup(uselog=True)
# to bin using s rather than sqrt(1-s)
hmst.setup(use_sqrt_s=False)
# to output the unnormalised histograms (i.e. just counts)
hmst.setup(usenorm=False)
# to change the range of the binning, the number of bins, etc:
# for the degree, although this is rarely necessary, please ensure the minimum
# and maximum are half integers and the number of bins is equal to maximum-minimum.
hmst.setup(d_min=0.5, d_max=6.5, num_d_bins=6) # these are the default values.
# for edge lengths, note the default values are l_min=0., l_max=1.05*l.max()
# and ``num_l_bins=100``.
hmst.setup(l_min=0., l_max=10., num_l_bins=100)
# for edge lengths, note the default value are b_min=0. and b_max=1.05*b.max()
# and ``num_b_bins=100``.
hmst.setup(b_min=0., b_max=10., num_b_bins=100)
# for branch shape in either projections the range can be altered by changing the following,
# however it will rarely be necessary to change from the default values of s_min=0 and s_max=1.,
# but you may want to alter the binning (default is 50).
hmst.setup(s_min=0., s_max=1., num_s_bins=50)
# if you are instead using $log_{10}l$ and $log_{10}b$ then you would specify the range
# by using the following but note the binning still uses num_l_bins and num_b_bins.
hmst.setup(logl_min=-2., logl_max=4., logb_min=-1, logb_max=5.)
```
Once this is done we can actually pass the MST statistics to the class and create a dictionary
of the binned statistics:
```
hmst = mist.HistMST()
hmst.setup(uselog=True)
mst_dict = hmst.get_hist(d, l, b, s)
```
The dictionary created is stored with the following entries:
```
print(mst_dict.keys())
```
- ``uselog`` : stores whether the bins for l and b are in logs.
- ``use_sqrt_s`` : stores whether the bins for s are in s or sqrt(1-s)
- ``usenorm`` : stores whether the histograms are normalised.
- ``isgroup`` : stores whether the histograms come from a group of histograms (discussed later)
- ``x_d`` : bin centres for degree
- ``y_d`` : bin heights for degree
- ``x_l`` : bin centres for edge length
- ``y_l`` : bin heights for edge length
- ``x_b`` : bin centres for branch length
- ``y_b`` : bin heights for branch length
- ``x_s`` : bin centres for branch shape
- ``y_s`` : bin heights for branch shape
Finally, if we want to instead store the ensemble mean and standard deviation of a group of MSTs we would
add the individual MST to ``HistMST`` class in the following way:
```
hmst = mist.HistMST()
hmst.setup(uselog=True)
hmst.start_group() # this tells HistMST to store the individual binned MST statistics
for i in range(0, 10):
# Read or measure MST statistics, we will use the default levy flight distribution here
x, y, z = mist.get_levy_flight(50000)
mst = mist.GetMST(x=x, y=y, z=z)
d, l, b, s = mst.get_stats()
# we use it just as we did before, where the outputted dictionary is for that single
# realisation
mst_dict = hmst.get_hist(d, l, b, s)
print(i+1, '/ 10')
# to output the mean and standard deviation of the ensemble histograms.
mst_dict_group = hmst.end_group()
# you must use hmst.start_group() to start collecting MST statistics from another group
# otherwise this will continue collecting histograms for the current group
```
Similarly to before the dictionary contains many of the same elements with some additional ones.
```
print(mst_dict_group.keys())
```
- ``y_d_std`` : standard deviation for the bin heights for degree
- ``y_l_std`` : standard deviation for the bin heights for edge length
- ``y_b_std`` : standard deviation for the bin heights for branch length
- ``y_s_std`` : standard deviation for the bin heights for branch shape
This makes the assumption that the counts follow a Gaussian distribution, since these
are counts this actually follows a discrete Poisson distribution but for large values
a Gaussian is an appropriate approximation (usually greater than 50 should be okay).
This is important to consider if you use these summary statistics in regions where
the counts are small.
### (2.2) _Plotting_
You can choose to use this as well or use the default matplotlib fonts.
Once we have the binned MST dictionary we can plot it very simply using ``PlotHistMST`` class:
```
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot()
```
To alter how the plot looks we can alter the following:
```
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict, color='Dodgerblue', linewidth=2., linestyle='--', alpha=0.8,
label='Levy Flight')
pmst.plot()
```
To change from the default box binned plots to smooth lines (excluding degree):
```
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(usebox=False)
```
Comparing randoms points, a Levy-Flight distribution and adjusted Levy-Flight distribution:
```
# We first create a random distribution
x_r = 75.*np.random.random_sample(50000)
y_r = 75.*np.random.random_sample(50000)
z_r = 75.*np.random.random_sample(50000)
# a levy flight distribution
x_lf, y_lf, z_lf = mist.get_levy_flight(50000)
# an adjusted levy flight distribution
x_alf, y_alf, z_alf = mist.get_adjusted_levy_flight(50000)
# then construct and measure the MST for each distribution
mst = mist.GetMST(x=x_r, y=y_r, z=z_r)
d_r, l_r, b_r, s_r = mst.get_stats()
mst = mist.GetMST(x=x_lf, y=y_lf, z=z_lf)
d_lf, l_lf, b_lf, s_lf = mst.get_stats()
mst = mist.GetMST(x=x_alf, y=y_alf, z=z_alf)
d_alf, l_alf, b_alf, s_alf = mst.get_stats()
# bin the MST statistics
hmst = mist.HistMST()
hmst.setup(uselog=True)
hist_alf = hmst.get_hist(d_alf, l_alf, b_alf, s_alf)
hist_lf = hmst.get_hist(d_lf, l_lf, b_lf, s_lf)
hist_r = hmst.get_hist(d_r, l_r, b_r, s_r)
# and plot it
pmst = mist.PlotHistMST()
pmst.read_mst(hist_r, label='Randoms')
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf, label='Adjusted Levy Flight')
pmst.plot()
```
We can plot difference subplots:
```
pmst = mist.PlotHistMST()
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf, label='Adjusted Levy Flight')
pmst.plot(usecomp=True)
```
Finally plotting the histogram for a group works in the very same way except we
pass the dictionary of a group. The final plot has 1 sigma error bars.
```
hmst = mist.HistMST()
hmst.setup(uselog=True)
hist_lf = hmst.get_hist(d_lf, l_lf, b_lf, s_lf)
hmst.start_group()
for i in range(0, 10):
x_alf, y_alf, z_alf = mist.get_adjusted_levy_flight(50000)
mst = mist.GetMST(x=x_alf, y=y_alf, z=z_alf)
d_alf, l_alf, b_alf, s_alf = mst.get_stats()
_hist_alf = hmst.get_hist(d_alf, l_alf, b_alf, s_alf)
print(i+1, '/ 10')
hist_alf_group = hmst.end_group()
pmst = mist.PlotHistMST()
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf_group, label='Adjusted Levy Flight')
pmst.plot(usecomp=True)
```
## (3) _Advanced Usage_
### (3.1) _k Nearest Neighbours_
The k-nearest neighbour graph is a spanning graph which is passed on to the
``scipy`` kruskal algorithm. The actual graph is constructed using the ``scikit-learn``
``kneighbors_graph`` and by default will include the nearest 20 neighbours to
each node. We can specify the number of nearest neighbours (we will set this to 30)
in the following way:
```
x = np.random.random_sample(1000)
y = np.random.random_sample(1000)
mst = mist.GetMST(x=x, y=y) # Assuming our input data set is 2D.
mst.define_k_neighbours(30)
d, l, b, s = mst.get_stats()
# or directly:
mst = mist.GetMST(x=x, y=y) # Assuming our input data set is 2D.
d, l, b, s = mst.get_stats(k_neighbours=30)
```
Note: changing ``k`` to larger values will result in longer computation time to construct
the MST.
### (3.2) _Scale Cuts_
In cosmological data sets we often need to remove small scales due to numerical
simulation or observational limitations. To remove this we carry out
the following:
```
x = np.random.random_sample(100000)
y = np.random.random_sample(100000)
mst = mist.GetMST(x=x, y=y)
mst.scale_cut(0.002)
d, l, b, s = mst.get_stats()
# or directly:
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats(scale_cut_length=0.002)
```
| github_jupyter |
<h1 style="font-size:35px;
color:black;
">Lab 4 Iterative Phase Estimation Algorithm</h1>
Prerequisite
- [Ch.3.5 Quantum Fourier Transform](https://qiskit.org/textbook/ch-algorithms/quantum-fourier-transform.html)
- [Ch.3.6 Quantum Phase Estimation](https://qiskit.org/textbook/ch-algorithms/quantum-phase-estimation.html)
- [Ch.1.4 Single Qubit Gates](https://qiskit.org/textbook/ch-states/single-qubit-gates.html)
- [Summary of Quantum Operations](https://qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html)
Other relevant materials
- [Device backend noise model simulations](https://qiskit.org/documentation/tutorials/simulators/2_device_noise_simulation.html)
- [Hellinger fidelity](https://qiskit.org/documentation/stubs/qiskit.quantum_info.hellinger_fidelity.html#qiskit.quantum_info.hellinger_fidelity)
```
from qiskit import *
import numpy as np
from qiskit.visualization import plot_histogram
import qiskit.tools.jupyter
import matplotlib.pyplot as plt
sim = Aer.get_backend('qasm_simulator')
```
<h2 style="font-size:24px;">Part 1: Implementation of Iterative Phase Estimation algorithm</h2>
<br>
<div style="background: #E8E7EB; border-radius: 5px;
-moz-border-radius: 5px;">
<p style="background: #800080;
border-radius: 5px 5px 0px 0px;
padding: 10px 0px 10px 10px;
font-size:18px;
color:white;
"><b>Goal</b></p>
<p style=" padding: 0px 0px 10px 10px;
font-size:16px;">Estimate a phase value on a system of two qubits through Iterative Phase Estimation (IPE) algorithm.</p>
</div>
Having gone through previous labs, you should have noticed that the "length" of a quantum circuit is the primary factor when determining the magnitude of the errors in the resulting output distribution; quantum circuits with greater depth have decreased fidelity. Therefore, implementing algorithms based on shallow depth circuits is of great importance in near-term quantum computing. In Lab 4, we learn one such algorithm for estimating quantum phase, called the Iterative Phase Estimation (IPE) algorithm, which requires only a single ancillary qubit and evaluates the phase through a repetitive process.
<h3 style="font-size: 20px">1. Understand a circuit with non-unitary operations.</h3>
Before we learn how the IPE algorithm works, lets review reset and conditional operations in Qiskit that go into building a IPE circuit. Read the Qiskit tutorial [here](https://qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html) ( go to `Non-unitary operations` section ) to understand how to build a circuit that performs conditional operations and reset.
<h4 style="font-size: 17px">📓Step A. Run the following cell and predict the outcome of the circuit. </h4>
```
q = QuantumRegister(1)
c = ClassicalRegister(2)
qc0 = QuantumCircuit(q, c)
qc0.h(q[0])
qc0.measure(q[0], c[0])
qc0.reset(q[0])
qc0.h(q[0])
qc0.p(np.pi/3, q[0]).c_if(c,1)
qc0.h(q[0])
qc0.measure(q[0],c[1])
qc0.draw()
```
Execute the cell below to see the result.
```
count0 = execute(qc0, sim).result().get_counts()
plot_histogram(count0)
```
<h4 style="font-size: 17px">📓Step B. Complete the rest of the circuit so that the ancillar qubit ( top qubit ) after the reset would be in the state $\frac{1}{\sqrt2}(|0\rangle + e^{-i\frac{\pi}{2}}|1\rangle)$ if the value of the classical bit is one or remains zero state otherwise.</h4>
```
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc1 = QuantumCircuit(q,c)
qc1.h(q[0])
qc1.x(q[1])
qc1.cp(np.pi/5, q[0], q[1])
qc1.measure(q[0], c[0])
qc1.reset(q[0])
###### your code goes here #####
##########################
qc1.h(q[0])
qc1.measure(q[0],c[1])
qc1.draw()
```
Running the following cell to display the result.
```
count1 = execute(qc1, sim).result().get_counts()
plot_histogram(count1)
```
<h3 style="font-size: 20px">2. Iterative Phase Estimation (IPE) Algorithm.</h3>
The Quantum Phase Estimation (QPE) circuit that we have learned and used previously is limited by the number of qubits necessary for the algorithm’s precision. Every additional qubit has added costs in terms of noise and hardware requirements; noisy results that we obtained from executing the QPE circuit on a real quantum device in Lab 3 would get worse as the number of the qubits on the circuit increases. The IPE algorithm implements quantum phase estimation with only a single ancillary qubit, and the accuracy of the algorithm is restricted by the number of iterations rather than the number of counting qubits. Therefore, IPE circuits are of practical interest and are of foremost importance for near-term quantum computing as QPE is an essential element in many quantum algorithms.
Consider the problem of finding $\varphi$ given $|\Psi\rangle$ and $U$ such that $U |\Psi\rangle = e^{i \phi} | \Psi \rangle$, with $\phi = 2 \pi \varphi$. Let's assume for now that $\varphi$ can be written as $\varphi = \varphi_1/2 + \varphi_2/4 + ... + \varphi_m/2^m = 0.\varphi_1 \varphi_2 ... \varphi_m$, where we have previously defined the notation $0.\varphi_1 \varphi_2 ... \varphi_m$.
Assume that $U$ is a unitary operator acting on one qubit. We therefore need a system of two qubits, $q_0$ and $q_1$, where $q_0$ is the ancillary qubit and the qubit $q_1$ represents the physical system on which $U$ operates. Having them initialized as $q_0 \rightarrow |+\rangle$ and $q_1 \rightarrow |\Psi \rangle$, application of control-U between $q_0$ and $q_1$ $2^t$ times would change the state of $q_0$ to $|0\rangle + e^{i 2 \pi 2^{t} \varphi} | 1 \rangle$. That is, the phase of $U$ has been kicked back into $q_0$ as many times as the control operation has been performed.
Therefore,
for $t=0$, the phase encoded into $q_0$ would be $e^{i 2 \pi 2^{0} \varphi} = e^{i 2 \pi \varphi} = e^{i 2 \pi 0.\varphi_1 \varphi_2 ... \varphi_m}$
and
for $t=1$, the phase would be $e^{i 2 \pi 2^{1} \varphi} = e^{i 2 \pi \varphi_1} e^{i 2 \pi 0.\varphi_2 \varphi_3 ... \varphi_m}$ and
for $t=2$, $e^{i 2 \pi 2^{2} \varphi} = e^{i 2 \pi 2 \varphi_1} e^{i 2 \pi \varphi_2} e^{i 2 \pi 0.\varphi_3 \varphi_4 ... \varphi_m}$
and
for $t=m-1$, $e^{i 2 \pi 2^{m-1} \varphi} = e^{i 2 \pi 2^{m-2} \varphi_1} e^{i 2 \pi 2^{m-3} \varphi_2} ... e^{i 2 \pi 2^{-1} \varphi_m} = e^{i 2 \pi 0.\varphi_m}$.
Note that for the last case with $t=m-1$, the state of $q_0$ is $|0\rangle + e^{i 2 \pi 0.\varphi_m}|1\rangle$; $|+\rangle$ if $\varphi_m = 0$ and $|-\rangle$ if $\varphi_m = 1$ which would produce outcomes $|0\rangle$ and $|1\rangle$ respectively when it gets measured in x-basis.
In `the first step` of the IPE algorithm, we directly measure the least significant bit of the phase $\varphi$, $\varphi_m$, by initializing the 2-qubit registers as described above ( $q_0 \rightarrow |+\rangle$ and $q_1 \rightarrow |\Psi \rangle$ ), performing $2^{m-1}$ control-$U$ operations between the qubits, and measuring $q_0$ in the x-basis.
For `the second step`, we initialize the systems in the same way and apply $2^{m-2}$ control-$U$ operations. The relative phase in $q_0$ after these operations is now $e^{i 2 \pi 0.\varphi_{m-1}\varphi_{m}}= e^{i 2 \pi 0.\varphi_{m-1}} e^{i 2 \pi \varphi_m/4}$.
To extract the phase bit $\varphi_{m-1}$, first perform a phase correction of $\varphi_m /2$, a rotation around the $Z-$axis of angle $-\varphi_m /4$, which results in the state of $q_0$ to be $|0\rangle + e^{i 2 \pi 0.\varphi_{m-1}} | 1 \rangle$. Perform a measurement on $q_0$ in x-basis to obtain the phase bit $\varphi_{m-1}$.
Therefore, the $k$th step of the IPE, getting $\varphi_{m-k+1}$, consists of the register initialization ($q_0$ in $|+\rangle$, $q_1$ in $|\Psi\rangle$), the application of control-$U$ $2^{m-k}$ times, a rotation around $Z$ of angle $\omega_k = -2 \pi 0.0\varphi_{k+1} ... \varphi_m$, and a measurement of $q_0$ in x-basis: a Hadamard transform to $q_0$, and a measurement of $q_0$ in the standard basis. Note that $q_1$ remains in the state $|\Psi\rangle$ throughout the algorithm.
<h3 style="font-size: 20px">3. Estimate the phase of the $T$-gate implementing IPE algorithm.</h3>
Review the `section 2. Example: T-gate` in [Ch.3.8 Quantum Phase Estimation](https://qiskit.org/textbook/ch-algorithms/quantum-phase-estimation.html) and the `section 4. Measuring in Different Bases` in [Ch.1.4 Single Qubit Gates](https://qiskit.org/textbook/ch-states/single-qubit-gates.html)
As we already learned the Qiskit textbook, the phase of a T-gate is exactly expressed using three bits.
<h4 style="font-size: 17px">📓Step A. Obtain the least significant phase bit of the $T$-gate by setting up the circuit <code>T_x3</code> properly and assign the value to the variable <code>x_3</code>.</h4>
In the previous section, `the first step` explains how to construct the circuit to extract the least significant phase bit.
```
q = QuantumRegister(2)
c = ClassicalRegister(1)
T_x3 = QuantumCircuit(q,c)
########## your code goes here #######
##1 Initialization
##2 Apply control-U operator as many times as needed to get the least significant phase bit
##3 measure the anscillar qubit in x-basis
########## Simulate the circuit and assign the output value to the variable 'x_3'
job = execute(T_x3, sim, shots=1, memory=True)
x_3 = int(job.result().get_memory()[0])
```
<h4 style="font-size: 17px">📓Step B. Extract the middle phase bit of the $T$-gate by creating the circuit <code>T_x2</code> with phase correction using <code>x_3</code> value from Step A. Assign the outcome bit to the variable <code>x_2</code>.</h4>
Read the `the second step` in the previous section.
```
q = QuantumRegister(2)
c = ClassicalRegister(1)
T_x2 = QuantumCircuit(q,c)
########### your code goes here ##########
##1 Initialization
##2 phase correction
##3 Apply control-U operator as many times as needed
##4 measure the anscillar qubit in x-basis
######## Simulate the circuit and assign the output value to the variable 'x_2'
job = execute(T_x2, sim, shots=1, memory=True)
x_2 = int(job.result().get_memory()[0])
```
<h4 style="font-size: 17px">📓Step C. Find the most significant phase bit of the $T$-gate and assign it to the variable <code>x_1</code>. </h4>
```
q = QuantumRegister(2)
c = ClassicalRegister(1)
T_x1 = QuantumCircuit(q,c)
########### your code goes here #########
##1 Initialization
##2 phase correction
##3 Apply control-U operator as many times as needed to get the least significant phase bit
##4 measure the anscillar qubit in x-basis
########## Simulate the circuit and assign the output value to the variable 'x_2'
job = execute(T_x1, sim, shots=1, memory=True)
x_1 = int(job.result().get_memory()[0])
```
Therefore, the $T$-gate phase bit that you found is 0.x_1x_2x_3. Run the following cell to check if your answer is correct by comparing your phase bit `x_1x_2x_3` with `001`, the answer in the Qiskit textbook, which corresponds to $\frac{1}{8}$ ( = `0.125`), the phase of the $T$-gate.
```
T_phase_bits = '{}{}{}'.format(x_1, x_2, x_3)
T_phase_bits == '001'
```
<h4 style="font-size: 17px">📓Step D. Construct the full IPE circuit and pass it to the variable <code>qc_T</code> ; Put the all steps that you performed into one circuit utilizing conditional operations and reset. </h4>
Instead of using three separate circuits to extract each phase bit value, build one circuit; perform a reset operation on the ancillary qubit after each bit gets measured into a classical register. Therefore, the circuit requires three classical registers for this example; the least significant bit measured into the classical register c[0] and the most significant bit measured into c[2]. Implement a conditional operator between the ancillary qubit and the classical register for the phase correction.
```
##### your code goes here ######
################
qc_T.draw()
```
<h4 style="font-size: 17px">Step E. Excute the following cell to perform the simulation and display the result.</h4>
```
count0 = execute(qc_T, sim).result().get_counts()
key_new = [str(int(key,2)/2**n) for key in list(count0.keys())]
count1 = dict(zip(key_new, count0.values()))
fig, ax = plt.subplots(1,2)
plot_histogram(count0, ax=ax[0])
plot_histogram(count1, ax=ax[1])
plt.tight_layout()
```
<h2 style="font-size:24px;">Part 2: Comparison between QPE and IPE results in the presence of noise</h2>
<br>
<div style="background: #E8E7EB; border-radius: 5px;
-moz-border-radius: 5px;">
<p style="background: #800080;
border-radius: 5px 5px 0px 0px;
padding: 10px 0px 10px 10px;
font-size:18px;
color:white;
"><b>Goal</b></p>
<p style=" padding: 0px 0px 10px 10px;
font-size:16px;">Understand the importance of implementing shallow circuit algorithms on current noisy quantum computers. </p>
</div>
In Part 2 of Lab 3, we executed a Quantum Phase Estimation (QPE) circuit on a real quantum device. Having recognized the limits of the performance due to noise present in current quantum systems, we utilized several techniques to reduce its influence on the outcome. However, the final result that was obtained, even after all these procedures, is still far from ideal. Here, we implement the Iterative Phase Estimation (IPE) algorithm to overcome the effect of noise in phase estimation to a great extent and compare the result with the QPE outcome.
To investigate the impact of the noise from real quantum system on the outcome, we will perform noisy simulations of IPE circuit employing the Qiskit Aer `noise` module which produces a simplified noise model for an IBM quantum system. To learn more about noisy simulation, read [here](https://qiskit.org/documentation/tutorials/simulators/2_device_noise_simulation.html).
As in Lab 3, we consider to estimate the phase of `p` gate with $\theta = \frac{1}{3}$. Suppose that the accuracy of the estimation that we desire here is same as when the QPE circuit has four counting qubits, which determines the number of iteration and classical registers required for the IPE circuit.
<h4 style="font-size: 17px">📓Step A. How many classical registers is needed? Assign the value to the variable <code>n</code>. </h4>
```
## your answer goes here
n =
```
<h4 style="font-size: 17px">📓Step B. Construct the IPE circuit in the following cell. </h4>
```
q = QuantumRegister(2)
c = ClassicalRegister(n)
IPE = QuantumCircuit(q,c)
########## your code goes here ############
#####################
IPE.draw()
```
<h4 style="font-size: 17px">Step C. Run the cell below to create the QPE circuit for the comparison. </h4>
```
def qft(n):
    """Creates an n-qubit QFT circuit"""
    circuit = QuantumCircuit(n)
    def swap_registers(circuit, n):
        # The rotation pass leaves the output in bit-reversed order;
        # swap qubit i with qubit n-1-i to restore conventional ordering.
        for qubit in range(n//2):
            circuit.swap(qubit, n-qubit-1)
        return circuit
    def qft_rotations(circuit, n):
        """Performs qft on the first n qubits in circuit (without swaps)"""
        if n == 0:
            # Base case: no qubits left to transform.
            return circuit
        n -= 1
        # Hadamard on the current most-significant qubit.
        circuit.h(n)
        for qubit in range(n):
            # Controlled phase rotation; the angle halves as the
            # distance between control and target grows.
            circuit.cp(np.pi/2**(n-qubit), qubit, n)
        # Recurse on the remaining lower-order qubits.
        qft_rotations(circuit, n)
    qft_rotations(circuit, n)
    swap_registers(circuit, n)
    return circuit
# define the parameters
t, psi = 4, 1/3*np.pi*2
# building a circuit
QPE = QuantumCircuit(t+1,t)
QPE.h(range(t))
QPE.x(t)
for idx in range(t):
QPE.cp(psi*2**idx, idx, t)
qft_dag = qft(t).to_gate().inverse()
qft_dag.label = 'QFT+'
QPE.append(qft_dag, range(t))
QPE.measure(range(t), range(t))
QPE.draw()
```
<h4 style="font-size: 17px">📓Step D. Transpile the circuits for the backend <code>ibmq_Athens</code>. </h4>
Run the following cell to check the properties of the backend, `ibmq_Athens`. Pick an initial_layout, and transpile the IPE circuit setting `optimization_level` = 3, and save the transpiled circuit to the variable `IPE_trans`. Print out the depth of the transpiled circuit.
```
from qiskit.test.mock import FakeAthens
import qiskit.tools.jupyter
backend = FakeAthens()
backend
######## your code to transpile IPE circuit goes here ########
#####################
print(IPE_trans.depth())
```
Execute the cell below to transpile QPE circuit.
```
num = 500
QPE_trans = transpile([QPE]*num, backend, optimization_level=3)
QPE_trans_depth = np.array([QPE_trans[idx].depth() for idx in range(num)])
print(min(QPE_trans_depth), max(QPE_trans_depth))
best_arg = np.argmin(QPE_trans_depth)
QPE_trans_best = QPE_trans[best_arg]
```
<h4 style="font-size: 17px">Step E. Run the following cells to perform the noise simulation of the transipiled circuits. </h4>
```
from qiskit.providers.aer.noise import NoiseModel
noise_model = NoiseModel.from_backend(backend)
shots = 20000
counts = execute([IPE_trans, QPE_trans_best], sim, noise_model=noise_model).result().get_counts()
```
<h4 style="font-size: 17px">Step F. Execute the cell below to compute the exact phase estimation results. </h4>
```
from qiskit.quantum_info import Statevector
QPE_exact = QuantumCircuit(t+1)
QPE_exact.h(range(t))
QPE_exact.x(t)
for idx in range(t):
QPE_exact.cp(psi*2**idx, idx, t)
qft_dag = qft(t).to_gate().inverse()
qft_dag.label = 'QFT+'
QPE_exact.append(qft_dag, range(t))
#QPE_exact.draw('mpl')
state = Statevector.from_instruction(QPE_exact)
pmf = state.probabilities_dict(range(4))
```
<h4 style="font-size: 17px">Step G. Show the comparison figure by running the following cell.</h4>
```
def count_new(count):
    """Relabel histogram keys from bit strings to estimated phase values.

    Each binary key is read as an integer, scaled by 2**t (``t`` is the
    module-level number of counting qubits) and rounded to three decimal
    places; the counts themselves are left untouched.
    """
    estimates = np.array([round(int(bits, 2) / 2**t, 3) for bits in count])
    labels = [str(value) for value in estimates]
    return dict(zip(labels, count.values()))
pmf_new = count_new(pmf)
count_IPE = count_new(counts[0])
count_QPE = count_new(counts[1])
fig, ax = plt.subplots(1, 2, figsize=(32,10))
fig.suptitle('QPE .vs. IPE for estimating $\\theta=1/3$', fontsize=23)
plot_histogram([pmf_new, count_QPE], ax=ax[0], legend=['No_noise', 'Athens_sim'])
plot_histogram([pmf_new, count_IPE], ax=ax[1], legend=['No_noise', 'Athens_sim'])
ax[0].set_title('QPE circuit result', fontsize=16)
ax[0].set_xlabel('$\\theta$', fontsize=16)
ax[1].set_title('IPE circuit result', fontsize=16)
ax[1].set_xlabel('$\\theta$', fontsize=16)
plt.show()
```
If you create the IPE circuit successfully to estimate the phase, $\theta = \frac{1}{3}$, you should get plots similar to those shown below.

<h4 style="font-size: 17px">📓Step G. Discuss about the results. </h4>
```
```
| github_jupyter |
# Quantizing RNN Models
In this example, we show how to quantize recurrent models.
Using a pretrained model `model.RNNModel`, we convert the built-in pytorch implementation of LSTM to our own, modular implementation.
The pretrained model was generated with:
```time python3 main.py --cuda --emsize 1500 --nhid 1500 --dropout 0.65 --tied --wd=1e-6```
The reason we replace the LSTM is that the inner operations in the pytorch implementation are not accessible to us, but we still want to quantize these operations. <br />
Afterwards we can try different techniques to quantize the whole model.
_NOTE_: We use `tqdm` to plot progress bars, since it's not in `requirements.txt` you should install it using
`pip install tqdm`.
```
from model import DistillerRNNModel, RNNModel
from data import Corpus
import torch
from torch import nn
import distiller
from distiller.modules import DistillerLSTM as LSTM
from tqdm import tqdm # for pretty progress bar
import numpy as np
from copy import deepcopy
```
### Preprocess the data
```
corpus = Corpus('./data/wikitext-2/')
def batchify(data, bsz):
    """Reshape a 1-D token stream into ``bsz`` parallel columns.

    Drops the trailing tokens that do not fill a whole row, then returns a
    (nbatch, bsz) tensor moved to the globally configured ``device``.
    """
    # Number of full rows we can carve out of the stream.
    full_rows = data.size(0) // bsz
    trimmed = data.narrow(0, 0, full_rows * bsz)
    # Lay the stream out column by column: one column per batch element.
    batched = trimmed.view(bsz, -1).t().contiguous()
    return batched.to(device)
# Use the first CUDA device for all tensors (training and evaluation).
device = 'cuda:0'
batch_size = 20       # training batch size
eval_batch_size = 10  # validation/test batch size
train_data = batchify(corpus.train, batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
```
### Loading the model and converting to our own implementation
```
rnn_model = torch.load('./checkpoint.pth.tar.best')
rnn_model = rnn_model.to(device)
rnn_model
```
Here we convert the pytorch LSTM implementation to our own, by calling `LSTM.from_pytorch_impl`:
```
def manual_model(pytorch_model_: RNNModel):
    """Build a DistillerRNNModel that mirrors ``pytorch_model_``.

    Copies the encoder/decoder parameters and converts the built-in LSTM
    into the modular Distiller implementation via ``LSTM.from_pytorch_impl``.
    """
    src = pytorch_model_
    converted = DistillerRNNModel(nlayers=src.nlayers,
                                  ninp=src.ninp,
                                  nhid=src.nhid,
                                  ntoken=src.ntoken,
                                  tie_weights=src.tie_weights).to(device)
    converted.eval()
    # Clone-and-detach so the two models share values but not storage.
    converted.encoder.weight = nn.Parameter(src.encoder.weight.clone().detach())
    converted.decoder.weight = nn.Parameter(src.decoder.weight.clone().detach())
    converted.decoder.bias = nn.Parameter(src.decoder.bias.clone().detach())
    converted.rnn = LSTM.from_pytorch_impl(src.rnn)
    return converted
man_model = manual_model(rnn_model)
torch.save(man_model, 'manual.checkpoint.pth.tar')
man_model
```
### Batching the data for evaluation
```
sequence_len = 35
def get_batch(source, i):
    """Slice a (time, batch) tensor into an input chunk and shifted targets.

    Returns up to ``sequence_len`` timesteps starting at position ``i``; the
    target is the same slice shifted one step ahead and flattened for the loss.
    """
    chunk = min(sequence_len, len(source) - 1 - i)
    inputs = source[i:i + chunk]
    targets = source[i + 1:i + 1 + chunk].view(-1)
    return inputs, targets
hidden = rnn_model.init_hidden(eval_batch_size)
data, targets = get_batch(test_data, 0)
```
### Check that the conversion has succeeded
```
rnn_model.eval()
man_model.eval()
y_t, h_t = rnn_model(data, hidden)
y_p, h_p = man_model(data, hidden)
print("Max error in y: %f" % (y_t-y_p).abs().max().item())
```
### Defining the evaluation
```
criterion = nn.CrossEntropyLoss()
def repackage_hidden(h):
    """Detach hidden-state tensors from their autograd history.

    Recurses so it handles both a single tensor and the nested (h, c)
    tuples produced by LSTMs.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(item) for item in h)
def evaluate(model, data_source):
    """Run a full evaluation pass and return the average per-token loss.

    Relies on module-level globals: ``corpus``, ``eval_batch_size``,
    ``sequence_len``, ``criterion``, ``get_batch`` and ``repackage_hidden``.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        # Walk the corpus in non-overlapping windows of `sequence_len` steps.
        with tqdm(range(0, data_source.size(0), sequence_len)) as t:
            # The line below was fixed as per: https://github.com/pytorch/examples/issues/214
            for i in t:
                data, targets = get_batch(data_source, i)
                output, hidden = model(data, hidden)
                output_flat = output.view(-1, ntokens)
                # Weight each window's loss by its length so the shorter final
                # window does not skew the corpus-level average.
                total_loss += len(data) * criterion(output_flat, targets).item()
                # Detach so the graph does not grow across windows.
                hidden = repackage_hidden(hidden)
                # NOTE(review): `i` is a data index, not an iteration count, so
                # this running average is approximate — progress-bar display only.
                avg_loss = total_loss / (i + 1)
                t.set_postfix((('val_loss', avg_loss), ('ppl', np.exp(avg_loss))))
    return total_loss / len(data_source)
```
# Quantizing the Model
## Collect activation statistics
The model uses activation statistics to determine how big the quantization range is. The bigger the range - the larger the round off error after quantization which leads to accuracy drop.
Our goal is to minimize the range s.t. it contains the absolute most of our data.
After that, we divide the range into chunks of equal size, according to the number of bits, and transform the data according to this scale factor.
Read more on scale factor calculation [in our docs](https://nervanasystems.github.io/distiller/algo_quantization.html).
The class `QuantCalibrationStatsCollector` collects the statistics for defining the range $r = max - min$.
Each forward pass, the collector records the values of inputs and outputs, for each layer:
- absolute over all batches min, max (stored in `min`, `max`)
- average over batches, per batch min, max (stored in `avg_min`, `avg_max`)
- mean
- std
- shape of output tensor
All these values can be used to define the range of quantization, e.g. we can use the absolute `min`, `max` to define the range.
```
import os
from distiller.data_loggers import QuantCalibrationStatsCollector, collector_context
man_model = torch.load('./manual.checkpoint.pth.tar')
distiller.utils.assign_layer_fq_names(man_model)
collector = QuantCalibrationStatsCollector(man_model)
if not os.path.isfile('manual_lstm_pretrained_stats.yaml'):
with collector_context(collector) as collector:
val_loss = evaluate(man_model, val_data)
collector.save('manual_lstm_pretrained_stats.yaml')
```
## Prepare the Model For Quantization
We quantize the model after the training has completed.
Here we check the baseline model perplexity, to have an idea how good the quantization is.
```
from distiller.quantization import PostTrainLinearQuantizer, LinearQuantMode
from copy import deepcopy
# Load and evaluate the baseline model.
man_model = torch.load('./manual.checkpoint.pth.tar')
val_loss = evaluate(man_model, val_data)
print('val_loss:%8.2f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
Now we do our magic - __Preparing the model for quantization__.
The quantizer replaces the layers in out model with their quantized versions.
```
# Define the quantizer
quantizer = PostTrainLinearQuantizer(
deepcopy(man_model),
model_activation_stats='./manual_lstm_pretrained_stats.yaml')
# Quantizer magic
stats_before_prepare = deepcopy(quantizer.model_activation_stats)
dummy_input = (torch.zeros(1,1).to(dtype=torch.long), man_model.init_hidden(1))
quantizer.prepare_model(dummy_input)
```
### Net-Aware Quantization
Note that we passed a dummy input to `prepare_model`. This is required for the quantizer to be able to create a graph representation of the model, and to infer the connectivity between the modules.
Understanding the connectivity of the model is required to enable **"Net-aware quantization"**. This term (coined in [\[1\]](#references), section 3.2.2), means we can achieve better quantization by considering sequences of operations.
In the case of LSTM, we have an element-wise add operation whose output is split into 4 and fed into either Tanh or Sigmoid activations. Both of these ops saturate at relatively small input values - tanh at approximately $|4|$, and sigmoid saturates at approximately $|6|$. This means we can safely clip the output of the element-wise add operation between $[-6,6]$. `PostTrainLinearQuantizer` detects this patterm and modifies the statistics accordingly.
```
import pprint
pp = pprint.PrettyPrinter(indent=1)
print('Stats BEFORE prepare_model:')
pp.pprint(stats_before_prepare['rnn.cell_0.eltwiseadd_gate']['output'])
print('\nStats AFTER to prepare_model:')
pp.pprint(quantizer.model_activation_stats['rnn.cell_0.eltwiseadd_gate']['output'])
```
Note the value for `avg_max` did not change, since it was already below the clipping value of $6.0$.
### Inspecting the Quantized Model
Let's see how the model looks after being prepared for quantization:
```
quantizer.model
```
Note how `encoder` and `decoder` have been replaced with wrapper layers (for the relevant module type), which handle the quantization. The same holds for the internal layers of the `DistillerLSTM` module, which we don't print for brevity sake. To "peek" inside the `DistillerLSTM` module, we need to access it directly. As an example, let's take a look at a couple of the internal layers:
```
print(quantizer.model.rnn.cell_0.fc_gate_x)
print(quantizer.model.rnn.cell_0.eltwiseadd_gate)
```
## Running the Quantized Model
### Try 1: Initial settings - simple symmetric quantization
Finally, let's go ahead and evaluate the quantized model:
```
val_loss = evaluate(quantizer.model.to(device), val_data)
print('val_loss:%8.2f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
As we can see here, the perplexity has increased much - meaning our quantization has damaged the accuracy of our model.
### Try 2: Asymmetric, per-channel
Let's try quantizing each channel separately, and making the range of the quantization asymmetric.
```
quantizer = PostTrainLinearQuantizer(
deepcopy(man_model),
model_activation_stats='./manual_lstm_pretrained_stats.yaml',
mode=LinearQuantMode.ASYMMETRIC_SIGNED,
per_channel_wts=True
)
quantizer.prepare_model(dummy_input)
quantizer.model
val_loss = evaluate(quantizer.model.to(device), val_data)
print('val_loss:%8.2f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
A tiny bit better, but still no good.
### Try 3: Mixed FP16 and INT8
Let us try the half precision (aka FP16) version of the model:
```
model_fp16 = deepcopy(man_model).half()
val_loss = evaluate(model_fp16, val_data)
print('val_loss: %8.6f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
The result is very close to our original model! That means that the roundoff when quantizing linearly to 8-bit integers is what hurts our accuracy.
Luckily, `PostTrainLinearQuantizer` supports quantizing some/all layers to FP16 using the `fp16` parameter. In light of what we just saw, and as stated in [\[2\]](#References), let's try keeping element-wise operations at FP16, and quantize everything else to 8-bit using the same settings as in try 2.
```
overrides_yaml = """
.*eltwise.*:
fp16: true
encoder:
fp16: true
decoder:
fp16: true
"""
overrides = distiller.utils.yaml_ordered_load(overrides_yaml)
quantizer = PostTrainLinearQuantizer(
deepcopy(man_model),
model_activation_stats='./manual_lstm_pretrained_stats.yaml',
mode=LinearQuantMode.ASYMMETRIC_SIGNED,
overrides=overrides,
per_channel_wts=True
)
quantizer.prepare_model(dummy_input)
quantizer.model
val_loss = evaluate(quantizer.model.to(device), val_data)
print('val_loss:%8.6f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
The accuracy is still holding up very well, even though we quantized the inner linear layers!
### Try 4: Clipping Activations
Now, lets try to choose different boundaries for `min`, `max`.
Instead of using absolute ones, we take the average of all batches (`avg_min`, `avg_max`), which is an indication of where usually most of the boundaries lie. This is done by specifying the `clip_acts` parameter to `ClipMode.AVG` or `"AVG"` in the quantizer ctor:
```
overrides_yaml = """
encoder:
fp16: true
decoder:
fp16: true
"""
overrides = distiller.utils.yaml_ordered_load(overrides_yaml)
quantizer = PostTrainLinearQuantizer(
deepcopy(man_model),
model_activation_stats='./manual_lstm_pretrained_stats.yaml',
mode=LinearQuantMode.ASYMMETRIC_SIGNED,
overrides=overrides,
per_channel_wts=True,
clip_acts="AVG"
)
quantizer.prepare_model(dummy_input)
val_loss = evaluate(quantizer.model.to(device), val_data)
print('val_loss:%8.6f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
```
Great! Even though we quantized all of the layers except the embedding and the decoder - we got almost no accuracy penalty. Lets try quantizing them as well:
```
quantizer = PostTrainLinearQuantizer(
deepcopy(man_model),
model_activation_stats='./manual_lstm_pretrained_stats.yaml',
mode=LinearQuantMode.ASYMMETRIC_SIGNED,
per_channel_wts=True,
clip_acts="AVG"
)
quantizer.prepare_model(dummy_input)
val_loss = evaluate(quantizer.model.to(device), val_data)
print('val_loss:%8.6f\t|\t ppl:%8.2f' % (val_loss, np.exp(val_loss)))
quantizer.model
```
Here we see that sometimes quantizing with the right boundaries gives better results than actually using floating point operations (even though they are half precision).
## Conclusion
Choosing the right boundaries for quantization was crucial for achieving almost no degradation in accuracy of LSTM.
Here we showed how to use the Distiller quantization API to quantize an RNN model, by converting the PyTorch implementation into a modular one and then quantizing each layer separately.
## References
1. **Jongsoo Park, Maxim Naumov, Protonu Basu, Summer Deng, Aravind Kalaiah, Daya Khudia, James Law, Parth Malani, Andrey Malevich, Satish Nadathur, Juan Miguel Pino, Martin Schatz, Alexander Sidorov, Viswanath Sivakumar, Andrew Tulloch, Xiaodong Wang, Yiming Wu, Hector Yuen, Utku Diril, Dmytro Dzhulgakov, Kim Hazelwood, Bill Jia, Yangqing Jia, Lin Qiao, Vijay Rao, Nadav Rotem, Sungjoo Yoo, Mikhail Smelyanskiy**. Deep Learning Inference in Facebook Data Centers: Characterization, Performance Optimizations and Hardware Implications. [arxiv:1811.09886](https://arxiv.org/abs/1811.09886)
2. **Qinyao He, He Wen, Shuchang Zhou, Yuxin Wu, Cong Yao, Xinyu Zhou, Yuheng Zou**. Effective Quantization Methods for Recurrent Neural Networks. [arxiv:1611.10176](https://arxiv.org/abs/1611.10176)
| github_jupyter |
```
"""
3D forward modeling of total-field magnetic anomaly using triaxial
ellipsoids (model with isotropic and anisotropic susceptibilities)
"""
# insert the figures in the notebook
%matplotlib inline
import numpy as np
from fatiando import utils, gridder
import triaxial_ellipsoid
from mesher import TriaxialEllipsoid
import plot_functions as pf
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import BoundaryNorm
# The regional field
F, inc, dec = 23500., 30, -15
# Create a model formed by two ellipsoids
# The first ellipsoid does not have remanent magnetization and
# has an anisotropic susceptibility (different principal susceptibilities
# k1 = 0.3, k2 = 0.2, k3 = 0.1).
# The second has a remanent magnetization of 2 A/m
# and an isotropic susceptibility of (all principal susceptibilities
# equal to 0.01)
model = [TriaxialEllipsoid(-2500., -2500., 1000., 900., 600., 300., 45., -10., 34.,
{'principal susceptibilities': [0.3, 0.2, 0.1],
'susceptibility angles': [-20., 20., 9.]}),
TriaxialEllipsoid(2500., 2500., 1000., 950., 640., 600., 45., 62., -7.,
{'remanent magnetization': [1.2, 90, 0.],
'principal susceptibilities': [0.01, 0.01, 0.01],
'susceptibility angles': [13, 50, 7]})]
# Create a regular grid at 0m height
shape = (200, 200)
area = [-5000, 5000, -5000, 5000]
xp, yp, zp = gridder.regular(area, shape, z = 0)
# Time execution of the function triaxial_ellipsoid.tf
%timeit triaxial_ellipsoid.tf(xp, yp, zp, model, F, inc, dec)
# Calculate the total-field anomaly
tf = triaxial_ellipsoid.tf(xp, yp, zp, model, F, inc, dec)
# Plot the results
plt.close('all')
plt.figure()
plt.axis('scaled')
ranges = np.max(np.abs([np.min(tf), np.max(tf)]))
levels = MaxNLocator(nbins=20).tick_values(-ranges, ranges)
cmap = plt.get_cmap('RdBu_r')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.contourf(0.001*yp.reshape(shape), 0.001*xp.reshape(shape),
tf.reshape(shape), levels=levels,
cmap = cmap, norm=norm)
plt.ylabel('x (km)')
plt.xlabel('y (km)')
plt.xlim(0.001*np.min(yp), 0.001*np.max(yp))
plt.ylim(0.001*np.min(xp), 0.001*np.max(xp))
cbar = plt.colorbar()
plt.tight_layout()
plt.show()
plt.close('all')
fig = plt.figure(figsize=(8,6))
ax = fig.gca(projection='3d')
ranges = np.max(np.abs([np.min(tf), np.max(tf)]))
levels = MaxNLocator(nbins=20).tick_values(-ranges, ranges)
cmap = plt.get_cmap('seismic')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=False)
cs = ax.contour(xp.reshape(shape), yp.reshape(shape), tf.reshape(shape),
zdir='z', offset=0, cmap=cmap, norm=norm, levels=levels,
linewidths=2)
#cbar = fig.colorbar(cs)
for m in model:
pf.draw_ellipsoid(ax, m, body_color=(1,1,0), body_alpha=0.3)
ax.set_xlabel('x (m)')
ax.set_ylabel('y (m)')
ax.set_zlabel('z (m)')
ax.view_init(215, 20)
plt.tight_layout(True)
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/PGM-Lab/probai-2021-pyro/blob/main/Day2/notebooks/solutions_bayesian_regression_VI.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<span style="color:red">This notebook is an adapted version from </span> http://pyro.ai/examples/bayesian_regression.html
## Setup
Let's begin by installing and importing the modules we'll need.
```
!pip install -q --upgrade torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torch.nn as nn
# ignore future warnings
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
```
# Dataset
The following example is taken from \[1\]. We would like to explore the relationship between topographic heterogeneity of a nation as measured by the Terrain Ruggedness Index (variable *rugged* in the dataset) and its GDP per capita. In particular, it was noted by the authors in \[1\] that terrain ruggedness or bad geography is related to poorer economic performance outside of Africa, but rugged terrains have had a reverse effect on income for African nations. Let us look at the data \[2\] and investigate this relationship. We will be focusing on three features from the dataset:
- `rugged`: quantifies the Terrain Ruggedness Index
- `cont_africa`: whether the given nation is in Africa
- `rgdppc_2000`: Real GDP per capita for the year 2000
We will take the logarithm for the response variable GDP as it tends to vary exponentially. We also use a new variable `african_rugged`, defined as the product between the variables `rugged` and `cont_africa`, to capture the correlation between ruggedness and whether a country is in Africa.
```
DATA_URL = "https://raw.githubusercontent.com/PGM-Lab/probai-2021-pyro/main/Day1/rugged_data.csv"
data = pd.read_csv(DATA_URL, encoding="ISO-8859-1")
df = data[["cont_africa", "rugged", "rgdppc_2000"]]
df = df[np.isfinite(df.rgdppc_2000)]
df["rgdppc_2000"] = np.log(df["rgdppc_2000"])
df["african_rugged"] = data["cont_africa"] * data["rugged"]
df = df[["cont_africa", "rugged", "african_rugged", "rgdppc_2000"]]
# Divide the data into predictors and response and store the data in numpy arrays
data_array = np.array(df)
x_data = data_array[:, :-1]
y_data = data_array[:, -1]
# Display first 10 entries
display(df[0:10])
```
# 1. Linear Regression
Regression is one of the most common and basic supervised learning tasks in machine learning. Suppose we're given a dataset $\mathcal{D}$ of the form
$$ \mathcal{D} = \{ (X_i, y_i) \} \qquad \text{for}\qquad i=1,2,...,N$$
The goal of linear regression is to fit a function to the data of the form:
$$ y = w X + b + \epsilon $$
where $w$ and $b$ are learnable parameters and $\epsilon$ represents observation noise. Specifically $w$ is a matrix of weights and $b$ is a bias vector.
Let's first implement linear regression in PyTorch and learn point estimates for the parameters $w$ and $b$. Then we'll see how to incorporate uncertainty into our estimates by using Pyro to implement Bayesian regression.
```
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
african_nations = data[data["cont_africa"] == 1]
non_african_nations = data[data["cont_africa"] == 0]
sns.scatterplot(non_african_nations["rugged"],
np.log(non_african_nations["rgdppc_2000"]),
ax=ax[0])
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
sns.scatterplot(african_nations["rugged"],
np.log(african_nations["rgdppc_2000"]),
ax=ax[1])
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations")
```
## 1.1 Model
We would like to predict log GDP per capita of a nation as a function of three features from the dataset - whether the nation is in Africa, its Terrain Ruggedness Index, and the interaction between these two. Let's define our regression model. We'll define an specific object encapsulating this linear regression model. Our input `x_data` is a tensor of size $N \times 3$ and our output `y_data` is a tensor of size $N \times 1$. The method `predict(self,x_data)` defines a linear transformation of the form $Xw + b$ where $w$ is the weight matrix and $b$ is the additive bias.
The parameters of the model are defined using ``torch.nn.Parameter``, and will be learned during training.
```
class RegressionModel():
    """Plain linear regression y = x w^T + b with learnable w and b."""
    def __init__(self):
        # One weight per predictor (3 features) and a scalar bias.
        self.w = torch.nn.Parameter(torch.zeros(1, 3))
        self.b = torch.nn.Parameter(torch.zeros(1, 1))
    def params(self):
        """Expose the learnable tensors for the optimizer."""
        return {"b": self.b, "w": self.w}
    def predict(self, x_data):
        """Return predictions for an (N, 3) batch as a length-N tensor."""
        scores = torch.mm(x_data, torch.t(self.w)) + self.b
        return scores.squeeze(-1)
regression_model = RegressionModel()
```
## 1.2 Training
We will use the mean squared error (MSE) as our loss and Adam as our optimizer. We would like to optimize the parameters of the `regression_model` neural net above. We will use a somewhat large learning rate of `0.05` and run for 5000 iterations.
```
loss_fn = torch.nn.MSELoss(reduction='sum')
optim = torch.optim.Adam(regression_model.params().values(), lr=0.05)
num_iterations = 5000
data_array = torch.tensor(df.values, dtype=torch.float)
x_data, y_data = data_array[:, :-1], data_array[:, -1]
def main():
    """Fit the point-estimate regression by gradient descent on the MSE loss.

    Uses module-level globals: ``data_array``, ``regression_model``,
    ``loss_fn``, ``optim`` and ``num_iterations``.
    """
    x_data = data_array[:, :-1]
    y_data = data_array[:, -1]
    for j in range(num_iterations):
        # run the model forward on the data
        y_pred = regression_model.predict(x_data)
        # calculate the mse loss
        loss = loss_fn(y_pred, y_data)
        # initialize gradients to zero
        optim.zero_grad()
        # backpropagate
        loss.backward()
        # take a gradient step
        optim.step()
        if (j + 1) % 500 == 0:
            print("[iteration %04d] loss: %.4f" % (j + 1, loss.item()))
    # Inspect learned parameters
    print("Learned parameters:")
    for name, param in regression_model.params().items():
        print(name, param.data.numpy())
main()
```
## 1.3 Evaluating the model
We now plot the regression line learned for african and non-african nations relating the rugeedness index with the GDP of the country.
```
# Scatter the data and overlay the fitted regression line per region.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
fig.suptitle("Regression line ", fontsize=16)
ax[0].scatter(x_data[x_data[:,0]==0,1].detach().numpy(), y_data[x_data[:,0]==0].detach().numpy())
ax[1].scatter(x_data[x_data[:,0]==1,1].detach().numpy(), y_data[x_data[:,0]==1].detach().numpy())
# The point-estimate model is deterministic, so one plot per panel suffices
# (the original looped 10 times and redrew the identical line each pass).
ax[0].plot(x_data[x_data[:,0]==0,1].detach().numpy(), regression_model.predict(x_data[x_data[:,0]==0,:]).detach().numpy(), color='r')
ax[1].plot(x_data[x_data[:,0]==1,1].detach().numpy(), regression_model.predict(x_data[x_data[:,0]==1,:]).detach().numpy(), color='r')
ax[0].set(xlabel="Terrain Ruggedness Index",ylabel="log GDP (2000)",title="Non African Nations")
ax[1].set(xlabel="Terrain Ruggedness Index",ylabel="log GDP (2000)",title="African Nations")
plt.show()
```
## 1.4 The relationship between ruggedness and log GPD
Using this analysis, we can estimate the relationship between ruggedness and log GPD. As can be seen, this relationship is positive for African nations, but negative for Non African Nations.
```
slope_within_africa = regression_model.params()['w'][0,1] + regression_model.params()['w'][0,2]
slope_outside_africa = regression_model.params()['w'][0,1]
print(slope_within_africa.detach().numpy())
print(slope_outside_africa.detach().numpy())
```
# 2. Bayesian Linear Regression
[Bayesian modeling](http://mlg.eng.cam.ac.uk/zoubin/papers/NatureReprint15.pdf) offers a systematic framework for reasoning about model uncertainty. Instead of just learning point estimates, we're going to learn a _distribution_ over variables that is consistent with the observed data.
In order to make our linear regression Bayesian, we need to put priors on the parameters $w$ and $b$. These are distributions that represent our prior belief about reasonable values for $w$ and $b$ (before observing any data).
## 2.1 Model
We now have all the ingredients needed to specify our model. First we define priors over weights and bias. Note the priors that we are using for the different latent variables in the model.
The following figures shows a graphical description of the model:
<img src="https://github.com/PGM-Lab/probai-2021-pyro/raw/main/Day2/Figures/BayesianLinearRegressionModel.png" alt="Drawing" width=800 >
## 2.2 Full mean field
First we consider a full mean field approach, where the variational approximation factorizes as
$$
q({\bf w}, b) = q(b)\prod _{i=1}^Mq(w_i)
$$
### Helper-routine: Calculate ELBO
```
def calculate_ELBO(x_data, y_data, gamma_w, gamma_b, theta, q_w_mean, q_w_prec, q_b_mean, q_b_prec):
    """
    Helper routine: Calculate ELBO. Data is the sampled x and y values, gamma is the prior precision over the
    weights and theta is the prior precision associated with y. Everything prefixed a 'q' relates to the
    variational posterior.
    Note: This function obviously only works for this particular model and is not a general solution.
    :param x_data: The predictors
    :param y_data: The response variable
    :param gamma_w: prior precision for the weights
    :param gamma_b: prior precision for the intercept
    :param theta: prior precision for y
    :param q_w_mean: VB posterior mean for the distribution of the weights w
    :param q_w_prec: VB posterior precision for the distribution of the weights w
    :param q_b_mean: VB posterior mean for the intercept b
    :param q_b_prec: VB posterior precision for the intercept b
    :return: the ELBO
    """
    # We calculate the ELBO as E_q log p(y,x,w,b) - E_q log q(w,b), where
    #   log p(y,x,w) = sum_i log p(y|x,w,b) + log p(w) + log p(b)
    #   log q(w,b) = log q(w) + log q(b)
    M = x_data.shape[1]
    # E_q log p(w) for w ~ N(0, gamma_w^-1 I). The log-normalizer contributes
    # 0.5 * M * log(gamma_w); the previous version used 0.5 * M * gamma_w,
    # which is only a constant offset but is mathematically wrong.
    E_log_p = -0.5 * M * np.log(2 * np.pi) + 0.5 * M * np.log(gamma_w) \
        - 0.5 * gamma_w * np.sum(np.diagonal(np.linalg.inv(q_w_prec))
                                 + (q_w_mean * q_w_mean).flatten())
    # E_q log p(b)
    E_log_p += -0.5 * np.log(2 * np.pi) + 0.5 * np.log(gamma_b) - 0.5 * gamma_b * (1/q_b_prec + q_b_mean**2)
    # sum_i E_q log p(y|x,w,b), using E[w w^T] and E[b^2] under q.
    E_w_w = np.linalg.inv(q_w_prec) + q_w_mean @ q_w_mean.transpose()
    E_b_b = 1/q_b_prec + q_b_mean**2
    for i in range(x_data.shape[0]):
        E_x_ww_x = np.matmul(x_data[i, :].transpose(), np.matmul(E_w_w, x_data[i, :]))
        E_log_p += -0.5 * np.log(2 * np.pi) + 0.5 * np.log(theta) \
            - 0.5 * theta * (y_data[i]**2 + E_x_ww_x + E_b_b
                             + 2 * q_b_mean * np.matmul(q_w_mean.transpose(), x_data[i, :])
                             - 2 * y_data[i] * np.matmul(q_w_mean.transpose(), x_data[i, :])
                             - 2 * y_data[i] * q_b_mean)
    # Entropy of q(b): 0.5 * log(2 * pi * e / precision). The previous version
    # used 1 * pi, silently dropping a constant 0.5 * log(2).
    ent = 0.5 * np.log(2 * np.pi * np.exp(1) / q_b_prec)
    # Entropy of the multivariate Gaussian q(w).
    ent += 0.5 * np.log(np.linalg.det(2 * np.pi * np.exp(1) * np.linalg.inv(q_w_prec)))
    return E_log_p - ent
```
### <span style="color:red">Exercise 1: Introduce the variational updating rules</span>
* Introduce variational updating rules for $q(w_i)$, which is normally distributed.
- Updating equation for **precision** of $q(w_i)$:
$$
\tau \leftarrow (\gamma_w+\theta\sum_{i=1}^N(x_{ij}^2))
$$
- Updating equation for **mean** of $q(w_i)$:
$$\mu \leftarrow \tau^{-1}\theta\sum_{i=1}^Nx_{ij}(y_i - (\sum_{k\neq j}x_{ik}\mathbb{E}(W_k)+\mathbb{E}(B)))
$$
* Introduce variational updating rules for $q(b)$, which is normally distributed.
- Updating equation for **precision** of $q(b)$:
$$
\tau \leftarrow (\gamma_b+\theta N)
$$
- Updating equation for **mean** of $q(b)$:
$$
\mu \leftarrow \tau^{-1} \theta\sum_{i=1}^N(y_i -
\mathbb{E}(\mathbf{W}^T)\mathbf{x}_i)
$$
```
# The variational updating rule for weight component 'comp'
def update_w_comp(x_data, y_data, gamma_w, theta, q_w_mean, q_w_prec, q_b_mean, comp):
    """Coordinate-ascent update of q(w_comp).

    Mutates and returns (q_w_prec, q_w_mean), writing the new scalar
    precision and mean into position ``comp``.
    """
    n_samples = x_data.shape[0]
    # Posterior precision: prior precision plus theta-weighted sum of x_j^2.
    precision = gamma_w
    # theta-weighted residual sum, excluding component `comp` itself.
    numerator = 0.0
    for row in range(n_samples):
        precision += theta * x_data[row, comp] ** 2
        other_contrib = np.sum(x_data[row, :] @ q_w_mean) - x_data[row, comp] * q_w_mean[comp]
        numerator += (y_data[row] - q_b_mean - other_contrib) * x_data[row, comp]
    mean = theta * numerator / precision
    # Write the scalar results back into the joint mean vector / precision matrix.
    q_w_prec[comp, comp] = precision
    q_w_mean[comp] = mean.item()
    return q_w_prec, q_w_mean
# The variational updating rule for the intercept
def update_b(x_data, y_data, gamma_b, theta, q_w_mean):
    """Coordinate-ascent update of q(b); returns (precision, mean)."""
    n_samples = x_data.shape[0]
    # Posterior precision of the intercept.
    precision = gamma_b + theta * n_samples
    # Sum of residuals after removing the expected weight contribution.
    residual_sum = 0
    for row in range(n_samples):
        residual_sum += (y_data[row] - q_w_mean.transpose() @ x_data[row, :])
    mean = theta * residual_sum / precision
    return precision, mean
```
## 2.3 Inference
To do inference we'll use coordinate ascent, which is implemented by the above updating rules. Just like in the non-Bayesian linear regression, each iteration optimizes our training objective — with the difference that here we monitor the ELBO objective instead of the MSE loss.
```
# Initialize the variational distributions
data_array = np.array(df)
x_data = data_array[:, :-1]
y_data = data_array[:, -1]
M = x_data.shape[1]
# Prior precisions for weights and intercept, and likelihood precision.
gamma_w = 1
gamma_b = 1
theta = 1
q_w_mean = np.random.normal(0, 1, (3, 1))
# Must be a FLOAT array: update_w_comp writes float precisions back into this
# matrix, and the previous np.diag((1, 1, 1)) created an integer array that
# silently truncated them.
q_w_prec = np.diag((1.0, 1.0, 1.0))
q_b_mean = np.random.normal(0, 1)
q_b_prec = 1
elbos = []
# Calculate ELBO
this_lb = calculate_ELBO(x_data, y_data, gamma_w, gamma_b, theta, q_w_mean, q_w_prec, q_b_mean, q_b_prec)
elbos.append(this_lb)
previous_lb = -np.inf
# Start iterating
print("\n" + 100 * "=" + "\n VB iterations:\n" + 100 * "=")
for iteration in range(100):
    # Update the variational distributions, one coordinate at a time.
    for i in range(M):
        q_w_prec, q_w_mean = update_w_comp(x_data, y_data, gamma_w, theta, q_w_mean, q_w_prec, q_b_mean, i)
    q_b_prec, q_b_mean = update_b(x_data, y_data, gamma_b, theta, q_w_mean)
    this_lb = calculate_ELBO(x_data, y_data, gamma_w, gamma_b, theta, q_w_mean, q_w_prec, q_b_mean, q_b_prec)
    elbos.append(this_lb)
    print(f"Iteration {iteration:2d}. ELBO: {this_lb.item():13.7f}")
    # Coordinate ascent can never decrease the ELBO; a drop means a bug.
    if this_lb < previous_lb:
        raise ValueError("ELBO is decreasing. Something is wrong! Goodbye...")
    if iteration > 0 and np.abs((this_lb - previous_lb) / previous_lb) < 1E-8:
        # Very little improvement. We are done.
        break
    # If we didn't break we need to run again. Update the value for "previous"
    previous_lb = this_lb
print("\n" + 100 * "=" + "\n")
# Store the results
w_mean_mf = q_w_mean
w_prec_mf = q_w_prec
b_mean_mf = q_b_mean
b_prec_mf = q_b_prec
plt.plot(range(len(elbos)), elbos)
plt.xlabel('Number of iterations')
plt.ylabel('ELBO')
```
Now, we have a Gaussian posterior for $q(b)$ and $q(w)$ with means and precisions:
```
print("Mean q(b):", b_mean_mf)
print("Precision q(b):", b_prec_mf)
print("Mean q(w):", w_mean_mf)
print("Precision q(w):", w_prec_mf)
```
Note that instead of just point estimates, we now have uncertainty estimates for our learned parameters.
## 2.4 Model's Uncertainty
We can now sample different regression lines from the variational posteriors, thus reflecting the model uncertainty.
```
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6), sharey=True)
fig.suptitle("Uncertainty in Regression line ", fontsize=16)
num_samples = 20
ax[0].scatter(x_data[x_data[:,0]==0,1], y_data[x_data[:,0]==0])
for _ in range(num_samples):
b_sample = np.random.normal(loc=q_b_mean, scale=1/np.sqrt(q_b_prec))
w_sample = np.random.multivariate_normal(mean=q_w_mean.flatten(), cov=np.linalg.inv(q_w_prec))
ax[0].plot(x_data[x_data[:,0]==0,1], (x_data[x_data[:,0]==0,:] @ w_sample)+b_sample, 'r-')
ax[0].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="Non African Nations")
ax[1].scatter(x_data[x_data[:,0]==1,1], y_data[x_data[:,0]==1])
for _ in range(num_samples):
b_sample = np.random.normal(loc=q_b_mean, scale=1/np.sqrt(q_b_prec))
w_sample = np.random.multivariate_normal(mean=q_w_mean.flatten(), cov=np.linalg.inv(q_w_prec))
ax[1].plot(x_data[x_data[:,0]==1,1], (x_data[x_data[:,0]==1,:] @ w_sample)+b_sample, 'r-')
ax[1].set(xlabel="Terrain Ruggedness Index",
ylabel="log GDP (2000)",
title="African Nations")
plt.show()
```
The above figure shows the uncertainty in our estimate of the regression line. Note that for lower values of ruggedness there are many more data points, and as such, the regression lines are less uncertainty than in high ruggness values, where there is much more uncertainty, specially in the case of African nations.
## 2.5 The relationship between ruggedness and log GPD
Finally, we go back to the previous analysis about the relationship between ruggedness and log GPD. Now, we can compute uncertainties over this relationship. As can be seen, this relationship is negative for Non African Nations with high probability, and positive for African nations in most of the cases. But there is a non-negligible probability that this relationship is also negative. This is the consequence of the low number of samples in the case of African nations.
```
weight = np.random.multivariate_normal(mean=q_w_mean.flatten(), cov=np.linalg.inv(q_w_prec),size=1000)
gamma_within_africa = weight[:,1] + weight[:,2]
gamma_outside_africa = weight[:,1]
fig = plt.figure(figsize=(10, 6))
sns.distplot(gamma_within_africa, kde_kws={"label": "African nations"},)
sns.distplot(gamma_outside_africa, kde_kws={"label": "Non-African nations"})
fig.suptitle("Density of Slope : log(GDP) vs. Terrain Ruggedness", fontsize=16)
plt.legend()
plt.show()
```
### References
1. McElreath, D., *Statistical Rethinking, Chapter 7*, 2016
2. Nunn, N. & Puga, D., *[Ruggedness: The blessing of bad geography in Africa"](https://diegopuga.org/papers/rugged.pdf)*, Review of Economics and Statistics 94(1), Feb. 2012
```
```
| github_jupyter |
## 範例重點
### 學習在模型開始前檢查各個環節
1. 是否有 GPU 資源
2. 將前處理轉為函式,統一處理訓練、驗證與測試集
3. 將超參數變數化,易於重複使用函式、模型等
```
## 確認硬體資源 (如果你是在 Linux, 若是在 Windows, 請參考 https://blog.csdn.net/idwtwt/article/details/78017565)
!nvidia-smi
import os
from tensorflow import keras
# 本範例不需使用 GPU, 將 GPU 設定為 "無"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# 從 Keras 的內建功能中,取得 train 與 test 資料集
train, test = keras.datasets.cifar10.load_data()
## 資料前處理
def preproc_x(x, flatten=True):
    """Scale pixel values from [0, 255] into [0, 1].

    When ``flatten`` is True (the default), each sample is additionally
    reshaped to a 1-D vector, yielding an array of shape (n_samples, -1).
    """
    scaled = x / 255.
    if flatten:
        scaled = scaled.reshape((len(scaled), -1))
    return scaled
def preproc_y(y, num_classes=10):
    """One-hot encode integer class labels.

    Labels arriving as a column vector (trailing dim of 1, as produced by
    ``keras.datasets.cifar10``) are converted to one-hot vectors of length
    ``num_classes``; anything else is assumed already encoded and is
    returned untouched.
    """
    if y.shape[-1] != 1:
        return y
    return keras.utils.to_categorical(y, num_classes)
x_train, y_train = train
x_test, y_test = test
# 資料前處理 - X 標準化
x_train = preproc_x(x_train)
x_test = preproc_x(x_test)
# 資料前處理 -Y 轉成 onehot
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
def build_mlp(input_shape, output_units=10, num_neurons=[512, 256, 128]):
    """Build a simple fully-connected classifier.

    One ReLU Dense layer is stacked per entry of ``num_neurons`` (named
    ``hidden_layer1`` .. ``hidden_layerN``), followed by a softmax output
    layer with ``output_units`` units.
    """
    input_layer = keras.layers.Input(input_shape)
    # Thread the running tensor through each hidden layer in turn.
    x = input_layer
    for idx, units in enumerate(num_neurons, start=1):
        x = keras.layers.Dense(units=units,
                               activation="relu",
                               name="hidden_layer" + str(idx))(x)
    out = keras.layers.Dense(units=output_units, activation="softmax", name="output")(x)
    return keras.models.Model(inputs=[input_layer], outputs=[out])
model = build_mlp(input_shape=x_train.shape[1:])
model.summary()
## 超參數設定
LEARNING_RATE = 0.001
EPOCHS = 100
BATCH_SIZE = 256
optimizer = keras.optimizers.Adam(lr=LEARNING_RATE)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
model.fit(x_train, y_train,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
shuffle=True)
# 訓練模型並檢視驗證集的結果
import matplotlib.pyplot as plt
train_loss = model.history.history["loss"]
valid_loss = model.history.history["val_loss"]
train_acc = model.history.history["accuracy"]
valid_acc = model.history.history["val_accuracy"]
plt.plot(range(len(train_loss)), train_loss, label="train loss")
plt.plot(range(len(valid_loss)), valid_loss, label="valid loss")
plt.legend()
plt.title("Loss")
plt.show()
plt.plot(range(len(train_acc)), train_acc, label="train accuracy")
plt.plot(range(len(valid_acc)), valid_acc, label="valid accuracy")
plt.legend()
plt.title("Accuracy")
plt.show()
```
## Work
1. 請嘗試將 preproc_x 替換成以每筆資料的 min/max 進行標準化至 -1 ~ 1 間,再進行訓練
2. 請嘗試將 mlp 疊更深 (e.g 5~10 層),進行訓練後觀察 learning curve 的走勢
3. (optional) 請改用 GPU 進行訓練 (如果你有 GPU 的話),比較使用 CPU 與 GPU 的訓練速度
| github_jupyter |
# Read csv md files and load to notes db
```
import sqlite3
import os
import pandas as pd
import platform
import sys
HOME = os.environ['HOME']
email = 'james.f.owers+mendeley@gmail.com'
# http://support.mendeley.com/customer/en/portal/articles/227951-how-do-i-locate-mendeley-desktop-database-files-on-my-computer-
loc = dict(
Darwin='Library/Application Support/Mendeley Desktop',
Linux='.local/share/data/Mendeley Ltd./Mendeley Desktop'
)
db_loc = '{}{}{}@www.mendeley.com.sqlite'.format(loc[platform.system()], os.sep, email).split('/')
db_path = '{}{}{}'.format(HOME, os.sep, os.sep.join(db_loc))
db = sqlite3.connect(db_path)
db_path
table_name = 'DocumentNotes'
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
notes
```
## Make an edit and write
## On mac, editing the DocumentNotes table has no effect on GUI
```
table_name = 'DocumentNotes'
orig_notes = notes.copy()
notes.loc[0, 'text'] = '<strong>a big</strong> change!'
notes.loc[0, 'baseNote'] = '<strong>a big</strong> change!'
notes.to_sql(table_name, db, if_exists='replace', index=False)
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
notes
orig_notes.to_sql(table_name, db, if_exists='replace', index=False)
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
notes
```
## On mac, editing the notes field in Documents table changes the
```
table_name = 'Documents'
documents = pd.read_sql_query("SELECT * from {}".format(table_name), db)
orig_documents = documents.copy()
documents.loc[documents['id']== 97, 'note'] = None
documents.to_sql(table_name, db, if_exists='replace', index=False)
documents = pd.read_sql_query("SELECT * from {}".format(table_name), db)
documents
orig_documents.to_sql(table_name, db, if_exists='replace', index=False)
documents = pd.read_sql_query("SELECT * from {}".format(table_name), db)
documents
```
## Read csv and write
```
!mkdir test
!./export_notes.py test
!cat test/Brien2012\ \(435\)\ Handbook\ of\ Document\ Image\ Processing\ and\ Recognition.md
doc_name_str = '{citationKey} ({docid}) {title}'
notes_folder = 'test'
import glob
all_files = glob.glob(os.path.join(notes_folder, "*.md")) #make list of paths
# Write each exported markdown file back into the Mendeley notes table.
# Bug fix: `table_name` was last assigned 'Documents' in an earlier cell,
# so the original to_sql call replaced the *Documents* table with the
# notes DataFrame.  Pin it to the notes table explicitly before writing.
table_name = 'DocumentNotes'
for f in all_files:
    # Reading the file content to create a DataFrame
    with open(f, 'r') as ff:
        md_str = ff.read()
    # File names look like '<citationKey> (<docid>) <title>.md'; extract docid.
    file_name = os.path.splitext(os.path.basename(f))[0]
    doc_id = int(file_name.split('(', 1)[1].split(')', 1)[0])
    notes.loc[notes['documentId'] == doc_id, 'text'] = md_str
    notes.loc[notes['documentId'] == doc_id, 'baseNote'] = md_str
    # file_name, doc_id, md_str
notes.to_sql(table_name, db, if_exists='replace', index=False)
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
notes
orig_notes.to_sql(table_name, db, if_exists='replace', index=False)
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
notes
```
| github_jupyter |
#### Create Data Generator
```
class DataGen(tf.keras.utils.Sequence):
    """Keras Sequence yielding (image, mask) batches for U-Net training.

    Each sample directory under ``path`` is expected to contain
    ``<id>/images/<id>.tif`` plus a ``<id>/masks/`` folder whose per-instance
    mask images are merged (pixel-wise max) into one mask per sample.
    Images and masks are resized to ``image_size`` and scaled to [0, 1].
    """

    def __init__(self, ids, path, batch_size=8, image_size=64):
        # Bug fix: the original defined `__init` (missing trailing
        # underscores), so the constructor never ran and no attribute
        # was ever set.
        self.ids = ids
        self.path = path
        self.batch_size = batch_size
        self.image_size = image_size
        self.on_epoch_end()

    def __load__(self, id_name):
        """Load one sample: (image, merged_mask), both normalized to [0, 1]."""
        image_path = os.path.join(self.path, id_name, "images", id_name) + ".tif"
        mask_path = os.path.join(self.path, id_name, "masks/")
        all_masks = os.listdir(mask_path)

        image = cv2.imread(image_path, 1)  # read as 3-channel color
        image = cv2.resize(image, (self.image_size, self.image_size))

        # Bug fix: the accumulator was created as `masks` but updated as
        # `mask` (NameError); one consistent name is used throughout.
        mask = np.zeros((self.image_size, self.image_size, 1))
        for name in all_masks:
            _mask_path = mask_path + name
            _mask_image = cv2.imread(_mask_path, -1)  # greyscale
            # Bug fix: resize the mask image, not the input image.
            _mask_image = cv2.resize(_mask_image, (self.image_size, self.image_size))
            # Bug fix: append the channel axis last -> (H, W, 1), matching `mask`.
            _mask_image = np.expand_dims(_mask_image, axis=-1)
            # Union of all instance masks.
            mask = np.maximum(mask, _mask_image)

        # normalize image and mask to [0, 1]
        image = image / 255.0
        mask = mask / 255.0
        return image, mask

    def __getitem__(self, index):
        # The last batch may be smaller; compute its extent locally.
        # Bug fix: the original shrank `self.batch_size` in place, which
        # permanently corrupted batching (and __len__) for later epochs.
        start = index * self.batch_size
        end = min(start + self.batch_size, len(self.ids))
        # Bug fix: the loop below iterated over the undefined name
        # `file_batch` (NameError) instead of the slice built here.
        files_batch = self.ids[start:end]

        images = []
        masks = []
        for id_name in files_batch:
            _img, _mask = self.__load__(id_name)
            images.append(_img)
            masks.append(_mask)
        return np.array(images), np.array(masks)

    def on_epoch_end(self):
        # No shuffling between epochs; hook kept for API completeness.
        pass

    def __len__(self):
        # Number of batches, counting the possibly-smaller final batch.
        return int(np.ceil(len(self.ids) / float(self.batch_size)))
```
#### Set Hyperparameters
```
image_size = 64
train_path = '/home/jovyan/ghw2019_planetpieces/contributors/claire/unet-model-claire/data/train'
epochs = 1
batch_size = 8
train_ids = next(os.walk(train_path))[1]
val_data_size = 5
valid_ids = train_ids[:val_data_size]
train_ids = train_ids[val_data_size:]
gen = DataGen(train_ids, train_path,
batch_size=batch_size, image_size=image_size)
x, y = gen.__getitem__(0)
print(x.shape, y.shape)
class UNet():
    """Builder for a 4-level U-Net segmentation network (channels-last),
    constructed with the tf.keras functional API."""

    def __init__(self):
        print ('build UNet ...')

    def get_crop_shape(self, target, refer):
        """Return ((top, bottom), (left, right)) crop amounts so that
        `target`'s spatial dims can be cropped down to match `refer`'s.

        For odd differences the extra pixel goes on the second side.
        NOTE(review): `.value` on the shape difference is the TF1
        `Dimension` API; under TF2 `get_shape()[i]` is already an int and
        this raises AttributeError — confirm the intended TF version.
        """
        # width, the 3rd dimension
        cw = (target.get_shape()[2] - refer.get_shape()[2]).value
        assert (cw >= 0)
        if cw % 2 != 0:
            cw1, cw2 = int(cw/2), int(cw/2) + 1
        else:
            cw1, cw2 = int(cw/2), int(cw/2)
        # height, the 2nd dimension
        ch = (target.get_shape()[1] - refer.get_shape()[1]).value
        assert (ch >= 0)
        if ch % 2 != 0:
            ch1, ch2 = int(ch/2), int(ch/2) + 1
        else:
            ch1, ch2 = int(ch/2), int(ch/2)
        return (ch1, ch2), (cw1, cw2)

    def create_model(self, img_shape, num_class):
        """Build and return the U-Net keras Model.

        img_shape: input image shape, e.g. (H, W, C) — channels last.
        num_class: number of output channels of the final 1x1 conv
            (no activation is applied; add softmax/sigmoid externally).
        """
        concat_axis = 3  # channel axis for skip-connection concatenation
        inputs = tf.keras.layers.Input(shape = img_shape)
        # --- Encoder: two 3x3 ReLU convs then 2x2 max-pool per level,
        #     doubling the filter count at each level (32 -> 256). ---
        conv1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputs)
        conv1 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv4)
        # --- Bottleneck ---
        conv5 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
        # --- Decoder: upsample, crop the encoder feature map to match,
        #     concatenate (skip connection), then two 3x3 convs. ---
        up_conv5 = tf.keras.layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = tf.keras.layers.Cropping2D(cropping=(ch,cw))(conv4)
        up6 = tf.keras.layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
        up_conv6 = tf.keras.layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = tf.keras.layers.Cropping2D(cropping=(ch,cw))(conv3)
        up7 = tf.keras.layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
        up_conv7 = tf.keras.layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = tf.keras.layers.Cropping2D(cropping=(ch,cw))(conv2)
        up8 = tf.keras.layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
        up_conv8 = tf.keras.layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = tf.keras.layers.Cropping2D(cropping=(ch,cw))(conv1)
        up9 = tf.keras.layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
        # Zero-pad back up to the exact input spatial size before the head.
        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = tf.keras.layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        # 1x1 conv projects to `num_class` channels (raw logits).
        conv10 = tf.keras.layers.Conv2D(num_class, (1, 1))(conv9)
        model = tf.keras.models.Model(inputs=inputs, outputs=conv10)
        return model
```
| github_jupyter |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Plotly's Python API is updated frequently. Run `pip install plotly --upgrade` to update your Plotly version.
```
import plotly
plotly.__version__
```
#### Simple Candlestick with Pandas
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from datetime import datetime
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')
trace = go.Candlestick(x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'])
data = [trace]
py.iplot(data, filename='simple_candlestick')
```
#### Candlestick without Rangeslider
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from datetime import datetime
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')
trace = go.Candlestick(x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'])
layout = go.Layout(
xaxis = dict(
rangeslider = dict(
visible = False
)
)
)
data = [trace]
fig = go.Figure(data=data,layout=layout)
py.iplot(fig, filename='simple_candlestick_without_range_slider')
```
#### Adding Customized Text and Annotations
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from datetime import datetime
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')
trace = go.Candlestick(x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'])
data = [trace]
layout = {
'title': 'The Great Recession',
'yaxis': {'title': 'AAPL Stock'},
'shapes': [{
'x0': '2016-12-09', 'x1': '2016-12-09',
'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',
'line': {'color': 'rgb(30,30,30)', 'width': 1}
}],
'annotations': [{
'x': '2016-12-09', 'y': 0.05, 'xref': 'x', 'yref': 'paper',
'showarrow': False, 'xanchor': 'left',
'text': 'Increase Period Begins'
}]
}
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='aapl-recession-candlestick')
```
#### Custom Candlestick Colors
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from datetime import datetime
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')
trace = go.Candlestick(
x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'],
increasing=dict(line=dict(color= '#17BECF')),
decreasing=dict(line=dict(color= '#7F7F7F'))
)
data = [trace]
py.iplot(data, filename='styled_candlestick')
```
#### Simple Example with `datetime` Objects
```
import plotly.plotly as py
import plotly.graph_objs as go
from datetime import datetime
open_data = [33.0, 33.3, 33.5, 33.0, 34.1]
high_data = [33.1, 33.3, 33.6, 33.2, 34.8]
low_data = [32.7, 32.7, 32.8, 32.6, 32.8]
close_data = [33.0, 32.9, 33.3, 33.1, 33.1]
dates = [datetime(year=2013, month=10, day=10),
datetime(year=2013, month=11, day=10),
datetime(year=2013, month=12, day=10),
datetime(year=2014, month=1, day=10),
datetime(year=2014, month=2, day=10)]
trace = go.Candlestick(x=dates,
open=open_data,
high=high_data,
low=low_data,
close=close_data)
data = [trace]
py.iplot(data, filename='candlestick_datetime')
```
### Dash Example
[Dash](https://plotly.com/products/dash/) is an Open Source Python library which can help you convert plotly figures into a reactive, web-based application. Below is a simple example of a dashboard created using Dash. Its [source code](https://github.com/plotly/simple-example-chart-apps/tree/master/dash-candlestickplot) can easily be deployed to a PaaS.
```
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-candlestickplot/", width="100%", height="750px", frameBorder="0")
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-candlestickplot/code", width="100%", height=500, frameBorder="0")
```
#### Reference
For more information on candlestick attributes, see: https://plotly.com/python/reference/#candlestick
```
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
!pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'candlestick-charts.ipynb', 'python/candlestick-charts/', 'Candlestick Charts',
'How to make interactive candlestick charts in Python with Plotly. '
'Six examples of candlestick charts with Pandas, time series, and yahoo finance data.',
title = 'Python Candlestick Charts | plotly',
thumbnail='thumbnail/candlestick.jpg', language='python',
page_type='example_index', has_thumbnail='true', display_as='financial', order=2,
ipynb= '~notebook_demo/275')
```
| github_jupyter |
```
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
# Scaling Criteo: ETL with NVTabular
## Overview
NVTabular is a feature engineering and preprocessing library for tabular data designed to quickly and easily manipulate terabyte scale datasets used to train deep learning based recommender systems. It provides a high level abstraction to simplify code and accelerates computation on the GPU using the RAPIDS cuDF library.<br><br>
**In this notebook, we will show how to scale NVTabular to multi-GPUs and multiple nodes.** Prerequisite is to be familiar with NVTabular and its API. You can read more NVTabular and its API in our [Getting Started with Movielens notebooks](https://github.com/NVIDIA/NVTabular/tree/main/examples/getting-started-movielens).<br><br>
The full [Criteo 1TB Click Logs dataset](https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) contains ~1.3 TB of uncompressed click logs containing over four billion samples spanning 24 days. In our benchmarks, we are able to preprocess and engineer features in **13.8min with 1x NVIDIA A100 GPU and 1.9min with 8x NVIDIA A100 GPUs**. This is a **speed-up of 100x-10000x** in comparison to different CPU versions. You can read more in our [blog](https://developer.nvidia.com/blog/announcing-the-nvtabular-open-beta-with-multi-gpu-support-and-new-data-loaders/).
Our pipeline will be representative with most common preprocessing transformation for deep learning recommender models.
* Categorical input features are `Categorified` to be continuous integers (0, ..., |C|) for the embedding layers
* Missing values of continuous input features are filled with 0. Afterwards the continuous features are clipped and normalized.
### Learning objectives
In this notebook, we learn how to to scale ETLs with NVTabular
- Learn to use larger than GPU/host memory datasets
- Use multi-GPU or multi node for ETL
- Apply common deep learning ETL workflow
### Multi-GPU and multi-node scaling
NVTabular is built on top off [RAPIDS.AI cuDF](https://github.com/rapidsai/cudf/), [dask_cudf](https://docs.rapids.ai/api/cudf/stable/dask-cudf.html) and [dask](https://dask.org/).<br><br>
**Dask** is a task-based library for parallel scheduling and execution. Although it is certainly possible to use the task-scheduling machinery directly to implement customized parallel workflows (we do it in NVTabular), most users only interact with Dask through a Dask Collection API. The most popular "collection" API's include:
* Dask DataFrame: Dask-based version of the Pandas DataFrame/Series API. Note that dask_cudf is just a wrapper around this collection module (dask.dataframe).
* Dask Array: Dask-based version of the NumPy array API
* Dask Bag: Similar to a Dask-based version of PyToolz or a Pythonic version of PySpark RDD
For example, Dask DataFrame provides a convenient API for decomposing large Pandas (or cuDF) DataFrame/Series objects into a collection of DataFrame partitions.
<img src="./imgs/dask-dataframe.svg" width="20%">
We use **dask_cudf** to process large datasets as a collection of cuDF dataframes instead of Pandas. CuDF is a GPU DataFrame library for loading, joining, aggregating, filtering, and otherwise manipulating data.
<br><br>
**Dask makes it easy to schedule tasks for multiple workers: multi-GPU or multi-node. We just need to initialize a Dask cluster (`LocalCUDACluster`) and NVTabular will use the cluster to execute the workflow.**
## ETL with NVTabular
Here we'll show how to use NVTabular first as a preprocessing library to prepare the [Criteo 1TB Click Logs dataset](https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) dataset. The following notebooks can use the output to train a deep learning model.
### Data Prep
The previous notebook [01-Download-Convert](./01-Download-Convert.ipynb) converted the tsv data published by Criteo into the parquet format that our accelerated readers prefer. Accelerating these pipelines on new hardware like GPUs may require us to make new choices about the representations we use to store that data, and parquet represents a strong alternative.
We load the required libraries.
```
# Standard Libraries
import os
import re
import shutil
import warnings
# External Dependencies
import numpy as np
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
# NVTabular
import nvtabular as nvt
from nvtabular.ops import (
Categorify,
Clip,
FillMissing,
Normalize,
)
from nvtabular.utils import _pynvml_mem_size, device_mem_size
```
Once our data is ready, we'll define some high level parameters to describe where our data is and what it "looks like" at a high level.
```
# define some information about where to get our data
BASE_DIR = os.environ.get("BASE_DIR", "/raid/data/criteo")
INPUT_DATA_DIR = os.environ.get("INPUT_DATA_DIR", BASE_DIR + "/converted/criteo")
dask_workdir = os.path.join(BASE_DIR, "test_dask/workdir")
OUTPUT_DATA_DIR = os.environ.get("OUTPUT_DATA_DIR", BASE_DIR + "/test_dask/output")
stats_path = os.path.join(BASE_DIR, "test_dask/stats")
# Make sure we have a clean worker space for Dask
if os.path.isdir(dask_workdir):
shutil.rmtree(dask_workdir)
os.makedirs(dask_workdir)
# Make sure we have a clean stats space for Dask
if os.path.isdir(stats_path):
shutil.rmtree(stats_path)
os.mkdir(stats_path)
# Make sure we have a clean output path
if os.path.isdir(OUTPUT_DATA_DIR):
shutil.rmtree(OUTPUT_DATA_DIR)
os.mkdir(OUTPUT_DATA_DIR)
```
We use the last day as validation dataset and the remaining days as training dataset.
```
fname = "day_{}.parquet"
num_days = len(
[i for i in os.listdir(INPUT_DATA_DIR) if re.match(fname.format("[0-9]{1,2}"), i) is not None]
)
train_paths = [os.path.join(INPUT_DATA_DIR, fname.format(day)) for day in range(num_days - 1)]
valid_paths = [
os.path.join(INPUT_DATA_DIR, fname.format(day)) for day in range(num_days - 1, num_days)
]
print(train_paths)
print(valid_paths)
```
### Deploy a Distributed-Dask Cluster
Now we configure and deploy a Dask Cluster. Please, [read this document](https://github.com/NVIDIA/NVTabular/blob/d419a4da29cf372f1547edc536729b0733560a44/bench/examples/MultiGPUBench.md) to know how to set the parameters.
```
# Dask dashboard
dashboard_port = "8787"
# Deploy a Single-Machine Multi-GPU Cluster
protocol = "tcp" # "tcp" or "ucx"
NUM_GPUS = [0, 1, 2, 3, 4, 5, 6, 7]
visible_devices = ",".join([str(n) for n in NUM_GPUS]) # Delect devices to place workers
device_limit_frac = 0.7 # Spill GPU-Worker memory to host at this limit.
device_pool_frac = 0.8
part_mem_frac = 0.15
# Use total device size to calculate args.device_limit_frac
device_size = device_mem_size(kind="total")
device_limit = int(device_limit_frac * device_size)
device_pool_size = int(device_pool_frac * device_size)
part_size = int(part_mem_frac * device_size)
# Check if any device memory is already occupied
for dev in visible_devices.split(","):
fmem = _pynvml_mem_size(kind="free", index=int(dev))
used = (device_size - fmem) / 1e9
if used > 1.0:
warnings.warn(f"BEWARE - {used} GB is already occupied on device {int(dev)}!")
cluster = None # (Optional) Specify existing scheduler port
if cluster is None:
cluster = LocalCUDACluster(
protocol=protocol,
n_workers=len(visible_devices.split(",")),
CUDA_VISIBLE_DEVICES=visible_devices,
device_memory_limit=device_limit,
local_directory=dask_workdir,
dashboard_address=":" + dashboard_port,
rmm_pool_size=(device_pool_size // 256) * 256
)
# Create the distributed client
client = Client(cluster)
client
```
That's it. We initialized our Dask cluster and NVTabular will execute the workflow on multiple GPUs. Similar, we could define a cluster with multiple nodes.
### Defining our Preprocessing Pipeline
At this point, our data still isn't in a form that's ideal for consumption by neural networks. The most pressing issues are missing values and the fact that our categorical variables are still represented by random, discrete identifiers, and need to be transformed into contiguous indices that can be leveraged by a learned embedding. Less pressing, but still important for learning dynamics, are the distributions of our continuous variables, which are distributed across multiple orders of magnitude and are uncentered (i.e. E[x] != 0).
We can fix these issues in a concise and GPU-accelerated manner with an NVTabular `Workflow`. We explained the NVTabular API in [Getting Started with Movielens notebooks](https://github.com/NVIDIA/NVTabular/tree/main/examples/getting-started-movielens) and hope you are familiar with the syntax.
#### Frequency Thresholding
One interesting thing worth pointing out is that we're using _frequency thresholding_ in our `Categorify` op. This handy functionality will map all categories which occur in the dataset with some threshold level of infrequency (which we've set here to be 15 occurrences throughout the dataset) to the _same_ index, keeping the model from overfitting to sparse signals.
```
# define our dataset schema
CONTINUOUS_COLUMNS = ["I" + str(x) for x in range(1, 14)]
CATEGORICAL_COLUMNS = ["C" + str(x) for x in range(1, 27)]
LABEL_COLUMNS = ["label"]
COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS + LABEL_COLUMNS
num_buckets = 10000000
categorify_op = Categorify(out_path=stats_path, max_size=num_buckets)
cat_features = CATEGORICAL_COLUMNS >> categorify_op
cont_features = CONTINUOUS_COLUMNS >> FillMissing() >> Clip(min_value=0) >> Normalize()
features = cat_features + cont_features + LABEL_COLUMNS
workflow = nvt.Workflow(features, client=client)
```
Now instantiate dataset iterators to loop through our dataset (which we couldn't fit into GPU memory). We need to enforce the required HugeCTR data types, so we set them in a dictionary and give as an argument when creating our dataset.
```
dict_dtypes = {}
for col in CATEGORICAL_COLUMNS:
dict_dtypes[col] = np.int64
for col in CONTINUOUS_COLUMNS:
dict_dtypes[col] = np.float32
for col in LABEL_COLUMNS:
dict_dtypes[col] = np.float32
train_dataset = nvt.Dataset(train_paths, engine="parquet", part_size=part_size)
valid_dataset = nvt.Dataset(valid_paths, engine="parquet", part_size=part_size)
```
Now run them through our workflows to collect statistics on the train set, then transform and save to parquet files.
```
output_train_dir = os.path.join(OUTPUT_DATA_DIR, "train/")
output_valid_dir = os.path.join(OUTPUT_DATA_DIR, "valid/")
! mkdir -p $output_train_dir
! mkdir -p $output_valid_dir
```
For reference, let's time it to see how long it takes...
```
%%time
workflow.fit(train_dataset)
# Add "write_hugectr_keyset=True" to "to_parquet" if using this ETL Notebook for training with HugeCTR
%%time
workflow.transform(train_dataset).to_parquet(
output_files=len(NUM_GPUS),
output_path=output_train_dir,
shuffle=nvt.io.Shuffle.PER_PARTITION,
dtypes=dict_dtypes,
cats=CATEGORICAL_COLUMNS,
conts=CONTINUOUS_COLUMNS,
labels=LABEL_COLUMNS,
)
# Add "write_hugectr_keyset=True" to "to_parquet" if using this ETL Notebook for training with HugeCTR
%%time
workflow.transform(valid_dataset).to_parquet(
output_path=output_valid_dir,
dtypes=dict_dtypes,
cats=CATEGORICAL_COLUMNS,
conts=CONTINUOUS_COLUMNS,
labels=LABEL_COLUMNS,
)
```
In the next notebooks, we will train a deep learning model. Our training pipeline requires information about the data schema to define the neural network architecture. We will save the NVTabular workflow to disk so that we can restore it in the next notebooks.
```
workflow.save(os.path.join(OUTPUT_DATA_DIR, "workflow"))
```
| github_jupyter |
# Keep Calm and Parquet
In this workshop we will be leveraging a number of analytics tools to show the diversity of the AWS platform. We will walk through querying unoptimized csv files and converting them to Parquet to improve performance. We also want to show how you can access data in your data lake with Redshift, Athena, and EMR giving you freedom of choice to choose the right tool for the job keeping a single source of truth of your data in S3.

```
import boto3
import botocore
import json
import time
import os
import getpass
import project_path # path to helper methods
from lib import workshop
from pandas import read_sql
glue = boto3.client('glue')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
cfn = boto3.client('cloudformation')
redshift_client = boto3.client('redshift')
ec2_client = boto3.client('ec2')
session = boto3.session.Session()
region = session.region_name
account_id = boto3.client('sts').get_caller_identity().get('Account')
database_name = 'taxi' # AWS Glue Data Catalog Database Name
redshift_database_name = 'taxidb'
environment_name = 'taxi-workshop'
table_name = 'yellow'
redshift_node_type = 'ds2.xlarge'
redshift_port=5439
use_existing = True
```
### [Create S3 Bucket](https://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html)
We will create an S3 bucket that will be used throughout the workshop for storing our data.
[s3.create_bucket](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.create_bucket) boto3 documentation
```
bucket = workshop.create_bucket_name('taxi-')
session.resource('s3').create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': region})
print(bucket)
```
### [Copy Sample Data to S3 bucket](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-download-file.html)
We will download some files from New York City Taxi and Limousine Commission (TLC) Trip Record Data dataset available on the [AWS Open Data Registry](https://registry.opendata.aws/nyc-tlc-trip-records-pds/).
```
!aws s3 cp s3://nyc-tlc/trip\ data/yellow_tripdata_2017-01.csv s3://$bucket/datalake/yellow/
!aws s3 cp s3://nyc-tlc/trip\ data/yellow_tripdata_2017-02.csv s3://$bucket/datalake/yellow/
```
### [Upload to S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/Welcome.html)
Next, we will upload the json file created above to S3 to be used later in the workshop.
[s3.upload_file](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.upload_file) boto3 documentation
```
file_name = 'paymenttype.csv'
session.resource('s3').Bucket(bucket).Object(os.path.join('datalake', 'paymenttype', file_name)).upload_file(file_name)
file_name = 'ratecode.csv'
session.resource('s3').Bucket(bucket).Object(os.path.join('datalake', 'ratecode', file_name)).upload_file(file_name)
file_name = 'taxi_zone_lookup.csv'
session.resource('s3').Bucket(bucket).Object(os.path.join('datalake', 'taxi_zone_lookup', file_name)).upload_file(file_name)
```
### [Create VPC](https://docs.aws.amazon.com/vpc/index.html)
We need a VPC for some of the resources in this workshop. You have the option to create a brand new VPC or use the VPC flagged as the default.
```
if use_existing:
vpc_filter = [{'Name':'isDefault', 'Values':['true']}]
default_vpc = ec2_client.describe_vpcs(Filters=vpc_filter)
vpc_id = default_vpc['Vpcs'][0]['VpcId']
subnet_filter = [{'Name':'vpc-id', 'Values':[vpc_id]}]
subnets = ec2_client.describe_subnets(Filters=subnet_filter)
subnet1_id = subnets['Subnets'][0]['SubnetId']
subnet2_id = subnets['Subnets'][1]['SubnetId']
else:
vpc, subnet1, subnet2 = workshop.create_and_configure_vpc()
vpc_id = vpc.id
subnet1_id = subnet1.id
subnet2_id = subnet2.id
print(vpc_id)
print(subnet1_id)
print(subnet2_id)
```
### Upload [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/GettingStarted.html) template
In the interest of time we will leverage CloudFormation to launch EMR and Redshift instances to leverage on the analytics side after we have cataloged and transformed the data.
```
redshift_file = 'redshift.yaml'
session.resource('s3').Bucket(bucket).Object(os.path.join('cfn', redshift_file)).upload_file(redshift_file)
emr_file = 'emr.yaml'
session.resource('s3').Bucket(bucket).Object(os.path.join('cfn', emr_file)).upload_file(emr_file)
```
### Enter the user name used for the Redshift Cluster
```
admin_user = getpass.getpass()
```
### Enter the password used in creating the Redshift Cluster
```
# Password must be at least 8 characters, alphanumeric only, with at least
# 1 uppercase letter, 1 lowercase letter, and 1 number. The regex below has
# always required a digit via (?=.*\d); the old message failed to mention it,
# which made valid-looking passwords appear to be rejected for no reason.
admin_password = getpass.getpass()
import re
pattern = re.compile(r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)[a-zA-Z\d]{8,}$")
result = pattern.match(admin_password)
if result:
    print('Valid')
else:
    print('Invalid, Password must be at least 8 characters long, alphanumeric only, with at least 1 uppercase, 1 lowercase, and 1 number')
```
### Execute CloudFormation Stack to generate Redshift Data Warehouse
Later in the workshop we will be using this [Redshift](https://aws.amazon.com/redshift/) cluster to run queries over data populated in our data lake with [Redshift Spectrum](https://aws.amazon.com/blogs/big-data/amazon-redshift-spectrum-extends-data-warehousing-out-to-exabytes-no-loading-required/).
```
# Build the public S3 URL of the uploaded Redshift template and launch the
# stack with the workshop's parameters.
cfn_template = 'https://s3-{0}.amazonaws.com/{1}/cfn/{2}'.format(region, bucket, redshift_file)
print(cfn_template)
redshift_stack_name = 'RedshiftTaxiStack'
# Parameter key/value pairs for the template; dict order is preserved, so the
# API receives them in the same order as before.
stack_parameters = {
    'EnvironmentName': environment_name,
    'AdministratorUser': admin_user,
    'AdministratorPassword': admin_password,
    'DatabaseName': redshift_database_name,
    'NodeType': redshift_node_type,
    'S3Bucket': bucket,
}
response = cfn.create_stack(
    StackName=redshift_stack_name,
    TemplateURL=cfn_template,
    Capabilities=["CAPABILITY_NAMED_IAM"],
    Parameters=[{'ParameterKey': key, 'ParameterValue': value}
                for key, value in stack_parameters.items()],
)
print(response)
```
### Execute CloudFormation Stack to generate EMR Cluster
We will also be querying data in the Data Lake from [EMR](https://aws.amazon.com/emr/) as well through the use of an [EMR Notebook](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks.html).
```
# Build the public S3 URL of the uploaded EMR template and launch the stack
# into the VPC/subnet chosen earlier.
cfn_template = 'https://s3-{0}.amazonaws.com/{1}/cfn/{2}'.format(region, bucket, emr_file)
print(cfn_template)
emr_stack_name = 'EMRTaxiStack'
# Parameter key/value pairs for the template; dict order is preserved, so the
# API receives them in the same order as before.
stack_parameters = {
    'EnvironmentName': environment_name,
    'VPC': vpc_id,
    'PublicSubnet': subnet1_id,
    'OutputS3Bucket': bucket,
}
response = cfn.create_stack(
    StackName=emr_stack_name,
    TemplateURL=cfn_template,
    Capabilities=["CAPABILITY_NAMED_IAM"],
    Parameters=[{'ParameterKey': key, 'ParameterValue': value}
                for key, value in stack_parameters.items()],
)
print(response)
```
### Discover the data in your Data Lake
In this next section we will be using [AWS Glue](https://aws.amazon.com/glue/) to discover, catalog, and transform your data. Glue currently only supports `Python 2.7`, hence we'll write the script in `Python 2.7`.
### Permission setup for invoking AWS Glue from this Notebook
In order to enable this Notebook to run AWS Glue jobs, we need to add one additional permission to the default execution role of this notebook. We will be using SageMaker Python SDK to retrieve the default execution role and then you have to go to [IAM Dashboard](https://console.aws.amazon.com/iam/home) to edit the Role to add AWS Glue specific permission.
### Finding out the current execution role of the Notebook
We are using SageMaker Python SDK to retrieve the current role for this Notebook which needs to be enhanced to support the functionality in AWS Glue.
```
# Import SageMaker Python SDK to get the Session and execution_role
import sagemaker
from sagemaker import get_execution_role

sess = sagemaker.Session()
role = get_execution_role()
# The role ARN ends in .../role-name; keep only the final path segment.
role_name = role.rsplit('/', 1)[-1]
print(role_name)
```
### Adding AWS Glue as an additional trusted entity to this role
This step is needed if you want to pass the execution role of this Notebook while calling Glue APIs as well without creating an additional **Role**. If you have not used AWS Glue before, then this step is mandatory.
If you have used AWS Glue previously, then you should have an already existing role that can be used to invoke Glue APIs. In that case, you can pass that role while calling Glue (later in this notebook) and skip this next step.
On the IAM dashboard, please click on **Roles** on the left sidenav and search for this Role. Once the Role appears, click on the Role to go to its **Summary** page. Click on the **Trust relationships** tab on the **Summary** page to add AWS Glue as an additional trusted entity.
Click on **Edit trust relationship** and replace the JSON with this JSON.
```
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"sagemaker.amazonaws.com",
"glue.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
```
Once this is complete, click on **Update Trust Policy** and you are done.

```
# Direct console link to this notebook's execution role for the trust-policy edit.
print("https://console.aws.amazon.com/iam/home?region={0}#/roles/{1}".format(region, role_name))
```
### Create the [AWS Glue Catalog Database](https://docs.aws.amazon.com/glue/latest/dg/define-database.html)
When you define a table in the AWS Glue Data Catalog, you add it to a database. A database is used to organize tables in AWS Glue. You can organize your tables using a crawler or using the AWS Glue console. A table can be in only one database at a time.
There is a central Glue Catalog for each AWS account. When creating the database you will use your account id declared above as `account_id`
[glue.create_database](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.create_database)
```
def create_db(glue_client, account_id, database_name, description):
    """Create the specified Glue database if it does not exist."""
    try:
        # If this lookup succeeds the database already exists; nothing to do.
        glue_client.get_database(CatalogId=account_id, Name=database_name)
    except glue_client.exceptions.EntityNotFoundException:
        print("Creating database: %s" % database_name)
        db_input = {'Name': database_name, 'Description': description}
        glue_client.create_database(CatalogId=account_id, DatabaseInput=db_input)

create_db(glue, account_id, database_name, 'New York City Taxi and Limousine Commission (TLC) Trip Record Data')
```
### Use a [Glue Crawler](https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html) to Discover the transformed data
You can use a crawler to populate the AWS Glue Data Catalog with tables. This is the primary method used by most AWS Glue users. You add a crawler within your Data Catalog to traverse your data stores. The output of the crawler consists of one or more metadata tables that are defined in your Data Catalog. Extract, transform, and load (ETL) jobs that you define in AWS Glue use these metadata tables as sources and targets.
A crawler can crawl both file-based and table-based data stores. Crawlers can crawl the following data stores:
* Amazon Simple Storage Service (Amazon S3)
* [Built-in Classifiers](https://docs.aws.amazon.com/glue/latest/dg/add-classifier.html#classifier-built-in)
* [Custom Classifiers](https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html)
* Amazon Redshift
* Amazon Relational Database Service (Amazon RDS)
* Amazon Aurora
* MariaDB
* Microsoft SQL Server
* MySQL
* Oracle
* PostgreSQL
* Amazon DynamoDB
* Publicly accessible databases [Blog](https://aws.amazon.com/blogs/big-data/how-to-access-and-analyze-on-premises-data-stores-using-aws-glue/)
* Aurora
* MariaDB
* SQL Server
* MySQL
* Oracle
* PostgreSQL
[glue.create_crawler](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.create_crawler)
```
crawler_name = 'NY-Taxi-Crawler'
crawler_path = 's3://'+bucket+'/datalake/'
# One S3 target pointed at the datalake prefix. Schema changes are updated in
# place; removed data is deprecated in the catalog rather than deleted.
s3_targets = {'S3Targets': [{'Path': crawler_path}]}
change_policy = {
    'UpdateBehavior': 'UPDATE_IN_DATABASE',
    'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
}
response = glue.create_crawler(
    Name=crawler_name,
    Role=role,
    DatabaseName=database_name,
    Description='Crawler for NY Taxi Data',
    Targets=s3_targets,
    SchemaChangePolicy=change_policy,
)
```
### Start the Glue Crawler
You can use a crawler to populate the AWS Glue Data Catalog with tables. This is the primary method used by most AWS Glue users. You add a crawler within your Data Catalog to traverse your data stores. The output of the crawler consists of one or more metadata tables that are defined in your Data Catalog. Extract, transform, and load (ETL) jobs that you define in AWS Glue use these metadata tables as sources and targets.
[glue.start_crawler](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.start_crawler)
```
# Kick off the raw-data crawler and print a console link to monitor it.
response = glue.start_crawler(
    Name=crawler_name
)
print ("Crawler: https://{0}.console.aws.amazon.com/glue/home?region={0}#crawler:name={1}".format(region, crawler_name))
```
### Checking Glue crawler status
We will now monitor the crawler status waiting for it to get back into the `READY` state meaning the crawler completed it's crawl. You can also look at the [CloudWatch logs](https://docs.aws.amazon.com/glue/latest/dg/console-crawlers.html#console-crawlers-details) for the crawler for more details.
```
# Poll the crawler every 30s until it returns to the READY state.
crawler_status = glue.get_crawler(Name=crawler_name)['Crawler']['State']
# BUG FIX: the original condition was `not in ('READY')`. ('READY') is just
# the string 'READY' (not a 1-tuple), so `not in` performed a *substring*
# test on the state string instead of a membership test. It happened to work
# for the current state names, but a direct equality check is what was meant.
while crawler_status != 'READY':
    crawler_status = glue.get_crawler(Name=crawler_name)['Crawler']['State']
    print(crawler_status)
    time.sleep(30)
```
### View Crawler Results
Now that we have crawled the raw data available, we want to look at the results of the crawl to see the tables that were created. You will click on the link `Tables in taxi` to view the tables the crawler found. It will look like the image below:

```
# Console link to the Glue database so you can inspect the crawled tables.
print('https://{0}.console.aws.amazon.com/glue/home?region={0}#database:name={1}'.format(region, database_name))
```
### Create Parquet version of the yellow CSV table
From [Wikipedia](https://en.wikipedia.org/wiki/Apache_Parquet), "Apache Parquet is a free and open-source column-oriented data storage format of the Apache Hadoop ecosystem. It is similar to the other columnar-storage file formats available in Hadoop namely RCFile and ORC. It is compatible with most of the data processing frameworks in the Hadoop environment. It provides efficient data compression and encoding schemes with enhanced performance to handle complex data in bulk."
The key points in this code is how easy it is to get access to the AWS Glue Data Catalog leveraging the [Glue libraries](https://github.com/awslabs/aws-glue-libs). Some of the key concepts are below:
* [`glueContext.create_dynamic_frame.from_catalog`](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-glue-context.html#aws-glue-api-crawler-pyspark-extensions-glue-context-create_dynamic_frame_from_catalog) - Read table metadata from the Glue Data Catalog using Glue libs to load tables into the pyspark job.
* Writing back S3 [`glueContext.write_dynamic_frame.from_options`](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-glue-context.html#aws-glue-api-crawler-pyspark-extensions-glue-context-write_dynamic_frame_from_catalog) with options:
* Convert data to different format `format="parquet"`. This format is [columnar](https://docs.aws.amazon.com/athena/latest/ug/columnar-storage.html) and provides [Snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) compression by default.
You can find more best practices for Glue and Athena [here](https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html)
```
%%writefile yellow_parquet_etl.py
# Glue PySpark ETL job: reads the `yellow` CSV table from the Glue Data
# Catalog, resolves ambiguous column types, drops null-only fields, and
# writes the result back to S3 as Parquet.
import sys
import os
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## @params: [JOB_NAME]
# All six arguments are passed in via start_job_run's Arguments dict.
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_OUTPUT_BUCKET', 'S3_OUTPUT_KEY_PREFIX', 'DATABASE_NAME', 'TABLE_NAME', 'REGION'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "taxi", table_name = "yellow", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
# Load the source table using the catalog metadata produced by the crawler.
datasource0 = glueContext.create_dynamic_frame.from_catalog(database=args['DATABASE_NAME'], table_name=args['TABLE_NAME'], transformation_ctx = "datasource0")
## @type: ResolveChoice
## @args: [choice = "make_struct", transformation_ctx = "resolvechoice1"]
## @return: resolvechoice1
## @inputs: [frame = datasource0]
# make_struct keeps both candidate types for ambiguous columns as a struct.
resolvechoice1 = ResolveChoice.apply(frame = datasource0, choice = "make_struct", transformation_ctx = "resolvechoice1")
## @type: DropNullFields
## @args: [transformation_ctx = "dropnullfields2"]
## @return: dropnullfields2
## @inputs: [frame = resolvechoice1]
dropnullfields2 = DropNullFields.apply(frame = resolvechoice1, transformation_ctx = "dropnullfields2")
parquet_output_path = 's3://' + os.path.join(args['S3_OUTPUT_BUCKET'], args['S3_OUTPUT_KEY_PREFIX'])
print(parquet_output_path)
## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": ""}, format = "parquet", transformation_ctx = "datasink3"]
## @return: datasink3
## @inputs: [frame = dropnullfields2]
# NOTE(review): the variable is datasink3 but transformation_ctx is
# "datasink4" -- harmless here since bookmarks are disabled, but confirm
# before enabling job bookmarks.
datasink3 = glueContext.write_dynamic_frame.from_options(frame = dropnullfields2, connection_type = "s3", connection_options = {"path": parquet_output_path}, format = "parquet", transformation_ctx = "datasink4")
job.commit()
```
### Upload the ETL script to S3
We will be uploading the `yellow_parquet_etl` script to S3 so Glue can use it to run the PySpark job. You can replace it with your own script if needed. If your code has multiple files, you need to zip those files and upload to S3 instead of uploading a single file like it's being done here.
```
# Stage the PySpark ETL script in S3; Glue reads it from there when the job runs.
script_location = sess.upload_data(path='yellow_parquet_etl.py', bucket=bucket, key_prefix='codes')
# Output location of the data.
s3_output_key_prefix = 'datalake/yellow_parquet/'
```
### [Authoring jobs with AWS Glue](https://docs.aws.amazon.com/glue/latest/dg/author-job.html)
Next we'll be creating Glue client via Boto so that we can invoke the `create_job` API of Glue. `create_job` API will create a job definition which can be used to execute your jobs in Glue. The job definition created here is mutable. While creating the job, we are also passing the code location as well as the dependencies location to Glue.
`AllocatedCapacity` parameter controls the hardware resources that Glue will use to execute this job. It is measured in units of `DPU`. For more information on `DPU`, please see [here](https://docs.aws.amazon.com/glue/latest/dg/add-job.html).
[glue.create_job](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.create_job)
```
from time import gmtime, strftime
import time

# Timestamp the job name so repeated workshop runs never collide.
timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
job_name = 'ny-yellow-parquet-' + timestamp_prefix
job_command = {
    'Name': 'glueetl',
    'ScriptLocation': script_location,
}
job_defaults = {
    '--job-language': 'python',
    '--job-bookmark-option': 'job-bookmark-disable',
}
response = glue.create_job(
    Name=job_name,
    Description='PySpark job to convert yellow taxi csv data to parquet',
    Role=role,  # you can pass your existing AWS Glue role here if you have used Glue before
    ExecutionProperty={'MaxConcurrentRuns': 1},
    Command=job_command,
    DefaultArguments=job_defaults,
    AllocatedCapacity=5,
    Timeout=60,
)
glue_job_name = response['Name']
print(glue_job_name)
```
The aforementioned job will be executed now by calling `start_job_run` API. This API creates an immutable run/execution corresponding to the job definition created above. We will require the `job_run_id` for the particular job execution to check for status. We'll pass the data and model locations as part of the job execution parameters.
[glue.start_job_run](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.start_job_run)
```
# Start one run of the job, forwarding the script's expected arguments.
run_arguments = {
    '--S3_OUTPUT_BUCKET': bucket,
    '--S3_OUTPUT_KEY_PREFIX': s3_output_key_prefix,
    '--DATABASE_NAME': database_name,
    '--TABLE_NAME': table_name,
    '--REGION': region,
}
run_response = glue.start_job_run(JobName=job_name, Arguments=run_arguments)
job_run_id = run_response['JobRunId']
print(job_run_id)
```
### Checking Glue Job Status
Now we will check for the job status to see if it has `SUCCEEDED`, `FAILED` or `STOPPED`. Once the job is succeeded, we have the transformed data into S3 in Parquet format which we will use to query with Athena and visualize with QuickSight. If the job fails, you can go to AWS Glue console, click on **Jobs** tab on the left, and from the page, click on this particular job and you will be able to find the CloudWatch logs (the link under **Logs**) link for these jobs which can help you to see what exactly went wrong in the job execution.
[glue.get_job_run](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glue.html#Glue.Client.get_job_run)
```
# Poll the run every 60s until it reaches one of Glue's terminal states.
TERMINAL_STATES = ('FAILED', 'SUCCEEDED', 'STOPPED')
job_run_status = glue.get_job_run(JobName=job_name, RunId=job_run_id)['JobRun']['JobRunState']
while job_run_status not in TERMINAL_STATES:
    job_run_status = glue.get_job_run(JobName=job_name, RunId=job_run_id)['JobRun']['JobRunState']
    print(job_run_status)
    time.sleep(60)
print(job_run_status)
```
### Create Crawler to populate Parquet formated table in Glue Data Catalog
We will create another crawler for the curated dataset we created converting the CSV files into Parquet formatted data.
```
parq_crawler_name = 'NY-Curated-Crawler'
parq_crawler_path = 's3://'+bucket+'/datalake/yellow_parquet/'
# Same target/policy shape as the raw crawler, pointed at the Parquet output.
parq_targets = {'S3Targets': [{'Path': parq_crawler_path}]}
parq_change_policy = {
    'UpdateBehavior': 'UPDATE_IN_DATABASE',
    'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
}
response = glue.create_crawler(
    Name=parq_crawler_name,
    Role=role,
    DatabaseName=database_name,
    Description='Crawler for the Parquet transformed yellow taxi data',
    Targets=parq_targets,
    SchemaChangePolicy=parq_change_policy,
)
```
### Start Crawler
Much like we did with the raw data crawler, we will start the curated crawler pointing to the new data set created from the Glue job.
```
# Kick off the curated-data crawler and print a console link to monitor it.
response = glue.start_crawler(
    Name=parq_crawler_name
)
print ("Crawler: https://{0}.console.aws.amazon.com/glue/home?region={0}#crawler:name={1}".format(region, parq_crawler_name))
```
### Monitor the status of the Parquet crawler
```
# Poll the curated-data crawler every 30s until it returns to READY, then
# print a console link to the database.
crawler_status = glue.get_crawler(Name=parq_crawler_name)['Crawler']['State']
# BUG FIX: `not in ('READY')` was a substring test against the string 'READY'
# (the parentheses do not make a tuple); use a direct equality check.
while crawler_status != 'READY':
    crawler_status = glue.get_crawler(Name=parq_crawler_name)['Crawler']['State']
    print(crawler_status)
    time.sleep(30)
print('https://{0}.console.aws.amazon.com/glue/home?region={0}#database:name={1}'.format(region, database_name))
```
### [Query the Data Lake with Athena](https://aws.amazon.com/athena/)
For the self-serve end users that need the ability to create ad-hoc queries against the data, Athena is a great choice that utilizes Presto and ANSI SQL to query a number of file formats on S3.
To query the tables created by the crawler we will be installing a python library for querying the data in the Glue Data Catalog with Athena. For more information jump to [PyAthena](https://pypi.org/project/PyAthena/). You can also use the AWS console by browsing to the Athena service and run queries through the browser. Alternatively, you can also use the [JDBC/ODBC](https://docs.aws.amazon.com/athena/latest/ug/athena-bi-tools-jdbc-odbc.html) drivers available.
```
# Install the PyAthena DB-API client used to run Athena queries from Python.
!pip install PyAthena
```
### Simple Select Query
In this first query we will create a simple query to show the ability of Athena to query the raw CSV data.
```
%%time
from pyathena import connect
from pyathena.util import as_pandas
cursor = connect(region_name=region, s3_staging_dir='s3://'+bucket+'/athena/temp').cursor()
cursor.execute('select * from ' + database_name + '.yellow limit 10')
df = as_pandas(cursor)
df.head(5)
```
### Complex Join Query
Now we will get more complex and create a query that utilizes multiple joins using Athena.
```
%%time
cursor.execute('''SELECT * FROM ''' + database_name + '''.yellow
JOIN ''' + database_name + '''.paymenttype ON yellow.payment_type = paymenttype.id
JOIN ''' + database_name + '''.ratecode ON yellow.ratecodeid = ratecode.id
JOIN ''' + database_name + '''.taxi_zone_lookup AS pu_taxizone ON yellow.pulocationid = pu_taxizone.locationid
JOIN ''' + database_name + '''.taxi_zone_lookup AS do_taxizone ON yellow.dolocationid = do_taxizone.locationid
limit 10;''')
df = as_pandas(cursor)
df.head(5)
```
### Complex Join Query With Where Clause
Taking it a step further, we will now utilize the query with multiple joins and aggregate the number of entries by vendor looking at just the data found in the first 10 days of Jan. 2017.
In our Glue job we could have taken it a step further to optimize queries like this using data partitioning by date.
#### What is data partitioning?
A partition is a division of a logical database or its constituent elements into distinct independent parts. Database partitioning is normally done for manageability, performance or availability reasons, or for load balancing.
Examples in S3 would utilize prefixes in the bucket for the partitions in key=value pairs.
* s3://datalake/taxi/yellow/year=2018/month=1/<objects>
* s3://datalake/taxi/yellow/year=2018/month=1/day=1/<objects>
**Optional Exercise**
If you would like to try this for yourself you can change the Glue Job above when writing the data to S3 you select how to partition the data.
#### Glue context writing partitions
* Extract `year`, `month`, and `day` from the `tpep_pickup_datetime`. Look at [Pyspark documentation](http://spark.apache.org/docs/2.1.0/api/python/pyspark.sql.html#pyspark.sql.DataFrame.withColumn) for help.
* Writing back S3 [`glueContext.write_dynamic_frame.from_options`](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-glue-context.html#aws-glue-api-crawler-pyspark-extensions-glue-context-write_dynamic_frame_from_catalog) with options:
* [Partition](https://docs.aws.amazon.com/athena/latest/ug/partitions.html) the data based on columns `connection_options = {"path": parquet_output_path, "partitionKeys": ["year", "month", "day"]}`
* Convert data to a [columnar format](https://docs.aws.amazon.com/athena/latest/ug/columnar-storage.html) `format="parquet"`
```
%%time
# Count trips in the first 10 days of Jan 2017 over the CSV data: a CTE
# parses the pickup timestamp string, then the WHERE clause filters on
# year/month/day after joining all the lookup tables.
cursor.execute('''WITH yellow AS (SELECT date_parse(yellow.tpep_pickup_datetime,'%Y-%m-%d %H:%i:%s') AS pu_datetime, yellow.* FROM ''' + database_name + '''.yellow )
SELECT count(yellow.vendorid) as cnt FROM yellow
JOIN ''' + database_name + '''.paymenttype ON yellow.payment_type = paymenttype.id
JOIN ''' + database_name + '''.ratecode ON yellow.ratecodeid = ratecode.id
JOIN ''' + database_name + '''.taxi_zone_lookup AS pu_taxizone ON yellow.pulocationid = pu_taxizone.locationid
JOIN ''' + database_name + '''.taxi_zone_lookup AS do_taxizone ON yellow.dolocationid = do_taxizone.locationid
WHERE year(pu_datetime) = 2017
AND month(pu_datetime) = 1
AND day(pu_datetime) BETWEEN 1 AND 10''')
df = as_pandas(cursor)
df.head(1)
```
### Optimized queries using the Parquet yellow taxi data
We will run the same queries again but this time we will use the dataset utilizing the parquet format to show the performance gains you get when converting.
```
%%time
# Same simple select as before, but against the Parquet-backed table so the
# timing can be compared with the CSV scan.
cursor.execute('select * from ' + database_name + '.yellow_parquet limit 10')
df = as_pandas(cursor)
df.head(5)
```
Same complex queries using the `yellow_parquet` table instead.
```
%%time
# Same Jan 1-10 2017 count as above, but sourced from the Parquet table so
# the runtime can be compared against the CSV version.
cursor.execute('''
WITH yellow AS (SELECT date_parse(yellow.tpep_pickup_datetime,'%Y-%m-%d %H:%i:%s') AS pu_datetime, yellow.* FROM ''' + database_name + '''.yellow_parquet as yellow )
select count( yellow.vendorid)
FROM yellow
Inner JOIN ''' + database_name + '''.paymenttype ON yellow.payment_type = paymenttype.id
Inner JOIN ''' + database_name + '''.ratecode ON yellow.ratecodeid = ratecode.id
Inner JOIN ''' + database_name + '''.taxi_zone_lookup AS pu_taxizone ON yellow.pulocationid = pu_taxizone.locationid
Inner JOIN ''' + database_name + '''.taxi_zone_lookup AS do_taxizone ON yellow.dolocationid = do_taxizone.locationid
WHERE year(pu_datetime) = 2017
AND month(pu_datetime) = 1
AND day(pu_datetime) BETWEEN 1 AND 10''')
df = as_pandas(cursor)
df.head(5)
```
### Check status Redshift Cloudformation Stacks
Let's check in on the status of the EMR and Redshift CloudFormation stacks. Now that we showed how you can leverage Athena for querying the raw and curated data we want to dive into using other analytics engines to show the capability of keeping all your data in your data lake and leverage the right tools for the job.
Separating your storage from your compute allows you to scale each component independently. This gives you the flexibility needed when making tool selection as well providing agility in upgrading to new tools and services as they come out helping future proof your data lake solution.
```
# Check the Redshift stack and, once it is complete, resolve the cluster's
# endpoint, JDBC URL and IAM role for the Spectrum steps below.
# BUG FIX: the original always called describe_clusters, so running this cell
# before the stack reached CREATE_COMPLETE raised a NameError because
# redshift_cluster_name was never assigned. The cluster lookup is now guarded.
response = cfn.describe_stacks(
    StackName=redshift_stack_name
)
if response['Stacks'][0]['StackStatus'] == 'CREATE_COMPLETE':
    for output in response['Stacks'][0]['Outputs']:
        if (output['OutputKey'] == 'RedshiftAddress'):
            # The cluster identifier is the first label of the endpoint DNS name.
            redshift_cluster_name = output['OutputValue'].split('.')[0]
            print(redshift_cluster_name)
    response = redshift_client.describe_clusters(
        ClusterIdentifier=redshift_cluster_name
    )
    status = response['Clusters'][0]['ClusterStatus']
    if status == 'available':
        redshift_address = response['Clusters'][0]['Endpoint']['Address']
        print(redshift_address)
        jdbc_url = 'jdbc:redshift://' + redshift_address + ':' + str(redshift_port) + '/' + redshift_database_name
        print(jdbc_url)
        iam_role = response['Clusters'][0]['IamRoles'][0]['IamRoleArn']
        print(iam_role)
    else:
        print('Not yet available. Current status is {}'.format(status))
else:
    print('Not yet complete.')
```
### Install the psycopg2 library to connect to Redshift
Psycopg is the most popular PostgreSQL database adapter for the Python programming language. Its main features are the complete implementation of the Python DB API 2.0 specification and the thread safety.
[psycopg2](http://initd.org/psycopg/)
```
# Install the psycopg2 PostgreSQL adapter used to connect to Redshift below.
!pip install psycopg2
```
### Create connection attributes
```
# Connection attributes gathered from the earlier cells.
conn_string = { 'dbname': redshift_database_name,
                'user': admin_user,
                'pwd': admin_password,
                'host': redshift_address,
                'port': redshift_port
              }
import psycopg2

def create_conn(config):
    """Return a psycopg2 connection built from *config*, or None on failure."""
    try:
        # get a connection, if a connect cannot be made an exception will be raised here
        con = psycopg2.connect(dbname=config['dbname'], host=config['host'],
                               port=config['port'], user=config['user'],
                               password=config['pwd'])
        return con
    except Exception as err:
        print(err)
        return None

con = create_conn(config=conn_string)
# BUG FIX: the success message used to print unconditionally, even when
# create_conn swallowed the exception and returned None.
if con is not None:
    print("Connected to Redshift!\n")
else:
    print("Failed to connect to Redshift.")
```
### Create Redshift Spectrum external table
```
# Create the Redshift Spectrum external schema over the Glue catalog database,
# then run the unoptimized (CSV) Jan 1-10 2017 vendor count through Redshift.
statement = '''create external schema spectrum
from data catalog
database \'''' + database_name + '''\'
iam_role \'''' + iam_role + '''\'
create external database if not exists;'''
print(statement)
# con.cursor will return a cursor object, you can use this cursor to perform queries
cur = con.cursor()
cur.execute(statement)
con.commit()
# BUG FIX: the notebook export merged a markdown sentence and a mid-cell
# %%time magic into this code cell, which made it a SyntaxError. They are
# preserved below as comments.
# Get the count by vendor for Jan 1st - 10th on 2017 using the CSV formatted data.
# %%time
from pandas import read_sql  # BUG FIX: read_sql is used below but was never imported
## Unoptimized
statement = '''select count(yellow.vendorid)
from spectrum.yellow
Inner JOIN spectrum.paymenttype ON yellow.payment_type = paymenttype.id
Inner JOIN spectrum.ratecode ON yellow.ratecodeid = ratecode.id
Inner JOIN spectrum.taxi_zone_lookup AS pu_taxizone ON yellow.pulocationid =
pu_taxizone.locationid
Inner JOIN spectrum.taxi_zone_lookup AS do_taxizone ON yellow.dolocationid =
do_taxizone.locationid
where extract(month from cast(tpep_pickup_datetime as date)) = 1 and
extract(year from cast(tpep_pickup_datetime as date)) = 2017 and
extract(day from cast(tpep_pickup_datetime as date)) between 1 and 10;'''
df = read_sql(statement, con=con)
```
### View results
```
# Display the vendor-count result of the unoptimized (CSV) Spectrum query.
df
```
Get the count by vendor for Jan 1st - 10th on 2017 using the Parquet formatted data.
```
%%time
## Optimized
statement = '''select count(yellow.vendorid)
from spectrum.yellow_parquet as yellow
Inner JOIN spectrum.paymenttype ON yellow.payment_type = paymenttype.id
Inner JOIN spectrum.ratecode ON yellow.ratecodeid = ratecode.id
Inner JOIN spectrum.taxi_zone_lookup AS pu_taxizone ON yellow.pulocationid =
pu_taxizone.locationid
Inner JOIN spectrum.taxi_zone_lookup AS do_taxizone ON yellow.dolocationid =
do_taxizone.locationid
where extract(month from cast(tpep_pickup_datetime as date)) = 1 and
extract(year from cast(tpep_pickup_datetime as date)) = 2017 and
extract(day from cast(tpep_pickup_datetime as date)) between 1 and 10;'''
df = read_sql(statement, con=con)
df
```
### Check status EMR Cloudformation Stacks
Let's check in on the status of the EMR cluster. If it's not yet finished please wait until it's ready.
```
# Check the EMR stack and, once complete, print the values needed to create
# the EMR notebook.
# BUG FIX: the original printed cluster_id unconditionally, so running this
# cell before the stack reached CREATE_COMPLETE raised a NameError. The
# prints that depend on cluster_id are now guarded.
response = cfn.describe_stacks(
    StackName=emr_stack_name
)
notebook_prefix = 's3://{0}/notebooks/'.format(bucket)
emr_notebooks_file = 'TaxiEMRNotebook.ipynb'
if response['Stacks'][0]['StackStatus'] == 'CREATE_COMPLETE':
    for output in response['Stacks'][0]['Outputs']:
        if (output['OutputKey'] == 'EMRClusterId'):
            cluster_id = output['OutputValue']
            print(cluster_id)
    print('Notebook Name: {}'.format(emr_notebooks_file.split('.')[0]))
    print('Notebook Location: {}'.format(notebook_prefix))
    print('Notebook Cluster: {}'.format(cluster_id))
else:
    print('Not yet complete.')
```
### Create an EMR Notebook
Create a notebook in EMR to run Spark queries in based on the attributes above.
```
# Console link to the EMR "create notebook" page for this region.
print('https://{0}.console.aws.amazon.com/elasticmapreduce/home?region={0}#create-notebook:'.format(region))
```
### Find Notebook id and import TaxiEMRNotebook into EMR Notebook
There is a notebook `TaxiEMRNotebook.ipynb` that you will want to download and import into the EMR notebook you just created and walk through the cells comparing the optimized vs. unoptimized schema format.
```
#Get Notebook Id
notebook_id = '{{notebook_id}}'
session.resource('s3').Bucket(bucket).Object(os.path.join('notebooks', notebook_id, emr_notebooks_file)).upload_file(emr_notebooks_file)
```
### Open EMR Notebook and execute queries
```
# Console link to the EMR notebooks list where the imported notebook can be opened.
print('https://{0}.console.aws.amazon.com/elasticmapreduce/home?region={0}#notebooks-list:'.format(region))
```
**Congratulations!!!!** You have completed the workshops showing the capabilities of leveraging a Data Lake on AWS and the flexibility of choice when using analytics tools in AWS. Before you run the cleanup please delete the EMR Notebook you created above by selecting the notebook and clicking `Delete` in the toolbar on the EMR Notebook console.
### Cleanup
```
# Tear down everything the workshop created: start the (asynchronous) stack
# deletions first, remove the Glue crawlers/job/database and the S3 bucket,
# then wait for both stack deletions to finish.
response = cfn.delete_stack(StackName=redshift_stack_name)
response = cfn.delete_stack(StackName=emr_stack_name)
response = glue.delete_crawler(Name=parq_crawler_name)
response = glue.delete_crawler(Name=crawler_name)
response = glue.delete_job(JobName=glue_job_name)
response = glue.delete_database(
    CatalogId = account_id,
    Name = database_name
)
# Remove the workshop bucket and all of its contents.
!aws s3 rb s3://$bucket --force
waiter = cfn.get_waiter('stack_delete_complete')
waiter.wait(
    StackName=emr_stack_name
)
print('The wait is over for {0}'.format(emr_stack_name))
waiter = cfn.get_waiter('stack_delete_complete')
waiter.wait(
    StackName=redshift_stack_name
)
print('The wait is over for {0}'.format(redshift_stack_name))
# Only remove the VPC if this notebook created it (never delete the default VPC).
if not use_existing:
    workshop.vpc_cleanup(vpc_id)
```
| github_jupyter |
## Introduction
(You can also read this article on our website, [Easy-TensorFlow](http://www.easy-tensorflow.com/basics/graph-and-session))
Why do we need tensorflow? Why are people crazy about it? In a way, it is lazy computing and offers flexibility in the way you run your code. What is this thing about flexibility and lazy computing? We are glad you asked!
Lazy Computing: TensorFlow is a way of representing computation without actually performing it until asked. The first step to learn Tensorflow is to understand its main key feature, the __"computational graph"__ approach. Basically, all Tensorflow codes contain two important parts:
__Part 1:__ building the __GRAPH__, it represents the data flow of the computations
__Part 2:__ running a __SESSION__, it executes the operations in the graph
First you create the graph, i.e. what you want to do with the data; then you run it separately using a session (don't struggle to wrap your head around it, it will come to you eventually).
Flexibility: When you create a graph, you are not bound to run the whole graph and can control the parts of the graph that are executed separately. This provides a huge flexibility with your models.
Bonus: One of the biggest advantages of TensorFlow is its visualizations of the computation graph. It's called TensorBoard and will be discussed in the future. Now that we have discussed the what and why of TensorFlow, let's dive in to the actual thing.
TensorFlow separates the definition of computations from their execution. These two parts are explained in more detail in the following sections. Before that, remember that the first step is to import the Tensorflow library!
```
import tensorflow as tf
```
This gives Python access to all of TensorFlow's classes, methods, and symbols. Using this command, TensorFlow library will be imported under the alias __tf__ so that later we can use it instead of typing the whole term __tensorflow__ each time.
__What is a Tensor?__
TensorFlow programs use a data structure called tensor to represent all the data. Any type of data you plan to use for your model can be stored in Tensors. Simply put, a Tensor is a multi-dimensional array (0-D tensor: scalar, 1-D tensor: vector, 2-D tensor: matrix, and so on). Hence, TensorFlow is simply referring to the flow of the Tensors in the computational graph.
<img src="files/files/1_1.gif">
___Fig1. ___ A sample computational graph in TensorFlow (Source: TensorFlow website)
## GRAPH
The biggest idea about Tensorflow is that all the numerical computations are expressed as a computational graph. In other words, the backbone of any Tensorflow program is a __Graph__. Anything that happens in your model is represented by the computational graph. This makes it, the to go place for anything related to your model. Quoted from the TensorFlow website, "A __computational graph__ (or graph in short) is a series of TensorFlow operations arranged into a graph of nodes". Basically, it means a graph is just an arrangement of nodes that represent the operations in your model.
So First let's see what does a node and operation mean? The best way to explain it is by looking at a simple example. Suppose we want to write the code for function $f(x,y)=x^2y+y+2$. The Graph in TensorFlow will be something like:
<img src="files/files/1_2.png" width="500" height="1000" >
___Fig2. ___ Schematic of the constructed computational graph in TensorFlow
The graph is composed of a series of nodes connected to each other by edges (from the image above). Each __node__ in the graph is called __op__ (short for operation). So we'll have one node for each operation; either for operations on tensors (like math operations) or generating tensors (like variables and constants). Each node takes zero or more tensors as inputs and produces a tensor as an output.
Now Let's build a simple computational graph.
### Example 1:
Let's start with a basic arithmetic operation like addition to demonstrate a graph. The code adds two values, say a=2 and b=3, using TensorFlow. To do so, we need to call __tf.add()__. From here on, we recommend you to check out the documentation of each method/class to get a clear idea of what it can do (documentation can be found at tensorflow.org or you can just use google to get to the required page in the documentation). The __tf.add()__ has three arguments 'x', 'y', and 'name' where x and y are the values to be added together and __name__ is the operation name, i.e. the name associated to the addition node on the graph.
If we call the operation __"Add"__, the code will be as follows:
```
import tensorflow as tf
a = 2
b = 3
c = tf.add(a, b, name='Add')
print(c)
```
The generated graph and variables are:
__*Note__: The graph is generated using __Tensorboard__. As discussed earlier, it is a visualization tool for the graph and will be discussed in detail in future.
<img src="files/files/1_3.png" width="800" height="1500">
___Fig3. ___ __Left:__ generated graph visualized in Tensorboard, __Right:__ generated variables (screenshot captured from PyCharm debugger when running in debug mode)
This code creates two input nodes (for inputs a=2 and b=3) and one output node for the addition operation (named Add). When we print out the variable __c__ (i.e. the output Tensor of the addition operation), it prints out the Tensor information; its name (Add), shape (__()__ means scalar), and type (32-bit integer). However, it does not spit out the result (2+3=5). Why?!
Remember earlier in this post, we talked about the two parts of a TensorFlow code. First step is to create a graph and to actually evaluate the nodes, we must run the computational graph within a __Session__. In simple words, the written code only generates the graph which only determines the expected sizes of Tensors and operations to be executed on them. However, it doesn't assign a numeric value to any of the Tensors i.e. TensorFlow does not execute the graph unless it is specified to do so with a session. Hence, to assign these values and make them flow through the graph, we need to create and run a session.
Therefore a TensorFlow Graph is something like a function definition in Python. It __WILL NOT__ do any computation for you (just like a function definition will not have any execution result). It __ONLY__ defines computation operations.
## Session
To compute anything, a graph must be launched in a session. Technically, session places the graph ops on hardware such as CPUs or GPUs and provides methods to execute them. In our example, to run the graph and get the value for c the following code will create a session and execute the graph by running 'c':
```
sess = tf.Session()
print(sess.run(c))
sess.close()
```
This code creates a Session object (assigned to __sess__), and then (the second line) invokes its run method to run enough of the computational graph to evaluate __c__. This means that it only runs that part of the graph which is necessary to get the value of __c__ (remember the flexibility of using TensorFlow? In this simple example, it runs the whole graph). Remember to close the session at the end of the session. That is done using the last line in the above code.
The following code does the same thing and is more commonly used. The only difference is that there is no need to close the session at the end as it gets closed automatically.
```
with tf.Session() as sess:
print(sess.run(c))
```
Now let's look at the created graph one more time. Don't you see anything weird?
<img src="files/files/1_4.png" width="500" height="1000">
___Fig4. ___ The generated graph visualized by Tensorboard
Exactly! What is x and y?! Where did these two things come from? We didn't define any x or y variables!
Well... To explain clearly, let's make up two names; say __"Python-name"__ and __"TensorFlow-name"__. In this piece of code, we generated 3 variables (look at the right panel of Fig. 3) with __"Python-name"__s of _a_, _b_, and _c_. Here, _a_ and _b_ are Python variables, thus have no __"TensorFlow-name"__; while _c_ is a Tensor with ___Add___ as its __"TensorFlow-name"__.
Clear? Okay, let's get back to our question, what is x and y then?
In an ideal Tensorflow case, __tf.add()__ receives two __Tensors__ with defined __"TensorFlow-name"__ as input (these names are separate from __Python-name__). For example, by writing $c = tf.add(a, b, name='Add')$, we're actually creating a variable (or Tensor) with __c__ as its Python-name and __Add__ as the TensorFlow-name.
In the above code, we passed two Python variables (a=2 and b=3) which only have Python-names (a and b), but they have no TensorFlow-names. TensorFlow uses the TensorFlow-names for visualizing the graphs. Since a and b have no TensorFlow-names, it uses some default names, x and y.
__*Note:__ This name mismatch can easily be solved by using tf.constant() for creating the input variables as Tensors instead of simply using Python variables (a=2, b=3). This is explained thoroughly in the next tutorial where we talk about TensorFlow DataTypes.
For now, we'll continue using Python variables and change the Python variable names __a__ and __b__ into __x__ and __y__ to solve the name mismatch temporarily.
Now let's look at a more complicated example.
### Example 2:
Creating a graph with multiple math operations
```
# Example 2: a graph with several math ops.  TensorFlow only executes the
# ops needed to produce the tensors we fetch in sess.run().
import tensorflow as tf
x = 2
y = 3
add_op = tf.add(x, y, name='Add')                    # x + y
mul_op = tf.multiply(x, y, name='Multiply')          # x * y
pow_op = tf.pow(add_op, mul_op, name='Power')        # (x + y) ** (x * y)
useless_op = tf.multiply(x, add_op, name='Useless')  # output consumed by nothing but our fetch
with tf.Session() as sess:
    # Fetching both tensors forces evaluation of every op above.
    pow_out, useless_out = sess.run([pow_op, useless_op])
```
The created graph and the defined variables (Tensors and Python variables) are:
<img src="files/files/1_5.png" width="1000" height="2000">
___Fig5. ___ __Left:__ generated graph visualized in Tensorboard, __Right:__ generated variables (screenshot captured from PyCharm debugger when running in debug mode)
I called one of the operations useless_op because its output is not used by other operations. Let's talk about an __IMPORTANT__ point. Given this graph, if we fetch the __pow_op__ operation, it will first run the __add_op__ and __mul_op__ to get their output tensor and then run __pow_op__ on them to compute the required output value. In other words __useless_op__ will not be executed as its output tensor is not used in executing the __pow_op__ operation.
__This is one of the advantages of defining a graph and running a session on it! It helps running only the required operations of the graph and skip the rest (remember flexibility). This specially saves a significant amount of time for us when dealing with huge networks with hundreds and thousands of operations.__
In the above code, in the defined session, we're fetching the value of two tensors (i.e. output tensors of __pow_op__ and __useless_op__) at the same time. This will run the whole graph to get the required output tensors.
I hope this post has helped you to understand the concept of __Graph__ and __Session__ in TensorFlow. Thank you so much for reading! If you have any questions, feel free to leave a comment in our [webpage](http://www.easy-tensorflow.com/basics/graph-and-session). You can also send us feedback through the [__contacts__](http://www.easy-tensorflow.com/contacts) page.
| github_jupyter |
<div style="text-align:center">
<h1> Datatypes </h1>
<h2> CS3100 Monsoon 2020 </h2>
</div>
## Review
Previously
* Function definition and application
* Anonymous and recursive functions
* Tail call optimisation
This lecture,
* Data types
## Type aliases
OCaml supports the definition of aliases for existing types. For example,
```
type int_float_pair = int * float
let x = (10, 3.14)
let y : int_float_pair = x
```
## Records
* Records in OCaml represent a collection of named elements.
* A simple example is a point record containing x, y and z fields:
```
type point = {
x : int;
y : int;
z : int;
}
```
## Records: Creation and access
We can create instances of our point type using `{ ... }`, and access the elements of a point using the `.` operator:
```
let origin = { y = 0; x = 0;z = 0 }
let get_y r = r.y
```
## Records: Functional update
* New records can also be created from existing records using the `with` keyword.
```
let p = { origin with z = 10 }
```
* `p` is a new record with the same fields as `origin` except `z`.
* `origin` remains unchanged!
```
origin
```
## Records: Field punning
Another useful trick with records is field punning, which allows you to replace:
```
let mk_point x y z = { x = x; y = y; z = z }
```
with
```
let mk_point x y z = { x; y; z }
```
## Product Types
* Records and tuples are known as **product types**.
+ Each value of a product type includes all of the values that constitute the product.
```ocaml
type person_r = {name: string; age: int; height: float}
type person_t = string * int * float
```
* Records are indexed by *names* whereas *tuples* are indexed by positions (1st, 2nd, etc.).
## Is there a _sum_ type?
<center>
<h1> VARIANTS </h1>
</center>
## Defining variants
The type definition syntax is:
```ocaml
type t =
| C1 of t1
| C2 of t2
| C3 of t3
| ...
```
* C1, C2, C3 are known as constructors
* t1, t2 and t3 are optional data carried by constructor
* Also known as **Algebraic Data Types**
```
type color =
| Red
| Green
| Blue
let v = (Green , Red)
type point = {x : int; y : int}
type shape =
| Circle of point * float (* center, radius *)
| Rect of point * point (* lower-left, upper-right *)
| ColorPoint of point * color
Circle ({x=4;y=3}, 2.5)
Rect ({x=3;y=4}, {x=7;y=9})
```
## Recursive variant types
Let's define an integer list
```
type intlist =
| INil
| ICons of int * intlist
ICons (1, ICons (2, ICons (3, INil)))
```
* `Nil` and `Cons` originate from Lisp.
## String List
```ocaml
type stringlist =
| SNil
| Scons of string * stringlist
```
* Now what about `pointlist`, `shapelist`, etc?
## Parameterized Variants
```
type 'a lst =
Nil
| Cons of 'a * 'a lst
Cons (1, Cons (2, Nil))
Cons ("Hello", Cons("World", Nil))
```
## 'a is a Type Variable
* **Variable**: name standing for an unknown value
* **Type Variable**: name standing for an unknown type
* Java example is `List<T>`
* C++ example is `std::vector<T>`
* OCaml syntax for type variable is a single quote followed by an identifier
+ '`foo`, `'key`, '`value`
* Most often just `'a`, `'b`.
+ Pronounced "alpha", "beta" or "quote a", "quote b".
## Polymorphism
* The type `'a lst` that we had defined earlier is a **polymorphic data type**.
+ poly = many, morph = change.
* Write functionality that works for many data types.
+ Related to Java Generics and C++ template instantiation.
* In `'a lst`, `lst` is known as a **type constructor**.
+ constructs types such as `int lst`, `string lst`, `shape lst`, etc.
## OCaml built-in lists are just variants
OCaml effectively codes up lists as variants:
```ocaml
type 'a list = [] | :: of 'a * 'a list
```
* `[]` and `::` are constructors.
* Just a bit of syntactic magic to use `[]` and `::` as constructors rather than alphanumeric identifiers.
## OCaml Lists
```
[]
1::2::[]
```
can also write is as
```
[1;2]
```
## `::` is right-associative
```
1::2::[]
```
is equivalent to
```
1::(2::[])
```
## Binary Trees
```
type 'a tree =
Leaf
| Node of 'a tree (* left *) * 'a (* value *) * 'a tree (* right *)
```
In OCaml, `(* ... *)` is how you write down comments in OCaml
```
let t = Node (Leaf, 0.0, Node (Leaf, 1.0, Leaf))
```
## Null
"I call it my billion-dollar mistake. It was the invention
of the null reference in 1965. At that time, I was
**designing the first comprehensive type system for
references in an object-oriented language**. My goal was
to ensure that all use of references should be
**absolutely safe**, with checking performed automatically
by the compiler. **But I couldn’t resist the temptation to
put in a null reference, simply because it was so easy to
implement.** This has led to innumerable errors,
vulnerabilities, and system crashes, which have
probably caused a **billion dollars of pain and damage** in
the last forty years."
<h2 style="text-align:right"> - Sir Tony Hoare </h2>
## Option: A Built-in Variant
* OCaml does not have a null value.
```ocaml
type 'a option = None | Some of 'a
```
<center>
<img src="images/box.jpg" width="150">
</center>
```
None
Some 10
Some "Hello"
```
## When to use option types
```ocaml
type student = { name : string; rollno : string;
marks : int}
```
* what value will you assign for `marks` field before the exams are taken?
+ `0` is not a good answer since it might also be the case that the student actually scored 0.
```ocaml
type student = { name : string; rollno : string;
marks : int option }
```
* Use `None` to indicate the exam has not been taken.
## Question
Given records, variants and tuples, which one would you pick for the following cases?
1. Represent currency denominations 10, 20, 50, 100, 200, 500, 2000.
2. Students who have name and roll numbers.
3. A dessert which has a sauce, a creamy component, and a crunchy component.
## Question
Given records, variants and tuples, which one would you pick for the following cases?
1. Represent currency denominations 10, 20, 50, 100, 200, 500, 2000.
```ocaml
type denomination = D10 | D20 | D50 | D100 | D200 | D500 | D2000
```
2. Students who have name and roll numbers.
```ocaml
type student = {name : string; roll_no : string}
```
3. A dessert which has a sauce, a creamy component, and a crunchy component.
```ocaml
type dessert = {sauce : string; creamy: string; crunchy: string}
```
## What about tuples?
Could have used tuples for
2. Students who have name and roll numbers.
3. A dessert which has a sauce, a creamy component, and a crunchy component.
but
* Tuples are convenient for local uses
+ Returning a pair of values
* Records are useful for global uses
+ _Tuples with documentation_
<center>
<h1 style="text-align:center"> Fin. </h1>
</center>
| github_jupyter |
# OpenMM Introduction
## User Guide
* [OpenMM Users Manual and Theory Guide](http://docs.openmm.org/latest/userguide/index.html) <br>
The place where you should look first! Good Explanations!
* Theory: [Standard Forces](http://docs.openmm.org/latest/userguide/theory.html#standard-forces) <br>
If you want to know the formulas of the used forces.
* [OpenMM Python API](http://docs.openmm.org/latest/api-python/index.html) <br>
Only the python interface
## additional tools
* [OpenMMTools](https://openmmtools.readthedocs.io/en/0.17.0/) <br>
A lot of additional implementations
* [ParmEd](https://parmed.github.io/ParmEd/html/index.html) <br>
general tool for aiding in investigations of biomolecular systems
* [choderalab/openmm-forcefields](https://github.com/choderalab/openmm-forcefields) <br>
Conversion tools for and repository of CHARMM and AMBER forcefields for OpenMM.
# How is OpenMM structured?
* Layered structure
* Public Interface ( **OpenMM Public API** ) <span style='color:red'>**<- thats where we work**</span>
* Platform Interdependent Code ( OpenMM Implementation Layer )
* Platform Abstraction Layer ( OpenMM Low Level API )
* Computational Kernels ( OpenCL/CUDA/MPI/etc. )
## OpenMM Public API ?
* Python or C interface to control OpenMM
* Script like language
* requires to build the simulation from single components
## The General ClassDiagram in OpenMM

Just an overview how everything is connected
## Let's focus on the crucial elements

# System
## What's a system ?

## Forces
In a broader sense: `ALL interactions`
* non-bonded forces
* bonded forces
* constraints
* ANY kind of user defined `CustomForces`
* <span style='color:red'>thermostat and barostat </span>
### How does this actually look like?
```
# Let's import everything
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# read the pdb file
pdb = PDBFile('spce.pdb')
# read the force field file
forcefield = ForceField('spce.xml')
```
**How does this force field file looks like ?**
```xml
<ForceField>
<AtomTypes>
<Type name="spce-O" class="OW" element="O" mass="15.99943"/>
<Type name="spce-H" class="HW" element="H" mass="1.007947"/>
</AtomTypes>
<Residues>
<Residue name="HOH">
<Atom name="O" type="spce-O"/>
<Atom name="H1" type="spce-H"/>
<Atom name="H2" type="spce-H"/>
<Bond atomName1="O" atomName2="H1"/>
<Bond atomName1="O" atomName2="H2"/>
</Residue>
</Residues>
<HarmonicBondForce>
<Bond class1="OW" class2="HW" length="0.1" k="462750.4"/>
</HarmonicBondForce>
<HarmonicAngleForce>
<Angle class1="HW" class2="OW" class3="HW" angle="1.91061193216" k="836.8"/>
</HarmonicAngleForce>
<NonbondedForce coulomb14scale="0.833333" lj14scale="0.5">
<Atom type="spce-O" charge="-0.8476" sigma="0.31657195050398818" epsilon="0.6497752"/>
<Atom type="spce-H" charge="0.4238" sigma="1" epsilon="0"/>
</NonbondedForce>
</ForceField>
```
create the system
```
system = forcefield.createSystem(pdb.topology,
nonbondedMethod=PME,
nonbondedCutoff=1*nanometer,
constraints=HAngles)
```
**constraints**
|Value|Meaning|
|-----|-------|
| None | No constraints are applied. This is the default value. |
| HBonds | The lengths of all bonds that involve a hydrogen atom are constrained. |
| AllBonds | The lengths of all bonds are constrained. |
| HAngles | The lengths of all bonds are constrained. In addition, all angles of the form H-X-H or H-O-X (where X is an arbitrary atom) are constrained. |
# Integrator
"Just" an integrator. Some integrators can also adjust temperatures and work as a thermostat simultaneously.
```
# LangevinIntegrator(temperature, friction coefficient, step size)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond,0.002*picoseconds)
```
## Example for adding a `integrator` + `thermostat`
```python
system.addForce(AndersenThermostat(300*kelvin, 1/picosecond))
integrator = VerletIntegrator(0.002*picoseconds)
```
a `thermostat` is a `Force` so it belongs to the `system`
## Example for adding a barostat
```python
system.addForce(MonteCarloBarostat(1*bar, 300*kelvin))
integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
```
<p style='color:red; font-weight:bold'>Only MonteCarloBarostat available!</p>
# Simulations

Let's create a simulation
```
simulation = Simulation(pdb.topology, system, integrator)
# set positions
simulation.context.setPositions(pdb.positions)
```
# Recap
## What did we do till now?
```python
# Let's import everything
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# read the pdb file
pdb = PDBFile('spce.pdb')
# read the force field file
forcefield = ForceField('spce.xml')
# create a system
system = forcefield.createSystem(pdb.topology, nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HAngles)
# define a integrator (with integrated thermostat)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond,0.002*picoseconds)
# create a simulation
simulation = Simulation(pdb.topology, system, integrator)
# set positions
simulation.context.setPositions(pdb.positions)
```
## What did we got?

# Reporters
```
# report thermodynamical properties
simulation.reporters.append(StateDataReporter('thermo.csv', 1000,
step=True, potentialEnergy=True, temperature=True))
# report positions
simulation.reporters.append(DCDReporter('trajectory.dcd', 1000))
```
## Overview reporters
<div style='width:100%; float:left'>
<div style='width:49%; float:left'>
<h5>OpenMM</h5>
<ul>
<li> CheckpointReporter </li>
<li> DCDReporter </li>
<li> PDBReporter </li>
<li> PDBxReporter </li>
<li> StateDataReporter</li>
</ul>
</div>
<div style='width:49%; float:left'>
<h5>MDTraj</h5>
<ul>
<li> HDF5Reporter </li>
<li> NetCDFReporter </li>
<li> DCDReporter </li>
</ul>
</div>
</div>
<div style='width:100%; float:left'>
<div style='width:49%; float:left'>
<h5></h5>
</div>
<div style='width:49%; float:left'>
<h5>ParmED</h5>
<ul>
<li> StateDataReporter </li>
<li> NetCDFReporter </li>
<li> MdcrdReporter </li>
<li> RestartReporter </li>
<li> ProgressReporter</li>
</ul>
</div>
</div>
# Energy minimization
```
# simulation.minimizeEnergy(tolerance=10*kilojoule/mole, maxIterations=0)
simulation.minimizeEnergy()
```
# Do a simulation
```
# Let's set some Velocities
simulation.context.setVelocitiesToTemperature(300*kelvin)
# Let's simulate
simulation.step(1000)
```
# The whole picture
```python
# Let's import everything
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# read the pdb file
pdb = PDBFile('spce.pdb')
# read the force field file
forcefield = ForceField('spce.xml')
# create a system
system = forcefield.createSystem(pdb.topology, nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HAngles)
# define a integrator (with integrated thermostat)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond,0.002*picoseconds)
# create a simulation
simulation = Simulation(pdb.topology, system, integrator)
# set positions
simulation.context.setPositions(pdb.positions)
# report thermodynamical properties
simulation.reporters.append(StateDataReporter('thermo.csv', 1000, step=True,
potentialEnergy=True, temperature=True))
# report positions
simulation.reporters.append(DCDReporter('trajectory.dcd', 1000))
# minimize the system
simulation.minimizeEnergy()
# generate velocities
simulation.context.setVelocitiesToTemperature(300*kelvin)
# run the simulation
simulation.step(1000)
```
# What did we skip?
* modifying structures before we create a system
* `Platforms`
* getting data from the `simulation` object
# Platforms
* `Reference` <br>
clean user-readable code (not performance in mind)
* `CPU` <br>
optimized CPU code (multithreaded - OpenMM)
* `CUDA` <br>
Nvidia-GPU's
* `OpenCL` <br>
variety of types of GPU's and CPU's
* <span style='color:red; font-weight:900'>no MPI</span>
## How to use a platform?
```
# Let's import everything
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# read the pdb file
pdb = PDBFile('spce.pdb')
# read the force field file
forcefield = ForceField('spce.xml')
# define a integrator (with integrated thermostat)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond,0.002*picoseconds)
platform = Platform.getPlatformByName('CUDA')
simulation = Simulation(pdb.topology, system, integrator, platform)
```
How to change properties of a platform?
```
# Let's import everything
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# read the pdb file
pdb = PDBFile('spce.pdb')
# read the force field file
forcefield = ForceField('spce.xml')
# define a integrator (with integrated thermostat)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond,0.002*picoseconds)
platform = Platform.getPlatformByName('CUDA')
properties = {'DeviceIndex': '0,1', 'Precision': 'double'}
simulation = Simulation(pdb.topology, system, integrator, platform, properties)
```
## Precision aka Thing's one should stress

<p>
<div style='color:red; font-weight:bold; font-size:large;'>ALWAYS check the default options of your MD engine</div><p>
| github_jupyter |
```
(require gamble
racket/list)
```
# The littlest radar blip problem.
This should illustrate the problems of representing uncertainty about existence, number, and origin.
I think a reasonable way to generate the blips at each time step is
1. generate the real blips, with each airplane having an independent, high probability of generating a blip, which has as its location a noisified version of the generating airplane's true location;
2. generate the number of false blips;
3. pick a random location for each false blip;
4. concatenate the true and false blips; and
5. randomly permute the order of the full blip list, to yield the observed blips.
We could achive this by randomly permuting the integers 0 to n-1, where n is the total number of blips, and using list-ref to get the right coordinate tuple.
To avoid possible type confusion, maybe the airplanes and blips should be data types, or at least gensyms.
A trickier variant could have new planes entering the box at random times.
```
;; Worlds--sets of things, represented as integer indices.
;; n-airplanes is deflazy: drawn once per world, on first use.
;; n-false-alarms is memoized per time step, so repeated queries at the
;; same time agree.  airplanes / false-alarms are index ranges 0..n-1.
(defmodel things
  (deflazy n-airplanes (poisson 5))            ; prior: mean 5 planes
  (defmem (n-false-alarms time) (poisson 2))   ; prior: mean 2 false blips per step
  (deflazy airplanes (range n-airplanes))
  (defmem (false-alarms time) (range (n-false-alarms time))))
;; Sanity check: bring the model's names into scope and show one draw.
(open-model things)
(displayln airplanes)
(displayln (false-alarms 1))
```
# Airplane dynamics
Initial locations (at time 0) are chosen uniformly in a (-1,1) box. Initial velocities are random normal.
Location at time $t$ is location at $t-1$ plus velocity at $t-1$ plus zero-mean noise.
Velocity at $t$ is velocity at $t-1$ plus zero-mean noise.
```
;; Airplane motion model: a first-order linear-Gaussian random walk per
;; (plane, dimension).  Positions start uniform in the (-1,1) box;
;; velocities bottom out at the time -1 base case.  All draws are
;; memoized per (plane, dimension, time), so queries are consistent.
(defmodel dynamics
  (define location-noise-std-dev 0.05)
  (define velocity-noise-std-dev 0.1)
  ;; location(t) = location(t-1) + velocity(t-1) + N(0, 0.05); uniform at t=0.
  (defmem (location plane dimension time)
    (if (= time 0)
        (uniform -1 1)
        (normal (+ (location plane dimension (- time 1))
                   (velocity plane dimension (- time 1)))
                location-noise-std-dev)))
  ;; velocity(t) = velocity(t-1) + N(0, 0.1); time -1 anchors the recursion at 0.
  (defmem (velocity plane dimension time)
    (+ (normal 0 velocity-noise-std-dev)
       (if (= time -1)
           0
           (velocity plane dimension (- time 1))))))
```
# Observation model
This is tricky. We observe not only the locations of a number of blips, but also the fact there are exactly $n$ of them. Not sure how to get this in, yet.
I'm recalling some insight we had with "seismic", that once we sample the number of real observations, the number of false alarms is fixed by our data. What did we do there?
```
;; Observation model: each plane independently emits a noisy blip with
;; probability observation-prob; false alarms are uniform in the box; a
;; random permutation (blip-order) hides which blip came from which
;; source, giving the anonymized blip list an observer actually sees.
(defmodel observation
  (open-model things)
  (open-model dynamics)
  ;; On each dimension, the observed position will be the true position plus
  ;; zero-mean noise.
  (define obs-noise-std-dev 0.01)
  ;; Each airplane may or may not generate a blip
  (define observation-prob 0.9)
  ;; Randomly select a subset of planes for observation.
  ;; NEEDS "airplanes" from "things" d-model.
  (defmem (observed-planes time)
    (filter (lambda (airplane) (flip observation-prob)) airplanes))
  ;; Randomly perturb the true locations of the observed planes to give
  ;; observed locations.
  ;; NEEDS "location ..." from "dynamics" d-model.
  ;; BAD: this forces evaluation of *all* observations for the dim and time
  ;; (over all planes).
  (defmem (observation-blip airplane dimension time)
    (normal (location airplane dimension time) obs-noise-std-dev))
  ;; Generate locations for false blips.
  ;; NEEDS "false-alarms" from "things" d-model.
  (defmem (hallucination-blip h-number dimension time)
    (uniform -1 1))
  ;; Randomly permute indices to assign observations to real or hallucinated
  ;; sources
  ;; NEEDS "n-false-alarms" from "things" d-model.
  (defmem (blip-order time)
    (sample (permutation-dist (+ (length (observed-planes time))
                                 (n-false-alarms time)))))
  ;; A "soft" observation of the number of blips, so we don't throw out too many
  ;; rather good blip sets for not matching on exact number.
  ;; Ideally, the standard deviation would anneal to near zero.
  (defmem (about-how-many-blips time)
    (normal (vector-length (blip-order time)) 0.5))
  ;; Use blip order, observations, and hallucinations to define an observed-blip
  ;; function.
  ;; Forces blip-order, which forces observed-planes and n-false-alarms, which can
  ;; lead to a blip number that is out of bounds.
  ;; ??!!! the act of observing the coordinates of a blip number is an implicit
  ;;; observation that there are
  ;; at least that many blips!!!!
  ;; Permuted indices below (length observed-planes) are real blips; the
  ;; rest index into the hallucinations (after subtracting the offset).
  (define (observed-blip blip-number dimension time)
    (let ([raw-idx (vector-ref (blip-order time) blip-number)])
      (if (< raw-idx (length (observed-planes time)))
          (observation-blip raw-idx dimension time)
          (let ([idx (- raw-idx (length (observed-planes time)))])
            (hallucination-blip idx dimension time)))))
  ;; Ground-truth source of a blip: a plane index, or "false alarm".
  (define (blip-source-plane blip-number time)
    (let ([raw-idx (vector-ref (blip-order time) blip-number)])
      (if (< raw-idx (length (observed-planes time)))
          raw-idx
          "false alarm"))))
```
Let's make sure everything behaves as expected:
Ideally, we could test our beliefs not only about the what we expect some variables to look like, but also our beliefs about what variables have and do not have values, having asked for others. I think this means being able to peek in to the memo tables. I can imagine a standard display for rvs and indexed sets of rvs. Though textual output might be fine.
```
;; Smoke test: draw one world and print every piece of the observation
;; model at time step 3 so we can eyeball that they are consistent.
(open-model observation)
(printf "observed planes for step 3: ~a\n" (observed-planes 3))
(printf "\nobserved coordinates of observed planes at step 3:\n")
;; One (x y) pair per observed plane.
(map (lambda (pair) (printf "~a\n" pair))
     (map (lambda (n)
            (list (observation-blip n 0 3 )
                  (observation-blip n 1 3))) (observed-planes 3)))
(printf "\nobserved coordinates of hallucinated planes at step 3:\n")
;; One (x y) pair per false alarm.
(map (lambda (pair) (printf "~a\n" pair))
     (map (lambda (n)
            (list (hallucination-blip n 0 3 )
                  (hallucination-blip n 1 3))) (false-alarms 3)))
(printf "\nblip-order for step 3: ~a\n" (blip-order 3))
(printf "\nabout how many blips for step 3: ~a\n" (about-how-many-blips 3))
(printf "\nobserved blip locations at step 3:\n")
;; The permuted, anonymized blip list an observer would actually see.
(map (lambda (pair) (printf "~a\n" pair))
     (map (lambda (n) (list (observed-blip n 0 3 ) (observed-blip n 1 3)))
          (range (vector-length (blip-order 3)))))
(printf "\nblip sources for blips at step 3:\n")
;; Ground truth: which plane (or "false alarm") produced each blip.
(map (lambda (blip-n)
       (printf "~a\n" (blip-source-plane blip-n 3)))
     (range (vector-length (blip-order 3))))
```
#Some data
Let's have blip data in a table (list of lists) with columns "time", "blip number", "x", and "y".
Four real planes are in play:
1. Starts at the top left ((-1,1)) and moves right and slightly down.
2. Starts at the origin and moves right and up.
3. Starts at (1,0) and moves left and down.
4. Starts at (1,-1) and moves left and up.
Zero to three hallucinations are thrown in on each time step.
A function in the sampler makes an observation about the number of blips at each time, and a pair of observations for each blip's x and y coordinates at each time.
```
;; Blip observations as (time blip-number x y) rows.  Blip numbers within a
;; time step are 1-based and arrive out of order; x/y are noisy coordinates
;; of four real planes plus 0-3 hallucinations per step.
(define blip-data
  '((1 2 -1.0 0.99)
    (1 1 -0.01 -0.02)
    (1 3 0.99 0.001)
    (1 5 0.985 -0.99)
    (1 4 -0.9 0.2)
    (2 5 -0.96 0.97)
    (2 2 0.01 0.03)
    (2 6 0.96 -0.01)
    (2 3 0.97 -0.95)
    (2 4 -0.4 0.86)
    (2 1 0.2 0.67)
    (3 4 -0.9 0.91)
    (3 2 0.05 0.08)
    (3 3 0.91 -0.055)
    (3 1 0.92 -0.9)
    (3 5 0.2 -0.6)
    (4 6 -0.84 0.87)
    (4 3 0.09 0.11)
    (4 1 0.88 -0.15)
    (4 5 0.85 -0.84)
    (4 4 -0.2 0.99)
    (4 2 -0.5 -0.9)
    (5 1 -0.8 0.834)
    (5 2 0.13 0.17)
    (5 3 0.84 -0.19)
    (5 4 0.8 -0.8)
    (5 5 0.3 0.7)
    ))
;; All distinct time steps present in the data, in ascending order.
(define times (sort (remove-duplicates (map first blip-data)) <))
;; All rows recorded at time t.
(define (blips-for t)
  (filter (lambda (b) (= (first b) t)) blip-data))
;; Number of blips recorded at time t.
(define (n-blips t)
  (length (blips-for t)))
;; Row accessors.  Blip numbers are stored 1-based; shift to 0-based here.
(define blip-n (lambda (blp) (- (second blp) 1)))
(define x-val third)
(define y-val fourth)
```
#The sampler
What would we like to find out, given our blip sequence?
1. Which blips are hallucinations (probably) and which are real?
2. Which blips from different times are generated by the same object? [hard]
Because of symmetries (...more), it won't do to query for which plane blips are generated by. It is more robust (if more costly and verbose) to ask which blips in consecutive time steps are generated by the same object (whichever that is). We're going to need a function to get the source object for a blip. [done--check]
We could show a sequence of plots, each of which shows
1. The blips received at that time, each color-coded for probability of true (blue) vs. hallucinated (red). For this, we need to save out the source of each blip, on each sample.
2. The estimated position of each numbered aircraft--just a cloud of dots, for now, since the numbers are not identifiable. For this, we need to save out a list (of variable length) of all the airplane locations, on each sample. We aren't averaging these, so the variable length should not be a problem.
```
;; MH sampler: condition on the observed blip count and on each blip's x/y
;; coordinates at every time step, then query the location of plane 0,
;; dimension 0, at time 1.
(define radar-sampler
  (mh-sampler
    (open-model observation) ; this opens "things" and "dynamics"
    (for ([t times])
      (begin
        ;; Total blips at t = real detections + false alarms.
        (observe (+ (length (observed-planes t)) (n-false-alarms t)) (n-blips t))
        (for ([blp (blips-for t)])
          (observe (observed-blip (blip-n blp) 0 t) (x-val blp))
          (observe (observed-blip (blip-n blp) 1 t) (y-val blp)))))
    (location 0 0 1)))
(generate-samples radar-sampler 10 #:burn 10 #:thin 10)
;; junk
;; Prior over a blip's source: a real plane (chosen uniformly among the
;; n-airplanes) with probability n-airplanes / (n-airplanes + n-false-alarms),
;; otherwise a hallucination ("false alarm").
;; BUGFIX: the original was missing a closing paren after (n-false-alarms),
;; leaving the (/ ...) form unterminated.
(defmem (source-plane blip)
  (let ([p-real (/ n-airplanes (+ n-airplanes (n-false-alarms)))])
    (if (flip p-real) (discrete n-airplanes) "false alarm")))
;; Observed coordinate of a blip: uniform noise on [-1, 1] for a false alarm,
;; otherwise the source plane's true location plus small Gaussian noise.
(defmem (blip-location blip dimension)
  (let ([s (source-plane blip)])
    (if (string=? s "false alarm")
        (uniform -1 1)
        (normal (location s dimension (time-of blip)) 0.01))))
;; Scratch: hand-written observations for time steps 1 and 2, superseded by
;; the data-driven loop inside radar-sampler above.
(observe (+ (length (observed-planes 1)) (n-false-alarms 1)) 5)
(observe (observed-blip 0 0 1) 0.5)
(observe (observed-blip 0 1 1) 0.7)
(observe (observed-blip 1 0 1) -0.5)
(observe (observed-blip 1 1 1) -0.2)
(observe (observed-blip 2 0 1) 0.15)
(observe (observed-blip 2 1 1) 0.79)
(observe (observed-blip 3 0 1) -0.95)
(observe (observed-blip 3 1 1) -0.02)
(observe (+ (length (observed-planes 2)) (n-false-alarms 2)) 5)
;(observe (about-how-many-blips 2) 7)
(observe (observed-blip 0 0 2) 0.53)
(observe (observed-blip 0 1 2) 0.67)
(observe (observed-blip 3 0 2) -0.45)
(observe (observed-blip 3 1 2) -0.22)
(observe (observed-blip 1 0 2) 0.18)
(observe (observed-blip 1 1 2) 0.89)
(observe (observed-blip 2 0 2) -0.85)
(observe (observed-blip 2 1 2) -0.11)
;; Quick sanity check of the sort/remove-duplicates combo used for `times`.
(sort (remove-duplicates '(5 2 3 3 5 2 3)) <)
```
| github_jupyter |
```
#@title Copyright 2019 Google LLC. { display-mode: "form" }
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction
This is a demonstration notebook. Suppose you have developed a model the training of which is constrained by the resources available to the notebook VM. In that case, you may want to use the [Google AI Platform](https://cloud.google.com/ml-engine/docs/tensorflow/) to train your model. The advantage of that is that long-running or resource intensive training jobs can be performed in the background. Also, to use your trained model in Earth Engine, it needs to be [deployed as a hosted model](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models) on AI Platform. This notebook uses previously created training data (see [this example notebook](https://colab.sandbox.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/UNET_regression_demo.ipynb)) and AI Platform to train a model, deploy it and use it to make predictions in Earth Engine. To do that, code [needs to be structured as a python package](https://cloud.google.com/ml-engine/docs/tensorflow/packaging-trainer) that can be uploaded to AI Platform. The following cells produce that package programmatically.
# Setup software libraries
Install needed libraries to the notebook VM. Authenticate as necessary.
```
# Cloud authentication.
from google.colab import auth
auth.authenticate_user()

# Earth Engine install to notebook VM, authenticate.
!pip install earthengine-api

# Import and initialize the Earth Engine library.
import ee
ee.Authenticate()   # interactive OAuth flow in the notebook
ee.Initialize()

# Tensorflow setup (TF 1.x: eager execution must be enabled explicitly).
import tensorflow as tf
tf.enable_eager_execution()
print(tf.__version__)

# Folium setup (used below for interactive map display).
import folium
print(folium.__version__)

# Define the URL format used for Earth Engine generated map tiles.
EE_TILES = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'
```
# Training code package setup
It's necessary to create a Python package to hold the training code. Here we're going to get started with that by creating a folder for the package and adding an empty `__init__.py` file.
```
# Root folder of the trainer package; the empty __init__.py makes it a
# Python package that AI Platform can import.
PACKAGE_PATH = 'ai_platform_demo'

!ls -l
!mkdir {PACKAGE_PATH}
!touch {PACKAGE_PATH}/__init__.py
!ls -l {PACKAGE_PATH}
```
## Variables
These variables need to be stored in a place where other code can access them. There are a variety of ways of accomplishing that, but here we'll use the `%%writefile` command to write the contents of the code cell to a file called `config.py`.
**Note:** You need to insert the name of a bucket (below) to which you have write access!
```
%%writefile {PACKAGE_PATH}/config.py
import tensorflow as tf

# INSERT YOUR BUCKET HERE!
BUCKET = 'your-bucket-name'

# Specify names of output locations in Cloud Storage.
FOLDER = 'fcnn-demo'
JOB_DIR = 'gs://' + BUCKET + '/' + FOLDER + '/trainer'
MODEL_DIR = JOB_DIR + '/model'
LOGS_DIR = JOB_DIR + '/logs'

# Pre-computed training and eval data.
DATA_BUCKET = 'ee-docs-demos'
TRAINING_BASE = 'training_patches'
EVAL_BASE = 'eval_patches'

# Specify inputs (Landsat bands) to the model and the response variable.
opticalBands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']
thermalBands = ['B10', 'B11']
BANDS = opticalBands + thermalBands
RESPONSE = 'impervious'
FEATURES = BANDS + [RESPONSE]

# Specify the size and shape of patches expected by the model.
KERNEL_SIZE = 256
KERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE]
# One fixed-length float feature per band/response (loop variable is unused
# on purpose: every feature has the same spec).
COLUMNS = [
  tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES
]
FEATURES_DICT = dict(zip(FEATURES, COLUMNS))

# Sizes of the training and evaluation datasets.
TRAIN_SIZE = 16000
EVAL_SIZE = 8000

# Specify model training parameters.
BATCH_SIZE = 16
EPOCHS = 50
BUFFER_SIZE = 3000
OPTIMIZER = 'SGD'
LOSS = 'MeanSquaredError'
METRICS = ['RootMeanSquaredError']
```
Verify that the written file has the expected contents and is working as intended.
```
!cat {PACKAGE_PATH}/config.py
from ai_platform_demo import config
print('\n\n', config.BATCH_SIZE)
```
## Training data, evaluation data and model
The following is code to load training/evaluation data and the model. Write this into `model.py`. Note that these functions are developed and explained in [this example notebook](https://colab.sandbox.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/UNET_regression_demo.ipynb). The source of the model code is [this demonstration notebook](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb).
```
%%writefile {PACKAGE_PATH}/model.py
from . import config
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
# Dataset loading functions
def parse_tfrecord(example_proto):
    """Deserialize one serialized tf.Example into a {feature-name: tensor} dict."""
    parsed = tf.io.parse_single_example(example_proto, config.FEATURES_DICT)
    return parsed
def to_tuple(inputs):
    """Split a parsed-feature dict into an (input-bands, response) pair.

    Features are stacked channels-first in config.FEATURES order, transposed
    to HWC, and the channel axis is split after the input bands.
    """
    ordered = [inputs.get(name) for name in config.FEATURES]
    hwc = tf.transpose(tf.stack(ordered, axis=0), [1, 2, 0])
    n_bands = len(config.BANDS)
    return hwc[:, :, :n_bands], hwc[:, :, n_bands:]
def get_dataset(pattern):
    """Return a parsed (inputs, response) TFRecordDataset for files matching *pattern*."""
    files = tf.io.gfile.glob(pattern)
    records = tf.data.TFRecordDataset(files, compression_type='GZIP')
    return records.map(parse_tfrecord).map(to_tuple)
def get_training_dataset():
    """Shuffled, batched, indefinitely-repeating training dataset from Cloud Storage."""
    pattern = 'gs://' + config.DATA_BUCKET + '/' + config.FOLDER + '/' + config.TRAINING_BASE + '*'
    return (get_dataset(pattern)
            .shuffle(config.BUFFER_SIZE)
            .batch(config.BATCH_SIZE)
            .repeat())
def get_eval_dataset():
    """Indefinitely-repeating evaluation dataset with batch size 1."""
    pattern = 'gs://' + config.DATA_BUCKET + '/' + config.FOLDER + '/' + config.EVAL_BASE + '*'
    return get_dataset(pattern).batch(1).repeat()
# A variant of the UNET model.
def conv_block(input_tensor, num_filters):
    """Two (3x3 conv -> batch-norm -> ReLU) stages at a fixed filter count."""
    x = input_tensor
    for _ in range(2):
        x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    return x
def encoder_block(input_tensor, num_filters):
    """conv_block followed by 2x2 max-pooling; returns (pooled, pre-pool skip tensor)."""
    features = conv_block(input_tensor, num_filters)
    pooled = layers.MaxPooling2D((2, 2), strides=(2, 2))(features)
    return pooled, features
def decoder_block(input_tensor, concat_tensor, num_filters):
    """Upsample 2x, concatenate the encoder skip tensor, then refine.

    A transposed conv doubles the spatial size; the skip tensor joins on the
    channel axis; BN/ReLU is applied, then two conv-BN-ReLU stages follow.
    """
    x = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
    x = layers.concatenate([concat_tensor, x], axis=-1)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    for _ in range(2):
        x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    return x
def get_model():
    """Build and compile the U-Net; inline comments track spatial size for a 256px input."""
    inputs = layers.Input(shape=[None, None, len(config.BANDS)]) # 256
    encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128
    encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64
    encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32
    encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16
    encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8
    center = conv_block(encoder4_pool, 1024) # center
    # The decoder mirrors the encoder; each block consumes the matching skip tensor.
    decoder4 = decoder_block(center, encoder4, 512) # 16
    decoder3 = decoder_block(decoder4, encoder3, 256) # 32
    decoder2 = decoder_block(decoder3, encoder2, 128) # 64
    decoder1 = decoder_block(decoder2, encoder1, 64) # 128
    decoder0 = decoder_block(decoder1, encoder0, 32) # 256
    # Single sigmoid channel: per-pixel value in [0, 1] for the response band.
    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
    model = models.Model(inputs=[inputs], outputs=[outputs])
    model.compile(
        optimizer=optimizers.get(config.OPTIMIZER),
        loss=losses.get(config.LOSS),
        metrics=[metrics.get(metric) for metric in config.METRICS])
    return model
```
Verify that `model.py` is functioning as intended.
```
from ai_platform_demo import model
eval = model.get_eval_dataset()
print(iter(eval.take(1)).next())
model = model.get_model()
print(model.summary())
```
## Training task
At this stage, there should be `config.py` storing variables and `model.py` which has code for getting the training/evaluation data and the model. All that's left is code for training the model. The following will create `task.py`, which will get the training and eval data, train the model and save it when it's done in a Cloud Storage bucket.
```
%%writefile {PACKAGE_PATH}/task.py
from . import config
from . import model
import tensorflow as tf

if __name__ == '__main__':
  # Train the U-Net and export the fitted model to Cloud Storage.
  training = model.get_training_dataset()
  evaluation = model.get_eval_dataset()
  m = model.get_model()
  m.fit(
      x=training,
      epochs=config.EPOCHS,
      steps_per_epoch=int(config.TRAIN_SIZE / config.BATCH_SIZE),
      validation_data=evaluation,
      # Eval batches have size 1 (see get_eval_dataset), so one step per example.
      validation_steps=int(config.EVAL_SIZE),
      callbacks=[tf.keras.callbacks.TensorBoard(config.LOGS_DIR)])
  # tf.contrib exists only in TF 1.x (runtime-version 1.14 below); this export
  # API was removed in TF 2.
  tf.contrib.saved_model.save_keras_model(m, config.MODEL_DIR)
```
# Submit the package to AI Platform for training
Now there's everything to submit this job, which can be done from the command line. First, define some needed variables.
**Note:** You need to insert the name of a Cloud project (below) you own!
```
import time

# INSERT YOUR PROJECT HERE!
PROJECT = 'your-project'

# Unique job name (timestamp suffix) plus package/module/region for gcloud.
JOB_NAME = 'demo_training_job_' + str(int(time.time()))
TRAINER_PACKAGE_PATH = 'ai_platform_demo'
MAIN_TRAINER_MODULE = 'ai_platform_demo.task'
REGION = 'us-central1'
```
Now the training job is ready to be started. First, you need to enable the ML API for your project. This can be done from [this link to the Cloud Console](https://console.developers.google.com/apis/library/ml.googleapis.com). See [this guide](https://cloud.google.com/ml-engine/docs/tensorflow/training-jobs) for details. Note that the Python and Tensorflow versions should match what is used in the Colab notebook.
```
!gcloud ai-platform jobs submit training {JOB_NAME} \
--job-dir {config.JOB_DIR} \
--package-path {TRAINER_PACKAGE_PATH} \
--module-name {MAIN_TRAINER_MODULE} \
--region {REGION} \
--project {PROJECT} \
--runtime-version 1.14 \
--python-version 3.5 \
--scale-tier basic-gpu
```
## Monitor the training job
There's not much more to do until the model is finished training (~24 hours), but it's fun and useful to monitor its progress. You can do that programmatically with another `gcloud` command. The output of that command can be read into an `IPython.utils.text.SList` from which the `state` is extracted and ensured to be `SUCCEEDED`. Or you can monitor it from the [AI Platform jobs page](http://console.cloud.google.com/ai-platform/jobs) on the Cloud Console.
```
desc = !gcloud ai-platform jobs describe {JOB_NAME} --project {PROJECT}
state = desc.grep('state:')[0].split(':')[1].strip()
print(state)
```
# Inspect the trained model
Once the training job has finished, verify that you can load the trained model and print a summary of the fitted parameters. It's also useful to examine the logs with [TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard). There's a convenient notebook extension that will launch TensorBoard in the Colab notebook. Examine the training and testing learning curves to ensure that the training process has converged.
```
%load_ext tensorboard
%tensorboard --logdir {config.LOGS_DIR}
```
# Prepare the model for making predictions in Earth Engine
Before we can use the model in Earth Engine, it needs to be hosted by AI Platform. But before we can host the model on AI Platform we need to *EEify* (a new word!) it. The EEification process merely appends some extra operations to the input and outputs of the model in order to accommodate the interchange format between pixels from Earth Engine (float32) and inputs to AI Platform (base64). (See [this doc](https://cloud.google.com/ml-engine/docs/online-predict#binary_data_in_prediction_input) for details.)
## `earthengine model prepare`
The EEification process is handled for you using the Earth Engine command `earthengine model prepare`. To use that command, we need to specify the input and output model directories and the name of the input and output nodes in the TensorFlow computation graph. We can do all that programmatically:
```
from tensorflow.python.tools import saved_model_utils

# Read the serving signature of the exported SavedModel to discover the
# tensor names of its (single) input and output.
meta_graph_def = saved_model_utils.get_meta_graph_def(config.MODEL_DIR, 'serve')
inputs = meta_graph_def.signature_def['serving_default'].inputs
outputs = meta_graph_def.signature_def['serving_default'].outputs

# Just get the first thing(s) from the serving signature def. i.e. this
# model only has a single input and a single output.
input_name = None
for k,v in inputs.items():
  input_name = v.name
  break

output_name = None
for k,v in outputs.items():
  output_name = v.name
  break

# Make a dictionary that maps Earth Engine outputs and inputs to
# AI Platform inputs and outputs, respectively.  The values "array" and
# "impervious" become the band names Earth Engine uses.
import json
input_dict = "'" + json.dumps({input_name: "array"}) + "'"
output_dict = "'" + json.dumps({output_name: "impervious"}) + "'"

# Put the EEified model next to the trained model directory.
EEIFIED_DIR = config.JOB_DIR + '/eeified'

# You need to set the project before using the model prepare command.
!earthengine set_project {PROJECT}
!earthengine model prepare --source_dir {config.MODEL_DIR} --dest_dir {EEIFIED_DIR} --input {input_dict} --output {output_dict}
```
Note that you can also use the TensorFlow saved model command line tool to do this manually. See [this doc](https://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel) for details. Also note the names we've specified for the new inputs and outputs: `array` and `impervious`, respectively.
# Perform inference using the trained model in Earth Engine
Before it's possible to get predictions from the trained and EEified model, it needs to be deployed on AI Platform. The first step is to create the model. The second step is to create a version. See [this guide](https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models) for details. Note that models and versions can be monitored from the [AI Platform models page](http://console.cloud.google.com/ai-platform/models) of the Cloud Console.
```
MODEL_NAME = 'fcnn_demo_model'
VERSION_NAME = 'v' + str(int(time.time()))
print('Creating version: ' + VERSION_NAME)
!gcloud ai-platform models create {MODEL_NAME} --project {PROJECT}
!gcloud ai-platform versions create {VERSION_NAME} \
--project {PROJECT} \
--model {MODEL_NAME} \
--origin {EEIFIED_DIR} \
--runtime-version=1.14 \
--framework "TENSORFLOW" \
--python-version=3.5
```
There is now a trained model, prepared for serving to Earth Engine, hosted and versioned on AI Platform. We can now connect Earth Engine directly to the trained model for inference. You do that with the `ee.Model.fromAiPlatformPredictor` command.
## `ee.Model.fromAiPlatformPredictor`
For this command to work, we need to know a lot about the model. To connect to the model, you need to know the name and version.
### Inputs
You need to be able to recreate the imagery on which it was trained in order to perform inference. Specifically, you need to create an array-valued input from the scaled data and use that for input. (Recall that the new input node is named `array`, which is convenient because the array image has one band, named `array` by default.) The inputs will be provided as 144x144 patches (`inputTileSize`), at 30-meter resolution (`proj`), but 8 pixels will be thrown out (`inputOverlapSize`) to minimize boundary effects.
### Outputs
The output (which you also need to know), is a single float band named `impervious`.
```
# Use Landsat 8 surface reflectance data (Collection 1, Tier 1).
l8sr = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Cloud masking function.
def maskL8sr(image):
    """Mask clouds/shadows in a Landsat 8 SR image and rescale its bands.

    Optical bands are divided by 10000; thermal bands are read as tenths of
    Kelvin, clamped to [273.15, 373.15] K, shifted to Celsius and divided
    by 100.  A pixel survives only if its QA bits show no cloud/shadow, all
    bands are unmasked, and every optical value lies in (0, 10000).
    """
    shadow_bit = ee.Number(2).pow(3).int()
    cloud_bit = ee.Number(2).pow(5).int()
    qa = image.select('pixel_qa')
    clear_qa = qa.bitwiseAnd(shadow_bit).eq(0).And(
        qa.bitwiseAnd(cloud_bit).eq(0))
    all_unmasked = image.mask().reduce('min')
    optical_in_range = image.select(config.opticalBands).gt(0).And(
        image.select(config.opticalBands).lt(10000)).reduce('min')
    keep = clear_qa.And(all_unmasked).And(optical_in_range)
    optical = image.select(config.opticalBands).divide(10000)
    thermal = (image.select(config.thermalBands).divide(10)
               .clamp(273.15, 373.15).subtract(273.15).divide(100))
    return optical.addBands(thermal).updateMask(keep)
# The image input data is a cloud-masked median composite.
image = l8sr.filterDate(
'2015-01-01', '2017-12-31').map(maskL8sr).median().select(config.BANDS).float()
# Load the trained model and use it for prediction.
model = ee.Model.fromAiPlatformPredictor(
projectName = PROJECT,
modelName = MODEL_NAME,
version = VERSION_NAME,
inputTileSize = [144, 144],
inputOverlapSize = [8, 8],
proj = ee.Projection('EPSG:4326').atScale(30),
fixInputProj = True,
outputBands = {'impervious': {
'type': ee.PixelType.float()
}
}
)
predictions = model.predictImage(image.toArray())
# Use folium to visualize the input imagery and the predictions.
mapid = image.getMapId({'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3})
map = folium.Map(location=[38., -122.5], zoom_start=13)
folium.TileLayer(
tiles=EE_TILES.format(**mapid),
attr='Google Earth Engine',
overlay=True,
name='median composite',
).add_to(map)
mapid = predictions.getMapId({'min': 0, 'max': 1})
folium.TileLayer(
tiles=EE_TILES.format(**mapid),
attr='Google Earth Engine',
overlay=True,
name='predictions',
).add_to(map)
map.add_child(folium.LayerControl())
map
```
| github_jupyter |
```
from pycuGMRES import *
# //extern "C" {
# void *pycumalloc(unsigned int amount, size_t unit_size, cudaError_t *err)
# {
# void *dev_array;
# unsigned int size = amount * unit_size;
# *err = cudaMalloc(&dev_array, size);
# return dev_array;
# }
# //}
# pycumalloc = get_function('pycumalloc', path_to_so)
# pycumalloc.argtypes = [c_uint, c_size_t, POINTER(c_uint)]
# pycumalloc.restype = c_void_p
# cudaError_t * - POINTER(c_uint)
# void pycuInitSolution(
# cuComplex *dev_solution,
# const float h_sigma,
# const unsigned int N,
# const float wavenumber,
# const float eps_ex
# )
# pycuInitSolution.argtypes = [
# POINTER(c_complex), # cuComplex *dev_solution
# c_float, # const float h_sigma
# c_uint, # const unsigned int N
# c_float, # const float wavenumber
# c_float # const float epsilon_external
# ]
def set_params(N, Nsqrd, maxiter = 30, tolerance = 0.00001, wavelength_per_domain = 6):
    """Allocate and initialise every host/device buffer for one pycuGMRES run.

    Parameters
    ----------
    N : int
        Side length of the square computational domain.
    Nsqrd : int
        N * N, the number of grid points.
    maxiter : int
        Maximum GMRES iterations (sizes the residual-history buffer).
    tolerance : float
        Relative-residual stopping tolerance.
    wavelength_per_domain : float
        Number of wavelengths fitting across the domain; fixes the wavenumber.

    Returns
    -------
    tuple
        The 21 handles consumed by pycuGMRESimproved, in the order listed in
        the return statement below.
    """
    # Physical wavenumber; also used below for the Green-function array.
    wavenumber = 2 * np.pi / ( N / wavelength_per_domain )
    cylinder_mask = get_cylinder_mask(N)              # bool *dev_mask
    h_mask = (c_bool * Nsqrd)()
    # cudaError_t slot shared by every pycumalloc call (see the wrapper's
    # declared signature: (amount, unit_size, err)).
    h_cuda_err = (c_uint * 1)(0)
    visualize(np.abs(cylinder_mask).reshape(N, N), show_cbar=False)
    py_to_ctype(cylinder_mask.reshape(N, N), h_mask)
    dev_mask = pycumalloc(Nsqrd, c_size_t(sizeof(c_bool)), h_cuda_err)
    pycuhost2gpu(h_mask, dev_mask, Nsqrd, c_size_t(sizeof(c_bool)))
    dev_mask = cast(dev_mask, POINTER(c_bool))
    dev_solution = pycumalloc(Nsqrd, c_size_t(sizeof(c_complex)), h_cuda_err)  # cuComplex *dev_solution
    dev_solution = cast(dev_solution, POINTER(c_complex))
    h_for_gradient = c_bool(False)                    # const bool for_gradient
    h_index_of_max = c_uint(0)                        # const unsigned int h_index_of_max
    h_tolerance = c_float(tolerance)                  # const float tolerance
    h_GMRES_n = (c_uint * 1)(0)                       # unsigned int *GMRES_n
    dev_actual_residual = pycumalloc(maxiter+1, c_size_t(sizeof(c_float)), h_cuda_err)  # float *dev_actual_residual
    dev_actual_residual = cast(dev_actual_residual, POINTER(c_float))
    h_res_vs_tol_p = c_bool(True)                     # bool *h_res_vs_tol_p
    h_N = c_uint(N)                                   # const unsigned int N
    h_sigma = c_float(400/1024 * N)
    # BUGFIX: this value was previously recomputed as 2 * 3.14 / (N / wpd),
    # so the kernel saw a slightly different wavenumber than the np.pi-based
    # one used for gamma_array below.  Reuse the exact same value.
    h_wavenumber = c_float(wavenumber)
    h_epsilon_internal = c_float(2.25)
    h_epsilon_external = c_float(1.)
    pycuInitSolution(dev_solution, h_sigma, h_N, h_wavenumber, h_epsilon_external)
    h_gamma_array = (c_complex * (2 * N - 1) ** 2 )() # cuComplex *dev_gamma_array
    # Green-function samples, transposed to the (column-major) layout the
    # GPU code expects, then copied to the device.
    gamma_array = get_gamma_array(wavenumber, N).reshape(2 * N - 1, 2 * N - 1).T.reshape(-1)
    py_to_ctype(gamma_array, h_gamma_array)
    memmove(h_gamma_array, gamma_array.ctypes.data, gamma_array.nbytes)
    dev_gamma_array = pycumalloc((2 * N - 1) ** 2, c_size_t(sizeof(c_complex)), h_cuda_err)
    pycuhost2gpu(h_gamma_array, dev_gamma_array, (2 * N - 1) ** 2, c_size_t(sizeof(c_complex)))
    dev_gamma_array = cast(dev_gamma_array, POINTER(c_complex))
    h_plan = pycuGetPlan(h_N)                         # const cufftHandle plan
    h_handle_p = pycuHandleBlas()                     # cublasHandle_t handle
    h_cusolverH_p = pycuHandleSolverDn()              # cusolverDnHandle_t *cusolverH_p
    h_maxiter = c_uint(maxiter)                       # unsigned int maxiter
    dev_subs = (c_devSubsidiary * 1)()                # dev_subsidiary *dev_subs
    pycuGetSubsidiary(dev_subs, h_N, h_maxiter)
    n_timestamps = get_n_timestamps_val(maxiter)      # timespec *computation_times
    h_computation_times = (c_timespec * n_timestamps)()
    # Precompute the FFT of the Green-function kernel in place.
    pycuFFTC2C(dev_gamma_array, dev_gamma_array, h_plan)
    return (dev_mask,
            dev_solution,
            h_sigma,
            h_for_gradient,
            h_index_of_max,
            h_tolerance,
            h_GMRES_n,
            dev_actual_residual,
            h_res_vs_tol_p,
            h_N,
            dev_gamma_array,
            h_plan,
            h_handle_p,
            h_cusolverH_p,
            h_maxiter,
            dev_subs,
            h_computation_times,
            h_wavenumber,
            h_epsilon_internal,
            h_epsilon_external,
            h_cuda_err)
maxiter = 30

# Bind the computation to GPU 0.
visible_device = 0
h_visible_device = c_uint(visible_device)
pycuSetDevice(h_visible_device)

for repetition in range(1):
    for pow_given in range(10, 11):
        # Problem size: grid side N = 2**pow_given, Nsqrd = N**2 unknowns.
        N = 1 << pow_given
        Nsqrd = 1 << (pow_given << 1)
        # Path of the precomputed analytical reference solution for this N.
        # NOTE(review): (False) * '_T' always yields '' -- a disabled suffix flag?
        file_an_sol = '/mnt/779188A965FF7E0F/Archive/analytical_solution_' + \
            str (N) + (False) * '_T' + '.txt'
        N = 1 << pow_given
        # Allocate every host/device buffer the solver needs.
        dev_mask,\
        dev_solution,\
        h_sigma,\
        h_for_gradient,\
        h_index_of_max,\
        h_tolerance,\
        h_GMRES_n,\
        dev_actual_residual,\
        h_res_vs_tol_p,\
        h_N,\
        dev_gamma_array,\
        h_plan,\
        h_handle_p,\
        h_cusolverH_p,\
        h_maxiter,\
        dev_subs,\
        h_computation_times,\
        h_wavenumber,\
        h_epsilon_internal,\
        h_epsilon_external,\
        h_cuda_err = set_params(N, Nsqrd, maxiter)
        # Device pointer mode during the solve (scalars stay on the GPU).
        pycuSetPointerMode(h_handle_p, CUBLAS_POINTER_MODE_DEVICE())
        time_c = time()
        devh_debug1 = c_void_p()
        devh_debug2 = c_void_p()
        devh_debug3 = c_void_p()
        # Run the GPU GMRES solver; results land in dev_solution and
        # dev_actual_residual, timestamps in h_computation_times.
        pycuGMRESimproved(
            dev_mask,
            dev_solution,
            h_for_gradient,
            h_index_of_max,
            h_sigma,
            h_maxiter,
            h_tolerance,
            h_GMRES_n,
            dev_actual_residual,
            h_res_vs_tol_p,
            h_N,
            dev_gamma_array,
            h_plan,
            h_handle_p,
            h_cusolverH_p,
            dev_subs,
            h_computation_times,
            h_wavenumber, #
            h_epsilon_internal, #
            h_epsilon_external, #
            devh_debug1,
            devh_debug2,
            devh_debug3
        )
        # break
        time_c = time() - time_c
        # Copy the per-iteration residual history back to the host.
        h_actual_residual = (c_float * (maxiter + 1))()
        pycugpu2host(h_actual_residual, dev_actual_residual, maxiter + 1, c_size_t(sizeof(c_float)))
        print("time_c for GMRES = ", time_c)
        # Upload the analytical solution and compute the relative error.
        h_analytical_solution = (c_complex * Nsqrd )() # cuComplex *dev_analytical_solution
        analytical_solution = get_complex_array(file_an_sol)
        py_to_ctype(analytical_solution, h_analytical_solution)
        dev_analytical_solution = pycumalloc(Nsqrd, c_size_t(sizeof(c_complex)), h_cuda_err)
        pycuhost2gpu(h_analytical_solution, dev_analytical_solution, Nsqrd, c_size_t(sizeof(c_complex)))
        dev_analytical_solution = cast(dev_analytical_solution, POINTER(c_complex))
        # Back to host pointer mode so pycuRelErr can return a host scalar.
        pycuSetPointerMode(h_handle_p, CUBLAS_POINTER_MODE_HOST())
        h_rel_err = pycuRelErr(
            dev_solution,
            dev_analytical_solution,
            h_N,
            h_handle_p
        )
        print("h_rel_err = ", h_rel_err)
        # Tear down plans, handles and device memory for this problem size.
        pycuDestroyPlan(h_plan)
        pycuDestroyBlas(h_handle_p)
        pycuDestroySolverDn(h_cusolverH_p)
        pycuFree(dev_gamma_array)
        pycuFree(dev_mask)
        pycuFree(dev_actual_residual)
        pycuDestroySubsidiary(dev_subs)
        pycuFree(dev_analytical_solution)
        # pycuFree(dev_solution)
        # pycuDeviceReset()
        print(h_actual_residual[-2])
        print("Total time = ", np.sum(get_nano_time(h_computation_times)/1e9))
# Convert the ctypes timespec array once, then analyse per-stage timings.
# BUGFIX: computation_times was used (len/indexing) before it was assigned,
# which raises NameError when the cells run top to bottom; the conversion
# now comes first.
computation_times = np.asarray(h_computation_times)
print(type(computation_times['tv_sec']))
K = len(computation_times)
# Deltas between consecutive timestamps, in nanoseconds.
diff_sec_times = computation_times[1:K]['tv_sec'] - computation_times[0:K - 1]['tv_sec']
diff_nsec_times = computation_times[1:K]['tv_nsec'] - computation_times[0:K - 1]['tv_nsec']
diff_nano_times = 1e9 * diff_sec_times + diff_nsec_times
print(np.sum(computation_times[1:K]['tv_nsec'] - computation_times[0:K - 1]['tv_nsec']))
# Scratch: a raw (wrapped-around) nanosecond counter expressed in minutes.
18446744073080933754 / 1e9 / 60
get_nano_time(h_computation_times)
# Re-run set_params and compare the fresh device solution with the stored
# analytical solution.
# BUGFIX: set_params returns 21 values; the original unpacked only 16 of
# them (dropping h_sigma, h_wavenumber, the epsilons and h_cuda_err), which
# raises "too many values to unpack".
dev_mask,\
dev_solution,\
h_sigma,\
h_for_gradient,\
h_index_of_max,\
h_tolerance,\
h_GMRES_n,\
dev_actual_residual,\
h_res_vs_tol_p,\
h_N,\
dev_gamma_array,\
h_plan,\
h_handle_p,\
h_cusolverH_p,\
h_maxiter,\
dev_subs,\
h_computation_times,\
h_wavenumber,\
h_epsilon_internal,\
h_epsilon_external,\
h_cuda_err = set_params(N, Nsqrd, maxiter)
# Copy the device solution back to the host and view it as complex numbers
# ('x' = real part, 'y' = imaginary part of cuComplex).
h_solution = (c_complex * Nsqrd)()
pycugpu2host(h_solution, dev_solution, Nsqrd, c_size_t(sizeof(c_complex)))
solution = np.zeros((N, N), dtype = np.complex64)
solution = np.ctypeslib.as_array(h_solution)
solution = solution['x'] + 1j * solution['y']
np.max(np.abs(solution))
visualize(np.abs(solution).reshape(N, N).T, wavelength_per_domain = 6 )
# Upload the analytical reference and measure the relative error.
h_analytical_solution = (c_complex * Nsqrd )() # cuComplex *dev_analytical_solution
analytical_solution = get_complex_array(file_an_sol)
py_to_ctype(analytical_solution, h_analytical_solution)
# BUGFIX: pycumalloc takes (amount, unit_size, err); the cudaError_t slot
# was missing from this call.
dev_analytical_solution = pycumalloc(Nsqrd, c_size_t(sizeof(c_complex)), h_cuda_err)
pycuhost2gpu(h_analytical_solution, dev_analytical_solution, Nsqrd, c_size_t(sizeof(c_complex)))
dev_analytical_solution = cast(dev_analytical_solution, POINTER(c_complex))
pycuSetPointerMode(h_handle_p, CUBLAS_POINTER_MODE_HOST())
pycuRelErr(
    dev_solution,
    dev_analytical_solution,
    h_N,
    h_handle_p
)
# Release all GPU resources for this run.
pycuDestroyPlan(h_plan)
pycuDestroyBlas(h_handle_p)
pycuDestroySolverDn(h_cusolverH_p)
pycuFree(dev_gamma_array)
pycuFree(dev_mask)
pycuFree(dev_actual_residual)
pycuDestroySubsidiary(dev_subs)
pycuFree(dev_analytical_solution)
print("Total time = ", np.sum(get_nano_time(h_computation_times)/1e9))
# visualize(np.abs(gamma_array).reshape(2 * N - 1, 2 * N - 1), wavelength_per_domain = 6)
# Upload the analytical reference solution and measure the relative error of
# the current device solution against it, then release GPU resources.
h_analytical_solution = (c_complex * Nsqrd )() # cuComplex *dev_analytical_solution
analytical_solution = get_complex_array(file_an_sol)
py_to_ctype(analytical_solution, h_analytical_solution)
# BUGFIX: pycumalloc takes (amount, unit_size, err); the cudaError_t slot
# was missing from this call.
dev_analytical_solution = pycumalloc(Nsqrd, c_size_t(sizeof(c_complex)), h_cuda_err)
pycuhost2gpu(h_analytical_solution, dev_analytical_solution, Nsqrd, c_size_t(sizeof(c_complex)))
dev_analytical_solution = cast(dev_analytical_solution, POINTER(c_complex))
pycuSetPointerMode(h_handle_p, CUBLAS_POINTER_MODE_HOST())
pycuRelErr(
    dev_solution,
    dev_analytical_solution,
    h_N,
    h_handle_p
)
pycuDestroyPlan(h_plan)
pycuDestroyBlas(h_handle_p)
pycuDestroySolverDn(h_cusolverH_p)
pycuFree(dev_gamma_array)
pycuFree(dev_mask)
pycuFree(dev_actual_residual)
pycuDestroySubsidiary(dev_subs)
pycuFree(dev_analytical_solution)
# Scratch cell: standalone visualisation of the initial solution field.
visible_device = 0
h_visible_device = c_uint(visible_device)
pycuSetDevice(h_visible_device)
# NOTE(review): pycumalloc is declared above as (amount, unit_size, err);
# this call passes only two arguments -- confirm against the .so signature.
dev_solution = pycumalloc(Nsqrd, c_size_t(sizeof(c_complex))) # cuComplex *dev_solution
dev_solution = cast(dev_solution, POINTER(c_complex))
h_N = c_uint(N)
# NOTE(review): pycuInitSolution is declared above with five parameters
# (solution, sigma, N, wavenumber, eps_ex); only two are passed here -- confirm.
pycuInitSolution(dev_solution, h_N)
h_solution = (Nsqrd * c_complex)()
pycugpu2host(h_solution, dev_solution, c_uint(Nsqrd), c_size_t(sizeof(c_complex)))
# Structured ctypes complex -> NumPy ('x' = real, 'y' = imaginary).
solution = np.ctypeslib.as_array(h_solution)
solution = solution['x'] + 1j * solution['y']
visualize(np.abs(solution).reshape(N, N))
# Reference (pure NumPy) GMRES, iteration 0: one Arnoldi step followed by a
# Givens rotation of the small Hessenberg matrix.
x0 = get_plane_wave(k, size).reshape(size * size)
#np.ones((N, N)) + 1j* np.ones((N, N)) #np.ones((N, N), dtype = np.complex64)
A_x = matvec(x0, eps, k)
# Initial residual r0 = x0 - A x0 and the first Krylov basis vector.
r0 = x0.reshape(-1) - A_x.reshape(-1)
normr0 = np.linalg.norm(r0)
v = r0 / normr0
GMRES_i = 0
residual = 1
tol = 1e-12
V = v
if (residual > tol):
    # Arnoldi: orthogonalise A v against the single basis vector v.
    H = np.zeros((2, 1), dtype = np.complex64)
    w = matvec(v, eps, k).reshape(-1)
    H[0, 0] = np.inner(w, v.conj())
    w = w - H[0, 0] * v
    H[1, 0] = np.linalg.norm(w)
    v = w / H[1, 0]
    V = np.hstack((V.reshape(N**2, 1), v.reshape(N**2, 1)))
    # Build the 2x2 rotation eliminating the subdiagonal entry H[1, 0].
    Htemp = H
    J = np.zeros((2, 2), dtype = np.complex64)
    denominator = np.linalg.norm(Htemp)
    J[1, 1] = J[0, 0] = Htemp[0, 0] / denominator
    J[0, 1] = Htemp[1, 0] / denominator
    J[1, 0] = - Htemp[1, 0].conj() / denominator
    Jtotal = J
    # HH = np.dot(Jtotal, H)
    # Rotate the RHS e1 * ||r0|| and read off the residual estimate.
    # NOTE(review): the estimate is taken from c[0, 0]; textbook GMRES reads
    # the last entry of the rotated RHS -- confirm this is intentional.
    bb = np.zeros((2, 1), dtype = np.complex64)
    bb[0] = normr0
    c = np.dot(Jtotal, bb)
    residual = abs(c[0, 0])
    print(residual)
    GMRES_i = 1
while ((residual > tol) and (GMRES_i < 100)):
print(GMRES_i)
H_new = np.zeros((GMRES_i + 2, GMRES_i + 1), dtype = np.complex64)
H_new[0:GMRES_i + 1, 0:GMRES_i] = H
H = H_new
w = matvec(v, eps, k).reshape(-1)
for j in range(GMRES_i + 1):
H[j, GMRES_i] = np.inner(w, V[:, j].conj())
w = w - H[j, GMRES_i] * V[:, j]
H[GMRES_i + 1, GMRES_i] = np.linalg.norm(w)
v = w / H[GMRES_i + 1, GMRES_i]
V = np.hstack((V.reshape(N**2, GMRES_i + 1), v.reshape(N**2, 1)))
Jtotal = np.hstack((Jtotal, np.zeros(GMRES_i+1).reshape(GMRES_i+1, 1)))
Jtotal = np.vstack((Jtotal, np.zeros(GMRES_i+2).reshape(1, GMRES_i+2)))
Jtotal[GMRES_i+1, GMRES_i+1] = 1
Htemp = np.dot(Jtotal, H)
J = np.eye(GMRES_i + 2, dtype = np.complex64)
denominator = np.linalg.norm(np.asarray([Htemp[GMRES_i, GMRES_i], Htemp[GMRES_i + 1, GMRES_i]]))
J[GMRES_i + 1, GMRES_i + 1] = J[GMRES_i, GMRES_i] = Htemp[GMRES_i , GMRES_i] / denominator
J[GMRES_i, GMRES_i + 1] = Htemp[GMRES_i + 1, GMRES_i] / denominator
J[GMRES_i + 1, GMRES_i] = - Htemp[GMRES_i + 1, GMRES_i].conj() / denominator
Jtotal = np.dot(J, Jtotal)
bb = np.zeros((GMRES_i + 2, 1), dtype = np.complex64)
bb[0] = normr0
c = np.dot(Jtotal, bb)
residual = abs(c[GMRES_i, 0])
print(residual)
GMRES_i += 1
HH = np.dot(Jtotal, H)
HH = HH[0 : GMRES_i, :]
cc = c[0 : GMRES_i, 0:1]
cc_new = np.linalg.solve(HH.reshape(GMRES_i, GMRES_i), cc.reshape(GMRES_i, 1))
V = V[:, 0:GMRES_i]
print(V.shape)
print(cc_new.shape)
x_add = np.dot(V, cc_new)
x = x0 + x_add.reshape(-1)
print("Total time = ", np.sum(get_nano_time(h_computation_times)/1e9))
```
*
* Complex functions: <u> rename </u> and deal with <u> 2 * float </u>
* as_ctype quickly:
https://stackoverflow.com/questions/47127575/copy-data-from-numpy-to-ctypes-quickly
https://stackoverflow.com/questions/3195660/how-to-use-numpy-array-with-ctypes
* <strong>windows</strong>/linux CDLL/DLL, no objdump by using extern "C"
* free function
* dependent on GPU number
* MIT license
* module(Digits to python variables)
* <strong> pip install </strong>
* malloc(3 vars) - separately
* clear pycuFree / pyFree / del - observe vars
* github load
* var order
* checking CUDA upgrade and code
* \_36\_ finish is zero-nono-time
* PyPI example
| github_jupyter |
.. R33_:
.. title:: R33
# Analysis of R33 Data
```
# Created on Sat May 1 15:12:38 2019
# @author: Semeon Risom
# @email: semeon.risom@gmail.com
# @url: https://semeon.io/d/R33-analysis
# @purpose: Hub for running processing and analysis.
```
## local import
# parameters
## set current date
## load passwords from yaml
```
import yaml
# Load credentials from a private YAML file kept outside the notebook.
with open('/Users/mdl-admin/Desktop/mdl/docs/source/examples/analysis/private.yaml', 'r') as _file:
    p = yaml.safe_load(_file)
```
## Download data from UTWeb SFTP server
Here you can download data from a remote server using SFTP. In this case, we are accessing the University of Texas UTWeb server to get online eyetracking data.
```
# login parameters (loaded from the private YAML above)
host = p['r33']['utweb']['hostname']
user = p['r33']['utweb']['username']
pwd = p['r33']['utweb']['password']
# download partial data backups
filetype = '.csv' # get only csvs
s = p['r33']['utweb']['path'] + 'part/' # path of backup data on server
d = '/Users/mdl-admin/Desktop/r33/data/raw/part/'
log, start, end, now = data.Download.SFTP(source=s, destination=d, hostname=host, username=user, password=pwd, filetype=filetype)
# download full data
filetype = '.csv' # get only csvs
s = p['r33']['utweb']['path'] # path of data on server
d = '/Users/mdl-admin/Desktop/r33/data/raw/full/'
log, start, end, now = data.Download.SFTP(source=s, destination=d, hostname=host, username=user, password=pwd, filetype=filetype)
```
## Download data from REDCap
Data from Research Electronic Data Capture (REDCap) can be downloaded. Here we are accessing participant data.
```
# login, path parameters for the REDCap API export
d = '/Users/mdl-admin/Desktop/r33/data/redcap/'
token = p['r33']['redcap']['token']
url = p['r33']['redcap']['url']
content = 'report'       # export a saved REDCap report (not raw records)
report_id = '6766'
# export
log, start, end, now = data.Download.REDCap(path=d, token=token, url=url, content=content, report_id=report_id)
```
##### Preprocessing
Clean up variable names, correct screensize for processing.
```
# Clean variable names / screensize over all raw CSVs, in parallel (6 cores).
source = '/Users/mdl-admin/Desktop/r33/data/raw/full/'
errors = Processing.preprocessing(source=source, isMultiprocessing=True, cores=6)
```
##### Summary data
```
# parameters
source = "/Users/mdl-admin/Desktop/r33/data/preprocessed/"
destination = "/Users/mdl-admin/Desktop/r33/data/processed/summary.xlsx"
metadata = "/Users/mdl-admin/Desktop/r33/data/metadata.csv"
# Processing: build per-participant summary workbook (plus HTML report).
df, errors, _ = Processing.summary(source=source, destination=destination, metadata=metadata, isHTML=True)
```
##### Definitions
```
# Export variable definitions from one example preprocessed file.
source = "/Users/mdl-admin/Desktop/r33/data/preprocessed/53_0abc.csv"
destination = "/Users/mdl-admin/Desktop/r33/data/processed/variables.xlsx"
df_variables, _ = Processing.variables(source=source, destination=destination, isHTML=True)
```
##### Device characteristics
```
# Summarise device (browser/OS/display) characteristics from the summary sheet.
source = "/Users/mdl-admin/Desktop/r33/data/processed/summary.xlsx"
destination = "/Users/mdl-admin/Desktop/r33/data/processed/device.xlsx"
df_device, _ = Processing.device(source=source, destination=destination, isHTML=True)
```
##### demographics characteristics
```
# Summarise demographics from the REDCap report export.
source = "/Users/mdl-admin/Desktop/r33/data/redcap/report.xlsx"
destination = "/Users/mdl-admin/Desktop/r33/data/processed/demographics.xlsx"
df_demographics = Processing.demographics(source=source, destination=destination, isHTML=True)
```
| github_jupyter |
#Klassifikation von Song-Texten
Die Aufgabe, oder auch das Ziel besteht darin, dass man anhand von Song-Texten vorhersagt von welchem Genre ein Lied ist. Bei der Umsetzung soll man unterschiedliche Methoden verwenden und diese untereinander auch vergleichen, dabei sollen die unterschiedlichen Methoden aus dem Machine Learning beziehungsweise aus dem Deep Learning Bereichen kommen. Die Umsetzung des Projektes ist in folgenden Schritten entwickelt worden:
1. Im ersten Schritt werden die Daten so weit vorbereitet Beziehungsweise bereinigt, sodass man mit ihnen die zuvor genannten Ziele auch erreichen kann. Die verwendeten Roh-Daten stammen von “https://www.kaggle.com/neisse/scrapped-lyrics-from-6-genres”.
2. Es wurde eine Baseline erstellt, um die Genauigkeit des neuronalen Netzwerks zu vergleichen. Dazu wurde Naive Bayes mit additiver Glättung verwendet.
3. Im dritten Schritt wurde ein Feed-Forward-Netzwerk erstellt.
4. Zum Schluss wurde noch ein Recurrent-Neural-Network trainiert.
Es werden die Genres Rock, Pop, Hip Hop, Samba, Sertanejo und Funk Carioca benutzt.
```
import pandas as pd
from sklearn.model_selection import train_test_split
from keras import models
from keras import layers
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import MultinomialNB
from keras.layers import Dropout
pd.options.mode.chained_assignment = None
# load dataset: lyrics and artist metadata come in two separate CSVs that are
# merged later on the artist link column.
pathLyrics = 'lyrics-data.csv'
pathArtists = 'artists-data.csv'
dataframe = pd.read_csv(pathLyrics)
dataframeArtists = pd.read_csv(pathArtists)
```
#Vorverarbeitung der Daten
Wie schon oben in der Kurzbeschreibung erwähnt, findet im ersten Schritt die Verarbeitung von Rohdaten statt, die so weit vorbereitet bzw. bereinigt werden müssen, sodass man diese für die verschiedene Modelle verwenden kann. Da es sich bei den Kaggle-Daten um große Datenmengen handelt, wurden die irrelevanten Datensätze entfernt. Einer der Bereinigungsschritte ist es, dass man sich nur auf englische Songtexte fokussiert, sodass die anderen Sprachen dabei entfernt werden. Die zwei importierten CSV-Dateien zu Genre und Texten werden zusammengeführt, sodass aus Tabelle Artist und der Tabelle Songs eine gemeinsame Tabelle erstellt wird. Diese Tabelle wird auf die nötigsten Informationen verkleinert um die zu verarbeitende Daten kleinzuhalten. Songtexte die irrelevante Sonderzeichen, Noten oder auch keine Texte beinhalten, werden auch entfernt. Des Weiteren ist einer der Schritte, dass alle Songtexte in kleingeschriebene Wörter umgewandelt werden, dadurch wird eine bessere Verarbeitung der Texte erreicht.
Ein Problem, welches bei der Datenvorbereitung aufgetreten ist, dass zwei identische Songtexte unterschiedliche Genres besitzen. Die Datensätze befinden sich in zwei getrennten CSV-Dateien. In der Datei "artists-data.csv" befinden sich neben weiteren Informationen der Artist und das Genre. Es sind ebenfalls in der Spalte "Genres" mehrere Genres angegeben, da ein Song auch mehrere Gernes wie zum Beispiel Pop und Hip Hop haben kann. In der CSV-Datei "lyrics-data.csv" befinden unter anderem der Songtext und das Idiom. Der Join der beiden CSV-Dateien wird über die Spalte "ALink" bei der CSV-Datei "lyrics-data.csv" und über die Spalte "Link" gemacht.
Die Zuordnung zwischen Artist und Song wird über die Spalte "ALink" bei der CSV-Datei "lyrics-data.csv" und über die Spalte "Link" bei der CVS-Datei "artists-data.csv" gemacht. Es kann nun vorkommen, dass ein Künstler, z.B. 10000 Maniacs, jeweils mit unterschiedlichen "Genre" aber gleichem "Link" im Datensatz vorkommt. Dies führt dazu, dass bei einem Join ein Song zweimal mit einem anderem Genre vorkommt. Zum Beheben dieses Problems wird nach dem Zusammenführen der CSV-Dateien über alle Zeilen iteriert und das Genre sowie der erste Eintrag in der Spalte "Genres" verglichen. Sollten diese nicht übereinstimmen, werden die entsprechenden Zeilen gelöscht.
```
from sklearn.utils import shuffle
# Keep only English lyrics.
dataframe = dataframe[dataframe['Idiom'] == 'ENGLISH']
# Lowercase every lyric for uniform tokenisation later.
dataframe['Lyric'] = dataframe['Lyric'].apply(lambda lyric: lyric.lower())
# Drop instrumentals / formatting-only rows.
patternDel = ["---", "instrumental","==="]
for patt in patternDel:
    # NOTE(review): `filter` shadows the Python builtin of the same name.
    filter = dataframe['Lyric'].str.contains(patt)
    dataframe = dataframe[~filter]
# Join the two CSV files: rename ALink -> Link so both frames share the key.
dataframe.rename(columns={'ALink':'Link','SName':'SName', 'SLink':'SLink','Lyric':'Lyric','Idiom':'Idiom'},inplace=True)
mergeData=pd.merge(dataframe, dataframeArtists, on='Link')
# Drop every row where "Genre" disagrees with the first entry of "Genres"
# (artists can appear once per genre, which duplicates songs after the join).
# Rows whose "Genres" cell is not a string (NaN) are dropped too.
listOfIndex=[]
for ind in mergeData.index:
    if type(mergeData['Genres'][ind]) is str:
        s_gerne = mergeData['Genre'][ind]
        # split the semicolon-separated "Genres" field
        s_gernres = mergeData['Genres'][ind].split(";")
        if s_gerne != s_gernres[0]:
            listOfIndex.append(ind)
    else:
        listOfIndex.append(ind)
mergeData.drop(listOfIndex,inplace=True )
# Rows are sorted by artist at this point, so shuffle before any split.
rawData = shuffle(mergeData)
```
Durch die Entfernung aller nicht englischen Songtexte, das Löschen aller Instrumentals und Ähnlichem sowie die Entfernung der mehrfach vorhandenen Songtexte stehen 61699 Datensätze zum Trainieren zur Verfügung.
#Baseline
Als Baseline zum Vergleich des neuronalen Netzwerkes wird Naive Bayes mit additiver Glättung verwendet. Die Datensätze wurden zu 80% Trainings- und Validierungsdaten sowie 20% für die Evaluierung aufgeteilt.
```
# Bag-of-words counts + integer-encoded genre labels, 80/20 train/test split.
vectorizer = CountVectorizer()
X_cnt = vectorizer.fit_transform(rawData['Lyric'])
label_encoder = LabelEncoder()
y_clean = label_encoder.fit_transform(rawData['Genre'])
X_train, X_test, y_train, y_test = train_test_split(X_cnt, y_clean, test_size=0.2, random_state=0)
## Train Naive Bayes baseline with additive (Laplace) smoothing.
nb = MultinomialNB(alpha=1.0)
nb.fit(X_train, y_train)
## Compute predictions on the held-out set.
y_predicted = nb.predict(X_test)
## Print the confusion matrix.
print("Kofusionsmatrix:\n", confusion_matrix(y_true=y_test, y_pred=y_predicted))
## Print evaluation metrics (micro-averaged).
print("Korrektklassifizierungsrate:\n", accuracy_score(y_true=y_test, y_pred=y_predicted))
print("Präzision (mikro):\n", precision_score(y_true=y_test, y_pred=y_predicted, average='micro'))
print("Ausbeute (mikro):\n", recall_score(y_true=y_test, y_pred=y_predicted, average='micro'))
print("F1 (mikro):\n", f1_score(y_true=y_test, y_pred=y_predicted, average='micro'))
```
In diesem Fall liefert der Klassifikator eine Genauigkeit von ca. 69%.
#Feed Forward Network (FFN)
Im Folgenden wird ein neuronales Netzwerk für die Vorhersage der Genres genutzt. Zuerst werden zwei Funktionen definiert, die später mehrfach benötigt werden. Die Funktion "vectorize_sequences" ist dafür zuständig, den Songtext als binären Vektor darzustellen, der One-Hot-Encoded ist. Dies ist notwendig, da das FFN immer die gleiche Input-Länge benötigt. Es werden 100000 Dimensionen verwendet. Zusätzlich werden alle Sonderzeichen gefiltert.
Die zweite Funktion "vectorize_labels" sorgt dafür, dass die Genres bzw. die Labels One-Hot-Encoded werden.
```
def vectorize_sequences(dataframe, dimensions=10000):
    """One-hot encode a collection of texts as binary bag-of-words vectors.

    Parameters
    ----------
    dataframe : iterable of str
        The raw song lyrics.
    dimensions : int, optional
        Vocabulary size: only the `dimensions` most frequent words are kept.
        NOTE(review): the surrounding markdown says 100000 dimensions but the
        default here is 10000 -- confirm which value is intended.

    Returns
    -------
    numpy.ndarray of shape (len(dataframe), dimensions)
        Binary matrix; entry [i, j] is 1 iff word j occurs in text i.
    """
    # FIX: the original also fitted an unused sklearn CountVectorizer and
    # computed unused integer sequences -- both were expensive dead code and
    # have been removed; the returned one-hot matrix is unchanged.
    tokenizer = Tokenizer(num_words=dimensions, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
    tokenizer.fit_on_texts(dataframe)
    one_hot_results = tokenizer.texts_to_matrix(dataframe, mode='binary')
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    return one_hot_results
def vectorize_labels(labels):
    """One-hot encode genre labels.

    Labels are first mapped to integer ids with a LabelEncoder, then expanded
    into a one-hot matrix via Keras' to_categorical.
    """
    encoder = preprocessing.LabelEncoder()
    integer_ids = encoder.fit_transform(labels)
    return to_categorical(integer_ids)
```
Die Liedtexte sowie die Genres werden in Vektoren umgewandelt. Die Aufteilung in Trainings- und Evaluierungsdaten ist 80/20.
```
# One-hot encode lyrics and genres; 80/20 train/test split for the FFN.
X_cnt = vectorize_sequences(rawData['Lyric'])
y_clean = vectorize_labels(rawData['Genre'])
X_train, X_test, y_train, y_test = train_test_split(X_cnt, y_clean, test_size=0.2, random_state=0)
```
Das Modell besteht aus insgesamt drei verschiedenen Schichten. Die Ausgabeschicht besteht insgesamt aus sechs Neuronen, für jede Kategorie eine. Da es sich um ein multi-class Klassifizierungsproblem handelt ist die Loss-Funktion "categorical_crossentropy". Die Aktivierungsfunktion ist "softmax". Die Ausgabewerte der Ausgabeschicht sind die Wahrscheinlichkeiten mit welchem die Genres klassifiziert werden.
```
# Feed-forward classifier: two hidden ReLU layers, softmax over the 6 genres.
# NOTE(review): input_shape=(10000,) must match the `dimensions` default of
# vectorize_sequences above (the markdown mentions 100000 -- confirm).
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(6, activation='softmax'))
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# 20% of the training data is held out for validation during fitting.
history = model.fit(X_train, y_train, epochs=50,batch_size=100, validation_split=0.2)
```
Die Grafik "Training and validation loss" zeigt, dass sich der Validation-Loss des Modells ab ca. 20 Epochen nicht weiter dem Training-Loss annähert. Es hat keine Überanpassung stattgefunden. Bei einer Überanpassung bewegt sich der Training-Loss gegen 0, während der Validation-Loss immer größer wird. In der Grafik "Training and validation accuracy" ist zu sehen, dass sich die Validation-Accuracy ab der Epoche 25 nicht mehr signifikant verändert.
```
# Plot training/validation loss and accuracy curves, then evaluate on test data.
history_dict = history.history
train_loss = history_dict['loss']
val_loss = history_dict['val_loss']
train_acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
epochs = range(1, len(train_loss) + 1)
plt.title('Training and validation loss')
plt.plot(epochs, train_loss, label='Training loss')
plt.plot(epochs, val_loss, label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid()
plt.show()
plt.title('Training and validation accuracy')
plt.plot(epochs, train_acc, label='Training accuracy')
plt.plot(epochs, val_acc, label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid()
plt.show()
#compute loss and accuracy on test data
score = model.evaluate( X_test, y_test, verbose=1)
print("Test score:", score[0])
print("Test accuracy:", score[1])
```
Nach dem Evaluieren des Vorhersagemodells kann eine Genauigkeit von 77% festgestellt werden. Im Vergleich zu Naive Bayes, das eine Genauigkeit von 69% besitzt, ist dies eine Steigerung um 8 Prozentpunkte.
#Recurrent Neural Networks (RNN)
Im Folgenden wird noch zusätzlich ein RNN trainiert und mit dem FFN verglichen. Es wurde zusätzlich ein vortrainierter Embedding-Layer benutzt. Dazu wurde GloVe für die Word-Embeddings benutzt. Die txt-Datei kann unter dem Link "http://nlp.stanford.edu/data/glove.6B.zip" heruntergeladen werden. Die Datei "glove.6B.300d.txt" muss sich im gleichen Verzeichnis wie das jpyter Notbook befinden.
Es werden die 10000 meist benutzen Wörter betrachtet. Die "embedding dimensionality" ist auf 300 gesetzt.
```
import numpy as np
import nltk
from keras import models
from keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Flatten
from keras.preprocessing import sequence
from keras.initializers import Constant
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Only the num_words most frequent words are considered by the tokenizer.
num_words = 10000
# Maximum sequence length (tokens per lyric) after padding/truncation.
max_len = 500
# Embedding dimensionality -- must match the GloVe file loaded below (300d).
emb_dim = 300
```
#Vorverarbeitung der Daten
Die Daten werden nochmals vorverarbeitet. Alle Stoppwörter werden aus dem Songtexten gelöscht. Dazu wird
Natural Language Toolkit (NLTK) benutzt.
```
# Download the stopword list and tokenizer models.
nltk.download('stopwords')
nltk.download("punkt")
# FIX: use a set for O(1) membership tests; the original kept a list, making
# every per-word lookup O(len(stop_words)). The filtering result is identical.
stop_words = set(stopwords.words('english'))
# Remove stopwords from every lyric: tokenize, filter, and re-join the words.
for ind in rawData.index:
    word_tokens = word_tokenize(rawData['Lyric'][ind])
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
    rawData['Lyric'][ind] = ' '.join(filtered_sentence)
# Tokenize all lyrics, pad/truncate to max_len, and one-hot encode the genres.
tokenizer = Tokenizer(num_words=num_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts(rawData['Lyric'])
sequences = tokenizer.texts_to_sequences(rawData['Lyric'])
X_clean = sequence.pad_sequences(sequences,maxlen=max_len)
Y_clean = vectorize_labels(rawData['Genre'])
X_train, X_test, y_train, y_test = train_test_split(X_clean, Y_clean, test_size=0.1, random_state=0, shuffle=True)
```
In diesem Abschnitt wird der vortrainierte Embedding-Layer erstellt. Der dazu notwendige Code wurde in einem [Tutorial](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html) von Francois Chollet veröffentlicht. Der Codeabschnitt befindet sich auf [GitHub](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html).
```
# Build the GloVe index: word -> 300-dim float32 vector.
embeddings_index = {}
with open('glove.6B.300d.txt', encoding="utf8") as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors.' % len(embeddings_index))
word_index = tokenizer.word_index
# Embedding matrix for the num_words most frequent words: row i holds the
# GloVe vector of the word with tokenizer index i (row 0 stays zero: padding).
# FIX: the original first built a full (len(word_index)+1, emb_dim) matrix and
# immediately discarded it; that redundant pass has been removed. The final
# matrix is identical.
num_words = min(num_words, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, emb_dim))
for word, i in word_index.items():
    if i >= num_words:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
```
Als eine spezielle Variante des RNN wird ein Long short-term memory (LSTM) verwendet. LSTM eignen sich sehr gut für Textklassifizierungsprobleme. Wie auch FFN wird "categorical_crossentropy" als Loss-Funktion genutzt. Als Aktivierungsfunktion wird 'softmax' genutzt.
Als Optimizer wird RMSprop benutzt.
```
# LSTM classifier on top of the frozen, GloVe-initialised embedding layer.
model = Sequential()
model.add((Embedding(num_words, emb_dim,
                     embeddings_initializer=Constant(embedding_matrix), input_length=max_len,
                     trainable=False)))
# BUG FIX: when stacking LSTM layers, every LSTM except the last must return
# the full sequence (3D output). Without return_sequences=True the second LSTM
# receives a 2D tensor and model construction raises an error.
model.add(LSTM(128, recurrent_dropout=0.8, dropout=0.8, return_sequences=True))
model.add(LSTM(128))
model.add(Dense(6, activation='softmax'))
model.summary()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=4, batch_size=100, validation_split=0.2)
# Plot loss/accuracy curves for the LSTM and evaluate on the test split.
history_dict = history.history
train_loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(train_loss) + 1)
plt.title('Training and validation loss')
plt.plot(epochs, train_loss, label='Training loss')
plt.plot(epochs, val_loss, label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid()
plt.show()
train_acc = history_dict['acc']
val_acc = history_dict['val_acc']
epochs = range(1, len(train_loss) + 1)
plt.title('Training and validation accuracy')
plt.plot(epochs, train_acc, label='Training accuracy')
plt.plot(epochs, val_acc, label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid()
plt.show()
# compute loss and accuracy on test data
score = model.evaluate(X_test, y_test, verbose=1)
print("Test score:", score[0])
print("Test accuracy:", score[1])
# NOTE(review): the accuracy plot below is an exact duplicate of the one above
# -- probably a leftover copy/paste cell; consider removing it.
history_dict = history.history
train_acc = history_dict['acc']
val_acc = history_dict['val_acc']
epochs = range(1, len(train_loss) + 1)
plt.title('Training and validation accuracy')
plt.plot(epochs, train_acc, label='Training accuracy')
plt.plot(epochs, val_acc, label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid()
plt.show()
```
| github_jupyter |
# Import Library
```
import pandas as pd
import numpy as np
import fasttext
import fasttext.util
from keras.layers import Input, Dense, Embedding, Conv2D, MaxPool2D
from keras.layers import Reshape, Flatten, Dropout, Concatenate
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import Model
from sklearn.model_selection import train_test_split
from keras import layers
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import classification_report, f1_score
from keras.layers.pooling import MaxPooling2D
```
# Data Import and Preprocess
```
# Reading CSV from link
def read_csv_from_link(url):
    """Download a tab-separated file from a Google Drive share link.

    The file id is the second-to-last path segment of the share URL; it is
    turned into a direct-download URL and read with pandas (malformed lines
    are skipped, no header row is assumed).
    """
    file_id = url.split('/')[-2]
    download_url = 'https://drive.google.com/uc?export=download&id=' + file_id
    return pd.read_csv(download_url, delimiter="\t", error_bad_lines=False, header=None)
# Loading All Data (train/dev/test TSVs hosted on Google Drive)
tamil_train = read_csv_from_link('https://drive.google.com/file/d/15auwrFAlq52JJ61u7eSfnhT9rZtI5sjk/view?usp=sharing')
tamil_dev = read_csv_from_link('https://drive.google.com/file/d/1Jme-Oftjm7OgfMNLKQs1mO_cnsQmznRI/view?usp=sharing')
tamil_test = read_csv_from_link('https://drive.google.com/file/d/10RHrqXvIKMdnvN_tVJa_FAm41zaeC8WN/view?usp=sharing')
# Tamil Preprocess: keep the first two columns and name them text/label.
tamil_train = tamil_train.iloc[:, 0:2]
tamil_train = tamil_train.rename(columns={0: "text", 1: "label"})
tamil_dev = tamil_dev.iloc[:, 0:2]
tamil_dev = tamil_dev.rename(columns={0: "text", 1: "label"})
# Stats: class distribution of the training labels.
tamil_train['label'] = pd.Categorical(tamil_train.label)
tamil_dev['label'] = pd.Categorical(tamil_dev.label)
print(tamil_train['label'].value_counts())
```
# Training Fasttext
```
import emoji
# Latin lowercase letters, used to decide whether a token is romanised text
# (as opposed to Tamil script). BUG FIX: the original list accidentally
# omitted 's', so words starting with 's' skipped the repeated-trailing-
# character trimming in preprocess() below.
characters = ['a','b','c','d','e','f','g','h','i','j','k','l','m',
              'n','o','p','q','r','s','t','u','v','w','x','y','z']

def convert_emoticons(text):
    # NOTE(review): EMOTICONS and `re` are not defined/imported anywhere in
    # this notebook, so calling this raises NameError. It is currently unused
    # (the call in preprocess is commented out) -- add the imports before
    # enabling it.
    for emot in EMOTICONS:
        text = re.sub(u'('+emot+')', "_".join(EMOTICONS[emot].replace(",","").split()), text)
    return text

def preprocess(text):
    """Normalise one comment: demojize, lowercase, replace basic punctuation
    with spaces, and collapse repeated trailing characters of romanised words
    (e.g. "helooo" -> "helo")."""
    text = emoji.demojize(text, delimiters=("", ""))
    #text = convert_emoticons(text)
    res = text.lower()
    res = res.replace('_', ' ')
    res = res.replace('.', ' ')
    res = res.replace(',', ' ')
    res = res.strip()
    words = res.split()
    for i, word in enumerate(words):
        if word[0] in characters:          # only touch romanised tokens
            if len(word) < 3:
                continue
            # Trim repeated trailing characters down to a single occurrence,
            # never shrinking the word below two characters.
            while words[i][-1] == words[i][-2]:
                if len(words[i]) < 2:
                    break
                words[i] = words[i][:-1]
                if len(words[i]) < 2:
                    break
    sen = " ".join(words)
    return sen
# Preprocess every train/dev comment.
train_text = []
for key, value in tamil_train['text'].iteritems():
    train_text.append(preprocess(value))
dev_text = []
for key, value in tamil_dev['text'].iteritems():
    dev_text.append(preprocess(value))
tamil_train['text'] = pd.DataFrame(train_text)
tamil_dev['text'] = pd.DataFrame(dev_text)
# Build the corpus for unsupervised fasttext training, skipping non-Tamil rows.
# BUG FIX: the original used the bare name `label` (NameError at runtime)
# instead of the column name 'label', and the dev loop indexed tamil_train
# instead of tamil_dev.
# NOTE(review): 'not-Tamil ' carries a trailing space here, while the label-
# encoding dict below uses 'not-Tamil' without one -- confirm which spelling
# the dataset actually contains.
corpus = []
for i, sen in enumerate(tamil_train['text']):
    if tamil_train['label'][i] == 'not-Tamil ':
        continue
    if i == 0:
        continue
    corpus.append(preprocess(tamil_train['text'][i]))
for i, sen in enumerate(tamil_dev['text']):
    if tamil_dev['label'][i] == 'not-Tamil ':
        continue
    if i == 0:
        continue
    corpus.append(preprocess(tamil_dev['text'][i]))
# NOTE(review): this writes the Python repr of a list, not one sentence per
# line -- fasttext will see brackets/quotes as tokens; verify this is intended.
with open("corpus.txt", "w") as output:
    output.write(str(corpus))
# Train unsupervised skipgram model
unsuper_model = fasttext.train_unsupervised('/home/punyajoy/corpus.txt',"skipgram", dim=300)
```
# Train and Test set
```
# function to build vocabulary and inverse vocabulary dictionary
def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.

    Each sentence is a list of tokens. Returns [vocabulary, vocabulary_inv]
    where vocabulary maps word -> index and vocabulary_inv maps index -> word
    (words in sorted order).
    """
    # BUG FIX: Counter and itertools were never imported in this notebook, so
    # calling this function raised NameError; import them locally.
    from collections import Counter
    import itertools
    # Count word frequencies over all sentences.
    word_counts = Counter(itertools.chain(*sentences))
    # Mapping from index to word, in deterministic (sorted) order.
    vocabulary_inv = sorted(x[0] for x in word_counts.most_common())
    # Mapping from word to index.
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
# Prepare X_train by replacing text with fasttext embeddings
def build_input_data(sentences, labels):
    """Convert tokenised sentences into fasttext word-vector arrays.

    Every token is replaced by its embedding from the unsupervised fasttext
    model; labels pass through unchanged as a numpy array. Returns [x, y].
    """
    embedded = []
    for sentence in sentences:
        vectors = [unsuper_model.get_word_vector(token) for token in sentence]
        embedded.append(np.array(vectors))
    x = np.array(embedded)
    y = np.array(labels)
    return [x, y]
# padding sentence for uniform input size
def pad_sentences(sentences, padding_word="<PAD/>", sequence_length=15):
    """
    Pads all sentences to the same length.

    Each input string is whitespace-tokenised, then truncated or right-padded
    with `padding_word` to exactly `sequence_length` tokens.

    Parameters
    ----------
    sentences : iterable of str
    padding_word : str, optional
        Token used to fill short sentences.
    sequence_length : int, optional
        Target token count. FIX: this was hard-coded to 15 (and the docstring
        wrongly claimed "length of the longest sentence"); it is now a
        backward-compatible parameter, and the truncation uses the same value
        instead of a duplicated literal 15.

    Returns
    -------
    list[list[str]]
        One token list of length `sequence_length` per input sentence.
    """
    padded_sentences = []
    for raw in sentences:
        tokens = raw.strip().split(" ")
        if len(tokens) > sequence_length:
            padded_sentences.append(tokens[:sequence_length])
        else:
            padded_sentences.append(tokens + [padding_word] * (sequence_length - len(tokens)))
    return padded_sentences
def load_data(train_text,label):
    """Pad the raw texts and map them to fasttext embedding tensors.

    Returns a two-element list [x, y]: the embedded, padded inputs and the
    labels, both as numpy arrays (see pad_sentences / build_input_data).
    """
    padded = pad_sentences(train_text)
    #vocabulary, vocabulary_inv = build_vocab(padded)
    inputs, targets = build_input_data(padded, label)
    print(type(inputs))
    return [inputs, targets]
# Loading train set
x_train, y_train = load_data(tamil_train["text"],tamil_train["label"])
# Encoding labels: map each label string to an integer class id.
# NOTE(review): 'Offensive_Untargetede' looks misspelled but presumably matches
# the dataset's own label spelling -- verify against the raw TSV.
x_train = np.asarray(x_train)
coded = dict({'Not_offensive':0, 'Offensive_Targeted_Insult_Group':1,
              'Offensive_Targeted_Insult_Individual':2,
              'Offensive_Untargetede':3,
              'not-Tamil' :4,
              'Offensive_Targeted_Insult_Other':5})
for i,j in enumerate(y_train):
    y_train[i] = coded[j]
# Reshape to (samples, tokens, embedding_dim, channels) for the 2D CNN.
x_train = x_train.reshape(x_train.shape[0], 15, 300,1)
from keras.utils import to_categorical
y_train = to_categorical(y_train)
# Loading dev set
x_dev, y_dev = load_data(tamil_dev["text"],tamil_dev["label"])
for i,j in enumerate(y_dev):
    y_dev[i] = coded[j]
x_dev = x_dev.reshape(x_dev.shape[0], 15, 300, 1)
y_dev = to_categorical(y_dev)
# Loading test set (still has integer column names 0/1, not text/label).
x_test, y_test = load_data(tamil_test[0],tamil_test[1])
x_test = x_test.reshape(x_test.shape[0],15,300,1)
for i,j in enumerate(y_test):
    y_test[i] = coded[j]
y_test = to_categorical(y_test)
```
# CNN Model
```
from keras.layers.pooling import MaxPooling2D
# CNN over the (15 tokens x 300-dim embedding x 1 channel) input.
# Active graph: inputs -> conv_0 -> conv_1 -> conv_2 -> pool1 -> flatten
#               -> hidden1 -> output.
# NOTE(review): conv_3, conv_4, drop and pool0 are built but never connected
# to `output`, so Model() silently drops them -- dead layers, consider removing.
inputs = Input(shape=(15, 300, 1))
#embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=sequence_length)(inputs)
conv_0 = Conv2D(64, kernel_size=5, activation='relu', padding='valid')(inputs)
conv_1 = Conv2D(32, kernel_size=3, activation='relu', padding='valid')(conv_0)
conv_2 = Conv2D(32, kernel_size=3, activation='relu', padding='valid')(conv_1)
drop = Dropout(0.6)(conv_2)
conv_3 = Conv2D(16, kernel_size=3, activation='relu')(drop)
pool0 = MaxPooling2D(pool_size=(2, 2), padding='valid')(conv_1)
conv_4 = Conv2D(16, kernel_size=3, activation='relu')(pool0)
pool1 = MaxPooling2D(pool_size=(2, 2), padding='valid')(conv_2)
# pool2 = MaxPooling2D(pool_size=(2, 2), padding='valid')(conv_3)
# maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)
# maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)
# maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)
# concatenated_tensor = Concatenate(axis=1)([pool0, pool1, pool2])
flatten = Flatten()(pool1)
hidden1 = Dense(128, activation='relu')(flatten)
output = Dense(6, activation='softmax')(hidden1)
# this creates a model that includes
# NOTE(review): the two Sequential() instances are immediately overwritten by
# the functional Model() calls below -- dead assignments.
model1 = Sequential() # To train the model on dataset
model2 = Sequential() # To extract embeddings from cnn layer
model1 = Model(inputs=inputs, outputs=output)   # full classifier
model2 = Model(inputs=inputs, outputs=hidden1)  # 128-d sentence-embedding extractor
# NOTE(review): this custom Adam (lr=1e-4) is never used -- compile() receives
# the string 'adam' (default lr 1e-3). Pass `optimizer=adam` if 1e-4 is intended.
adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Training the model: one epoch at a time with manual early stopping on the
# weighted dev F1; whenever the dev F1 improves, 128-d sentence embeddings are
# extracted with model2 and saved to disk.
# NOTE(review): the inner `for i,j in enumerate(...)` loops reuse and clobber
# the outer epoch counter `i` (harmless here since `i` is not used after, but
# confusing); and the break message says "5 itr" while the condition is cnt>=7.
epoch = 50
cnt=0
best_f1 = 0
for i in range(epoch):
    model1.fit(x_train, y_train, epochs=1,validation_data = (x_dev,y_dev))
    # Weighted F1 on the training set.
    pred = model1.predict(x_train)
    prediction = []
    for i,j in enumerate(pred):
        a = np.argmax(j)
        prediction.append(a)
    y_true =[]
    for i,j in enumerate(y_train):
        a = np.argmax(j)
        y_true.append(a)
    train_f1 = f1_score(y_true, prediction, average='weighted')
    print("train f1 - ",train_f1)
    # Weighted F1 on the dev set (early-stopping criterion).
    pred = model1.predict(x_dev)
    prediction = []
    for i,j in enumerate(pred):
        a = np.argmax(j)
        prediction.append(a)
    y_true =[]
    for i,j in enumerate(y_dev):
        a = np.argmax(j)
        y_true.append(a)
    val_f1 = f1_score(y_true, prediction, average='weighted')
    # Updating best F1 score and saving corresponding embeddings
    if(val_f1>best_f1):
        cnt =0
        best_f1 = val_f1
        x_train_dense_cnn = model2.predict(x_train)
        x_dev_dense_cnn = model2.predict(x_dev)
        x_test_dense_cnn = model2.predict(x_test)
        np.save('/home/punyajoy/Dravidian_Offensive_Classification/sentence_embeddings/cnn_emb_dev_128_tamil.npy',x_dev_dense_cnn)
        np.save('/home/punyajoy/Dravidian_Offensive_Classification/sentence_embeddings/cnn_emb_train_128_tamil.npy',x_train_dense_cnn)
        np.save('/home/punyajoy/Dravidian_Offensive_Classification/sentence_embeddings/cnn_emb_test_128_tamil.npy',x_test_dense_cnn)
    else:
        cnt+=1
    # loop break condition: stop after 7 epochs without dev-F1 improvement
    if(cnt>=7):
        print("NO increase for 5 itr, breaking....")
        break
```
| github_jupyter |
# Introduction to Data Science – Text Munging Exercises
*COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/*
## NLP
### Exercise 1.1: Frequent Words
Find the most frequently used words in Moby Dick which are not stopwords and not punctuation. Hint: [`str.isalpha()`](https://docs.python.org/3/library/stdtypes.html#str.isalpha) could be useful here.
```
import nltk
from nltk.book import *

# A set gives O(1) membership tests (the list returned by nltk is O(n) per
# lookup). The original `from nltk.corpus import stopwords` import was
# immediately shadowed by this assignment, so it has been dropped.
stopwords = set(nltk.corpus.stopwords.words('english'))

# text1 (Moby Dick) is provided by nltk.book; FreqDist counts token frequencies.
frequency_dist = FreqDist(text1)
print(frequency_dist)

# Start from the 500 most frequent (word, count) pairs, then drop stopwords...
most_common = frequency_dist.most_common(500)
filtered_words = [word_tuple for word_tuple in most_common if word_tuple[0].lower() not in stopwords]
print(filtered_words[0:50])

# ...and finally drop punctuation/numbers: str.isalpha() keeps letter-only tokens.
filtered_words = [word_tuple for word_tuple in filtered_words if word_tuple[0].isalpha()]
filtered_words[0:50]
```
## Exercise 2.1
You're an evil Spammer who's observed that many people try to obfuscate their e-mail using this notation: "`alex at utah dot edu`". Below are three examples of such e-mails text. Try to extract "alex at utah dot edu", etc. Start with the first string. Then extend your regular expression to work on all of them at the same time. Note that the second and third are slightly harder to do!
```
import re
html_smart = "You can reach me: alex at utah dot edu"
html_smart2 = "You can reach me: alex dot lex at utah dot edu"
html_smart3 = "You can reach me: alex dot lex at sci dot utah dot edu"
def testRegex(regex):
for html in (html_smart, html_smart2, html_smart3):
print(re.search(regex, html).group())
# TODO write your regex here
mail_regex = "\w+\sat\s\w+\sdot\s\w+"
testRegex(mail_regex)
better_regex = "((\w+\s)+(\sdot)*)+at\s\w+\sdot\s\w+"
testRegex(better_regex)
best_regex = "((\w+\s)+(\sdot)*)+at(\s\w+\sdot)+\s\w+"
testRegex(best_regex)
```
## Exercise 2.2: Find Adverbs
Write a regular expression that finds all adverbs in a sentence. Adverbs are characterized by ending in "ly".
```
# Sample sentence; the adverbs are the "-ly" tokens.
text = "He was carefully disguised but captured quickly by police."

# \w+ grabs a run of word characters; anchoring "ly" at the end of the run
# picks out the adverbs.
adverb_pattern = re.compile(r"\w+ly")
adverb_pattern.findall(text)
```
### Exercise 2.3: Phone Numbers
Extract the phone numbers that follow a (xxx) xxx-xxxx pattern from the text:
```
# Numbers that do not fit the (ddd) ddd-dddd shape must be ignored.
phone_numbers = "(857) 131-2235, (801) 134-2215, but this one (12) 13044441 shouldnt match. Also, this is common in twelve (12) countries and one (1) state"

# \( and \) match literal parentheses; {m} pins the exact digit counts so the
# short "(12)" groups cannot match.
us_phone = re.compile(r"\([0-9]{3}\)\s[0-9]{3}-[0-9]{4}")
us_phone.findall(phone_numbers)
```
### Exercise 2.4: HTML Content
Extract the content between the `<b>` and `<i>` tags but not the other tags:
```
# Only the <b> and <i> bodies are wanted; the lazy .*? stops at the first
# closing tag, so the <u> content is never pulled in.
html_tags = "This is <b>important</b> and <u>very</u><i>timely</i>"

tag_content = re.compile(r"<[bi]>(.*?)<\/[bi]>")
tag_content.findall(html_tags)
```
| github_jupyter |
<div class="alert alert-block alert-info" style="margin-top: 20px">
<a href="https://cocl.us/topNotebooksPython101Coursera">
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png" width="750" align="center">
</a>
</div>
<h1>Reading Files Python</h1>
<p><strong>Welcome!</strong> This notebook will teach you about reading the text file in the Python Programming Language. By the end of this lab, you'll know how to read text files.</p>
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ul>
<li><a href="download">Download Data</a></li>
<li><a href="read">Reading Text Files</a></li>
<li><a href="better">A Better Way to Open a File</a></li>
</ul>
<p>
Estimated time needed: <strong>40 min</strong>
</p>
</div>
<hr>
<h2 id="download">Download Data</h2>
```
# Download Example1.txt into /resources/data (-O overwrites any existing copy)
!wget -O /resources/data/Example1.txt https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
```
<hr>
<h2 id="read">Reading Text Files</h2>
One way to read or write a file in Python is to use the built-in <code>open</code> function. The <code>open</code> function provides a <b>File object</b> that contains the methods and attributes you need in order to read, save, and manipulate the file. In this notebook, we will only cover <b>.txt</b> files. The first parameter you need is the file path and the file name. An example is shown as follow:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadOpen.png" width="500" />
The mode argument is optional and the default value is <b>r</b>. In this notebook we only cover two modes:
<ul>
<li><b>r</b> Read mode for reading files </li>
<li><b>w</b> Write mode for writing files</li>
</ul>
For the next example, we will use the text file <b>Example1.txt</b>. The file is shown as follow:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadFile.png" width="200" />
We read the file:
```
# Read the Example1.txt
# open() returns a file object; "r" opens the file for reading in text mode
example1 = "/resources/data/Example1.txt"
file1 = open(example1, "r")
```
We can view the attributes of the file.
The name of the file:
```
# Print the path of the file (the name attribute holds the path passed to open)
file1.name
```
The mode the file object is in:
```
# Print the mode the file object was opened with, either 'r' or 'w'
file1.mode
```
We can read the file and assign it to a variable :
```
# Read the whole file into a single string
FileContent = file1.read()
FileContent
```
The <b>\n</b> means that there is a new line.
We can print the file:
```
# Print the file contents; each '\n' renders as a new line
print(FileContent)
```
The file is of type string:
```
# The file content is an ordinary Python string
type(FileContent)
```
We must close the file object:
```
# Close the file to release the underlying OS handle
file1.close()
```
<hr>
<h2 id="better">A Better Way to Open a File</h2>
Using the <code>with</code> statement is better practice, it automatically closes the file even if the code encounters an exception. The code will run everything in the indent block then close the file object.
```
# Open file using with: the file is closed automatically when the block
# exits, even if an exception is raised inside it
with open(example1, "r") as file1:
    FileContent = file1.read()
    print(FileContent)
```
The file object is closed, you can verify it by running the following cell:
```
# Verify the file is closed (True once the with-block has exited)
file1.closed
```
We can see the info in the file:
```
# The content read inside the with-block is still available afterwards
print(FileContent)
```
The syntax is a little confusing as the file object is after the <code>as</code> statement. We also don’t explicitly close the file. Therefore we summarize the steps in a figure:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadWith.png" width="500" />
We don’t have to read the entire file, for example, we can read the first 4 characters by entering four as a parameter to the method **.read()**:
```
# Read only the first four characters
with open(example1, "r") as file1:
    print(file1.read(4))
```
Once the method <code>.read(4)</code> is called the first 4 characters are called. If we call the method again, the next 4 characters are called. The output for the following cell will demonstrate the process for different inputs to the method <code>read()</code>:
```
# Read certain amounts of characters; each read(n) continues from where the
# previous one stopped
with open(example1, "r") as file1:
    print(file1.read(4))
    print(file1.read(4))
    print(file1.read(7))
    print(file1.read(15))
```
The process is illustrated in the below figure, and each color represents the part of the file read after the method <code>read()</code> is called:
<img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%204/Images/ReadChar.png" width="500" />
Here is an example using the same file, but instead we read 16, 5, and then 9 characters at a time:
```
# Same file, different chunk sizes: 16, 5, then 9 characters
with open(example1, "r") as file1:
    print(file1.read(16))
    print(file1.read(5))
    print(file1.read(9))
```
We can also read one line of the file at a time using the method <code>readline()</code>:
```
# Read one line; readline() returns it including the trailing newline
with open(example1, "r") as file1:
    print("first line: " + file1.readline())
```
We can use a loop to iterate through each line:
```
# Iterate through the lines
with open(example1,"r") as file1:
    # enumerate() supplies the running line number, replacing the manual
    # counter (and its stray C-style semicolons) from the original.
    for i, line in enumerate(file1):
        print("Iteration", str(i), ": ", line)
```
We can use the method <code>readlines()</code> to save the text file to a list:
```
# Read all lines and save them as a list of strings (one element per line)
with open(example1, "r") as file1:
    FileasList = file1.readlines()
```
Each element of the list corresponds to a line of text:
```
# NOTE: in a notebook only the last bare expression of a cell is displayed.
# Print the first line
FileasList[0]
# Print the second line
FileasList[1]
# Print the third line
FileasList[2]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Hkherdekar/Covid19/blob/master/Covid19.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#!pip install apache-airflow
#importing libraries
import pandas as pd #Data processing
import numpy as np #Matrix/Linear algebraic operations
import random as rd #Random number
from datetime import datetime
from datetime import date, timedelta #date formats
import pytz
#Visualization packages
from matplotlib import pyplot as plt
import seaborn as sns
#Ignore Warnings
import warnings
warnings.filterwarnings("ignore")
from pandas import Series
#Visualization using dash and plotly
import dash
import dash_html_components as html
import dash_core_components as dcc
import requests
from pandas import DataFrame as df
import plotly.graph_objects as go
import dash_bootstrap_components as dbc
import plotly.express as px
from async_pull import fetch_historic
from datetime import datetime as dt #created alias for datetime
pst = pytz.timezone('America/Los_Angeles')
# Base URL of the JHU CSSE daily-report csv files
path_to_directory = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'

#importing data (only last updated daily data file) and converting to pandas dataframe
# Daily report files are named MM-DD-YYYY.csv; yesterday's file is the most
# recent one guaranteed to exist at any time of day
date = datetime.now(pst) - timedelta(days=1)
date = date.strftime('%m-%d-%Y')
url = path_to_directory + date + '.csv'
CovidToday = pd.read_csv(url);
CovidToday.head()

#importing timeseries data and converting to pandas dataframe
#The file links are hardcoded as they are auto-appended to the same file location
CovidCasesUS = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv')
CovidCasesGlobal = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
CovidDeathsUS = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv')
CovidDeathsGlobal = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
CovidRecoveredGlobal = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
app = dash.Dash()

#Colors for the front end
colors = {
    'background': '#2D2D2D',
    'text': '#E1E2E5',
    'testForfigure': '#ffffff',
    'textColorForConfirmed':'#3CA4FF',
    'textColorForDeaths':'#f44336',
    'texColorForRecovered':'#5A9E6F',
    'textColorForHighestCases':'#393939',
}
#Create local styles for customization of the dashboard panels
divBorderStyle = {
    'backgroundColor' : '#393939',
    'borderRadius': '12px',
    'lineHeight': 0.9,
}
def getGlobalMapData():
    """Stack the three global time-series frames (deaths, cases, recovered) into one DataFrame."""
    frames = [CovidDeathsGlobal, CovidCasesGlobal, CovidRecoveredGlobal]
    return pd.concat(frames)
#Data about coronavirus testing taken from our world in data github account to be updated daily.
CovidTesting = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv')
CovidTesting.head()

#Data about population, keeping two copies for options.
WorldPopulation1 = pd.read_csv('https://raw.githubusercontent.com/Hkherdekar/Covid19/master/WPP2019_TotalPopulationBySex.csv')
WorldPopulation2 = pd.read_csv('https://raw.githubusercontent.com/Hkherdekar/Covid19/master/population-figures-by-country-csv_csv.csv')
WorldPopulation1.head()
WorldPopulation2.head()
# Using yesterday's date to pick the matching time-series column for a join.
# JHU time-series headers are unpadded (e.g. "1/2/20"), so the label is built
# from the date parts directly; the previous strftime('%m/%d/%y')[1:] only
# stripped the month's leading zero and produced wrong labels for double-digit
# months and for zero-padded single-digit days.
date = datetime.now(pst) - timedelta(days=1)
jhu_col = f"{date.month}/{date.day}/{date.year % 100:02d}"
date = date.strftime('%m/%d/%y')  # keep the string form for any later use
print(jhu_col)
#Breakup the data to get limited columns
print(CovidCasesGlobal.head())
print(CovidDeathsGlobal.head())
GlobalCases = CovidCasesGlobal.filter(items=['Country/Region', jhu_col])
GlobalDeaths = CovidDeathsGlobal.filter(items=['Country/Region', jhu_col])
GlobalCases.join(GlobalDeaths.set_index('Country/Region'), on='Country/Region', lsuffix='_cases', rsuffix='_deaths')
CovidTesting.head()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/aayushkumar20/ML-based-projects./blob/main/Lane%20Detection/Lane%20Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# NOTE: these were "!import ..." lines in the original notebook — the "!"
# escape runs the line in a subshell, so nothing was actually imported into
# the Python kernel and the later np/cv2 references would raise NameError.
# Plain imports are required.
import os
import numpy as np
import tkinter as tk
import cv2
#Importing all the required modules on the local python environment.
#Please make sure that you have installed all the modules properly.
#Missing any of these modules (or part of one) can cause errors.
from tkinter import *
from PIL import Image, ImageTk
#Importing the required names from the installed modules.

# Placeholder black frames kept until the first video frame is decoded.
# (Module-level `global` statements from the original were no-ops and dropped.)
last_frame1 = np.zeros((480,640,3), np.uint8)
last_frame2 = np.zeros((480,640,3), np.uint8)

# Video capture objects for the two panes.
cap1=cv2.VideoCapture("./videos/video1.mp4") #Change the path to your video file
cap2=cv2.VideoCapture("./videos/video2.mp4") #Change the path to your video file
#In case you are using a linux machine or a unix based machine
#Please specify the camera module location or the port associated with the camera module.
#Defining the first camera module for video capture and the properties related to the camera module.
def show_vid():
    """Poll one frame from cap1, display it in lmain1, and reschedule itself."""
    global last_frame1
    if not cap1.isOpened():
        print("Error opening video stream or file")
    flag1, frame1 = cap1.read()
    if not flag1:
        # cap1.read() returns (False, None) at end-of-stream or on error, so
        # the flag must be checked BEFORE resizing — cv2.resize(None, ...)
        # raises. (The original tested `flag1 is None`, which is never true
        # for a bool, and resized first.)
        print("No frame read")
    else:
        frame1 = cv2.resize(frame1, (640, 480))
        last_frame1 = frame1.copy()
        pic = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(pic)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain1.imgtk = imgtk  # Shows frame for first video; keep a reference so Tk doesn't garbage-collect it
        lmain1.configure(image=imgtk)
        lmain1.after(10, show_vid)
#Defining the second camera modules for video capture and the properties related to the second modules.
def show_vid2():
    """Poll one frame from cap2, display it in lmain2, and reschedule itself."""
    global last_frame2
    if not cap2.isOpened():
        print("Error opening video stream or file")
    flag2, frame2 = cap2.read()
    if not flag2:
        # Check the read flag BEFORE resizing: cap2.read() returns
        # (False, None) on failure and cv2.resize(None, ...) raises.
        print("No frame read")
    else:
        frame2 = cv2.resize(frame2, (640, 480))
        last_frame2 = frame2.copy()
        pic = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(pic)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain2.imgtk = imgtk  # Shows frame for second video; keep a reference so Tk doesn't garbage-collect it
        lmain2.configure(image=imgtk)
        lmain2.after(10, show_vid2)
if __name__ == '__main__':
    # Build the Tk window: a logo header, a caption, and two side-by-side
    # panes that are driven by the show_vid()/show_vid2() callbacks.
    root=tk.Tk()
    img=ImageTk.PhotoImage(Image.open("logo.png"))
    heading=Label(root,image=img,bg="black",text="Video Comparison",fg="white",font=("Helvetica",20))
    #heading.pack(background="black",fill=BOTH)
    heading.pack()
    heading2=Label(root,pady=20,text="Video 1",font=("Helvetica",20))
    heading2.configure(background="black",fg="white")
    heading2.pack()
    # Labels that hold the video frames (updated by the show_vid* callbacks).
    lmain1=tk.Label(master=root)
    lmain2=tk.Label(master=root)
    lmain1.pack(side=LEFT)
    lmain2.pack(side=RIGHT)
    root.title("Lanne Detector")
    root.geometry("1280x720")
    # NOTE(review): .pack() returns None, so exitbutton is always None —
    # harmless here since the variable is never used again.
    exitbutton=Button(root,text="Exit",command=root.destroy,font=("Helvetica",20),fg="red").pack(side=BOTTOM)
    # Kick off the periodic frame-polling callbacks, then enter the Tk loop.
    show_vid()
    show_vid2()
    root.mainloop()
    # NOTE(review): cap2 is never released — consider cap2.release() as well.
    cap1.release()
```
| github_jupyter |
作业五 相似度计算
任务描述:采用word2vec方法,进行句子相似度计算训练。
给出一个有关句子相似度的二分类数据集msr_paraphrase(包含train、test、README三个文件),其中第一列数字1代表相似,0代表不相似。
选择文件train中的string1&2部分作为训练语料,选择文件test计算句子相似度,然后与标注结果比较,输出你认为合适的分类阈值,以及该阈值下的准确率Accuracy,精确率Precision,召回率Recall和F1值(精确到小数点后两位)。
句向量相似度计算方式:
首先对句子分词,获取每个词的词向量,然后将所有的词向量相加求平均,得到句子向量,最后计算两个句子向量的余弦值(余弦相似度)。
Word2vec部分,使用Gensim的Word2Vec训练模型,自行调整合适的参数。
注意可能会出现word xx not in vocabulary的情况,这是由于训练时默认参数min_count=5,会过滤掉低频词。可以手动设定min_count=1,或者在计算句向量时,遇到低频词则忽略。自行选择其一,并注释。
gensim镜像安装方式
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple gensim
导入方式from gensim.models import word2vec
**1.读取文本并对句子分词**
```
import pandas as pd
import nltk.tokenize as tk
import re

# Read the train file; keep_default_na=False keeps empty strings instead of
# NaN, which avoids problems later on
data = pd.read_csv('msr_train.csv',keep_default_na=False)
num_row, num_col = (data.shape) # record the row/column counts of the train set
sentences= [] # tokenised sentences collected from the train set
pattern = re.compile("[^a-zA-Z0-9\n ]")# pattern used to strip punctuation
def formating(words):
    """Keep only the alphabetic content of each token, lower-cased.

    Non-letter characters are stripped from every token; tokens that end up
    empty (pure punctuation or digits) are dropped.
    """
    # Compile once instead of rebuilding the pattern on every loop iteration,
    # and reuse the stripped word instead of running re.sub a second time.
    letters_only = re.compile("[^a-zA-Z\n ]")
    cleaned = []
    for token in words:
        word = letters_only.sub("", token).lower()
        if word != '':
            cleaned.append(word)
    return cleaned
for i in range(num_row): # tokenise both sentences of every row
    words_1 = tk.word_tokenize(data['#1 String'].loc[i])
    words_2 = tk.word_tokenize(data['#2 String'].loc[i])
    sentences.append(formating(words_1))
    sentences.append(formating(words_2))
print(sentences[:5]) # print a few of them as a sanity check
```
**2.用Word2vec训练出每个词的词向量**
```
from gensim.models import word2vec

# Train the skip-gram model; default window=5. min_count=1 keeps low-frequency
# words in the vocabulary, so no "word xx not in vocabulary" errors can occur.
model = word2vec.Word2Vec(sentences, size=200, min_count=1)
# Print part of one word vector to confirm the model trained; this access
# style is deprecated, so gensim emits a red warning.
print(model['who'][:5])
```
**3.将所有的词向量相加求平均,得到句子向量,最后计算两个句子向量的余弦值(余弦相似度)。**
```
import numpy as np

def sentence_vector(s): # sentence vector = mean of the word vectors
    """Return the mean 200-d word vector of token list `s` (zero vector if `s` is empty)."""
    vec = np.zeros(200)
    if not s:
        # Guard: an empty token list (e.g. a sentence that was pure
        # punctuation) would otherwise divide by zero below.
        return vec
    for word in s:
        vec += model[word]
    vec /= len(s)
    return vec
def sentence_similarity(s1, s2): # cosine of the two sentence vectors (numpy keeps this tidy)
    """Return the cosine similarity between token lists `s1` and `s2`."""
    u = sentence_vector(s1)
    v = sentence_vector(s2)
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
for i in range(0, 6, 2): # spot-check the similarity of a few sentence pairs
    print(sentence_similarity(sentences[i],sentences[i + 1]))
```
**4.输出你认为合适的分类阈值**
```
import csv

# Finding the best classification threshold could in principle be done with
# smarter methods (e.g. decision trees or other ML approaches); given limited
# time it is picked from the mean similarities instead.
# Write each gold label together with its sentence-pair similarity to a csv
csv_file = open("msr_classify.csv","w",newline = "")
classify_data = csv.writer(csv_file)
for i in range (num_row):
    sentence_1 = formating(tk.word_tokenize(data['#1 String'].loc[i]))
    sentence_2 = formating(tk.word_tokenize(data['#2 String'].loc[i]))
    classify_data.writerow([data['Quality'].loc[i], sentence_similarity(sentence_1,sentence_2)])
csv_file.close()
```
由msr_classify.csv得到:

**正数据的平均相似度**

**负数据的平均相似度**
**将阈值取值为:0.998663876**
**5.该阈值下的准确率Accuracy,精确率Precision,召回率Recall和F1值(精确到小数点后两位)**
$$Accuracy = \frac{TP+TN}{TP+TN+FP+FN}$$
$$Precision = \frac{TP}{TP+FP}$$
$$Recall = \frac{TP}{TP+FN}$$
$$F_1 = \frac{2TP}{2TP+FP+FN}$$
```
# Metric curves collected by find_num() for each candidate threshold
Accuracy_image = []
Precision_image = []
Recall_image = []
F1_image = []
# Count TP / FP / TN / FN at a given threshold
def find_num(number):
    """Classify every row of msr_classify.csv with threshold `number` and append Accuracy/Precision/Recall/F1 to the module-level metric lists."""
    csv_file = csv.reader(open("msr_classify.csv", 'r'))
    true_pos, false_pos, true_neg, false_neg = 0, 0, 0, 0
    for i in csv_file:
        # Predict "similar" (tag '1') when the similarity exceeds the threshold
        if float(i[1]) > number:
            tag = '1'
        else:
            tag = '0'
        if(tag == i[0]): # prediction matches the gold label
            if(i[0]=='0'):
                true_neg+=1 # TN
            else:
                true_pos+=1 # TP
        else:
            if(i[0]=='0'):
                false_pos+=1 # FP
            else:
                false_neg+=1 # FN
    #print(true_pos, false_pos, true_neg, false_neg)
    # NOTE(review): Precision/Recall/F1 divide by TP-derived sums and would
    # raise ZeroDivisionError for a threshold that yields no positive
    # predictions — confirm the swept range keeps TP > 0.
    Accuracy = (true_pos + true_neg)/(true_pos + false_pos + true_neg + false_neg)
    Precision = true_pos / (true_pos + false_pos)
    Recall = true_pos / (true_pos + false_neg)
    F1 = 2*true_pos / (2*true_pos + false_pos + false_neg)
    Accuracy_image.append(Accuracy)
    Precision_image.append(Precision)
    Recall_image.append(Recall)
    F1_image.append(F1)
    #print("%.2f"%Accuracy)
    #print("%.2f"%Precision)
    #print("%.2f"%Recall)
    #print("%.2f"%F1)
# The threshold choice didn't work out too well (lll¬ω¬)
import matplotlib.pyplot as plt

# Sweep 100 thresholds over [0.99, 1.00) and plot the four metric curves
indexs = []
for i in range(100):
    find_num(0.99+0.0001*i)
    indexs.append(0.99+0.0001*i)
plt.plot(indexs,Accuracy_image,color='red')     # Accuracy
plt.plot(indexs,Precision_image,color='blue')   # Precision
plt.plot(indexs,Recall_image,color='green')     # Recall
plt.plot(indexs,F1_image,color='yellow')        # F1
plt.show()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import folium
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
%matplotlib inline

# Consolidated StockX sales data; info() lists every column and its dtype
data = pd.read_csv("../data/StockX-Data-Consolidated.csv")
data.info(verbose=True)
```
# 1. EDA on Target Value
```
# Split the target (price premium) from the feature columns
y = pd.DataFrame(data[['Pct_change']])
x = data.loc[:, ~data.columns.isin(['Pct_change'])]

# Distribution of the target
plt.hist(y['Pct_change'], color = 'blue', edgecolor = 'black',
         bins = 50)
plt.show()

# Distribution of the raw sale price
plt.hist(x['Sale Price'], color = 'blue', edgecolor = 'black',
         bins = 50)
plt.show()
```
## 1.1 target value voilin plot and box plot
```
# Violin plot of the target distribution
sns.set(style="whitegrid")
ax = sns.violinplot(x=y['Pct_change'])
```
## 1.2 Anomaly Dectection: Train an Isolation Forest on log(Y)
```
# Flag the most extreme 5% of targets (contamination=0.05) as anomalies
model=IsolationForest(n_estimators=100, max_samples='auto', contamination= 0.05 ,max_features=1.0)
model.fit(y[['Pct_change']])
# decision_function: anomaly score; predict: 1 = normal, -1 = anomaly
y['scores']=model.decision_function(y[['Pct_change']])
y['anomaly']=model.predict(y[['Pct_change']])
```
#### Print Anomoly Datapoints
```
# Rows flagged as anomalies (predict == -1)
anomaly=y.loc[y['anomaly']==-1]
anomaly_index=list(anomaly.index)
anomaly[0:19]

# Compare mean/median of the whole set, the normal points, and the anomalies
mean = np.array([[np.mean(y['Pct_change']),np.mean(y.loc[y['anomaly']==1]['Pct_change']),np.mean(anomaly['Pct_change'])]])
median = np.array([[np.median(y['Pct_change']),np.median(y.loc[y['anomaly']==1]['Pct_change']),np.median(anomaly['Pct_change'])]])
comp = pd.DataFrame(index = ['mean','median'],columns= ['whole','normal','anamaly'])
comp.iloc[0] = mean
comp.iloc[1] = median
comp

# NOTE(review): the two lines below recompute columns already set right after
# model.fit() — redundant but harmless.
y['scores']=model.decision_function(y[['Pct_change']])
y['anomaly']=model.predict(y[['Pct_change']])
sns.set(style="whitegrid")
ax = sns.violinplot(x=anomaly["Pct_change"])
```
## 1.3 Compare group and the anomaly
```
# Overlay the target histogram for the whole set against the anomalies
plt.hist(y['Pct_change'], bins = 50, label = 'whole group', alpha = 0.5)
plt.hist(anomaly['Pct_change'], bins = 50, label = 'anomaly', alpha = 0.5)
plt.legend(loc = "best")
plt.show()
```
## 1.4 Explore Anomaly Points
```
# Pull the original feature rows for the flagged anomaly indices
anomaly_data = data[data.index.isin(anomaly_index)]
## Get brand, region and color count in the dataset
def get_brand(row):
for brand in data.columns[4:14]:
if row[brand] == 1:
return brand
def get_region(row):
for region in data.columns[14:20]:
if row[region] == 1:
return region
def get_col(row):
for color in data.columns[21:32]:
if row[color] == 1:
return color
# Attach readable brand/region/color labels to each anomaly row, then
# bar-plot the counts for each label
anomaly_data['brand'] = anomaly_data.apply(get_brand, axis=1)
anomaly_data['region'] = anomaly_data.apply(get_region, axis=1)
anomaly_data['color'] = anomaly_data.apply(get_col, axis=1)
anomaly_data.brand.value_counts().plot(kind = 'bar',color = sns.color_palette("YlGnBu"))
anomaly_data.color.value_counts().plot(kind = 'bar',color = sns.color_palette("YlGnBu"))
anomaly_data.region.value_counts().plot(kind = 'bar',color = sns.color_palette("YlGnBu"))
```
# 2. EDA on feature variables
### EDA on Timing features
```
## group brand, region and color variables on the full dataset
data['week_since_release'] = (data['Days Since Release']/7).round(1)
data['brand'] = data.apply(get_brand, axis=1)
data['region'] = data.apply(get_region, axis=1)
data['color'] = data.apply(get_col, axis=1)
# create a new dataframe restricted to the timing-related input features
timing = data[['Days Since Release',"week_since_release",'region', "brand",'color','Pct_change']]
timing = timing.rename(columns = {'Days Since Release':"days_since_release"})
# explore price premium change throughout weeks
np.random.seed(19680801)
N = 99956  # sample count (only used by the commented-out colors line below)
#colors = np.random.rand(N)
cmap1 = sns.color_palette("YlGnBu")
plt.scatter(timing['week_since_release'],timing['Pct_change'], c = cmap1[0],alpha=0.5)
plt.title('Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()

# Price Premium on different states over time (color encodes the premium)
fig, ax = plt.subplots(figsize = (8,5))
sc = ax.scatter(timing.region,timing.week_since_release, c=timing.Pct_change,linewidths = 1.5, cmap="YlGnBu")
fig.colorbar(sc, ax=ax)
plt.ylabel('Week Since Release')
plt.title('Price Premium on different states over time', fontsize = 'large')
plt.show()

## Price Premium on different styles over time (color encodes the premium)
fig, ax = plt.subplots(figsize = (11,5))
sc = ax.scatter(timing.brand,timing.week_since_release, c=timing.Pct_change, cmap="YlGnBu")
fig.colorbar(sc, ax=ax)
plt.ylabel('Week Since Release')
plt.title('Price Premium on different styles over time', fontsize = 'large')
plt.show()
# explore those heavy weighted features in anomaly points
#first group data by their brands
offwhite= timing.loc[timing['brand'] != 'yeezy']
#since white is heavily weighted, we explore color effect by first
#excluding white color
ow_nowhite = offwhite.loc[offwhite['color'] != 'White']
ow_white = offwhite.loc[offwhite['color'] == 'White']
ow_color = ow_nowhite.groupby(['color'])

img = plt.imread('../data/media/nike.jpg')
# Plot each colorway group over days-since-release, with the logo as backdrop
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-80, 800, 0, 8), zorder=-1,alpha = 0.5)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap2[-1],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(ow_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.2)
# ax.spines['bottom'].set_color('white')
# ax.xaxis.label.set_color('white')
# ax.tick_params(axis='x', colors='white')
# ax.spines['left'].set_color('white')
# ax.yaxis.label.set_color('white')
# ax.tick_params(axis='y', colors='white')
#ax.patch.set_visible(False)
plt.title('Nike: Off-White(without white)', fontsize = 'large')
plt.xlabel('Days Since Release', )
plt.ylabel('Price Premium')
plt.legend()
plt.show()

# Sales counts per Off-White silhouette
offwhite['brand'].value_counts(sort=True, ascending=False, bins=None, dropna=True)
```
### Explore Top 3 Most-Selling Nike Sneakers
```
## Nike Off white: group each silhouette by colorway for the per-model plots
aj = offwhite.loc[offwhite['brand'] == 'airjordan']
aj_color = aj.groupby(['color'])
presto = offwhite.loc[offwhite['brand'] == 'presto']
presto_color = presto.groupby(['color'])
zoom = offwhite.loc[offwhite['brand'] == 'zoom']
zoom_color = zoom.groupby(['color'])
blazer = offwhite.loc[offwhite['brand'] == 'blazer']
blazer_color = blazer.groupby(['color'])
af = offwhite.loc[offwhite['brand'] == 'airforce']
af_color = af.groupby(['color'])
# Explore airjordan subbrand in Nike
# AJ Plot: one scatter series per colorway, logo as backdrop
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, -2, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[0],cmap2[-1],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(aj_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.4)
plt.title('Nike: Off-White Air Jordan', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
plt.legend()
plt.show()
# Presto Plot: one scatter series per colorway, logo as backdrop
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, 0, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[0],cmap1[7],cmap1[4],'brown']
for i, (name, group) in enumerate(presto_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Nike: Off-White Presto', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
plt.legend()
plt.show()
# Zoom Plot: one scatter series per colorway, logo as backdrop
fig, ax = plt.subplots()
ax.imshow(img, aspect='auto', extent=(-20, 500, -2, 8), zorder=-1,alpha = 0.4)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap1[7],cmap1[4],cmap1[0]]
for i, (name, group) in enumerate(zoom_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Nike: Off-White Zoom', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
plt.legend()
plt.show()
```
### Explore Yeezy Sneakers
```
# Adidas Yeezy subset, grouped by colorway
yeezy= timing.loc[timing['brand'] == 'yeezy']
img2 = plt.imread('../data/media/yeezy.jpg')
yeezy_color = yeezy.groupby(['color'])
# Plot: one scatter series per colorway, Yeezy image as backdrop
fig, ax = plt.subplots()
ax.imshow(img2, aspect='auto', extent=(-5, 1500, -2, 12), zorder=-1,alpha = 0.5)
ax.yaxis.tick_left()
#ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
cmap1 = sns.color_palette("Paired")
cmap2 = sns.color_palette("Set2")
colors = [cmap1[1],cmap2[-1],cmap1[-1],cmap1[4],cmap1[0]]
for i, (name, group) in enumerate(yeezy_color):
    ax.plot(group.days_since_release, group.Pct_change, marker='o', linestyle='',
            c = colors[i], ms=4, label=name, alpha = 0.3)
plt.title('Adidas: Yeezy', fontsize = 'large')
plt.xlabel('Days Since Release')
plt.ylabel('Price Premium')
plt.legend()
plt.show()

# Weekly view of the Yeezy price premium
plt.scatter(x = yeezy['week_since_release'], y = yeezy['Pct_change'], c=sns.color_palette("YlGnBu")[1], alpha=0.5)
plt.title('Yeezy: Price premium on Weeks since release')
plt.xlabel('weeks since release')
plt.ylabel('price premium')
plt.show()
```
| github_jupyter |
# SR-SAN
> Session-based Recommendation with Self-Attention Networks.
Session-based recommendation aims to predict user's next behavior from current session and previous anonymous sessions. Capturing long-range dependencies between items is a vital challenge in session-based recommendation. A novel approach is proposed for session-based recommendation with self-attention networks (SR-SAN) as a remedy. The self-attention networks (SAN) allow SR-SAN capture the global dependencies among all items of a session regardless of their distance. In SR-SAN, a single item latent vector is used to capture both current interest and global interest instead of session embedding which is composed of current interest embedding and global interest embedding. Some experiments have been performed on some open benchmark datasets. Experimental results show that the proposed method outperforms some state-of-the-arts by comparisons.
Firstly, a self-attention based model which captures and reserves the full dependencies among all items regardless of their distance is proposed without using RNNs or GNNs. Secondly, to generate session-based recommendations, the proposed method use a single item latent vector which jointly represents current interest and global interest instead of session embedding which is composed of current interest embedding and global interest embedding. In RNNs or GNNs based methods, the global interest embedding usually obtained by aggregating all items in the session with attention mechanism which is based on current interest embedding. However, this is redundant in SR-SAN which last item embedding is aggregating all items in the session with self-attention mechanism. In this way, the last item embedding in session can jointly represent current interest and global interest.
It utilizes the self-attention to learn global item dependencies. The multi-head attention mechanism is adopted to allow SR-SAN focus on different important part of the session. The latent vector of the last item in the session is used to jointly represents current interest and global interest with prediction layer.
## Architecture
<p><center><img src='_images/C306687_1.png'></p></center>
A session-based recommender system makes predictions based upon the current session data without access to a long-term preference profile. Let $V = \{v_1, v_2, \ldots, v_{|V|}\}$ denote the set of all unique items involved in all the sessions. An anonymous session sequence $S$ can be represented by a list $S = [s_1, s_2, \ldots, s_n]$, where $s_i \in V$ represents a clicked item of the user within the session $S$. The task of session-based recommendation is to predict the next click $s_{n+1}$ for session $S$. Our models are constructed and trained as a classifier that learns to generate a score for each of the candidates in $V$. Let $\hat{y} = \{\hat{y}_1, \hat{y}_2, \ldots, \hat{y}_{|V|}\}$ denote the output score vector, where $\hat{y}_i$ corresponds to the score of item $v_i$. The items with the top-$K$ values in $\hat{y}$ will be the candidate items for recommendation.
The proposed model is made up of two parts. The first part is obtaining item latent vectors with self-attention networks, the second part of the proposed model is making recommendation with prediction layer.
## Performance
<p><center><img src='_images/C306687_2.png'></p></center>
| github_jupyter |
# <b>Object Detection with AutoML Vision</b>
<br>
## <b>Learning Objectives</b> ##
1. Learn how to create and import an image dataset to AutoML Vision
1. Learn how to train an AutoML object detection model
1. Learn how to evaluate a model trained with AutoML
1. Learn how to deploy a model trained with AutoML
1. Learn how to predict on new test data with AutoML
In this notebook we will use AutoML Vision Object Detection to train a machine learning model capable of detecting multiple objects in a given image and provides information about the objects and their location within the image.
We will start by creating a dataset for AutoML Vision and then import a publicly available set of images into it. After that we will train, evaluate and deploy the AutoML model trained for this dataset. Ultimately we show how to send prediction requests to our model through the deployed API.
## <b>AutoML Vision Setup</b> ##
Before we begin make sure you have [created a project on the GCP Console](https://cloud.google.com/vision/automl/object-detection/docs/before-you-begin) and enabled the AutoML and Cloud Storage APIs
### <b> Install AutoML and Cloud Storage package </b> ###
<b>Caution: Run the following command and restart the kernel afterwards.</b>
```
pip freeze | grep google-cloud-automl==1.0.1 || pip install google-cloud-automl==1.0.1
pip freeze | grep google-cloud-storage==1.27.0 || pip install google-cloud-storage==1.27.0
import os
import tensorflow as tf
from google.cloud import automl
```
<br>
### <b>Set the correct environment variables </b> ###
The following variables should be updated according to your own environment:
```
PROJECT_ID = "YOUR_PROJECT_ID" # Replace with your PROJECT ID
SERVICE_ACCOUNT = (
"YOUR_SERVICE_ACCOUNT_NAME" # Replace with a name of your choice
)
ZONE = "us-central1" # Make sure the zone is set to "us-central1"
```
<br>
The following variables are computed from the one you set above, and should not be modified:
```
# Derive the service-key path and the service-account e-mail from the values set above.
PWD = os.path.abspath(os.path.curdir)
SERVICE_KEY_PATH = os.path.join(PWD, f"{SERVICE_ACCOUNT}.json")
SERVICE_ACCOUNT_EMAIL = "{}@{}.iam.gserviceaccount.com".format(
    SERVICE_ACCOUNT, PROJECT_ID
)
print(SERVICE_ACCOUNT_EMAIL)
print(PROJECT_ID)
# Exporting the variables into the environment to make them available to all the subsequent cells
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["SERVICE_ACCOUNT"] = SERVICE_ACCOUNT
os.environ["SERVICE_KEY_PATH"] = SERVICE_KEY_PATH
os.environ["SERVICE_ACCOUNT_EMAIL"] = SERVICE_ACCOUNT_EMAIL
os.environ["ZONE"] = ZONE
```
<br>
### <b>Switching the right project and zone</b> ###
```
%%bash
gcloud config set project $PROJECT_ID
gcloud config set compute/region $ZONE
```
<br>
### <b>Create a service account and generate service key</b> ###
Before we can run our program we need to get it authenticated. For that, we first need to generate a service account.
A service account is a special type of Google account intended for non-human users (i.e., services) that need to authenticate and be authorized to access data through Google APIs (in our case the AutoML and Cloud Storage API). After the service account has been created it needs to be associated with a service account key, which is a json file holding everything that the client needs to authenticate with the service endpoint.
```
%%bash
gcloud iam service-accounts list | grep $SERVICE_ACCOUNT ||
gcloud iam service-accounts create $SERVICE_ACCOUNT
%%bash
test -f $SERVICE_KEY_PATH ||
gcloud iam service-accounts keys create $SERVICE_KEY_PATH \
--iam-account $SERVICE_ACCOUNT_EMAIL
echo "Service key: $(ls $SERVICE_KEY_PATH)"
```
<br>
### <b>Make the key available to google clients for authentication</b> ###
AutoML API will check this environment variable to see where the key is located and use it to authenticate.
```
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = SERVICE_KEY_PATH
```
<br>
### <b>Grant service account required role permissions</b> ###
After we have created our service account and associated it with the service key we need to assign some permissions through a role. For this example we only need to grant our service account the automl and storage admin role so it has permission to complete specific actions on the resources of your project.
```
%%bash
# BUG FIX: `gcloud projects add-iam-policy-binding` accepts a single --role per
# invocation; passing --role twice means only the last role is bound.
# Run the command once per role so the service account gets both.
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
    --role "roles/automl.admin"
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
    --role "roles/storage.admin"
```
<br>
## <b>Step 1: Preparing and formatting training data</b> ##
The first step in creating a custom model with the AutoML Vision is to prepare the training data. In this case the training dataset that is composed of images along with information identifying the location (through bounding boxes coordinates) and type of objects (through labels) in the images.
Here are some constraints and general rules for preparing an AutoML object detection dataset:
* The following image formats are supported: JPEG, PNG, GIF, BMP, or ICO. Maximum file size is 30MB per image.
* AutoML Vision models can not generally predict labels that humans can't assign. So, if a human can't be trained to assign labels by looking at the image for 1-2 seconds, the model likely can't be trained to do it either.
* It is recommended to have about 1000 training images per label (i.e. object type you want to detect in the images). For each label you must have at least 10 images, each with at least one annotation (bounding box and the label). In general, the more images per label you have the better your model will perform.
<br>
### <b>Training vs. evaluation datasets</b> ###
When training machine learning models you typically divide the dataset into three separate datasets:
1. a training dataset
1. a validation dataset
1. a test dataset
A training dataset is used to build a model. The model being trained tries multiple parameters while searching for patterns in the training data. During the process of pattern identification, AutoML Vision Object Detection uses the validation dataset to test the parameters of the model. AutoML Vision Object Detection chooses the best-performing algorithms and patterns from all options identified during the training stage.
After the best performing algorithms and patterns have been identified, they are tested for error rate, quality, and accuracy using the test dataset.
Both a validation and a test dataset are used in order to avoid bias in the model. During the validation stage, optimal model parameters are used. Using these optimal model parameters can result in biased metrics. Using the test dataset to assess the quality of the model after the validation stage provides the training process with an unbiased assessment of the quality of the model.
By default, AutoML Vision Object Detection splits your dataset randomly into 3 separate sets (you don't need to do it yourself!):
* 80% of images are used for training.
* 10% of images are used for hyper-parameter tuning and/or to decide when to stop training.
* 10% of images are used for evaluating the model. These images are not used in training.
<br>
### <b>Create a CSV file with image URIs and labels</b> ###
Once your image files have been uploaded to a Cloud Storage bucket (`gs://bucket-name-vcm`), you must create a CSV file that lists all of the URI of the uploaded images, along with bounding box information and the object labels. The dataset will contain one row per bounding box in the image, so an image that has two bounding boxes will have two corresponding rows in the CSV file sharing the same image URI. The CSV file can have any filename, must be in the same bucket as your image files, must be UTF-8 encoded, and must end with a `.csv` extension.
In the example below, rows 1 and 2 reference the same image that has 2 annotations
`(car,0.1,0.1,,,0.3,0.3,,)` and `(bike,.7,.6,,,.8,.9,,)`. The first element of the annotation
is the object label in the bounding box, while the rest are the coordinates of the bounding box
within the image (see below for details).
Row 3 refers to an image that has only 1 annotation `(car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3)`, while row 4 references an image with no annotations.
The first column corresponds to the data split, the second column to the image URI, and the last columns hold the annotations.
**Example:**
```bash
TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
TEST,gs://folder/im3.png,,,,,,,,,
```
Each row above has these columns:
1. <b>Which dataset is the content in the row being assigned to.</b> - `TRAIN`, `VALIDATE`, `TEST` or `UNASSIGNED`
1. <b>What content is being annotated.</b> - It contains the GCS URI for the image
1. <b>A label that identifies how the object is categorized.</b>
1. <b>A bounding box for an object in the image.</b>
The **bounding box** for an object can be specified in two ways:
* with only 2 vertices (consisting of a set of x and y coordinates) if they are diagonally opposite points of the rectangle
```
(x_relative_min,y_relative_min,,,x_relative_max,y_relative_max,,)
```
* with all 4 vertices
```
(x_relative_min,y_relative_min,x_relative_max,y_relative_min,x_relative_max,y_relative_max,x_relative_min,y_relative_max)
```
Each vertex is specified by x, y coordinate values. These coordinates must be a float in the 0 to 1 range, where 0 represents the minimum x or y value, and 1 represents the greatest x or y value.
For example, `(0,0)` represents the top left corner, and `(1,1)` represents the bottom right corner; a bounding box for the entire image is expressed as `(0,0,,,1,1,,)`, or `(0,0,1,0,1,1,0,1)`.
AutoML API does not require a specific vertex ordering. Additionally, if 4 specified vertices don't form a rectangle parallel to image edges, AutoML API calculates and uses vertices that do form such a rectangle.
### Generating a CSV file for unlabeled images stored in Cloud Storage ###
If you already have unlabeled images uploaded to Cloud Storage and would like to generate a CSV pointing to them, run this code in Cloud Shell:
```
for f in $(gsutil ls gs://YOUR_BUCKET/YOUR_IMAGES_FOLDER/);
do echo UNASSIGNED,$f;
done >> labels.csv;
```
Then copy the generated CSV file into a Google Storage Bucket:
```gsutil cp labels.csv gs://YOUR_BUCKET/labels.csv```
Then after uploading the images to AutoML Object Detection, you can use Cloud Vision API's [Object Localizer](https://cloud.google.com/vision/docs/object-localizer) feature to help build your dataset by getting more generalized labels and bounding boxes for objects in an image.
<br>
## <b>Step 2: Create a dataset</b> ##
Next step is to create and name an empty dataset that will eventually hold the training data for the model.
```
DATASET_NAME = "salad_dataset"  # Replace with desired dataset name
client = automl.AutoMlClient()
# A resource that represents a Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)
metadata = automl.types.ImageObjectDetectionDatasetMetadata()
# BUG FIX: the original passed the undefined name `display_name`;
# use the DATASET_NAME defined above.
dataset = automl.types.Dataset(
    display_name=DATASET_NAME,
    image_object_detection_dataset_metadata=metadata,
)
# Create a dataset with the dataset metadata in the region.
# `create_dataset` returns a long-running operation; result() blocks until done.
response = client.create_dataset(project_location, dataset)
created_dataset = response.result()
# Display the dataset information.
print(f"Dataset name: {created_dataset.name}")
print("Dataset id: {}".format(created_dataset.name.split("/")[-1]))
```
<br>
## <b>Step 3: Import images into a dataset</b> ##
After you have created a dataset and prepared and formatted your training data, it's time to import that training data into our created dataset.
In this notebook we will use a publicly available "Salads" training dataset that is located at `gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv`.
This dataset contains images of salads with bounding boxes and labels around their ingredients (e.g., tomato, seafood, etc.).
So the model we will train will be able to take as input the image of a salad and determine the ingredients composing the salad
as well as the location of the ingredients on the salad image.
Please note the import might take a couple of minutes to finish depending on the file size.
```
DATASET_ID = created_dataset.name.split("/")[-1]
DATASET_URI = "gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv"
# Get the full path of the dataset.
dataset_full_id = client.dataset_path(PROJECT_ID, ZONE, DATASET_ID)
# BUG FIX: the original split the undefined name `path`; split DATASET_URI,
# which may contain multiple comma-separated Google Cloud Storage URIs.
input_uris = DATASET_URI.split(",")
gcs_source = automl.types.GcsSource(input_uris=input_uris)
input_config = automl.types.InputConfig(gcs_source=gcs_source)
# Import data from the input URI(s). This is a long-running operation;
# response.result() blocks until the import completes.
response = client.import_data(dataset_full_id, input_config)
print("Processing import...")
print(f"Data imported. {response.result()}")
```
<br>
## <b>Step 4: Train your AutoML Vision model</b> ##
Once you are happy with your created dataset you can proceed with training the model. <i>Please note</i> - training time takes approximately <b>1-3h</b>
```
MODEL_NAME = "salads"  # Replace with desired model name
# A resource that represents a Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)
# Leave model unset to use the default base model provided by Google
# train_budget_milli_node_hours: the actual train_cost will be equal or
# less than this value.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodelmetadata
training_metadata = automl.types.ImageObjectDetectionModelMetadata(
    train_budget_milli_node_hours=24000
)
# BUG FIX: the original referenced the undefined names `display_name`,
# `dataset_id` and `metadata`; use the MODEL_NAME, DATASET_ID and
# training_metadata values defined above / in the previous cells.
model = automl.types.Model(
    display_name=MODEL_NAME,
    dataset_id=DATASET_ID,
    image_object_detection_model_metadata=training_metadata,
)
# Create a model with the model metadata in the region (long-running operation).
training_results = client.create_model(project_location, model)
# BUG FIX: the original printed `response.operation.name`, where `response`
# is a stale variable from the data-import cell; report the training
# operation that was just started.
print(f"Training operation name: {training_results.operation.name}")
print("Training started...")
```
<br>
### <b>Information about the trained model</b> ###
```
# BUG FIX: `model` is the local Model *spec* built in the training cell; its
# `name` field is empty until the server assigns one. Get the server-side
# name from the training operation result instead (this blocks until the
# training operation completes).
MODEL_ID = training_results.result().name.split("/")[-1]
# Get the full path of the model.
model_full_id = client.model_path(PROJECT_ID, ZONE, MODEL_ID)
model = client.get_model(model_full_id)
# Retrieve deployment state.
if model.deployment_state == automl.enums.Model.DeploymentState.DEPLOYED:
    deployment_state = "deployed"
else:
    deployment_state = "undeployed"
# Display the model information.
print(f"Model name: {model.name}")
print("Model id: {}".format(model.name.split("/")[-1]))
print(f"Model display name: {model.display_name}")
print("Model create time:")
print(f"\tseconds: {model.create_time.seconds}")
print(f"\tnanos: {model.create_time.nanos}")
print(f"Model deployment state: {deployment_state}")
```
<br>
## <b>Step 5: Evaluate the model</b> ##
After training a model, Cloud AutoML Vision Object Detection uses images from the TEST image set to evaluate the quality and accuracy of the new model.
It provides an aggregate set of evaluation metrics indicating how well the model performs overall, as well as evaluation metrics for each category label, indicating how well the model performs for that label.
By running the cell below you can list evaluation metrics for that model.
```
# Walk every evaluation attached to the model and report its metadata and
# the aggregated object-detection metrics.
print("List of model evaluations:")
for evaluation in client.list_model_evaluations(model_full_id, ""):
    print(f"Model evaluation name: {evaluation.name}")
    print(f"Model annotation spec id: {evaluation.annotation_spec_id}")
    print("Create Time:")
    print(f"\tseconds: {evaluation.create_time.seconds}")
    print(f"\tnanos: {evaluation.create_time.nanos / 1e9}")
    print(f"Evaluation example count: {evaluation.evaluated_example_count}")
    metrics = evaluation.image_object_detection_evaluation_metrics
    print(f"Object detection model evaluation metrics: {metrics}\n\n")
```
<br>
## <b>Step 6: Deploy the model</b> ##
Once we are happy with the performance of our trained model, we can deploy it so that it will be
available for predictions through an API.
```
# Deploy the trained model so it can serve online prediction requests.
# deploy_model returns a long-running operation; result() blocks until done.
response = client.deploy_model(model_full_id)
print(f"Model deployment finished. {response.result()}")
```
<br>
## <b>Step 7: Send prediction request</b> ##
In this example we will invoke an individual prediction from an image that is stored in our project's Cloud storage bucket.
Object detection models output many bounding boxes for an input image. For the output we are expecting that each box comes with:
1. a label and
1. a score of confidence.
```
TEST_IMAGE_PATH = "gs://your-bucket-name-vcm/your-folder-name/your-image.jpg" # Replace with a Cloud storage bucket uploaded image of your choice
# Client for online prediction against the deployed AutoML model.
prediction_client = automl.PredictionServiceClient()
# Read the file.
with tf.io.gfile.GFile(TEST_IMAGE_PATH, "rb") as content_file:
    content = content_file.read()
# Wrap the raw image bytes in an AutoML example payload.
image = automl.types.Image(image_bytes=content)
payload = automl.types.ExamplePayload(image=image)
# params is additional domain-specific parameters.
# score_threshold is used to filter the result: only detections with
# confidence >= 0.8 are returned.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
params = {"score_threshold": "0.8"}
# NOTE(review): model_full_id is set in the earlier "model information" cell.
response = prediction_client.predict(model_full_id, payload, params)
```
Now that we have the response object from the deployed model, we can inspect its predictions (i.e., the
bounding boxes and objects that the model has detected from the images we sent to it in the cell above):
```
# Each payload entry is one detected object: a label, a confidence score and
# a bounding box given as normalized (0..1) vertices.
print("Prediction results:")
for detection in response.payload:
    print(f"Predicted class name: {detection.display_name}")
    print(f"Predicted class score: {detection.image_object_detection.score}")
    box = detection.image_object_detection.bounding_box
    print("Normalized Vertices:")
    for corner in box.normalized_vertices:
        print(f"\tX: {corner.x}, Y: {corner.y}")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/2_transfer_learning_roadmap/3_effect_of_number_of_classes_in_dataset/3)%20Understand%20transfer%20learning%20and%20the%20role%20of%20number%20of%20dataset%20classes%20in%20it%20-%20Keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### 1. Visualize deep learning network
### 2. Understand how the final layer would change when number of classes in dataset changes
# What do you do with a deep learning model in transfer learning
- These are the steps already done by contributors in pytorch, keras and mxnet
- You take a deep learning architecture, such as resnet, densenet, or even custom network
- Train the architecture on large datasets such as imagenet, coco, etc
- The trained weights become your starting point for transfer learning
- The final layer of this pretrained model has number of neurons = number of classes in the large dataset
- In transfer learning
- You take the network and load the pretrained weights on the network
- Then remove the final layer that has the extra(or less) number of neurons
- You add a new layer with number of neurons = number of classes in your custom dataset
- Optionally you can add more layers in between this newly added final layer and the old network
- Now you have two parts in your network
- One that already existed, the pretrained one, the base network
- The new sub-network or a single layer you added
- The hyper-parameter we can see here: Freeze base network
- Freezing base network makes the base network untrainable
- The base network now acts as a feature extractor and only the next half is trained
- If you do not freeze the base network the entire network is trained
(You will take this part in next sessions)
# Table of Contents
## [0. Install](#0)
## [1. Setup Default Params with Cats-Dogs dataset](#1)
## [2. Visualize network](#2)
## [3. Reset Default Params with new dataset - Logo classification](#3)
## [4. Visualize the new network](#4)
<a id='0'></a>
# Install Monk
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
- cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
- (Select the requirements file as per OS and CUDA version)
```
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# If using Colab install using the commands below
!cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
#!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
#!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
```
## Dataset - Sample
- one having 2 classes
- other having 16 classes
```
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1jE-ckk0JbrdbJvIBaKMJWkTfbRDR2MaF' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1jE-ckk0JbrdbJvIBaKMJWkTfbRDR2MaF" -O study_classes.zip && rm -rf /tmp/cookies.txt
! unzip -qq study_classes.zip
```
# Imports
```
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using keras backend
from keras_prototype import prototype
```
### Creating and managing experiments
- Provide project name
- Provide experiment name
```
# Create a Monk experiment: project "Project", experiment "study-num-classes".
gtf = prototype(verbose=1);
gtf.Prototype("Project", "study-num-classes");
```
### This creates files and directories as per the following structure
workspace
|
|--------Project
|
|-----study-num-classes
|
|-----experiment-state.json
|
|-----output
|
|------logs (All training logs and graphs saved here)
|
|------models (all trained models saved here)
<a id='1'></a>
# Setup Default Params with Cats-Dogs dataset
```
# Set defaults for the 2-class (dogs vs cats) dataset with a resnet50 backbone.
gtf.Default(dataset_path="study_classes/dogs_vs_cats",
            model_name="resnet50",
            num_epochs=5);
```
### From Data summary - Num classes: 2
<a id='2'></a>
# Visualize network
```
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8081);
```
## The final layer
```
# Display the saved screenshot of the final layer for the 2-class dataset.
from IPython.display import Image
Image(filename='imgs/2_classes_base_keras.png')
```
<a id='3'></a>
# Reset Default Params with new dataset - Logo classification
```
# Re-create the experiment and point defaults at the 16-class logo dataset.
gtf = prototype(verbose=1);
gtf.Prototype("Project", "study-num-classes");
gtf.Default(dataset_path="study_classes/logos",
            model_name="resnet50",
            num_epochs=5);
```
### From Data summary - Num classes: 16
<a id='4'></a>
# Visualize network
```
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082);
```
## The final layer
```
# Display the saved screenshot of the final layer for the 16-class dataset.
from IPython.display import Image
Image(filename='imgs/16_classes_base_keras.png')
```
| github_jupyter |
<div align="center">
<h1><img width="30" src="https://madewithml.com/static/images/rounded_logo.png"> <a href="https://madewithml.com/">Made With ML</a></h1>
Applied ML · MLOps · Production
<br>
Join 30K+ developers in learning how to responsibly <a href="https://madewithml.com/about/">deliver value</a> with ML.
<br>
</div>
<br>
<div align="center">
<a target="_blank" href="https://newsletter.madewithml.com"><img src="https://img.shields.io/badge/Subscribe-30K-brightgreen"></a>
<a target="_blank" href="https://github.com/GokuMohandas/MadeWithML"><img src="https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star"></a>
<a target="_blank" href="https://www.linkedin.com/in/goku"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
<a target="_blank" href="https://twitter.com/GokuMohandas"><img src="https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social"></a>
<br>
🔥 Among the <a href="https://github.com/topics/deep-learning" target="_blank">top ML</a> repositories on GitHub
</div>
<br>
<hr>
# Convolutional Neural Networks (CNN)
In this lesson we will explore the basics of Convolutional Neural Networks (CNNs) applied to text for natural language processing (NLP) tasks.
<div align="left">
<a target="_blank" href="https://madewithml.com/courses/foundations/convolutional-neural-networks/"><img src="https://img.shields.io/badge/📖 Read-blog post-9cf"></a>
<a href="https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/11_Convolutional_Neural_Networks.ipynb" role="button"><img src="https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d"></a>
<a href="https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/11_Convolutional_Neural_Networks.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
</div>
# Overview
At the core of CNNs are filters (aka weights, kernels, etc.) which convolve (slide) across our input to extract relevant features. The filters are initialized randomly but learn to act as feature extractors via parameter sharing.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/convolution.gif" width="500">
</div>
* **Objective:** Extract meaningful spatial substructure from encoded data.
* **Advantages:**
* Small number of weights (shared)
* Parallelizable
* Detects spatial substructures (feature extractors)
* [Interpretability](https://arxiv.org/abs/1312.6034) via filters
* Can be used for processing in images, text, time-series, etc.
* **Disadvantages:**
* Many hyperparameters (kernel size, strides, etc.) to tune.
* **Miscellaneous:**
* Lots of deep CNN architectures, constantly updated for SOTA performance.
* Very popular feature extractor that's usually prepended onto other architectures.
# Set up
```
import numpy as np
import pandas as pd
import random
import torch
import torch.nn as nn

SEED = 1234

def set_seeds(seed=1234):
    """Seed every RNG (stdlib random, NumPy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every GPU in multi-GPU setups

# Make runs reproducible.
set_seeds(seed=SEED)

# Pick the compute device: prefer CUDA when it is both enabled and available.
cuda = True
use_gpu = torch.cuda.is_available() and cuda
device = torch.device('cuda' if use_gpu else 'cpu')
torch.set_default_tensor_type('torch.FloatTensor')
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
print (device)
```
## Load data
We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)
```
# Load data (AG News headlines with a `title` and a `category` column).
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle (frac=1 keeps every row)
df.head()
```
## Preprocessing
We're going to clean up our input data first by doing operations such as lower text, removing stop (filler) words, filters using regular expressions, etc.
```
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re

nltk.download('stopwords')
STOPWORDS = stopwords.words('english')
print (STOPWORDS[:5])
porter = PorterStemmer()

def preprocess(text, stopwords=STOPWORDS):
    """Clean one raw title: lowercase, drop stopwords and parentheticals, normalize spacing."""
    lowered = text.lower()
    # Strip every stopword (word-boundary match, trailing whitespace included).
    stopword_pattern = re.compile(r'\b(' + r'|'.join(stopwords) + r')\b\s*')
    cleaned = stopword_pattern.sub('', lowered)
    # Drop parenthesized asides entirely.
    cleaned = re.sub(r'\([^)]*\)', '', cleaned)
    # Pad punctuation with spaces, keep alphanumerics only, collapse space runs.
    cleaned = re.sub(r"([-;;.,!?<=>])", r" \1 ", cleaned)
    cleaned = re.sub('[^A-Za-z0-9]+', ' ', cleaned)
    cleaned = re.sub(' +', ' ', cleaned)
    return cleaned.strip()

# Sample
text = "Great week for the NYSE!"
preprocess(text=text)

# Apply to dataframe
preprocessed_df = df.copy()
preprocessed_df.title = preprocessed_df.title.apply(preprocess)
print (f"{df.title.values[0]}\n\n{preprocessed_df.title.values[0]}")
```
> If you have preprocessing steps like standardization, etc. that are calculated, you need to separate the training and test set first before applying those operations. This is because we cannot apply any knowledge gained from the test set accidentally (data leak) during preprocessing/training. However for global preprocessing steps like the function above where we aren't learning anything from the data itself, we can perform before splitting the data.
## Split data
```
import collections
from sklearn.model_selection import train_test_split

TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15

def train_val_test_split(X, y, train_size):
    """Split (X, y) into stratified train/val/test splits.

    Args:
        X: feature array.
        y: label array (used for stratification).
        train_size: fraction of samples for the training split; the
            remainder is divided evenly between validation and test.

    Returns:
        X_train, X_val, X_test, y_train, y_val, y_test
    """
    # BUG FIX: the original ignored the `train_size` argument and always
    # used the global TRAIN_SIZE constant.
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
    X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
    return X_train, X_val, X_test, y_train, y_val, y_test

# Data
X = preprocessed_df["title"].values
y = preprocessed_df["category"].values

# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
    X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
```
## LabelEncoder
Next we'll define a `LabelEncoder` to encode our text labels into unique indices
```
import itertools
class LabelEncoder(object):
    """Bidirectional mapping between class labels and integer indices."""
    def __init__(self, class_to_index=None):
        # BUG FIX: the original used a mutable default argument (`{}`), so
        # every instance created without an argument shared (and mutated)
        # the same dict.
        self.class_to_index = class_to_index if class_to_index is not None else {}
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())

    def __len__(self):
        return len(self.class_to_index)

    def __str__(self):
        return f"<LabelEncoder(num_classes={len(self)})>"

    def fit(self, y):
        """Learn a class -> index mapping from labels `y` (sorted unique order)."""
        classes = np.unique(y)
        for i, class_ in enumerate(classes):
            self.class_to_index[class_] = i
        self.index_to_class = {v: k for k, v in self.class_to_index.items()}
        self.classes = list(self.class_to_index.keys())
        return self

    def encode(self, y):
        """Map labels to integer indices; returns an int ndarray."""
        encoded = np.zeros((len(y)), dtype=int)
        for i, item in enumerate(y):
            encoded[i] = self.class_to_index[item]
        return encoded

    def decode(self, y):
        """Map integer indices back to labels; returns a list."""
        classes = []
        for i, item in enumerate(y):
            classes.append(self.index_to_class[item])
        return classes

    def save(self, fp):
        # BUG FIX: `json` is not imported until a later cell, so save()
        # raised NameError; import locally to keep this cell self-contained.
        import json
        with open(fp, 'w') as fp:
            contents = {'class_to_index': self.class_to_index}
            json.dump(contents, fp, indent=4, sort_keys=False)

    @classmethod
    def load(cls, fp):
        import json  # see note in save()
        with open(fp, 'r') as fp:
            kwargs = json.load(fp=fp)
        return cls(**kwargs)
# Encode: fit the label encoder on the training labels only.
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
NUM_CLASSES = len(label_encoder)
label_encoder.class_to_index
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.encode(y_train)
y_val = label_encoder.encode(y_val)
y_test = label_encoder.encode(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
# Inverse-frequency weights so rarer classes contribute more to the loss.
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
```
# Tokenizer
Our input data is text and we can't feed it directly to our models. So, we'll define a `Tokenizer` to convert our text input data into token indices. This means that every token (we can decide what a token is char, word, sub-word, etc.) is mapped to a unique index which allows us to represent our text as an array of indices.
```
import json
from collections import Counter
from more_itertools import take
class Tokenizer(object):
    """Maps texts to sequences of token indices (and back).

    Index 0 is reserved for the pad token and index 1 for the
    out-of-vocabulary (OOV) token.
    """
    def __init__(self, char_level, num_tokens=None,
                 pad_token='<PAD>', oov_token='<UNK>',
                 token_to_index=None):
        self.char_level = char_level
        self.separator = '' if self.char_level else ' '
        if num_tokens: num_tokens -= 2 # pad + unk tokens
        self.num_tokens = num_tokens
        self.pad_token = pad_token
        self.oov_token = oov_token
        if not token_to_index:
            token_to_index = {pad_token: 0, oov_token: 1}
        self.token_to_index = token_to_index
        self.index_to_token = {v: k for k, v in self.token_to_index.items()}

    def __len__(self):
        return len(self.token_to_index)

    def __str__(self):
        return f"<Tokenizer(num_tokens={len(self)})>"

    def fit_on_texts(self, texts):
        """Build the vocabulary from the `num_tokens` most frequent tokens."""
        if not self.char_level:
            texts = [text.split(" ") for text in texts]
        all_tokens = [token for text in texts for token in text]
        counts = Counter(all_tokens).most_common(self.num_tokens)
        # BUG FIX: the original indexed counts[-1] unconditionally and raised
        # IndexError when fitting on an empty corpus.
        self.min_token_freq = counts[-1][1] if counts else 0
        for token, count in counts:
            index = len(self)
            self.token_to_index[token] = index
            self.index_to_token[index] = token
        return self

    def texts_to_sequences(self, texts):
        """Convert texts to arrays of indices; unknown tokens map to the OOV index."""
        sequences = []
        for text in texts:
            if not self.char_level:
                text = text.split(' ')
            sequence = []
            for token in text:
                sequence.append(self.token_to_index.get(
                    token, self.token_to_index[self.oov_token]))
            sequences.append(np.asarray(sequence))
        return sequences

    def sequences_to_texts(self, sequences):
        """Convert index sequences back to strings (OOV token for unknown indices)."""
        texts = []
        for sequence in sequences:
            text = []
            for index in sequence:
                text.append(self.index_to_token.get(index, self.oov_token))
            texts.append(self.separator.join([token for token in text]))
        return texts

    def save(self, fp):
        # NOTE(review): pad_token and num_tokens are not persisted; load()
        # falls back to their defaults — confirm that is acceptable.
        with open(fp, 'w') as fp:
            contents = {
                'char_level': self.char_level,
                'oov_token': self.oov_token,
                'token_to_index': self.token_to_index
            }
            json.dump(contents, fp, indent=4, sort_keys=False)

    @classmethod
    def load(cls, fp):
        with open(fp, 'r') as fp:
            kwargs = json.load(fp=fp)
        return cls(**kwargs)
```
We're going to restrict the number of tokens in our `Tokenizer` to the top 500 most frequent tokens (stop words already removed) because the full vocabulary size (~30K) is too large to run on Google Colab notebooks.
> It's important that we only fit using our train data split because during inference, our model will not always know every token so it's important to replicate that scenario with our validation and test splits as well.
```
# Tokenize
tokenizer = Tokenizer(char_level=False, num_tokens=500)
tokenizer.fit_on_texts(texts=X_train)
VOCAB_SIZE = len(tokenizer)
print (tokenizer)
# Sample of tokens
print (take(5, tokenizer.token_to_index.items()))
print (f"least freq token's freq: {tokenizer.min_token_freq}") # use this to adjust num_tokens
# Convert texts to sequences of indices
X_train = tokenizer.texts_to_sequences(X_train)
X_val = tokenizer.texts_to_sequences(X_val)
X_test = tokenizer.texts_to_sequences(X_test)
preprocessed_text = tokenizer.sequences_to_texts([X_train[0]])[0]
print ("Text to indices:\n"
f" (preprocessed) → {preprocessed_text}\n"
f" (tokenized) → {X_train[0]}")
```
# One-hot encoding
One-hot encoding creates a binary column for each unique value for the feature we're trying to map. All of the values in each token's array will be 0 except at the index that this specific token is represented by.
There are 5 words in the vocabulary:
```json
{
"a": 0,
"e": 1,
"i": 2,
"o": 3,
"u": 4
}
```
Then the text `aou` would be represented by:
```python
[[1. 0. 0. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]]
```
One-hot encoding allows us to represent our data in a way that our models can process the data and isn't biased by the actual value of the token (ex. if your labels were actual numbers).
> We have already applied one-hot encoding in the previous lessons when we encoded our labels. Each label was represented by a unique index but when determining loss, we effectively use its one-hot representation and compare it to the predicted probability distribution. We never explicitly wrote this out since all of our previous tasks were multi-class which means every input had just one output class, so the 0s didn't affect the loss (though it did matter during back propagation).
```
def to_categorical(seq, num_classes):
    """One-hot encode a sequence of tokens."""
    encoded = np.zeros((len(seq), num_classes))
    # Vectorized: set one 1.0 per row at each token's index.
    encoded[np.arange(len(seq)), seq] = 1.
    return encoded
# One-hot encoding
print (X_train[0])
print (len(X_train[0]))
cat = to_categorical(seq=X_train[0], num_classes=len(tokenizer))
print (cat)
print (cat.shape)
# Convert tokens to one-hot
vocab_size = len(tokenizer)
X_train = [to_categorical(seq, num_classes=vocab_size) for seq in X_train]
X_val = [to_categorical(seq, num_classes=vocab_size) for seq in X_val]
X_test = [to_categorical(seq, num_classes=vocab_size) for seq in X_test]
```
# Padding
Our inputs are all of varying length but we need each batch to be uniformly shaped. Therefore, we will use padding to make all the inputs in the batch the same length. Our padding index will be 0 (note that this is consistent with the `<PAD>` token defined in our `Tokenizer`).
> One-hot encoding creates a batch of shape (`N`, `max_seq_len`, `vocab_size`) so we'll need to be able to pad 3D sequences.
```
def pad_sequences(sequences, max_seq_len=0):
    """Zero-pad 3D (one-hot) sequences to a common length.

    `max_seq_len` acts as a floor; the actual target length is the longer
    of it and the longest sequence in the batch.
    """
    longest = max(len(sequence) for sequence in sequences)
    target_len = max(max_seq_len, longest)
    depth = sequences[0].shape[-1]
    padded = np.zeros((len(sequences), target_len, depth))
    for row, sequence in enumerate(sequences):
        padded[row][:len(sequence)] = sequence
    return padded
# 3D sequences
print (X_train[0].shape, X_train[1].shape, X_train[2].shape)
padded = pad_sequences(X_train[0:3])
print (padded.shape)
```
# Dataset
We're going to place our data into a [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) and use a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) to efficiently create batches for training and evaluation.
```
FILTER_SIZE = 1 # unigram
class Dataset(torch.utils.data.Dataset):
    """Holds pre-encoded inputs X and labels y and builds padded torch batches."""
    def __init__(self, X, y, max_filter_size):
        # max_filter_size is used as the minimum padded length so every
        # sequence is at least as wide as the largest conv filter.
        self.X = X
        self.y = y
        self.max_filter_size = max_filter_size
    def __len__(self):
        return len(self.y)
    def __str__(self):
        return f"<Dataset(N={len(self)})>"
    def __getitem__(self, index):
        # Return one (input, label) pair; batching/padding happens in collate_fn.
        X = self.X[index]
        y = self.y[index]
        return [X, y]
    def collate_fn(self, batch):
        """Processing on a batch."""
        # Get inputs
        # dtype=object keeps the ragged (unpadded) sequences intact in column 0.
        batch = np.array(batch, dtype=object)
        X = batch[:, 0]
        y = np.stack(batch[:, 1], axis=0)
        # Pad sequences
        # NOTE(review): relies on the module-level pad_sequences helper.
        X = pad_sequences(X, max_seq_len=self.max_filter_size)
        # Cast
        X = torch.FloatTensor(X.astype(np.int32))
        y = torch.LongTensor(y.astype(np.int32))
        return X, y
    def create_dataloader(self, batch_size, shuffle=False, drop_last=False):
        # pin_memory speeds up host-to-GPU transfer when CUDA is in use.
        return torch.utils.data.DataLoader(
            dataset=self, batch_size=batch_size, collate_fn=self.collate_fn,
            shuffle=shuffle, drop_last=drop_last, pin_memory=True)
# Create datasets for embedding
train_dataset = Dataset(X=X_train, y=y_train, max_filter_size=FILTER_SIZE)
val_dataset = Dataset(X=X_val, y=y_val, max_filter_size=FILTER_SIZE)
test_dataset = Dataset(X=X_test, y=y_test, max_filter_size=FILTER_SIZE)
print ("Datasets:\n"
f" Train dataset:{train_dataset.__str__()}\n"
f" Val dataset: {val_dataset.__str__()}\n"
f" Test dataset: {test_dataset.__str__()}\n"
"Sample point:\n"
f" X: {test_dataset[0][0]}\n"
f" y: {test_dataset[0][1]}")
# Create dataloaders
batch_size = 64
train_dataloader = train_dataset.create_dataloader(batch_size=batch_size)
val_dataloader = val_dataset.create_dataloader(batch_size=batch_size)
test_dataloader = test_dataset.create_dataloader(batch_size=batch_size)
batch_X, batch_y = next(iter(test_dataloader))
print ("Sample batch:\n"
f" X: {list(batch_X.size())}\n"
f" y: {list(batch_y.size())}\n"
"Sample point:\n"
f" X: {batch_X[0]}\n"
f" y: {batch_y[0]}")
```
# CNN
## Inputs
We're going to learn about CNNs by applying them on 1D text data. In the dummy example below, our inputs are composed of character tokens that are one-hot encoded. We have a batch of N samples, where each sample has 8 characters and each character is represented by an array of 10 values (`vocab size=10`). This gives our inputs the size `(N, 8, 10)`.
> With PyTorch, when dealing with convolution, our inputs (X) need to have the channels as the second dimension, so our inputs will be `(N, 10, 8)`.
```
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# Assume all our inputs are padded to have the same # of words
batch_size = 64
max_seq_len = 8 # words per input
vocab_size = 10 # one hot size
x = torch.randn(batch_size, max_seq_len, vocab_size)
print(f"X: {x.shape}")
x = x.transpose(1, 2)
print(f"X: {x.shape}")
```
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/inputs.png" width="500">
</div>
This diagram above is for char-level tokens but extends to any level of tokenization (word-level in our case).
## Filters
At the core of CNNs are filters (aka weights, kernels, etc.) which convolve (slide) across our input to extract relevant features. The filters are initialized randomly but learn to pick up meaningful features from the input that aid in optimizing for the objective. The intuition here is that each filter represents a feature and we will use this filter on other inputs to capture the same feature (feature extraction via parameter sharing).
We can see convolution in the diagram below where we simplified the filters and inputs to be 2D for ease of visualization. Also note that the values are 0/1s but in reality they can be any floating point value.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/convolution.gif" width="500">
</div>
Now let's return to our actual inputs `x`, which is of shape (8, 10) [`max_seq_len`, `vocab_size`] and we want to convolve on this input using filters. We will use 50 filters that are of size (1, 3) and has the same depth as the number of channels (`num_channels` = `vocab_size` = `one_hot_size` = 10). This gives our filter a shape of (3, 10, 50) [`kernel_size`, `vocab_size`, `num_filters`]
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/filters.png" width="500">
</div>
* **stride**: amount the filters move from one convolution operation to the next.
* **padding**: values (typically zero) padded to the input, typically to create a volume with whole number dimensions.
So far we've used a `stride` of 1 and `VALID` padding (no padding) but let's look at an example with a higher stride and difference between different padding approaches.
Padding types:
* **VALID**: no padding, the filters only use the "valid" values in the input. If the filter cannot reach all the input values (filters go left to right), the extra values on the right are dropped.
* **SAME**: adds padding evenly to the right (preferred) and left sides of the input so that all values in the input are processed.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/padding.png" width="500">
</div>
We're going to use the [Conv1d](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html#torch.nn.Conv1d) layer to process our inputs.
```
# Convolutional filters (VALID padding)
vocab_size = 10 # one hot size
num_filters = 50 # num filters
filter_size = 3 # filters are 3X3
stride = 1
padding = 0 # valid padding (no padding)
conv1 = nn.Conv1d(in_channels=vocab_size, out_channels=num_filters,
kernel_size=filter_size, stride=stride,
padding=padding, padding_mode='zeros')
print("conv: {}".format(conv1.weight.shape))
# Forward pass
z = conv1(x)
print (f"z: {z.shape}")
```
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/conv.png" width="700">
</div>
When we apply these filters to our inputs, we receive an output of shape (N, 6, 50). We get 50 for the output channel dim because we used 50 filters and 6 for the conv outputs because:
$W_1 = \frac{W_2 - F + 2P}{S} + 1 = \frac{8 - 3 + 2(0)}{1} + 1 = 6$
$H_1 = \frac{H_2 - F + 2P}{S} + 1 = \frac{1 - 1 + 2(0)}{1} + 1 = 1$
$D_2 = D_1 $
where:
* `W`: width of each input = 8
* `H`: height of each input = 1
* `D`: depth (# channels)
* `F`: filter size = 3
* `P`: padding = 0
* `S`: stride = 1
Now we'll add padding so that the convolutional outputs are the same shape as our inputs. The amount of padding for the `SAME` padding can be determined using the same equation. We want our output to have the same width as our input, so we solve for P:
$ \frac{W-F+2P}{S} + 1 = W $
$ P = \frac{S(W-1) - W + F}{2} $
If $P$ is not a whole number, we round up (using `math.ceil`) and place the extra padding on the right side.
```
# Convolutional filters (SAME padding)
vocab_size = 10 # one hot size
num_filters = 50 # num filters
filter_size = 3 # filters are 3X3
stride = 1
conv = nn.Conv1d(in_channels=vocab_size, out_channels=num_filters,
kernel_size=filter_size, stride=stride)
print("conv: {}".format(conv.weight.shape))
# `SAME` padding
padding_left = int((conv.stride[0]*(max_seq_len-1) - max_seq_len + filter_size)/2)
padding_right = int(math.ceil((conv.stride[0]*(max_seq_len-1) - max_seq_len + filter_size)/2))
print (f"padding: {(padding_left, padding_right)}")
# Forward pass
z = conv(F.pad(x, (padding_left, padding_right)))
print (f"z: {z.shape}")
```
> We will explore larger dimensional convolution layers in subsequent lessons. For example, [Conv2D](https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html#torch.nn.Conv2d) is used with 3D inputs (images, char-level text, etc.) and [Conv3D](https://pytorch.org/docs/stable/generated/torch.nn.Conv3d.html#torch.nn.Conv3d) is used for 4D inputs (videos, time-series, etc.).
## Pooling
The result of convolving filters on an input is a feature map. Due to the nature of convolution and overlaps, our feature map will have lots of redundant information. Pooling is a way to summarize a high-dimensional feature map into a lower dimensional one for simplified downstream computation. The pooling operation can be the max value, average, etc. in a certain receptive field. Below is an example of pooling where the outputs from a conv layer are `4X4` and we're going to apply max pool filters of size `2X2`.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/pooling.png" width="500">
</div>
$W_2 = \frac{W_1 - F}{S} + 1 = \frac{4 - 2}{2} + 1 = 2$
$H_2 = \frac{H_1 - F}{S} + 1 = \frac{4 - 2}{2} + 1 = 2$
$ D_2 = D_1 $
where:
* `W`: width of each input = 4
* `H`: height of each input = 4
* `D`: depth (# channels)
* `F`: filter size = 2
* `S`: stride = 2
In our use case, we want to just take the one max value so we will use the [MaxPool1D](https://pytorch.org/docs/stable/generated/torch.nn.MaxPool1d.html#torch.nn.MaxPool1d) layer, so our max-pool filter size will be max_seq_len.
```
# Max pooling
pool_output = F.max_pool1d(z, z.size(2))
print("Size: {}".format(pool_output.shape))
```
## Batch Normalization
The last topic we'll cover before constructing our model is [batch normalization](https://arxiv.org/abs/1502.03167). It's an operation that will standardize (mean=0, std=1) the activations from the previous layer. Recall that we used to standardize our inputs in previous notebooks so our model can optimize quickly with larger learning rates. It's the same concept here but we continue to maintain standardized values throughout the forward pass to further aid optimization.
```
# Batch normalization
batch_norm = nn.BatchNorm1d(num_features=num_filters)
z = batch_norm(conv(x)) # applied to activations (after conv layer & before pooling)
print (f"z: {z.shape}")
# Mean and std before batchnorm
print (f"mean: {torch.mean(conv1(x)):.2f}, std: {torch.std(conv(x)):.2f}")
# Mean and std after batchnorm
print (f"mean: {torch.mean(z):.2f}, std: {torch.std(z):.2f}")
```
# Modeling
## Model
Let's visualize the model's forward pass.
1. We'll first tokenize our inputs (`batch_size`, `max_seq_len`).
2. Then we'll one-hot encode our tokenized inputs (`batch_size`, `max_seq_len`, `vocab_size`).
3. We'll apply convolution via filters (`filter_size`, `vocab_size`, `num_filters`) followed by batch normalization. Our filters act as n-gram detectors (word-level n-grams in our case).
4. We'll apply 1D global max pooling which will extract the most relevant information from the feature maps for making the decision.
5. We feed the pool outputs to a fully-connected (FC) layer (with dropout).
6. We use one more FC layer with softmax to derive class probabilities.
<div align="left">
<img src="https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/cnn/model.png" width="1000">
</div>
```
NUM_FILTERS = 50
HIDDEN_DIM = 100
DROPOUT_P = 0.1
class CNN(nn.Module):
    """Word-level 1D CNN classifier: conv -> batch norm -> global max pool -> FC."""
    def __init__(self, vocab_size, num_filters, filter_size,
                 hidden_dim, dropout_p, num_classes):
        super(CNN, self).__init__()
        # Convolutional filters
        self.filter_size = filter_size
        self.conv = nn.Conv1d(
            in_channels=vocab_size, out_channels=num_filters,
            kernel_size=filter_size, stride=1, padding=0, padding_mode='zeros')
        self.batch_norm = nn.BatchNorm1d(num_features=num_filters)
        # FC layers
        self.fc1 = nn.Linear(num_filters, hidden_dim)
        self.dropout = nn.Dropout(dropout_p)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
    def forward(self, inputs, channel_first=False):
        """Forward pass over a list/tuple holding one one-hot batch (N, L, C).

        Returns unnormalized class logits of shape (N, num_classes).
        """
        # Rearrange input so num_channels is in dim 1 (N, C, L)
        x_in, = inputs
        if not channel_first:
            x_in = x_in.transpose(1, 2)
        # `SAME` padding: solve (W - F + 2P)/S + 1 = W for P; the extra
        # (ceil'd) padding goes on the right side.
        max_seq_len = x_in.shape[2]
        padding_left = int((self.conv.stride[0]*(max_seq_len-1) - max_seq_len + self.filter_size)/2)
        padding_right = int(math.ceil((self.conv.stride[0]*(max_seq_len-1) - max_seq_len + self.filter_size)/2))
        # Conv outputs, standardized with batch norm.
        # BUGFIX: batch_norm was constructed (and described in the design)
        # but never applied in the original forward pass.
        z = self.conv(F.pad(x_in, (padding_left, padding_right)))
        z = self.batch_norm(z)
        # Global max pool over the sequence dimension.
        z = F.max_pool1d(z, z.size(2)).squeeze(2)
        # FC layer
        z = self.fc1(z)
        z = self.dropout(z)
        z = self.fc2(z)
        return z
# Initialize model
model = CNN(vocab_size=VOCAB_SIZE, num_filters=NUM_FILTERS, filter_size=FILTER_SIZE,
hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
model = model.to(device) # set device
print (model.named_parameters)
```
> We used `SAME` padding (w/ stride=1) which means that the conv outputs will have the same width (`max_seq_len`) as our inputs. The amount of padding differs for each batch based on the `max_seq_len` but you can calculate it by solving for P in the equation below.
$ \frac{W_1 - F + 2P}{S} + 1 = W_2 $
$ \frac{\text{max_seq_len } - \text{ filter_size } + 2P}{\text{stride}} + 1 = \text{max_seq_len} $
$ P = \frac{\text{stride}(\text{max_seq_len}-1) - \text{max_seq_len} + \text{filter_size}}{2} $
If $P$ is not a whole number, we round up (using `math.ceil`) and place the extra padding on the right side.
## Training
Let's create the `Trainer` class that we'll use to facilitate training for our experiments. Notice that we're now moving the `train` function inside this class.
```
from torch.optim import Adam
LEARNING_RATE = 1e-3
PATIENCE = 5
NUM_EPOCHS = 10
class Trainer(object):
    """Bundles the train / eval / predict loops for a model on one device."""
    def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):
        # Set params
        self.model = model
        self.device = device
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
    def train_step(self, dataloader):
        """Train for one epoch; returns the running mean batch loss."""
        # Set model to train mode
        self.model.train()
        loss = 0.0
        # Iterate over train batches
        for i, batch in enumerate(dataloader):
            # Step
            batch = [item.to(self.device) for item in batch]  # Set device
            inputs, targets = batch[:-1], batch[-1]
            self.optimizer.zero_grad()  # Reset gradients
            z = self.model(inputs)  # Forward pass
            J = self.loss_fn(z, targets)  # Define loss
            J.backward()  # Backward pass
            self.optimizer.step()  # Update weights
            # Cumulative Metrics (incremental mean)
            loss += (J.detach().item() - loss) / (i + 1)
        return loss
    def eval_step(self, dataloader):
        """Validation or test step; returns (loss, y_trues, y_probs)."""
        # Set model to eval mode
        self.model.eval()
        loss = 0.0
        y_trues, y_probs = [], []
        # Iterate over val batches
        with torch.inference_mode():
            for i, batch in enumerate(dataloader):
                # Step
                batch = [item.to(self.device) for item in batch]  # Set device
                inputs, y_true = batch[:-1], batch[-1]
                z = self.model(inputs)  # Forward pass
                J = self.loss_fn(z, y_true).item()
                # Cumulative Metrics
                loss += (J - loss) / (i + 1)
                # Store outputs; dim=1 makes the softmax-over-classes explicit.
                y_prob = F.softmax(z, dim=1).cpu().numpy()
                y_probs.extend(y_prob)
                y_trues.extend(y_true.cpu().numpy())
        return loss, np.vstack(y_trues), np.vstack(y_probs)
    def predict_step(self, dataloader):
        """Prediction step; returns stacked class probabilities."""
        # Set model to eval mode
        self.model.eval()
        y_probs = []
        # Iterate over batches
        with torch.inference_mode():
            for i, batch in enumerate(dataloader):
                # BUGFIX: move the batch to the model's device (was missing,
                # which breaks prediction whenever the model is on GPU).
                batch = [item.to(self.device) for item in batch]
                inputs, targets = batch[:-1], batch[-1]
                z = self.model(inputs)
                # Store outputs
                y_prob = F.softmax(z, dim=1).cpu().numpy()
                y_probs.extend(y_prob)
        return np.vstack(y_probs)
    def train(self, num_epochs, patience, train_dataloader, val_dataloader):
        """Train with early stopping; returns the best model seen."""
        best_val_loss = np.inf
        # BUGFIX: initialize before the loop so these are defined even if
        # val_loss never improves (e.g. NaN losses on the first epoch).
        best_model = self.model
        _patience = patience
        for epoch in range(num_epochs):
            # Steps
            train_loss = self.train_step(dataloader=train_dataloader)
            val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
            self.scheduler.step(val_loss)
            # Early stopping
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                best_model = self.model
                _patience = patience  # reset _patience
            else:
                _patience -= 1
            if not _patience:  # 0
                print("Stopping early!")
                break
            # Logging
            print(
                f"Epoch: {epoch+1} | "
                f"train_loss: {train_loss:.5f}, "
                f"val_loss: {val_loss:.5f}, "
                f"lr: {self.optimizer.param_groups[0]['lr']:.2E}, "
                f"_patience: {_patience}"
            )
        return best_model
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Define optimizer & scheduler
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.1, patience=3)
# Trainer module
trainer = Trainer(
model=model, device=device, loss_fn=loss_fn,
optimizer=optimizer, scheduler=scheduler)
# Train
best_model = trainer.train(
NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)
```
## Evaluation
```
import json
from pathlib import Path
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
    """Per-class performance metrics."""
    # Weighted average across all classes, plus unaveraged per-class scores.
    overall = precision_recall_fscore_support(y_true, y_pred, average="weighted")
    per_class = precision_recall_fscore_support(y_true, y_pred, average=None)
    performance = {
        "overall": {
            "precision": overall[0],
            "recall": overall[1],
            "f1": overall[2],
            "num_samples": np.float64(len(y_true)),
        },
        "class": {},
    }
    for i, class_name in enumerate(classes):
        performance["class"][class_name] = {
            "precision": per_class[0][i],
            "recall": per_class[1][i],
            "f1": per_class[2][i],
            "num_samples": np.float64(per_class[3][i]),
        }
    return performance
# Get predictions
test_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)
y_pred = np.argmax(y_prob, axis=1)
# Determine performance
performance = get_performance(
y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)
print (json.dumps(performance['overall'], indent=2))
# Save artifacts
dir = Path("cnn")
dir.mkdir(parents=True, exist_ok=True)
label_encoder.save(fp=Path(dir, 'label_encoder.json'))
tokenizer.save(fp=Path(dir, 'tokenizer.json'))
torch.save(best_model.state_dict(), Path(dir, 'model.pt'))
with open(Path(dir, 'performance.json'), "w") as fp:
json.dump(performance, indent=2, sort_keys=False, fp=fp)
```
## Inference
```
def get_probability_distribution(y_prob, classes):
    """Create a dict of class probabilities from an array."""
    paired = {class_: np.float64(y_prob[i]) for i, class_ in enumerate(classes)}
    # Order entries by descending probability.
    return dict(sorted(paired.items(), key=lambda item: item[1], reverse=True))
# Load artifacts
device = torch.device("cpu")
label_encoder = LabelEncoder.load(fp=Path(dir, 'label_encoder.json'))
tokenizer = Tokenizer.load(fp=Path(dir, 'tokenizer.json'))
model = CNN(
vocab_size=VOCAB_SIZE, num_filters=NUM_FILTERS, filter_size=FILTER_SIZE,
hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
model.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))
model.to(device)
# Initialize trainer
trainer = Trainer(model=model, device=device)
# Dataloader
text = "What a day for the new york stock market to go bust!"
sequences = tokenizer.texts_to_sequences([preprocess(text)])
print (tokenizer.sequences_to_texts(sequences))
X = [to_categorical(seq, num_classes=len(tokenizer)) for seq in sequences]
y_filler = label_encoder.encode([label_encoder.classes[0]]*len(X))
dataset = Dataset(X=X, y=y_filler, max_filter_size=FILTER_SIZE)
dataloader = dataset.create_dataloader(batch_size=batch_size)
# Inference
y_prob = trainer.predict_step(dataloader)
y_pred = np.argmax(y_prob, axis=1)
label_encoder.decode(y_pred)
# Class distributions
prob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)
print (json.dumps(prob_dist, indent=2))
```
# Interpretability
We went through all the trouble of padding our inputs before convolution to result in outputs of the same shape as our inputs so we can try to get some interpretability. Since every token is mapped to a convolutional output on which we apply max pooling, we can see which token's output was most influential towards the prediction. We first need to get the conv outputs from our model:
```
import collections
import seaborn as sns
class InterpretableCNN(nn.Module):
    """CNN variant whose forward pass stops at the conv outputs.

    Mirrors CNN's architecture exactly (including layers unused here) so the
    trained CNN's state dict can be loaded via `load_state_dict` unchanged.
    """
    def __init__(self, vocab_size, num_filters, filter_size,
                 hidden_dim, dropout_p, num_classes):
        super(InterpretableCNN, self).__init__()
        # Convolutional filters
        self.filter_size = filter_size
        self.conv = nn.Conv1d(
            in_channels=vocab_size, out_channels=num_filters,
            kernel_size=filter_size, stride=1, padding=0, padding_mode='zeros')
        # The layers below are not used in forward(); they exist only so the
        # parameter names/shapes match the saved CNN checkpoint.
        self.batch_norm = nn.BatchNorm1d(num_features=num_filters)
        # FC layers
        self.fc1 = nn.Linear(num_filters, hidden_dim)
        self.dropout = nn.Dropout(dropout_p)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
    def forward(self, inputs, channel_first=False):
        """Return the `SAME`-padded conv feature map (one value per token)."""
        # Rearrange input so num_channels is in dim 1 (N, C, L)
        x_in, = inputs
        if not channel_first:
            x_in = x_in.transpose(1, 2)
        # Padding for `SAME` padding (extra padding goes on the right side)
        max_seq_len = x_in.shape[2]
        padding_left = int((self.conv.stride[0]*(max_seq_len-1) - max_seq_len + self.filter_size)/2)
        padding_right = int(math.ceil((self.conv.stride[0]*(max_seq_len-1) - max_seq_len + self.filter_size)/2))
        # Conv outputs (no pooling/FC — these are what we visualize)
        z = self.conv(F.pad(x_in, (padding_left, padding_right)))
        return z
# Initialize
interpretable_model = InterpretableCNN(
vocab_size=len(tokenizer), num_filters=NUM_FILTERS, filter_size=FILTER_SIZE,
hidden_dim=HIDDEN_DIM, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
# Load weights (same architecture)
interpretable_model.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))
interpretable_model.to(device)
# Initialize trainer
interpretable_trainer = Trainer(model=interpretable_model, device=device)
# Get conv outputs
conv_outputs = interpretable_trainer.predict_step(dataloader)
print (conv_outputs.shape) # (num_filters, max_seq_len)
# Visualize a bi-gram filter's outputs
tokens = tokenizer.sequences_to_texts(sequences)[0].split(' ')
sns.heatmap(conv_outputs, xticklabels=tokens)
```
The filters have high values for the words `stock` and `market` which influenced the `Business` category classification.
> This is a crude technique (maxpool doesn't strictly behave this way on a batch) loosely based off of more elaborate [interpretability](https://arxiv.org/abs/1312.6034) methods.
| github_jupyter |
# Monte Carlo Methods: Lab 1
Take a look at Chapter 10 of Newman's *Computational Physics with Python* where much of this material is drawn from.
```
from IPython.core.display import HTML
css_file = 'https://raw.githubusercontent.com/ngcm/training-public/master/ipython_notebook_styles/ngcmstyle.css'
HTML(url=css_file)
```
## Integration
If we have an ugly function, say
$$
\begin{equation}
f(x) = \sin^2 \left(\frac{1}{x (2-x)}\right),
\end{equation}
$$
then it can be very difficult to integrate. To see this, just do a quick plot.
```
%matplotlib inline
import numpy
from matplotlib import pyplot
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
rcParams['figure.figsize'] = (12,6)
from __future__ import division
def f(x):
    """Rapidly oscillating integrand sin^2(1 / (x * (2 - x)))."""
    return numpy.square(numpy.sin(1.0 / (x * (2.0 - x))))
x = numpy.linspace(0.0, 2.0, 10000)
pyplot.plot(x, f(x))
pyplot.xlabel(r"$x$")
pyplot.ylabel(r"$\sin^2([x(x-2)]^{-1})$");
```
We see that as the function oscillates *infinitely often*, integrating this with standard methods is going to be very inaccurate.
However, we note that the function is bounded, so the integral (given by the shaded area below) must itself be bounded - less than the total area in the plot, which is $2$ in this case.
```
pyplot.fill_between(x, f(x))
pyplot.xlabel(r"$x$")
pyplot.ylabel(r"$\sin^2([x(x-2)]^{-1})$");
```
So if we scattered (using a *uniform* random distribution) a large number of points within this box, the fraction of them falling *below* the curve is approximately the integral we want to compute, divided by the area of the box:
$$
\begin{equation}
I = \int_a^b f(x) \, dx \quad \implies \quad I \simeq \frac{k A}{N}
\end{equation}
$$
where $N$ is the total number of points considered, $k$ is the number falling below the curve, and $A$ is the area of the box. We can choose the box, but we need $y \in [\min_{x \in [a, b]} (f(x)), \max_{x \in [a, b]} (f(x))] = [c, d]$, giving $A = (d-c)(b-a)$.
So let's apply this technique to the function above, where the box in $y$ is $[0,1]$.
```
def mc_integrate(f, domain_x, domain_y, N = 10000):
    """
    Hit-or-miss Monte Carlo integration of f over domain_x.

    Scatters N uniform points in the box domain_x x domain_y and estimates
    I ~ k * A / N, where k is the number of points below the curve and A is
    the area of the box. Result, for the given f, should be around 1.46.

    Parameters: f (callable, vectorized), domain_x/domain_y ((lo, hi) pairs),
    N (number of sample points). Returns the estimate of the integral.
    """
    import numpy.random
    a, b = domain_x
    c, d = domain_y
    xs = numpy.random.uniform(a, b, N)
    ys = numpy.random.uniform(c, d, N)
    k = numpy.count_nonzero(ys < f(xs))
    A = (b - a) * (d - c)
    I = k * A / N
    return I
```
### Accuracy
To check the accuracy of the method, let's apply this to calculate $\pi$.
The area of a circle of radius $2$ is $4\pi$, so the area of the *quarter* circle in $x, y \in [0, 2]$ is just $\pi$:
$$
\begin{equation}
\pi = \int_0^2 \sqrt{4 - x^2} \, dx.
\end{equation}
$$
Check the convergence of the Monte Carlo integration with $N$. (I suggest using $N = 100 \times 2^i$ for $i = 0, \dots, 19$; you should find the error scales roughly as $N^{-1/2}$)
## Mean Value Method
Monte Carlo integration is pretty inaccurate, as seen above: it converges slowly, and has poor accuracy at all $N$. An alternative is the *mean value* method, where we note that *by definition* the average value of $f$ over the interval $[a, b]$ is precisely the integral multiplied by the width of the interval.
Hence we can just choose our $N$ random points in $x$ as above, but now just compute
$$
\begin{equation}
I \simeq \frac{b-a}{N} \sum_{i=1}^N f(x_i).
\end{equation}
$$
```
def mv_integrate(f, domain_x, N = 10000):
    """
    Mean value Monte Carlo integration: I ~ (b - a)/N * sum f(x_i).

    Samples N uniform points in domain_x = (a, b) and averages f over them.
    Returns the estimate of the integral of f over [a, b].
    """
    import numpy.random
    a, b = domain_x
    xs = numpy.random.uniform(a, b, N)
    I = (b - a) * numpy.mean(f(xs))
    return I
```
Let's look at the accuracy of this method again applied to computing $\pi$.
The convergence *rate* is the same (only roughly, typically), but the Mean Value method is *expected* to be better in terms of its absolute error.
### Dimensionality
Compared to standard integration methods (Gauss quadrature, Simpson's rule, etc) the convergence rate for Monte Carlo methods is very slow. However, there is one crucial advantage: as you change dimension, the amount of calculation required is *unchanged*, whereas for standard methods it grows geometrically with the dimension.
Try to compute the volume of an $n$-dimensional unit *hypersphere*, which is the object in $\mathbb{R}^n$ such that
$$
\begin{equation}
\sum_{i=1}^n x_i^2 \le 1.
\end{equation}
$$
The volume of the hypersphere [can be found in closed form](http://en.wikipedia.org/wiki/Volume_of_an_n-ball#The_volume), but can rapidly be computed using the Monte Carlo method above, by counting the $k$ points that randomly fall within the hypersphere and using the standard formula $I \simeq V k / N$.
```
def mc_integrate_multid(f, domain, N = 10000):
    """
    Hit-or-miss Monte Carlo integration in arbitrary dimensions.

    `domain` is a sequence of (lo, hi) bounds, one per dimension; its length
    sets the dimensionality. `f` is an indicator-style function of a single
    point (truthy inside the region, falsy outside), so I ~ V * k / N with V
    the volume of the bounding box and k the number of points inside.
    """
    import numpy.random
    bounds = numpy.asarray(domain, dtype=float)
    lows, highs = bounds[:, 0], bounds[:, 1]
    points = numpy.random.uniform(lows, highs, size=(N, len(bounds)))
    V = numpy.prod(highs - lows)
    k = sum(float(f(p)) for p in points)
    I = V * k / N
    return I
from scipy import special
def volume_hypersphere(ndim=3):
    """Closed-form volume of the unit n-ball: pi^(n/2) / Gamma(n/2 + 1)."""
    half_dim = float(ndim) / 2.0
    return numpy.pi ** half_dim / special.gamma(half_dim + 1.0)
```
Now let us repeat this across multiple dimensions.
The errors clearly vary over a range, but the convergence remains roughly as $N^{-1/2}$ independent of the dimension; using other techniques such as Gauss quadrature would see the points required scaling geometrically with the dimension.
## Importance sampling
Consider the integral (which arises, for example, in the theory of Fermi gases)
$$
\begin{equation}
I = \int_0^1 \frac{x^{-1/2}}{e^x + 1} \, dx.
\end{equation}
$$
This has a finite value, but the integrand diverges as $x \to 0$. This *may* cause a problem for Monte Carlo integration when a single value may give a spuriously large contribution to the sum.
We can get around this by changing the points at which the integrand is sampled. Choose a *weighting* function $w(x)$. Then a weighted average of any function $g(x)$ can be
$$
\begin{equation}
<g>_w = \frac{\int_a^b w(x) g(x) \, dx}{\int_a^b w(x) \, dx}.
\end{equation}
$$
As our integral is
$$
\begin{equation}
I = \int_a^b f(x) \, dx
\end{equation}
$$
we can, by setting $g(x) = f(x) / w(x)$ get
$$
\begin{equation}
I = \int_a^b f(x) \, dx = \left< \frac{f(x)}{w(x)} \right>_w \int_a^b w(x) \, dx.
\end{equation}
$$
This gives
$$
\begin{equation}
I \simeq \frac{1}{N} \sum_{i=1}^N \frac{f(x_i)}{w(x_i)} \int_a^b w(x) \, dx,
\end{equation}
$$
where the points $x_i$ are now chosen from a *non-uniform* probability distribution with pdf
$$
\begin{equation}
p(x) = \frac{w(x)}{\int_a^b w(x) \, dx}.
\end{equation}
$$
This is a generalization of the mean value method - we clearly recover the mean value method when the weighting function $w(x) \equiv 1$. A careful choice of the weighting function can mitigate problematic regions of the integrand; e.g., in the example above we could choose $w(x) = x^{-1/2}$, giving $p(x) = x^{-1/2}/2$. In general, the hard part of the algorithm is going to be generating the samples from this non-uniform distribution. Here we have the advantage that $p$ is given by the `numpy.random.power` distribution.
So, let's try to solve the integral above. We need $\int_0^1 w(x) \, dx = 2$. The expected solution is around 0.84.
In the general case, how do we generate the samples from the non-uniform probability distribution $p$?
What really matters here is not the function $p$ from which we draw the random numbers `x`. What really matters is that the random numbers appear to follow the behaviour, the distribution $p$, that we want. This may seem like stating the same thing, but it's not. We can use a technique called *rejection sampling* to construct a set of numbers that follows a certain (cumulative) distribution without having to construct the pdf that it actually follows at all.
To do this, we need to know the distribution we want (here $p(x) = 1/(2 \sqrt{x})$) and another distribution $q(x)$ that we can easily compute with a constant $K$ such that $p(x) \le K q(x)$. What we're doing here is just for illustration, as the *power* distribution $p(x) = a x^{a-1}$ is provided by `numpy.random.power` and perfectly matches the distribution we want for $a=1/2$. Here we're going to need some distribution that diverges faster than $p$ for small $x$, so we can choose the power distribution with $a=1/3$, provided, for example, $K = 1.6$:
```
x = numpy.linspace(0.01,1,2000)
p = 1/(2*numpy.sqrt(x))
q = 1/(3*x**(2/3))
K = 1.6
pyplot.semilogy(x, p, lw=2, label=r"$p(x)$")
pyplot.semilogy(x, K * q, lw=2, label=r"$K q(x)$")
pyplot.xlabel(r"$x$")
pyplot.legend()
pyplot.show()
```
Rejection sampling works by drawing random samples from the easy-to-compute distribution $q$. We then keep the samples drawn from $q$ with a certain probability: if $p(x) / (K q(x)) < U$, where $U$ is drawn from the uniform distribution, then we keep the sample. As we're calculating $p$, not drawing samples from it, this shouldn't be a problem.
Let's check this working:
So now we can write an importance sampling algorithm without having to integrate the weighting function first, by using rejection sampling to find a set of samples from the resulting pdf without actually having to sample it.
We won't do that here, but it's a key conceptual step for MCMC algorithms such as Metropolis Hastings.
| github_jupyter |
# Analyzing IMDB Data in Keras
```
# Imports
import numpy as np
import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
```
## 1. Loading the data
This dataset comes preloaded with Keras, so one simple command will get us training and testing data. There is a parameter for how many words we want to look at. We've set it at 1000, but feel free to experiment.
```
# Loading the data (it's preloaded in Keras).
# BUGFIX: the load was commented out, so x_train/y_train/x_test/y_test used by
# every later cell were undefined (NameError). num_words keeps the 1000 most
# frequent words; skip_top drops the 4 most frequent (stop-word-like) indices.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000, skip_top=4)
print(x_train.shape)
print(x_test.shape)
```
## 2. Examining the data
Notice that the data has been already pre-processed, where all the words have numbers, and the reviews come in as a vector with the words that the review contains. For example, if the word 'the' is the first one in our dictionary, and a review contains the word 'the', then there is a 1 in the corresponding vector.
The output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review, and 0 is negative.
```
# Each review is a list of word indices; each label is 0 (negative) or 1 (positive).
print(x_train[0])
print(y_train[0])
```
## 3. One-hot encoding the output
Here, we'll turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.
```
# One-hot encode the *input* sequences into binary bag-of-words vectors of
# length 1000: entry i is 1 iff word index i occurs in the review.
# (The original comment said "output", but this cell transforms the inputs.)
tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print(x_train[0])
```
And we'll also one-hot encode the output.
```
# One-hot encoding the output: 0/1 labels -> 2-dimensional one-hot vectors.
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train.shape)
print(y_test.shape)
```
## 4. Building the model architecture
Build a model here using sequential. Feel free to experiment with different layers and sizes! Also, experiment adding dropout to reduce overfitting.
```
# TODO: Build the model architecture
# Make sure to use methods such as dropout or regularization, and good Keras optimizers to do this
model = Sequential()
model.add(Dense(64, input_dim=x_train.shape[1]))
model.add(Activation('tanh'))
model.add(Dropout(0.5))  # dropout to reduce overfitting, per the TODO above
model.add(Dense(2))
# softmax + categorical_crossentropy match the one-hot (2-class) labels produced
# by to_categorical above; the original sigmoid + binary_crossentropy pairing is
# inconsistent with one-hot targets.
model.add(Activation('softmax'))
# TODO: Compile the model using a loss function and an optimizer.
loss = "categorical_crossentropy"
optimizer = "adam"
metrics = ["accuracy"]
epocs = 50  # (sic) this name is referenced by the training cell below
model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
model.summary()
```
## 5. Training the model
Run the model here. Experiment with different batch_size, and number of epochs!
```
# TODO: Run the model. Feel free to experiment with different batch sizes and number of epochs.
# 'epocs' (sic) is defined in the model-building cell above; verbose=0 suppresses progress output.
history = model.fit(x_train, y_train, epochs=epocs, verbose=0)
```
## 6. Evaluating the model
This will give you the accuracy of the model, as evaluated on the testing set. Can you get something over 85%?
```
# Evaluate on the held-out test set; evaluate() returns [loss, accuracy].
score = model.evaluate(x_test, y_test)
print("Accuracy: ", score[1])
```
| github_jupyter |
```
!pip install git+https://github.com/slremy/netsapi --user --upgrade
from netsapi.challenge import *
import pandas as pd
import numpy as np
import itertools
import copy
class RemyGA:
    '''
    Simple Genetic Algorithm.
    https://github.com/slremy/estool/

    Evolves a population of `popsize` real-valued parameter vectors. After the
    first (all-random) generation, each new generation is built from:
    (a) the elites of the previous generation, (b) a quota of fresh random
    "immigrants", and (c) children bred by roulette-wheel parent selection,
    single-point crossover and mutation.
    '''
    def __init__(self, num_params, # number of model parameters
                 random_individuals_fcn,
                 mutate_fcn,
                 immigrant_ratio=.2, # percentage of new individuals
                 sigma_init=0.1, # initial standard deviation
                 sigma_decay=0.999, # anneal standard deviation
                 sigma_limit=0.01, # stop annealing if less than this
                 popsize=16, # population size
                 elite_ratio=0.1, # percentage of the elites
                 forget_best=False, # forget the historical best elites
                 weight_decay=0.01, # weight decay coefficient
                 ):
        self.num_params = num_params
        self.sigma_init = sigma_init
        self.sigma_decay = sigma_decay
        self.sigma_limit = sigma_limit
        self.popsize = popsize
        # Callables supplied by the user: build random individuals / mutate one.
        self.random_individuals_fcn = random_individuals_fcn
        self.mutate_fcn = mutate_fcn
        self.solutions = None
        self.elite_ratio = elite_ratio
        self.elite_popsize = int(self.popsize * self.elite_ratio)
        self.immigrant_ratio = immigrant_ratio
        self.immigrant_popsize = int(self.popsize * self.immigrant_ratio)
        self.sigma = self.sigma_init
        self.elite_params = np.zeros((self.elite_popsize, self.num_params))
        #self.elite_rewards = np.zeros(self.elite_popsize)
        # NOTE(review): this attribute shadows the best_param() *method* defined
        # below -- instance.best_param always refers to this array, and the
        # method is unreachable on instances.
        self.best_param = np.zeros(self.num_params)
        self.best_reward = 0
        # Cumulative normalized reward distribution used for roulette-wheel
        # parent selection in ask(); refreshed by tell().
        self.reward_pdf = np.zeros(self.popsize+1)
        self.solutions = np.zeros((self.popsize, self.num_params))
        self.first_iteration = True
        # NOTE(review): forget_best is stored but never read in this class.
        self.forget_best = forget_best
        self.weight_decay = weight_decay
    def rms_stdev(self):
        return self.sigma # same sigma for all parameters.
    def ask(self, process=lambda x:x):
        '''returns a list of parameters'''
        # NOTE(review): epsilon is computed here but never used in this method.
        self.epsilon = np.random.randn(self.popsize, self.num_params) * self.sigma
        def mate(a, b):
            #single point crossover
            # NOTE(review): despite the comment this is *uniform* crossover
            # (each gene copied from b with probability ~0.5), and it is never
            # called -- crossover() below is used instead.
            c = np.copy(a)
            idx = np.where(np.random.rand((c.size)) > 0.5)
            c[idx] = b[idx]
            return c
        def crossover(a,b):
            # Single-point crossover: prefix from a, suffix from b.
            cross_point = int((self.num_params-1)*np.random.rand(1));
            c = np.append(a[:cross_point], b[cross_point:self.num_params]);
            return c
        index_array = np.arange(self.popsize)
        if self.first_iteration:
            # First generation: every individual is random.
            self.solutions = process(self.random_individuals_fcn(self.popsize,self.num_params))
        else:
            #intialize the index list for "mating" chromosomes
            childrenIDX = range(self.popsize - self.elite_popsize - self.immigrant_popsize);
            selected = np.arange(2*len(childrenIDX));
            # Roulette-wheel selection: walk the cumulative reward distribution
            # until it exceeds a uniform draw. Two parents are picked per child.
            for i in range(len(selected)):
                testNo = 1;
                #Choose a parent
                while self.reward_pdf[testNo] < np.random.rand():
                    testNo = testNo + 1;
                selected[i] = index_array[testNo];
            children = []
            for i in range(len(childrenIDX)):
                chromosomeA = self.solutions[selected[i*2+0], :];
                chromosomeB = self.solutions[selected[i*2+1], :];
                # Coin flip decides which parent contributes the prefix.
                child = crossover(chromosomeA,chromosomeB) if 0.5 < np.random.rand() else crossover(chromosomeB,chromosomeA)
                children.append(self.mutate_fcn(child))
            # Next generation = elites + fresh immigrants + children.
            self.solutions = process(np.concatenate((self.elite_params, self.random_individuals_fcn(self.immigrant_popsize,self.num_params), np.array(children))))
        return self.solutions
    def tell(self, reward_table_result):
        # input must be a numpy float array
        assert(len(reward_table_result) == self.popsize), "Inconsistent reward_table size reported."
        reward_table = np.array(reward_table_result)
        if self.weight_decay > 0:
            # NOTE(review): compute_weight_decay is not defined anywhere in this
            # file -- this branch raises NameError unless weight_decay == 0
            # (the SRGAAgent below passes weight_decay=0.00).
            l2_decay = compute_weight_decay(self.weight_decay, self.solutions)
            reward_table += l2_decay
        reward = reward_table
        solution = self.solutions
        # Mask NaN/inf rewards so they are ignored by ranking and selection.
        reward_masked = np.ma.masked_array(reward,mask = (np.isnan(reward) | np.isinf(reward)))
        self.reward_pdf = (np.cumsum(reward_masked)/np.sum(reward_masked)).compressed()
        # Rank descending by reward; keep the best elite_popsize unmasked ones.
        sorted_idx = np.argsort(reward_masked)[::-1]
        idx = sorted_idx[~reward_masked.mask][0:self.elite_popsize]
        assert(len(idx) == self.elite_popsize), "Inconsistent elite size reported."
        self.elite_rewards = reward[idx]
        self.elite_params = solution[idx]
        self.curr_best_reward = self.elite_rewards[0]
        # Track the historical best (replaces the best_param array from __init__).
        if self.first_iteration or (self.curr_best_reward > self.best_reward):
            self.first_iteration = False
            self.best_reward = self.elite_rewards[0]
            self.best_param = np.copy(self.elite_params[0])
        # Anneal the mutation scale.
        if (self.sigma > self.sigma_limit):
            self.sigma *= self.sigma_decay
        # NOTE(review): redundant -- first_iteration was already cleared above
        # on the first call; kept as-is.
        self.first_iteration = False
    def current_param(self):
        # Best member of the current elite set.
        return self.elite_params[0]
    def set_mu(self, mu):
        # Present for API compatibility with other solvers; intentionally a no-op.
        pass
    def best_param(self):
        # NOTE(review): unreachable on instances -- shadowed by the
        # self.best_param array assigned in __init__/tell().
        return self.best_param
    def result(self): # return best params so far, along with historically best reward, curr reward, sigma
        return (self.best_param, self.best_reward, self.curr_best_reward, self.sigma)
def mutate(chromosome):
    """Perturb genes of ``chromosome`` in place and return it.

    For each gene a uniform draw decides (with probability ~0.5) whether to
    add Gaussian noise; a perturbed value is wrapped back into [0, 0.99)
    via the remainder operation.
    """
    mutation_rate = .5
    for gene in range(chromosome.shape[0]):
        draw = np.random.rand(1)
        if draw > mutation_rate:
            noisy = chromosome[gene] + np.random.randn(1)
            chromosome[gene] = np.remainder(noisy, 0.99)
    return chromosome
def make_random_individuals(x, y):
    """Return an (x, y) array of genes drawn uniformly at random from act_space."""
    return np.random.choice(act_space, (x, y))
def boundary(individual):
    """Wrap genes into [0, 1]: modulo (1 + machine eps), so exactly 1.0 survives."""
    return individual % (1 + np.finfo(float).eps)
# Discrete action levels a gene may take.
act_space = [0, 0.2, 0.4, 0.6, 0.8, 1]
class SRGAAgent():
    """Agent that tunes a 10-parameter policy with RemyGA against a netsapi
    challenge environment (2 actions per year over 5 years)."""
    def __init__(self, environment):
        self._epsilon = 0.2 # 20% chances to apply a random action
        self._gamma = 0.99 # Discounting factor
        self._alpha = 0.5 # soft update param
        # NOTE(review): _epsilon/_gamma/_alpha are never read in this class.
        self.saveEnv = environment
        # Work on a deep copy so saveEnv can be restored between runs (see generate()).
        self.environment = copy.deepcopy(self.saveEnv) #self._env = env
        self.popsize=10
        self.num_paramters = 10
        # GA over 10 parameters = one (itn, irs) pair per year for 5 years.
        self.solver = RemyGA(self.num_paramters, # number of model parameters
                             random_individuals_fcn=make_random_individuals,
                             mutate_fcn = mutate,
                             sigma_init=1, # initial standard deviation
                             popsize=self.popsize, # population size
                             elite_ratio=0.2, # percentage of the elites
                             forget_best=False, # forget the historical best elites
                             weight_decay=0.00, # weight decay coefficient
                             )
    def stateSpace(self):
        # States 1..policyDimension (one per policy year).
        return range(1,self.environment.policyDimension+1)
    def train(self):
        """Run 20 episodes of ask/evaluate/tell; return the collected batch rewards."""
        allrewards = []
        for episode in range(20):
            rewards = []
            # A new population is requested only every `popsize` episodes; in
            # between, the *same* solutions are re-evaluated and re-told.
            if episode % self.popsize == 0:
                # ask for a set of random candidate solutions to be evaluated
                solutions = self.solver.ask(boundary)
            #convert an array of 10 floats into a policy of itn, irs per year for 5 years
            policies = []
            for v in solutions:
                # Pair up consecutive floats: (v[0], v[1]), (v[2], v[3]), ...
                actions = [i for i in itertools.zip_longest(*[iter(v)] * 2, fillvalue="")]
                policy = {i+1: list(actions[i]) for i in range(5)}
                policies.append(policy)
            # calculate the reward for each given solution using the environment's method
            batchRewards = self.environment.evaluatePolicy(policies)
            rewards.append(batchRewards)
            self.solver.tell(batchRewards);
            allrewards.append(rewards)
        return np.array(allrewards)
    def generate(self):
        """Train on fresh environment copies, then evaluate the best policy found."""
        for _ in range(10):
            self.environment = copy.deepcopy(self.saveEnv)
            self.train()
        #generate a policy from the array used to represent the candidate solution
        # NOTE(review): self.solver.best_param is the array attribute, not the
        # shadowed best_param() method -- see RemyGA.
        actions = [i for i in itertools.zip_longest(*[iter(self.solver.best_param)] * 2, fillvalue="")]
        best_policy = {state: list(actions[state-1]) for state in self.stateSpace()}
        # NOTE(review): evaluatePolicy receives a single dict here but a *list*
        # of dicts in train() -- confirm the netsapi API accepts both forms.
        best_reward = self.environment.evaluatePolicy(best_policy)
        print(best_policy, best_reward)
        return best_policy, best_reward
# Run the challenge harness: instantiate SRGAAgent on ChallengeProveEnvironment
# and write the evaluation results to the named CSV.
EvaluateChallengeSubmission(ChallengeProveEnvironment , SRGAAgent, "SRGAAgent_20.csv")
!pip freeze > requirements.txt
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Containers" data-toc-modified-id="Containers-1"><span class="toc-item-num">1 </span>Containers</a></span><ul class="toc-item"><li><span><a href="#1.-Tuples" data-toc-modified-id="1.-Tuples-1.1"><span class="toc-item-num">1.1 </span>1. Tuples</a></span><ul class="toc-item"><li><span><a href="#1.1-Tuples-are-(as-the-rest-of-elements-of-Python)-objects" data-toc-modified-id="1.1-Tuples-are-(as-the-rest-of-elements-of-Python)-objects-1.1.1"><span class="toc-item-num">1.1.1 </span>1.1 Tuples are (as the rest of elements of Python) objects</a></span></li><li><span><a href="#1.2.-Tuple-definition" data-toc-modified-id="1.2.-Tuple-definition-1.1.2"><span class="toc-item-num">1.1.2 </span>1.2. Tuple definition</a></span></li><li><span><a href="#1.3.-Counting-ocurrences-in-tuples" data-toc-modified-id="1.3.-Counting-ocurrences-in-tuples-1.1.3"><span class="toc-item-num">1.1.3 </span>1.3. Counting ocurrences in tuples</a></span></li><li><span><a href="#1.4.-Searching-for-an-item-in-a-tuple" data-toc-modified-id="1.4.-Searching-for-an-item-in-a-tuple-1.1.4"><span class="toc-item-num">1.1.4 </span>1.4. Searching for an item in a tuple</a></span></li><li><span><a href="#1.5.-Slicing-in-tuples" data-toc-modified-id="1.5.-Slicing-in-tuples-1.1.5"><span class="toc-item-num">1.1.5 </span>1.5. Slicing in tuples</a></span></li><li><span><a href="#1.6.-Functions-can-return-tuples" data-toc-modified-id="1.6.-Functions-can-return-tuples-1.1.6"><span class="toc-item-num">1.1.6 </span>1.6. Functions can return tuples</a></span></li><li><span><a href="#1.7.-Swapping-pairs-with-tuples-is-fun!" data-toc-modified-id="1.7.-Swapping-pairs-with-tuples-is-fun!-1.1.7"><span class="toc-item-num">1.1.7 </span>1.7. Swapping pairs with tuples is fun!</a></span></li><li><span><a href="#1.8.-Tuples-are-inmutable" data-toc-modified-id="1.8.-Tuples-are-inmutable-1.1.8"><span class="toc-item-num">1.1.8 </span>1.8. 
Tuples are inmutable</a></span></li></ul></li><li><span><a href="#2.-[0]-Lists" data-toc-modified-id="2.-[0]-Lists-1.2"><span class="toc-item-num">1.2 </span>2. [0] <a href="https://docs.python.org/3.7/library/stdtypes.html#sequence-types-list-tuple-range" target="_blank">Lists</a></a></span><ul class="toc-item"><li><span><a href="#2.1-[1]-(Of-course)-lists-are-objects" data-toc-modified-id="2.1-[1]-(Of-course)-lists-are-objects-1.2.1"><span class="toc-item-num">1.2.1 </span>2.1 [1] (Of course) lists are objects</a></span></li><li><span><a href="#2.2-[0]-Appending-items-to-a-list-(O(1))" data-toc-modified-id="2.2-[0]-Appending-items-to-a-list-(O(1))-1.2.2"><span class="toc-item-num">1.2.2 </span>2.2 [0] Appending items to a list (O(1))</a></span></li><li><span><a href="#2.3-[0]-Inserting-items-(O(n))" data-toc-modified-id="2.3-[0]-Inserting-items-(O(n))-1.2.3"><span class="toc-item-num">1.2.3 </span>2.3 [0] Inserting items (O(n))</a></span></li><li><span><a href="#2.4-[0]-Deleting-items-from-a-list-by-content-(O(n)))" data-toc-modified-id="2.4-[0]-Deleting-items-from-a-list-by-content-(O(n)))-1.2.4"><span class="toc-item-num">1.2.4 </span>2.4 [0] Deleting items from a list by content (O(n)))</a></span></li><li><span><a href="#2.4-[0]-Deleting-items-from-the-begin-of-the-list-(O(1)))" data-toc-modified-id="2.4-[0]-Deleting-items-from-the-begin-of-the-list-(O(1)))-1.2.5"><span class="toc-item-num">1.2.5 </span>2.4 [0] Deleting items from the begin of the list (O(1)))</a></span></li><li><span><a href="#2.4-[0]-Deleting-items-from-the-end-of-the-list-(O(1)))" data-toc-modified-id="2.4-[0]-Deleting-items-from-the-end-of-the-list-(O(1)))-1.2.6"><span class="toc-item-num">1.2.6 </span>2.4 [0] Deleting items from the end of the list (O(1)))</a></span></li><li><span><a href="#2.5-[0]-Sorting-the-elements-of-a-list-(O(n-log-n))" data-toc-modified-id="2.5-[0]-Sorting-the-elements-of-a-list-(O(n-log-n))-1.2.7"><span class="toc-item-num">1.2.7 </span>2.5 [0] Sorting the 
elements of a list (O(n log n))</a></span></li><li><span><a href="#2.6-[1]-Erasing-all-list-items-(O(1))" data-toc-modified-id="2.6-[1]-Erasing-all-list-items-(O(1))-1.2.8"><span class="toc-item-num">1.2.8 </span>2.6 [1] Erasing all list items (O(1))</a></span></li><li><span><a href="#2.7-[0]-List-slicing-(O(s))" data-toc-modified-id="2.7-[0]-List-slicing-(O(s))-1.2.9"><span class="toc-item-num">1.2.9 </span>2.7 [0] List slicing (O(s))</a></span></li><li><span><a href="#2.8-[1]-Defining-lists-with-list-comprehensions:" data-toc-modified-id="2.8-[1]-Defining-lists-with-list-comprehensions:-1.2.10"><span class="toc-item-num">1.2.10 </span>2.8 [1] Defining lists with <a href="http://www.secnetix.de/olli/Python/list_comprehensions.hawk" target="_blank"><em>list comprehensions</em></a>:</a></span></li><li><span><a href="#2.9-[1]-Lists-are-mutable-objects" data-toc-modified-id="2.9-[1]-Lists-are-mutable-objects-1.2.11"><span class="toc-item-num">1.2.11 </span>2.9 [1] Lists are mutable objects</a></span></li></ul></li><li><span><a href="#3.-[0]--Sets" data-toc-modified-id="3.-[0]--Sets-1.3"><span class="toc-item-num">1.3 </span>3. [0] <a href="https://docs.python.org/3.7/library/stdtypes.html#set-types-set-frozenset" target="_blank">Sets</a></a></span><ul class="toc-item"><li><span><a href="#3.2.-[0]-Sets-can-grow-(O(1))" data-toc-modified-id="3.2.-[0]-Sets-can-grow-(O(1))-1.3.1"><span class="toc-item-num">1.3.1 </span>3.2. [0] Sets can grow (O(1))</a></span></li><li><span><a href="#3.3.-[0]-Sets-can-not-contain-dupplicate-objects" data-toc-modified-id="3.3.-[0]-Sets-can-not-contain-dupplicate-objects-1.3.2"><span class="toc-item-num">1.3.2 </span>3.3. [0] Sets can not contain dupplicate objects</a></span></li><li><span><a href="#3.4.-[1]-Sets-can-not-contain-mutable-objects" data-toc-modified-id="3.4.-[1]-Sets-can-not-contain-mutable-objects-1.3.3"><span class="toc-item-num">1.3.3 </span>3.4. 
[1] Sets can not contain mutable objects</a></span></li><li><span><a href="#3.5-[0]-Intersection-of-sets-(O(min(len(s),-len(t)))" data-toc-modified-id="3.5-[0]-Intersection-of-sets-(O(min(len(s),-len(t)))-1.3.4"><span class="toc-item-num">1.3.4 </span>3.5 [0] Intersection of sets (O(min(len(s), len(t)))</a></span></li><li><span><a href="#3.6-[0]-Union-of-sets-(O(len(s)+len(t)))" data-toc-modified-id="3.6-[0]-Union-of-sets-(O(len(s)+len(t)))-1.3.5"><span class="toc-item-num">1.3.5 </span>3.6 [0] Union of sets (O(len(s)+len(t)))</a></span></li><li><span><a href="#3.7.-[0]-Sets-are-MUCH-more-efficient-for-searching-by-content-than-lists" data-toc-modified-id="3.7.-[0]-Sets-are-MUCH-more-efficient-for-searching-by-content-than-lists-1.3.6"><span class="toc-item-num">1.3.6 </span>3.7. [0] Sets are MUCH more <a href="https://wiki.python.org/moin/TimeComplexity" target="_blank">efficient for searching by content</a> than lists</a></span></li></ul></li><li><span><a href="#4-[0]-Dictionaries" data-toc-modified-id="4-[0]-Dictionaries-1.4"><span class="toc-item-num">1.4 </span>4 [0] <a href="https://docs.python.org/3.7/library/stdtypes.html#dict" target="_blank">Dictionaries</a></a></span><ul class="toc-item"><li><span><a href="#4.1-[0]-Static-definition-of-a-dictionary" data-toc-modified-id="4.1-[0]-Static-definition-of-a-dictionary-1.4.1"><span class="toc-item-num">1.4.1 </span>4.1 [0] Static definition of a dictionary</a></span></li><li><span><a href="#4.2-[0]-Indexing-of-a-dictionary-by-a-key-(O(1))" data-toc-modified-id="4.2-[0]-Indexing-of-a-dictionary-by-a-key-(O(1))-1.4.2"><span class="toc-item-num">1.4.2 </span>4.2 [0] Indexing of a dictionary by a key (O(1))</a></span></li><li><span><a href="#4.3-[0]-Testing-if-a-key-is-the-dictionary-(O(1))" data-toc-modified-id="4.3-[0]-Testing-if-a-key-is-the-dictionary-(O(1))-1.4.3"><span class="toc-item-num">1.4.3 </span>4.3 [0] Testing if a key is the dictionary (O(1))</a></span></li><li><span><a 
href="#4.4-[1]-Getting-the-keys-(O(n))" data-toc-modified-id="4.4-[1]-Getting-the-keys-(O(n))-1.4.4"><span class="toc-item-num">1.4.4 </span>4.4 [1] Getting the keys (O(n))</a></span></li><li><span><a href="#4.5-[1]-Getting-the-values-(O(n))" data-toc-modified-id="4.5-[1]-Getting-the-values-(O(n))-1.4.5"><span class="toc-item-num">1.4.5 </span>4.5 [1] Getting the values (O(n))</a></span></li><li><span><a href="#4.4-[1]-Determining-the-position-of-a-key-in-a-dictionary-(O(n))" data-toc-modified-id="4.4-[1]-Determining-the-position-of-a-key-in-a-dictionary-(O(n))-1.4.6"><span class="toc-item-num">1.4.6 </span>4.4 [1] Determining the position of a key in a dictionary (O(n))</a></span></li><li><span><a href="#4.6-[0]-Inserting-a-new-entry-(O(1))" data-toc-modified-id="4.6-[0]-Inserting-a-new-entry-(O(1))-1.4.7"><span class="toc-item-num">1.4.7 </span>4.6 [0] Inserting a new entry (O(1))</a></span></li><li><span><a href="#[0]-4.7-Deleting-an-entry-(O(1))" data-toc-modified-id="[0]-4.7-Deleting-an-entry-(O(1))-1.4.8"><span class="toc-item-num">1.4.8 </span>[0] 4.7 Deleting an entry (O(1))</a></span></li><li><span><a href="#4.8-[1]-Dictionaries-are-mutable" data-toc-modified-id="4.8-[1]-Dictionaries-are-mutable-1.4.9"><span class="toc-item-num">1.4.9 </span>4.8 [1] Dictionaries are mutable</a></span></li><li><span><a href="#4.9-[0]-Looping-a-dictionary-(O(n))" data-toc-modified-id="4.9-[0]-Looping-a-dictionary-(O(n))-1.4.10"><span class="toc-item-num">1.4.10 </span>4.9 [0] Looping a dictionary (O(n))</a></span></li></ul></li><li><span><a href="#5.-Bytes" data-toc-modified-id="5.-Bytes-1.5"><span class="toc-item-num">1.5 </span>5. <a href="http://python-para-impacientes.blogspot.com.es/2014/07/tipos-de-cadenas-unicode-byte-y.html" target="_blank">Bytes</a></a></span><ul class="toc-item"><li><span><a href="#5.1.-Creation-of-bytes-sequence" data-toc-modified-id="5.1.-Creation-of-bytes-sequence-1.5.1"><span class="toc-item-num">1.5.1 </span>5.1. 
Creation of bytes sequence</a></span></li><li><span><a href="#5.2.-Indexing-in-a-bytes-sequence" data-toc-modified-id="5.2.-Indexing-in-a-bytes-sequence-1.5.2"><span class="toc-item-num">1.5.2 </span>5.2. Indexing in a bytes sequence</a></span></li><li><span><a href="#5.3.-Concatenation-of-bytes-sequences" data-toc-modified-id="5.3.-Concatenation-of-bytes-sequences-1.5.3"><span class="toc-item-num">1.5.3 </span>5.3. Concatenation of bytes sequences</a></span></li><li><span><a href="#5.4.-Bytes-are-inmutable" data-toc-modified-id="5.4.-Bytes-are-inmutable-1.5.4"><span class="toc-item-num">1.5.4 </span>5.4. Bytes are inmutable</a></span></li></ul></li><li><span><a href="#6.-Bytearray" data-toc-modified-id="6.-Bytearray-1.6"><span class="toc-item-num">1.6 </span>6. <a href="http://ze.phyr.us/bytearray/" target="_blank">Bytearray</a></a></span></li><li><span><a href="#7.-Arrays" data-toc-modified-id="7.-Arrays-1.7"><span class="toc-item-num">1.7 </span>7. <a href="https://docs.python.org/3/library/array.html" target="_blank">Arrays</a></a></span><ul class="toc-item"><li><span><a href="#Element-access" data-toc-modified-id="Element-access-1.7.1"><span class="toc-item-num">1.7.1 </span>Element access</a></span></li><li><span><a href="#Slice-access" data-toc-modified-id="Slice-access-1.7.2"><span class="toc-item-num">1.7.2 </span>Slice access</a></span></li><li><span><a href="#Appending-elements" data-toc-modified-id="Appending-elements-1.7.3"><span class="toc-item-num">1.7.3 </span>Appending elements</a></span></li><li><span><a href="#Concatenating-arrays" data-toc-modified-id="Concatenating-arrays-1.7.4"><span class="toc-item-num">1.7.4 </span>Concatenating arrays</a></span></li><li><span><a href="#Deleting-elements" data-toc-modified-id="Deleting-elements-1.7.5"><span class="toc-item-num">1.7.5 </span>Deleting elements</a></span></li></ul></li></ul></li></ul></div>
## 3. [0] [Sets](https://docs.python.org/3.7/library/stdtypes.html#set-types-set-frozenset)
[Sets](https://en.wikipedia.org/wiki/Set_%28abstract_data_type%29) are implemented as [hash table](https://en.wikipedia.org/wiki/Hash_table) of (unordered) objects, therefore sets are good for get/set/delete/searching items and bad for ordered access. Sets do not support indexing, slicing, or other sequence-like behavior.
```
# A set literal; elements must be hashable (tuples OK, lists are not).
a = {1, 2, 'a', (1, 2)}
a
print(type(a))
help(a)
```
### 3.2. [0] Sets can grow (O(1))
```
a.add('a')  # average O(1) insertion
print(a)
```
### 3.3. [0] Sets cannot contain duplicate objects
```
a.add('a')  # 'a' is already present, so the set is unchanged
print(a)
```
### 3.4. [1] Sets can not contain mutable objects
Mutable objects can not be hashed :-(
```
a = set()
a.add([1,2]) # Sets can not contain lists: raises TypeError (lists are unhashable)
a = set() # Empty set
a.add({1,2,3}) # Sets can not contain sets: raises TypeError (sets are unhashable too)
```
### 3.5 [0] Intersection of sets (O(min(len(s), len(t)))
```
a = {1,2,3}
b = {2,3,4}
a.intersection(b)  # -> {2, 3}; equivalent to a & b
```
### 3.6 [0] Union of sets (O(len(s)+len(t)))
```
a.union(b)  # -> {1, 2, 3, 4}; equivalent to a | b
```
### 3.7. [0] Sets are MUCH more [efficient for searching by content](https://wiki.python.org/moin/TimeComplexity) than lists
```
a = set(range(1000))
print(a)
%timeit '0' in a
a = list(range(1000))
print(a)
%timeit '0' in a
```
| github_jupyter |
```
from numpy.random import seed
import numpy as np
class AdalineSGD(object):
    """ADAptive LInear NEuron classifier trained with stochastic gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training set.
    shuffle : bool
        If True, shuffle the training data every epoch to avoid cycles.
    random_state : int or None
        Seed for the shuffling RNG. NOTE: a seed of 0 is treated as "no seed"
        by the truthiness test below (kept for backward compatibility).

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    cost_ : list
        Average cost per epoch, useful to check convergence.
    """
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)
    def fit(self, X, y):
        """Fit training data: n_iter epochs of per-sample weight updates."""
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost)/len(y)
            self.cost_.append(avg_cost)
        return self
    def partial_fit(self, X, y):
        """Fit without re-initializing the weights (for online learning)."""
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self
    def _shuffle(self, X, y):
        """Return X, y re-ordered by one random permutation."""
        r = np.random.permutation(len(y))
        return X[r], y[r]
    def _initialize_weights(self, m):
        """Initialize weights (bias + m features) to zeros."""
        self.w_ = np.zeros(1 + m)
        # BUGFIX: was misspelled 'w_initiaized', so the flag never became True
        # and partial_fit() silently re-initialized the weights on every call.
        self.w_initialized = True
    def _update_weights(self, xi, target):
        """Apply Adaline learning rule to update the weights for one sample."""
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error**2
        return cost
    def net_input(self, X):
        """Calculate net input w.x + bias."""
        # BUGFIX: the bias must be added *after* the dot product; the original
        # computed np.dot(X, self.w_[1:] + self.w_[0]), adding the bias to every
        # weight before the product.
        return np.dot(X, self.w_[1:]) + self.w_[0]
    def activation(self, X):
        """Compute linear activation (identity for Adaline)."""
        return self.net_input(X)
    def predict(self, X):
        """Return class label (+1 / -1) after the unit-step function."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
import pandas as pd
# Load the Iris dataset (no header row) straight from the book's repository.
df = pd.read_csv('https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/iris/iris.data', header=None)
# Column 4 holds the species string; map it to binary labels below.
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)  # setosa -> -1, everything else -> +1
X = df.iloc[0:100, [0, 2]].values  # two features: columns 0 and 2
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot the classifier's decision surface over a 2-D feature grid and
    overlay the training samples per class.

    X : array-like -- only columns 0 and 1 are used for the mesh, so exactly
        two features are assumed.
    y : array of class labels (at most 5 distinct classes, given the
        marker/color tables below).
    classifier : any object exposing predict(X).
    resolution : grid step size for the mesh.
    """
    # marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface: mesh extends one unit beyond the data range
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
    # Predict on every grid point, then reshape back to the mesh shape.
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
# Standardize each feature column to zero mean / unit variance.
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)
# BUGFIX: the original line was a syntax error -- print(X_std[0, :].shape[]
print(X_std[0, :].shape)
print(X_std[1, :])
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
# Plot per-epoch average cost to visualize convergence.
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.show()
```
| github_jupyter |
# Tutorial 01: Introduction
*Authors: Zach del Rosario*
---
This is an introduction to `py_grama`, a toolset for building and analyzing models in Python.
**Learning Goals**: By completing this notebook, you will learn:
1. How to install `py_grama`
1. How to run `py_grama` in a Jupyter notebook
1. *grama* verb classes and how to recognize them
1. The key elements of a `py_grama` model and how to find them
1. How to program with *pipes*
**Prerequisites**:
- Familiarity with the Python programming language
**Table of Contents**:
1. [Setup](#s1)
1. [Installation](#s1.1)
1. [Jupyter](#s1.2)
1. [The *grama* language](#s2)
1. [Objects](#s2.1)
1. [`py_grama` models](#s2.2)
1. [Verbs](#s2.3)
1. [Pipes](#s2.4)
## Setup <a id="s1"></a>
In this first section, we will set up a Python environment and install `py_grama`.
---
### Installation <a id="s1.1"></a>
Requirements:
- A *terminal*
+ OS X: The *Terminal* app
+ Windows: Use *Anaconda Prompt*, installed with Anaconda Python
+ Linux: (You got this)
- Python + scientific computing packages. We recommend [Anaconda](https://www.anaconda.com/distribution/) 3.X.
- `py_grama`, hosted at [this public GitHub repo](https://github.com/zdelrosario/py_grama). Download the repository and run from terminal:
```bash
$ cd py_grama/
$ python setup.py install
$ pip install -r requirements.txt
```
If your installation is ready, you should be able to run the following code in your terminal:
```bash
$ python
```
Which will start the *Python interpreter*, in which you should be able to run:
```python
>>> import grama as gr
```
Which imports `py_grama`. Press `Control + D` to exit the Python interpreter.
### Jupyter <a id="s1.2"></a>
If you're already reading this Jupyter notebook locally, feel free to skip this section.
#### Starting Jupyter
If you're reading this notebook in your browser, you might not be aware that this *Jupyter notebook* is actually *executable*. Using your terminal, navigate to the directory with the tutorial notebooks:
```bash
$ cd py_grama/tutorials/
```
and run Jupyter with the following command:
```bash
$ jupyter notebook
```
This will open up a page in your browser. Click on `t01_introduction_exercise.ipynb` to continue this tutorial.
#### Working in Jupyter
Jupyter notebooks are organized into *cells*, and each cell is one of two types:
- **Markdown** cells contain formatted text. This cell is a Markdown cell.
- **Code** cells contain *executable python code*. The cell below is a Code cell.
You can *execute* a cell by click-selecting it (shown with a rounded rectangle to the left) and pressing `Control + Enter`. See [this tutorial](https://mybinder.org/v2/gh/ipython/ipython-in-depth/master?filepath=binder/Index.ipynb) for more information on Jupyter.
#### __Q1: Execute a cell__
Select the code cell below and execute it. You should see `model: Cantilever Beam` plus a bunch of information about the model. If not, make sure your installation is valid.
```
import grama as gr
from grama.models import make_cantilever_beam
# Build the demo cantilever-beam model and print its basic facts
# (name, deterministic/random inputs, functions).
md = make_cantilever_beam()
md.printpretty()
```
## The *grama* language <a id="s2"></a>
---
### Objects <a id="s2.1"></a>
The *grama* language has *verbs* which take action on *objects*. The primary object types of interest are:
- Data, implemented as [Pandas](https://pandas.pydata.org/) `DataFrame`s
- Models, implemented as `py_grama` `Model`s
Data are used to inform models, and models can be used to generate new data. Models also have a number of attributes: The most important are summarized below.
### `py_grama` Models <a id="s2.2"></a>
Models in `py_grama` have both *functions* and *inputs*. When studying a model, it is important to first get the *basic facts* before attempting a more involved analysis. The `printpretty()` method gives these basic facts about a model.
#### __Q2: Get the basic facts__
Use the `printpretty()` method on the model loaded below to get the basic facts on `md_ishigami`. *Hint:* We used `printpretty()` in a cell above.
```
###
# TASK: Summarize a model
# TODO: Use the printpretty() method on md_ishigami
###
# -- NO NEED TO MODIFY THIS ----
from grama.models import make_ishigami
md_ishigami = make_ishigami()
# -- UNCOMMENT AND COMPLETE THIS CODE ----
# (hint: call the method on md_ishigami, as done for the beam model earlier)
# md_ishigami
```
Let's take apart these basic facts:
1. `model:` precedes the *model name*.
1. `inputs:` precedes the complete list of model inputs. These are further organized into:
1. *variables*: These are inputs which enter into the model's `functions`. There are two kinds of variables:
1. `var_det:` Deterministic variables which take fixed, user-selected values. See `a, b` above.
1. `var_rand:` Random variables which are inherently uncertain, see `x1, x2, x3` above. These are listed with distribution information.
1. *parameters*: These characterize the distributions for the random variables. They are quantities like `loc, scale` above.
1. `functions:` precedes the complete list of model functions. Each function lists the variables it takes and the *outputs* it returns. The Ishigami model has just one function `fun`.
### Verbs <a id="s2.3"></a>
The *grama* verbs are organized into different classes based on their primary input and output object type. In `py_grama` these verb classes are identified by their prefixes:
| Verb Type | Prefix (Short) | In | Out |
| --------- | --------------- | ---- | ----- |
| Evaluate | `eval_` (`ev_`) | `md` | `df` |
| Fit | `fit_` (`ft_`) | `df` | `md` |
| Transform | `tran_` (`tf_`) | `df` | `df` |
| Compose | `comp_` (`cp_`) | `md` | `md` |
| Plot | `plot_` (`pt_`) | `df` | (Plot) |
For readability, we recommend naming `DataFrames` with the `df_` prefix, and models with the `md_` prefix. Let's take a look at some of the *verbs* available in `py_grama`.
```
### EXECUTE THIS CELL
df_sinews = gr.eval_sinews(md_ishigami, df_det="nom")
gr.plot_auto(df_sinews)
```
The verb `gr.eval_sinews` is an Evaluation, so it takes a model and returns a dataframe. This function generates a "sinew" analysis, which sweeps across the random variables in directions parallel to their coordinate axes. The function repeats these sweeps a number of times from random starting locations. This gives us a sense of how the function behaves in each of its (random) variables, contextualized by other possibilities due to the starting location of the sweep.
The verb `gr.plot_auto` is a utility function that takes `py_grama` results and quickly constructs a visualization. This is not intended to produce presentation-quality graphics, but rather as a tool to quickly post-process data.
Based on these results, it appears that both `x1` and `x2` have some sinusoidal impact on the response, while `x3` has quadratic behavior.
### Pipes <a id="s2.4"></a>
Note that each verb class has both a full (e.g. `eval_`) and short (e.g. `ev_`) prefix form. We have seen the full prefix versions; the short prefix verbs are *pipe-enabled*. Pipes allow us to use [*functional programming*](https://en.wikipedia.org/wiki/Functional_programming) patterns. One consequence of this pattern is that we can avoid assigning *intermediate variables*. For instance, we could perform a sensitivity analysis with the following code:
```python
df_hybrid = gr.eval_hybrid(md_ishigami, df_det="nom")
df_sobol = gr.tran_sobol(df_hybrid)
df_sobol
```
We could eliminate the intermediate variables by using the *pipe operator* `>>` via:
```python
(
md_ishigami
>> gr.ev_hybrid(df_det="nom")
>> gr.tf_sobol()
)
```
The `>>` operator essentially takes the output from the left and inserts it as the first argument to the function on the right. Note above that we replaced the prefixes with their short forms. Also note that we enclose the operations in parentheses; if we did not use parentheses, then we would need to use line continuation characters `\` at the end of each line.
#### __Q3: Functionalize__
Translate the following code into functional form using the pipe operator `>>`.
```python
df_sinews = gr.eval_sinews(md_ishigami, df_det="nom")
gr.plot_auto(df_sinews)
```
```
###
# TASK: Functionalize
# TODO: Use the pipe operator >> to translate this code
###
# -- UNCOMMENT AND COMPLETE THIS CODE ----
# df_sinews = gr.eval_sinews(md_ishigami, df_det="nom")
# gr.plot_auto(df_sinews)
```
There is a great deal more information on the *grama* language available in the [documentation](https://py-grama.readthedocs.io/en/latest/source/language.html), but this is beyond the scope of the present tutorial.
# Next Steps
When you are done, please take [this survey](https://docs.google.com/forms/d/e/1FAIpQLSdRhycrA9sQhElByOaF1CG_7-Bw9eCby4408o1gnhDMUBNZeg/viewform?usp=pp_url&entry.923399158=4088579) about the exercise.
When you're ready, move on to [Tutorial 02: Exploratory Model Analysis](https://github.com/zdelrosario/py_grama/blob/master/tutorials/t02_explore_assignment.ipynb).
| github_jupyter |
# Data Object Service Demo
This notebook demonstrates how to use the demonstration server and client to make a simple Data Object service that makes available data from a few different sources.
## Installing the Python package
First, we'll install the Data Object Service Schemas package from PyPi, it includes a Python client and demonstration server.
```
!pip install ga4gh-dos-schemas
```
## Running the server
Once you've installed the PyPi package, you can run the demonstration server using `ga4gh_dos_server`. Open this in a separate terminal.
You should see something like:
```
$ ga4gh_dos_server
* Serving Flask app "ga4gh.dos.server" (lazy loading)
* Environment: production
WARNING: Do not use the development server in a production environment.
Use a production WSGI server instead.
* Debug mode: on
* Running on http://0.0.0.0:8080/ (Press CTRL+C to quit)
* Restarting with stat
* Debugger is active!
* Debugger PIN: 192-487-366
```
Your DOS is now ready to accept requests to Create, Get, and List Data Objects!
## Using the Client to Access the Demo Server
We can now use the Python client to create a simple Data Object. The same could be done using cURL or wget.
```
# Connect the demo Python client to the DOS server started in the
# previous section (it listens on localhost:8080).
from ga4gh.dos.client import Client
client = Client("http://localhost:8080/ga4gh/dos/v1")
# `c` issues the API calls (ListDataObjects, CreateDataObject, ...);
# `models` provides the request model classes (DataObject, Checksum, URL)
# used below via models.get_model(...).
c = client.client
models = client.models
```
At first, the service will not present any Data Objects.
```
c.ListDataObjects().result()
```
We can now create a simple Data Object representing a file.
```
!echo "Hello DOS" > dos.txt
!md5sum dos.txt
# Look up the schema model classes needed to build the request body.
DataObject = models.get_model('DataObject')
Checksum = models.get_model('Checksum')
URL = models.get_model('URL')
hello_object = DataObject()
# Set the Data Object metadata
hello_object.id = 'test'
# Checksum printed by the `md5sum dos.txt` command above.
hello_object.checksums = [Checksum(checksum="976feb684cfdb4b2337530699e1d0fbd", type="md5")]
hello_object.urls = [URL(url="file://dos.txt")]
hello_object.name = 'dos.txt'
# Post the Data Object to the service
c.CreateDataObject(body={'data_object': hello_object}).result()
# Get the resulting created object
c.GetDataObject(data_object_id='test').result()
```
## Using DOS With Reference FASTAs
A useful Data Object Service might present a list of available reference FASTAs for performing downstream alignment and analysis.
We'll index the UCSC human reference FASTAs into DOS as an example.
```
!wget http://hgdownload.cse.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz
!md5sum chr22.fa.gz
# Adding a second URL because FTP is preferred
chr22 = DataObject()
chr22.id = 'hg38-chr22'
chr22.name = 'chr22.fa.gz'
chr22.urls = [
URL(url='http://hgdownload.cse.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz'),
URL(url='ftp://hgdownload.cse.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz')]
chr22.checksums = [Checksum(checksum='41b47ce1cc21b558409c19b892e1c0d1', type='md5')]
chr22.aliases = ['NC_000022', 'CM000684']
chr22.size = '12255678'
# Add the chr22 Data Object to the service
c.CreateDataObject(body={'data_object': chr22}).result()
c.GetDataObject(data_object_id='hg38-chr22').result()
```
## Using DOS with htsget
Data Objects are meant to represent versioned artifacts and can represent an API resource. For example, we could use DOS as a way of exposing htsget resources.
In the [htsget Quickstart documentation](https://htsget.readthedocs.io/en/stable/quickstart.html) a link is made to the following snippet, which will stream the BAM results to the client.
```
!htsget http://htsnexus.rnd.dnanex.us/v1/reads/BroadHiSeqX_b37/NA12878 \
--reference-name=2 --start=1000 --end=20000 -O NA12878_2.bam
!md5sum NA12878_2.bam
!ls -al NA12878_2.bam
na12878_2 = DataObject()
na12878_2.id = 'na12878_2'
na12878_2.name = 'NA12878_2.bam'
na12878_2.checksums = [Checksum(checksum='eaf80af5e9e54db5936578bed06ffcdc', type='md5')]
na12878_2.urls = [
URL(
url="http://htsnexus.rnd.dnanex.us/v1/reads/BroadHiSeqX_b37/NA12878",
system_metadata={'reference_name': 2, 'start': 1000, 'end': 20000})]
na12878_2.aliases = ['NA12878 chr 2 subset']
na12878_2.size = '555749'
c.CreateDataObject(body={'data_object': na12878_2}).result()
c.GetDataObject(data_object_id='na12878_2').result()
```
## Using DOS with S3
One of the original intentions of DOS is to create an interoperability layer over the various object stores. We can create Data Objects that point to items in S3 so that subsequent downloaders can find them.
Using [dos_connect](https://github.com/ohsu-comp-bio/dos_connect), a DOS hosting the 1kgenomes s3 data is available.
```
client_1kg = Client('http://ec2-52-26-45-130.us-west-2.compute.amazonaws.com:8080/ga4gh/dos/v1/')
c1kg = client_1kg.client
do_1kg = c1kg.ListDataObjects().result().data_objects[0]
print(do_1kg.urls[0].url)
print(do_1kg.checksums[0])
print(do_1kg.id)
```
We can now use an S3 downloader to retrieve the file and confirm the checksum.
```
!dos-downloader http://ec2-52-26-45-130.us-west-2.compute.amazonaws.com:8080/ga4gh/dos/v1/ b3549308-9dd0-4fdb-92b2-5a2697521354 --aws_secret_key $aws_secret_access_key --aws_access_key $aws_access_key_id
```
## DOS GDC Data
Another demonstration in this repository asks you to create a DOS of the NCI GDC data. This process has been automated as part of a lambda: dos-gdc-lambda.
```
cgdc = Client("https://dos-gdc.ucsc-cgp-dev.org/")
gdc_do = cgdc.client.ListDataObjects().result().data_objects[0]
print(gdc_do.name)
print(gdc_do.size)
print(gdc_do.urls[0].url)
```
## DAS DOS
UCSC Genome Browser makes available a service for getting sequence by region from named FASTA. Working with DOS is pretty easy.
Both of these APIs allow one to make further range queries against the result.
https://genome.ucsc.edu/FAQ/FAQdownloads.html#download23
```
chr22.urls.append(URL(url='http://genome.ucsc.edu/cgi-bin/das/hg19/dna?segment=chr22:15000,16000'))
chr22.urls.append(URL(url='http://togows.org/api/ucsc/hg38/chr22:15000-16000.fasta'))
c.UpdateDataObject(body={'data_object': chr22}, data_object_id=chr22.id).result()
response_chr22 = c.GetDataObject(data_object_id=chr22.id).result().data_object
# Note the change in version, in DOS versions are just arbitrary strings
print(response_chr22.version, chr22.version)
url_1 = response_chr22.urls[2].url
url_2 = response_chr22.urls[3].url
!wget $url_1
!head dna?segment=chr22:15000,16000
!wget $url_2
!head chr22:15000-16000.fasta
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#はじめに" data-toc-modified-id="はじめに-1"><span class="toc-item-num">1 </span>はじめに</a></span><ul class="toc-item"><li><span><a href="#研究の目的" data-toc-modified-id="研究の目的-1.1"><span class="toc-item-num">1.1 </span>研究の目的</a></span></li><li><span><a href="#研究の動機" data-toc-modified-id="研究の動機-1.2"><span class="toc-item-num">1.2 </span>研究の動機</a></span></li></ul></li><li><span><a href="#基本的事項" data-toc-modified-id="基本的事項-2"><span class="toc-item-num">2 </span>基本的事項</a></span><ul class="toc-item"><li><span><a href="#Emacs" data-toc-modified-id="Emacs-2.1"><span class="toc-item-num">2.1 </span>Emacs</a></span></li><li><span><a href="#Ruby" data-toc-modified-id="Ruby-2.2"><span class="toc-item-num">2.2 </span>Ruby</a></span></li><li><span><a href="#RubyGems" data-toc-modified-id="RubyGems-2.3"><span class="toc-item-num">2.3 </span>RubyGems</a></span></li><li><span><a href="#Keybind" data-toc-modified-id="Keybind-2.4"><span class="toc-item-num">2.4 </span>Keybind</a></span></li><li><span><a href="#CUI(Character-User-Interface)" data-toc-modified-id="CUI(Character-User-Interface)-2.5"><span class="toc-item-num">2.5 </span>CUI(Character User Interface)</a></span></li><li><span><a href="#使用したgemファイル" data-toc-modified-id="使用したgemファイル-2.6"><span class="toc-item-num">2.6 </span>使用したgemファイル</a></span><ul class="toc-item"><li><span><a href="#diff-lcs" data-toc-modified-id="diff-lcs-2.6.1"><span class="toc-item-num">2.6.1 </span>diff-lcs</a></span></li><li><span><a href="#Thor" data-toc-modified-id="Thor-2.6.2"><span class="toc-item-num">2.6.2 </span>Thor</a></span></li><li><span><a href="#Minitest" data-toc-modified-id="Minitest-2.6.3"><span class="toc-item-num">2.6.3 </span>Minitest</a></span></li><li><span><a href="#FileUtils" data-toc-modified-id="FileUtils-2.6.4"><span class="toc-item-num">2.6.4 </span>FileUtils</a></span></li><li><span><a href="#open3" data-toc-modified-id="open3-2.6.5"><span 
class="toc-item-num">2.6.5 </span>open3</a></span></li><li><span><a href="#Bundler" data-toc-modified-id="Bundler-2.6.6"><span class="toc-item-num">2.6.6 </span>Bundler</a></span></li><li><span><a href="#Rubocop" data-toc-modified-id="Rubocop-2.6.7"><span class="toc-item-num">2.6.7 </span>Rubocop</a></span></li></ul></li></ul></li><li><span><a href="#editor_learnerの概要" data-toc-modified-id="editor_learnerの概要-3"><span class="toc-item-num">3 </span>editor_learnerの概要</a></span><ul class="toc-item"><li><span><a href="#Installation" data-toc-modified-id="Installation-3.1"><span class="toc-item-num">3.1 </span>Installation</a></span><ul class="toc-item"><li><span><a href="#githubによるinstall" data-toc-modified-id="githubによるinstall-3.1.1"><span class="toc-item-num">3.1.1 </span>githubによるinstall</a></span></li><li><span><a href="#gemによるinstall" data-toc-modified-id="gemによるinstall-3.1.2"><span class="toc-item-num">3.1.2 </span>gemによるinstall</a></span></li></ul></li><li><span><a href="#uninstall" data-toc-modified-id="uninstall-3.2"><span class="toc-item-num">3.2 </span>uninstall</a></span><ul class="toc-item"><li><span><a href="#githubからinstallした場合のuninstall方法" data-toc-modified-id="githubからinstallした場合のuninstall方法-3.2.1"><span class="toc-item-num">3.2.1 </span>githubからinstallした場合のuninstall方法</a></span></li><li><span><a href="#gemからinstallした場合のuninstall方法" data-toc-modified-id="gemからinstallした場合のuninstall方法-3.2.2"><span class="toc-item-num">3.2.2 </span>gemからinstallした場合のuninstall方法</a></span></li></ul></li><li><span><a href="#動作環境" data-toc-modified-id="動作環境-3.3"><span class="toc-item-num">3.3 </span>動作環境</a></span><ul class="toc-item"><li><span><a href="#error時の対処法" data-toc-modified-id="error時の対処法-3.3.1"><span class="toc-item-num">3.3.1 </span>error時の対処法</a></span></li></ul></li><li><span><a href="#初期設定" data-toc-modified-id="初期設定-3.4"><span class="toc-item-num">3.4 </span>初期設定</a></span></li><li><span><a href="#delete" data-toc-modified-id="delete-3.5"><span 
class="toc-item-num">3.5 </span>delete</a></span></li><li><span><a href="#random_h.rbとsequential_h.rb" data-toc-modified-id="random_h.rbとsequential_h.rb-3.6"><span class="toc-item-num">3.6 </span>random_h.rbとsequential_h.rb</a></span></li><li><span><a href="#random_checkの動作" data-toc-modified-id="random_checkの動作-3.7"><span class="toc-item-num">3.7 </span>random_checkの動作</a></span></li><li><span><a href="#sequential_checkの動作" data-toc-modified-id="sequential_checkの動作-3.8"><span class="toc-item-num">3.8 </span>sequential_checkの動作</a></span></li></ul></li><li><span><a href="#実装コードの解説" data-toc-modified-id="実装コードの解説-4"><span class="toc-item-num">4 </span>実装コードの解説</a></span><ul class="toc-item"><li><span><a href="#起動時に毎回動作するプログラム" data-toc-modified-id="起動時に毎回動作するプログラム-4.1"><span class="toc-item-num">4.1 </span>起動時に毎回動作するプログラム</a></span><ul class="toc-item"><li><span><a href="#プログラム内のインスタンス変数の概要" data-toc-modified-id="プログラム内のインスタンス変数の概要-4.1.1"><span class="toc-item-num">4.1.1 </span>プログラム内のインスタンス変数の概要</a></span></li><li><span><a href="#Fileの作成" data-toc-modified-id="Fileの作成-4.1.2"><span class="toc-item-num">4.1.2 </span>Fileの作成</a></span></li></ul></li><li><span><a href="#ファイル削除処理delete" data-toc-modified-id="ファイル削除処理delete-4.2"><span class="toc-item-num">4.2 </span>ファイル削除処理delete</a></span></li><li><span><a href="#random_check" data-toc-modified-id="random_check-4.3"><span class="toc-item-num">4.3 </span>random_check</a></span></li><li><span><a href="#sequential_check" data-toc-modified-id="sequential_check-4.4"><span class="toc-item-num">4.4 </span>sequential_check</a></span><ul class="toc-item"><li><span><a href="#インスタンス定数に格納されたパス" data-toc-modified-id="インスタンス定数に格納されたパス-4.4.1"><span class="toc-item-num">4.4.1 </span>インスタンス定数に格納されたパス</a></span></li><li><span><a href="#動作部分" data-toc-modified-id="動作部分-4.4.2"><span class="toc-item-num">4.4.2 </span>動作部分</a></span></li></ul></li><li><span><a href="#新しいターミナルを開くopen_terminal" 
data-toc-modified-id="新しいターミナルを開くopen_terminal-4.5"><span class="toc-item-num">4.5 </span>新しいターミナルを開くopen_terminal</a></span></li></ul></li><li><span><a href="#他のソフトとの比較" data-toc-modified-id="他のソフトとの比較-5"><span class="toc-item-num">5 </span>他のソフトとの比較</a></span><ul class="toc-item"><li><span><a href="#PTYPING" data-toc-modified-id="PTYPING-5.1"><span class="toc-item-num">5.1 </span>PTYPING</a></span></li><li><span><a href="#e-typing" data-toc-modified-id="e-typing-5.2"><span class="toc-item-num">5.2 </span>e-typing</a></span></li><li><span><a href="#寿司打" data-toc-modified-id="寿司打-5.3"><span class="toc-item-num">5.3 </span>寿司打</a></span></li><li><span><a href="#考察" data-toc-modified-id="考察-5.4"><span class="toc-item-num">5.4 </span>考察</a></span></li></ul></li><li><span><a href="#総括" data-toc-modified-id="総括-6"><span class="toc-item-num">6 </span>総括</a></span></li><li><span><a href="#謝辞" data-toc-modified-id="謝辞-7"><span class="toc-item-num">7 </span>謝辞</a></span></li><li><span><a href="#付録" data-toc-modified-id="付録-8"><span class="toc-item-num">8 </span>付録</a></span></li><li><span><a href="#参考文献" data-toc-modified-id="参考文献-9"><span class="toc-item-num">9 </span>参考文献</a></span></li></ul></div>
# はじめに
## 研究の目的
editor_learnerの開発の大きな目的はeditor(Emacs)操作,CUI操作(キーバインドなど),Ruby言語の習熟とタイピング速度の向上である.editor上で動かすためファイルの開閉,保存,画面分割といった基本操作を習熟することができ,Ruby言語のプログラムを写経することでRuby言語の習熟へと繋げる.更にコードを打つことで正しい運指を身につけタイピング速度の向上も図っている.コードを打つ際にキーバインドを利用することでGUIではなくCUI操作にも適応していく.これら全てはプログラマにとって作業を効率化させるだけでなく,プログラマとしての質の向上につながる.
## 研究の動機
初めはタッチタイピングを習得した経験を活かして,西谷によって開発されたshunkuntype(ターミナル上で実行するタイピングソフト)の再開発をテーマにしていたが,これ以上タイピングに特化したソフトを開発しても同じようなものがWeb上に大量に転がっており,そのようなものをいくつも開発しても意味がないと考えた.そこで西谷研究室ではタイピング,Ruby言語,Emacsによるeditor操作,CUI操作の習熟が作業効率に非常に大きな影響を与えるので習熟を勧めている.そこで西谷研究室で使用されているeditorであるEmacs操作,Ruby言語の学習,タイピング速度,正確性の向上,CUI操作.これらの習熟を目的としたソフトを開発しようと考えた.
# 基本的事項
## Emacs
本研究において使用するeditorはEmacsである.ツールはプログラマ自身の手の延長である.これは他のどのようなソフトウェアツールよりもEditorに対して当てはまる.テキストはプログラミングにおける最も基本的な生素材なので,できる限り簡単に操作できる必要があります.
そこで西谷研究室で勧められているEmacsの機能については以下の通りである,
1. 設定可能である. フォント,色,ウィンドウサイズ,キーバインドを含めた全ての外見が好みに応じて設定できるようになっていること.通常の操作がキーストロークだけで行えると,手をキーボードから離す必要がなくなり,結果的にマウスやメニュー駆動型のコマンドよりも効率的に操作できるようになります
1. 拡張性がある. 新しいプログラミング言語が出てきただけで,使い物にならなくなるようなエディタではなく,どんな新しい言語やテキスト形式が出てきたとしても,その言語の意味合いを「教え込む」ことが可能です
1. プログラム可能であること. 込み入った複数の手順を実行できるよう,Editorはプログラム可能であることが必須である.
これらの機能は本来エディタが持つべき基本的な機能である.これらに加えてEmacsは,
1. 構文のハイライト Rubyの構文にハイライトを入れたい場合はファイル名の後に.rbと入れることでRubyモードに切り替わり構文にハイライトを入れることが可能になる.
1. 自動インデント. テキストを編集する際,改行時に自動的にスペースやタブなどを入力しインデント調整を行ってくれる.
などのプログラミング言語に特化した特徴を備えています.強力なeditorを習熟することは生産性を高めることに他ならない.カーソルの移動にしても,1回のキー入力で単語単位,行単位,ブロック単位,関数単位でカーソルを移動させることができれば,一文字ずつ,あるいは一行ずつ繰り返してキー入力を行う場合とは効率が大きく変わってきます.Emacsはこれらの全ての機能を孕んでいてeditorとして非常に優秀である.よって本研究はEmacsをベースとして研究を進める.
## Ruby
Rubyの基本的な説明は以下の通り,Rubyはまつもとゆきひろにより開発されたオブジェクト指向スクリプト言語であり,スクリプト言語が用いられてきた領域でのオブジェクト指向プログラミングを実現する言語である.
本研究はRuby言語を使用しています.大きな理由としては
* 構文の自由度が高く,記述量が少なくて済む.
* 強力な標準ライブラリが備えられている.
Rubyは変数の型付けがないため,記述量を少なく済ませることができ,"gem"という形式で公開されているライブラリが豊富かつ強力なので本研究はRuby言語を使用しました.
## RubyGems
Rubygemの基本的な説明は以下の通り,RubyGemsは,Ruby言語用のパッケージ管理システムであり,Rubyのプログラムと("gem"と呼ばれる)ライブラリの配布用標準フォーマットを提供している.gemを容易に管理でき,gemを配布するサーバの機能も持つ.
本研究ではRubyGemsのgemを利用してファイル操作やパスの受け取りなどを行い,本研究で開発したソフトもgemに公開してある.
## Keybind
Keybindの基本的な説明は以下の通り,押下するキー(単独キーまたは複数キーの組み合わせ)と,実行される機能との対応関係のことである.また,キーを押下したときに実行させる機能を割り当てる行為のことである.
以下controlを押しながらをc-と記述する.本研究におけるKeybindの習熟はCUI操作の習熟に酷似している.カーソル移動においてもGUIベースでマウスを使い行の先頭をクリックするより,CUIによりc-aを押すことで即座に行の先頭にカーソルを持っていくことができる.習熟するのであれば,どちらの方が早いかは一目瞭然である.本研究はKeybindの習熟によるCUI操作の適応で作業の効率化,高速化に重点を置いている.
## CUI(Character User Interface)
CUIは,キーボード等からの文字列を入力とし,文字列が表示されるウィンドウや古くはラインプリンタで印字される文字などを出力とする,ユーザインタフェースの様式で,GUI(Graphical User Interface)の対義語として使われる.
CUIとGUIにはそれぞれ大きな違いがある.GUIの利点は以下の通り,
* 文字だけでなくアイコンなどの絵も表示できる.
* 対象物が明確な点や,マウスで比較的簡単に操作できる.
* 即座に操作結果が反映される.
CUIの利点は以下の通り,
* コマンドを憶えていれば複雑な処理が簡単に行える.
* キーボードから手を離すことなく作業の高速化が行える.
今回GUIではなくCUI操作の習熟を目的にした理由は,
* コマンドを憶えることで作業効率が上がる.
* editor操作の習熟も孕んでいるから.
カーソル移動においてもGUIではなくCUI操作により,ワンコマンドで動かした方が効率的である.上記の理由から,GUIではなくCUI操作の習熟を目的としている.
## 使用したgemファイル
### diff-lcs
diff-lcsは,二つのファイルの差分を求めて出力してくれる.テキストの差分を取得するメソッドは,Diff::LCS.sdiff と Diff::LCS.diff の2つがある.複数行の文字列を比較した場合の2つのメソッドの違いは以下のとおり.
1. Diff::LCS.sdiff
1. 比較結果を1文字ずつ表示する
1. Diff::LCS.diff
1. 比較した結果,違いがあった行について,違いがあった箇所のみ表示する.
今回使用したのは後者(Diff:LCS.diff)である.理由は間違った部分だけを表示した方が見やすいと考えたからである.
### Thor
Thorは,コマンドラインツールの作成を支援するライブラリです.gitやbundlerのようなサブコマンドツールを簡単に作成することができます.
Thorの使用でサブコマンドを自然言語に近い形で覚えることができる.
### Minitest
Minitestはテストを自動化するためのテスト用のフレームワークである.Rubyにはいくつかのテスティングフレームワークがありますが,Minitestというフレームワークを利用した理由は以下の通りです.
1. Rubyをインストールすると一緒にインストールされるため,特別なセットアップが不要.
1. 学習コストが比較的低い.
1. Railsのデフォルトのテスティングフレームワークなので,Railsを開発するときにも知識を活かしやすい.
上記の理由から,sequential_checkではminitestを採用しております.
### FileUtils
再帰的な削除などの基本的なファイル操作を行うためのライブラリ
### open3
プログラムを実行し,そのプロセスの標準出力,標準入力,標準エラー出力にパイプをつなぐためのものである.
### Bundler
Bundlerはアプリケーション谷で依存するgemパッケージを管理するためのツールです.1つのシステム上で複数のアプリケーションを開発する場合や,デプロイ時にアプリケーションに紐付けてgemパッケージを管理したい場合に利用される.
### Rubocop
RubocopはRubyのソースコード解析ツールである.Rubyスタイルガイドや他のスタイルガイドに準拠しているかどうかを自動チェックしてくれるソフトウェアです.自分が打ち込んだ問題文となるソースコードのチェックに使用しました.
# editor_learnerの概要
## Installation
### githubによるinstall
githubによるインストール方法は以下の通りである.
1. "https://github.com/souki1103/editor_learner" へアクセス
1. Clone or downloadを押下,SSHのURLをコピー
1. コマンドラインにてgit clone(コピーしたURL)を行う
上記の手順で開発したファイルがそのまま自分のディレクトリにインストールされる.
### gemによるinstall
gemによるインストール方法は以下の通りである.
1. コマンドラインにてgem install editor_learnerと入力,実行
1. ファイルがホームディレクトの.rbenv/versions/2.4.0/lib/ruby/gems/2.4.0/gemsにeditor_learnerが収納される
これでeditor_learnerとコマンドラインで入力することで実行可能となる.
## uninstall
### githubからinstallした場合のuninstall方法
githubからinstallした場合のuninstall方法は以下の通りである.
1. ホームディレクトで
1. rm -rf editor_learnerを入力
1. ホームディレクトリからeditor_learnerが削除されていることを確認する.
以上がuninstall方法である.
### gemからinstallした場合のuninstall方法
gemからinstallした場合のuninstall方法は以下の通りである.
1. ターミナル上のコマンドラインで
1. gem uninstall editor_learnerを入力
1. ホームディレクトの.rbenv/versions/2.4.0/lib/ruby/gems/2.4.0/gemsにeditor_learnerが削除されていることを確認する.
以上がuninstall方法である.
## 動作環境
Rubyのversionが2.4.0以上でなければ動かない.理由としては,gemに格納されているパスを正しく受け渡しできないからである.2.4.0以下で動作させるためにはeditor_learnerの最新versionのみを入れることによって動作することが確認できている.
### error時の対処法
errorが出た場合は以下の方法を試してください
1. rm -rf editor_learnerをコマンドラインで入力
これによりファイル生成によるバグを解消できる.もう一つの方法は
1. gem uninstall editor_learnerをコマンドラインで入力
1. 全てのversionをuninstallする.
1. 再度gem install editor_learnerで最新versionのみをinstallする.
上記の手順によりRubyのversionによるバグが解消されることが確認できている.現在起こるであろうと予想されるバグの解消法は上記の2つである.Rubyのversionが2.4.0以上であればなんの不具合もなく動作することが確認できている.
## 初期設定
特別な初期設定はほとんどないが起動方法は以下の通りである,
1. コマンドライン上にてeditor_learnerを入力する.
2. editor_learnerを起動することでホームディレクトリにeditor_learner/workshopと呼ばれるファイルが作成される.workshopは作業場という意味である.
2. workshopの中にquestion.rbとanswer.rb,random_h.rbとruby_1~ruby_6が作成され,ruby_1~ruby_6の中に1.rb~3.rbが作成されていることを確認する.

1. 起動すると以下のようなサブコマンドの書かれた画面が表示されることを確認する.
```
Commands:
editor_lerner delete [number~number]
editor_learner help [COMMAND]
editor_learner random_check
editor_leraner sequential_check [lesson_number] [1~3numbers]
```
1. editor_learnerの後にサブコマンドと必要に応じた引数を入力すると動作する.それぞれのサブコマンドの更に詳しい説明は以下の通りである.
## delete
editor_learnerを起動することで初期設定で述べたようにホームディレクトリにeditor_learner/workshopが作成される.deleteはworkshopに作成されたruby_1~ruby_6を削除するために作成されたものである.sequential_checkで1度プログラムを作成してしまうと再度実行するとIt have been finished!と表示されてしまうので,削除するコマンドを作成しました.コマンド例は以下の通りである.
コマンド例
1. editor_learner delete 1 3
上記のように入力することで1〜3までのファイルが削除される.サブコマンドの後の引数は2つの数字(char型)であり,削除するファイルの範囲を入力する.
## random_h.rbとsequential_h.rb
random_h.rbとsequential_h.rbが初期設定で作成され,editor_learnerを起動することで自動的に作成され,random_checkとsequential_checkを行う際に最初に開くファイルとなる.random_check用とsequential_check用に二つのファイルがある.random_check用のファイルは以下の通りである.
random_h.rb

上から順に説明すると,
1. question.rbを開くためにc-x 2で画面を2分割にする.
1. c-x c-fでquestion.rbを探して開く.
1. 次にanswer.rbを開くために画面を3分割する
1. 同様にc-x c-fでanswer.rbを探して開く.
1. c-x oでanswer.rbを編集するためにポインタを移動させる.
1. question.rbに書かれているコードをanswer.rbに写す.
これらの手順がrandom_h.rbに記述されている.全ての手順を終えたターミナルの状態は以下の通り,

上記の画像では,右上に問題であるquestion.rbが表示され,それを左上にあるanswer.rbに写す形となる.
次にsequential_h.rb

書かれている内容自体はrandom_h.rbとほとんど差異がないが,開くファイルの名前が違うため別のファイルとして作成された.この手順に沿って作業することになる.下に書かれているのは主要キーバインドであり,必要に応じて見て,使用する形となっている.上記の手順を行なったターミナル画面の状態はrandom_h.rbの最終形態を同じである.
## random_checkの動作
random_checkの動作開始から終了は以下の通りである.
1. コマンドライン上にてeditor_learner random_checkを入力
1. 新しいターミナル(ホームディレクトリ/editor_learner/workshopから始まる)が開かれる.
1. random_h.rbを開いてrandom_h.rbに沿ってquestion.rbに書かれているコードをanswer.rbに写す.
1. 前のターミナルに戻り,コマンドラインに"check"と入力することで正誤判定を行ってくれる.
1. 間違っていればdiff-lcsにより間違った箇所が表示される.
1. 正しければ新しいターミナルが開かれてから終了までの時間とIt have been finished!が表示され終了となる.
更に次回random_check起動時には前に書いたコードがanswer.rbに格納されたままなので全て削除するのではなく,前のコードの必要な部分は残すことができる.
random_checkの大きな目的はtyping速度,正確性の向上,editor操作やRuby言語の習熟に重点を置いている.いかに早く終わらせるかのポイントがtyping速度,正確性とeditor操作である.
## sequential_checkの動作
sequential_checkの動作開始から終了は以下の通りである.
1. コマンドライン上でeditor_learner sequential_check(1~6の数字) (1~3の数字)を入力
1. 新しいターミナル(ホームディレクトリ/editor_learner/workshop/ruby_(1~6の数字))が開かれる.
1. sequential_h.rbを開いてsequential_h.rbに沿ってq.rbに書かれている内容を第2引数の数字.rbに写す.
1. 前のターミナルに戻り,コマンドラインに"check"と入力することで正誤判定を行う.
1. 間違っていれば間違った箇所が表示される.再度q.rbと第2引数の数字.rbを開いて間違った箇所を修正する.
1. 正しければruby_1/1.rb is done!のように表示される.
sequential_checkは1~3の順に1.rbがリファクタリングや追加され2.rbになり,完成形が3.rbになるといった形式である.連続的なプログラムの完成までを写経するのでsequential_checkと名付けられた.
sequential_checkの大きな目的はリファクタリングによるRuby言語の学習とCUI操作によるキーバインドの習熟,タイピング速度,正確性の向上に重点を置いている.コードがリファクタリングされる様を写経することで自分自身でRubyのコードを書くときに他の人が見やすくなるようなコードが書けるようになる.
# 実装コードの解説
本章では,今回作成したプログラムをライブラリ化し継続的な発展が可能なようにそれぞれの処理の解説を記述する.
## 起動時に毎回動作するプログラム
editor_learnerを起動したときに自動に動く部分である.コードは以下の通りである.
```
# Runs on every invocation: creates the workshop directory tree and copies
# the bundled helper files (random_h.rb / sequential_h.rb) into it,
# handling both gem-installed and github-cloned layouts.
def initialize(*args)
  super
  # Working directory where all practice files are generated.
  @prac_dir = "#{ENV['HOME']}/editor_learner/workshop"
  # Locate the installed gem so its bundled files can be copied from there.
  @lib_location = Open3.capture3("gem environment gemdir")
  @versions = Open3.capture3("gem list editor_learner")
  # "editor_learner (1.1.2)" -> "editor_learner-1.1.2" (the gem dir name).
  p @latest_version = @versions[0].chomp.gsub(' (', '-').gsub(')','')
  @inject = File.join(@lib_location[0].chomp, "/gems/#{@latest_version}/lib")
  # First run: create the files used by random_check.
  unless File.exist?(@prac_dir)
    FileUtils.mkdir_p(@prac_dir)
    FileUtils.touch("#{@prac_dir}/question.rb")
    FileUtils.touch("#{@prac_dir}/answer.rb")
    FileUtils.touch("#{@prac_dir}/random_h.rb")
    if File.exist?("#{@inject}/random_h.rb")
      # Installed as a gem: copy the helper bundled with the gem.
      FileUtils.cp("#{@inject}/random_h.rb", "#{@prac_dir}/random_h.rb")
    else
      # BUGFIX: was a bare `elsif` with no condition, which made the cp
      # call below execute only as the (side-effecting) condition of an
      # empty branch. `else` matches the sequential_h.rb branch below.
      FileUtils.cp("#{ENV['HOME']}/editor_learner/lib/random_h.rb", "#{@prac_dir}/random_h.rb")
    end
  end
  # Create the per-lesson directories used by sequential_check.
  range = 1..6
  range_ruby = 1..3
  range.each do |num|
    next if File.exist?("#{@prac_dir}/ruby_#{num}")
    FileUtils.mkdir("#{@prac_dir}/ruby_#{num}")
    FileUtils.touch("#{@prac_dir}/ruby_#{num}/q.rb")
    FileUtils.touch("#{@prac_dir}/ruby_#{num}/sequential_h.rb")
    if File.exist?("#{@inject}/sequential_h.rb")
      FileUtils.cp("#{@inject}/sequential_h.rb", "#{@prac_dir}/ruby_#{num}/sequential_h.rb")
    else
      FileUtils.cp("#{ENV['HOME']}/editor_learner/lib/sequential_h.rb", "#{@prac_dir}/ruby_#{num}/sequential_h.rb")
    end
    range_ruby.each do |n|
      FileUtils.touch("#{@prac_dir}/ruby_#{num}/#{n}.rb")
    end
  end
end
```
この部分は基本的にディレクトリやファイルの作成が主である.上から順に説明すると,@prac_dirはホームディレクトリ/editor_learner/workshopを指しており,ファイルを作る際のパスとして作成されたインスタンス定数である.その後の3つのインスタンス定数(@lib_location,@versions,@latest_version)はgemでinstallされた場合ファイルの場所がホームディレクトリ/.rbenv/versions/2.4.0/lib/ruby/gems/2.4.0/gemsのeditor_learnerに格納されているためgemでinstallした人とgithubでinstallした人とではパスが変わってしまうためこれらの3つのインスタンス定数を用意した.実際の振る舞いとしては,File.existによりprac_dirがなければディレクトリを作成しさらにその中にquestion.rbとanswer.rbを作成する.gemにリリースしていることからgemでinstallした人とgithubでinstallした人のパスの違いをif文で条件分岐させている.これによりrandom_h.rbを正常にコピーすることができた.
### プログラム内のインスタンス変数の概要
インスタンス変数は,'@'で始まる変数はインスタンス変数であり,特定のオブジェクトに所属しています.インスタンス変数はそのクラスまたはサブクラスのメソッドから参照できます.初期化されないインスタンス変数を参照した時の値はnilです.
このメソッドで使用されているインスタンス変数は5つである.prac_dirはホームディレクトリ/editor_learner/workshopを指しており,必要なファイルをここに作るのでパスとして受け渡すインスタンス変数となっている.その後の4つのインスタンス変数はgemからinstallした場合における,editor_learnerが格納されているパスを受け渡すためのインスタンス変数である.一つずつの説明は以下の通り,
* lib_locationはターミナル上で"gem environment gemdir"を入力した場合に出力されるパスを格納している.(自分のターミナル場で実行すると/Users/souki/.rbenv/versions/2.4.0/lib/ruby/gems/2.4.0)
* versionsはgemでinstallされたeditor_learnerのversionを受け取るためのパスを格納したインスタンス変数である.
* latest_versionははversionsで受け取ったeditor_learnerのversionの最新部分のパスを格納したインスタンス変数である.
* injectは実際にこれらのパスをつなぎ合わせてできるgemでinstallされたeditor_learnerが格納されているパスが格納されているインスタン変数である.(自分の場合は/Users/souki/.rbenv/versions/2.4.0/lib/ruby/gems/2.4.0/gems/editor_learner-1.1.2となる)
### Fileの作成
全てのパスの準備が整ったら実際に作業する場所に必要なファイル(question.rbやanswer.rb)などの作成が行われる.本研究のコードではeditor_learner/workshopがホームディレクトリになければ作成する.さらに,その中にrandom_checkに必要なファイル(question.rb,answer.rb,random_h.rb)が作成される.random_h.rbはgemでinstallした場合はeditor_learnerの格納されている部分からコピーを行なっている.
次に,sequential_checkに必要なファイルを作成する.editor_learner/workshopにruby_1~ruby6がなければ作成し,その中に1.rb~3.rbとq.rb(問題をコピーするためのファイル)とsequential_h.rbが作成される.sequential_h.rbはrandom_h.rbと同じでgemからinstallした場合はeditor_learnerの格納されている部分からコピーを行なっている.このメソッドの大きな役割はファイル作成である.
## ファイル削除処理delete
sequential_checkで終了したchapterをもう一度したい場合に一度ファイルを削除しなければいけないので,deleteメソッドの大きな役割はsequential_checkで終了したファイルの削除である.
```
# Remove workshop lesson directories ruby_n .. ruby_m so a finished
# sequential_check chapter can be replayed.
desc 'delete [number~number]', 'delete the ruby_file choose number to delete file'
def delete(n, m)
  # Thor hands CLI arguments over as Strings. BUGFIX: the original built a
  # String range (n..m); "9".."12" is empty, so ranges crossing a
  # digit-count boundary silently deleted nothing. Convert to Integers.
  range = n.to_i..m.to_i
  range.each do |num|
    # rm_rf is a no-op on a missing path, so no File.exist? guard is
    # needed, and it avoids shelling out with `system "rm -rf ..."`.
    FileUtils.rm_rf("#{@prac_dir}/ruby_#{num}")
  end
end
```
コード自体はいたってシンプルで引数を2つ受け取ることでその間の範囲のFileを削除するようなコードとなっている.systemの"rm -rf ファイル名"がファイルを削除するコマンドなのでそこで受け取った引数の範囲でファイルの削除を行っている.
## random_check
random_checkのコードは以下の通り,
```
# Random typing drill: copy one of the 15 bundled Ruby snippets into
# question.rb, let the learner retype it into answer.rb in a freshly
# opened terminal, then loop showing diffs until the copy is exact and
# report the elapsed time.
desc 'random_check', 'random check your typing and edit skill.'
def random_check(*argv)
  # Pick a question file 1.rb .. 15.rb at random and announce which one.
  random = rand(1..15)
  p random
  s = "#{random}.rb"
  puts "check starting ..."
  puts "type following commands on the terminal"
  puts "> emacs question.rb answer.rb"
  # Repository root when running from a github clone.
  src_dir = File.expand_path('../..', __FILE__) # "Users/souki/editor_learner"
  if File.exist?("#{@inject}/random_check_question/#{s}")
    # gem install: question files ship inside the installed gem.
    FileUtils.cp("#{@inject}/random_check_question/#{s}", "#{@prac_dir}/question.rb")
  else
    # BUGFIX: was a bare `elsif` with no condition; the cp below executed
    # only because it was parsed as the condition of an empty branch.
    FileUtils.cp(File.join(src_dir, "lib/random_check_question/#{s}"), "#{@prac_dir}/question.rb")
  end
  open_terminal
  start_time = Time.now
  loop do
    # Block until the learner types "check" on the original terminal.
    a = STDIN.gets.chomp
    if a == "check" && FileUtils.compare_file("#{@prac_dir}/question.rb", "#{@prac_dir}/answer.rb")
      puts "It have been finished!"
      break
    elsif !FileUtils.compare_file("#{@prac_dir}/question.rb", "#{@prac_dir}/answer.rb")
      # Show only the differing lines between answer and question.
      # (File.readlines instead of File.open(...).readlines so no file
      # handles are leaked on each loop iteration.)
      @inputdata = File.readlines("#{@prac_dir}/answer.rb")
      @checkdata = File.readlines("#{@prac_dir}/question.rb")
      diffs = Diff::LCS.diff("#{@inputdata}", "#{@checkdata}")
      diffs.each do |diff|
        p diff
      end
    end
  end
  end_time = Time.now
  # -1 roughly compensates for the time spent opening the new terminal.
  time = end_time - start_time - 1
  puts "#{time} sec"
end
```
random_checkの概要を簡単に説明すると15個あるRubyのコードから1~15の乱数を取得し,選ばれた数字のファイルが問題としてコピーされて,それをanswer.rbに入力することで正解していたら新しいターミナルが開かれてから終了までの時間を評価する仕組みとなっている.
上から解説を行うと,1から15のrandomな乱数を取得,起動と同時にどのファイルがコピーされたか表示される.そして,src_dirでホームディレクトリ/editor_learnerのパスが代入される.そして,gemでinstallした人とgithubからcloneした場合によるファイルコピーのパスの違いをifで条件分岐.そして,1から15の乱数のファイルがquestion.rbにコピーされる.コピーされた後に新しいターミナルが開かれ,時間計測が開始される.そして,checkを前の画面に入力できるようにgetsを使った.初めにgetsだけを使用した時改行が入ってしまいうまく入力できなかった.しかし,chompを入れることで改行をなくすことに成功.しかし,argvとgetsを両方入れることが不可能なことが判明した.そこでgetsの前にSTDINを入れることでargvとの併用が可能なことがわかり,STDIN.gets.chompと入力することでキーボードからの入力を受け取ることができた.そして,checkが入力されてかつFileUtils.compareでファイルの比較で正しければ時間計測を終了し,表示する.間違っていた場合はインスタンス定数であるinputとoutputにquestion.rbとanswer.rbの中身が格納されてDiff::LCSのdiffによって間違っている箇所だけを表示する.一連のコード解説は以上である.
## sequential_check
sequential_checkの場合はリファクタリングにあたりたくさんのインスタンス定数を作った.コードは以下の通り,
```
# Staged typing drill for lesson n (ruby_1..ruby_6), stage m (1..3):
# copies the stage's question into q.rb, optionally seeds the answer file
# with the previous stage's answer, then loops showing diffs until the
# learner's copy matches exactly.
desc 'sequential_check [lesson_number] [1~3number] ','sequential check your typing skill and edit skill choose number'
def sequential_check(*argv, n, m)
  # Previous stage number; 0 when m == 1 (no previous stage exists).
  l = m.to_i - 1
  # Paths into the github-clone question tree (@seq*) and the learner's
  # workshop (@prac*): nm = this stage, nq = question file, nl = previous stage.
  @seq_dir = "lib/sequential_check_question"
  q_rb = "ruby_#{n}/#{m}.rb"
  @seqnm_dir = File.join(@seq_dir,q_rb)
  @pracnm_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/#{m}.rb"
  @seqnq_dir = "lib/sequential_check_question/ruby_#{n}/q.rb"
  @pracnq_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/q.rb"
  @seqnl_dir = "lib/sequential_check_question/ruby_#{n}/#{l}.rb"
  @pracnl_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/#{l}.rb"
  puts "check starting ..."
  puts "type following commands on the terminal"
  # Repository root when running from a github clone.
  src_dir = File.expand_path('../..', __FILE__)
  # Copy this stage's question into q.rb (gem install vs github clone).
  if File.exist?("#{@inject}/sequential_check_question/ruby_#{n}/#{m}.rb") == true then
    FileUtils.cp("#{@inject}/sequential_check_question/ruby_#{n}/#{m}.rb", "#{@pracnq_dir}")
  elsif
    # NOTE(review): this `elsif` has no condition, so the cp call below is
    # parsed as the condition of an empty branch — it still runs, but
    # `else` was almost certainly intended (compare random_check).
    FileUtils.cp(File.join(src_dir, "#{@seqnm_dir}"), "#{@pracnq_dir}")
  end
  # If a previous stage exists and this stage isn't already solved, seed
  # the answer file with the previous stage's answer.
  if l != 0 && FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") != true
    # NOTE(review): the result of this comparison is discarded — it looks
    # like it was meant to guard the cp below; confirm the intent.
    FileUtils.compare_file("#{@pracnl_dir}", (File.join(src_dir, "#{@seqnl_dir}"))) == true
    FileUtils.cp("#{@pracnl_dir}", "#{@pracnm_dir}")
  end
  if FileUtils.compare_file(@pracnm_dir, @pracnq_dir) != true then
    # Open a new Terminal window in the lesson directory (macOS-only AppleScript).
    system "osascript -e 'tell application \"Terminal\" to do script \"cd #{@prac_dir}/ruby_#{n} \" '"
    loop do
      # Block until the learner types "check" on the original terminal.
      a = STDIN.gets.chomp
      if a == "check" && FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") == true then
        puts "ruby_#{n}/#{m}.rb is done!"
        break
      elsif FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") != true then
        # Show only the lines that differ between answer and question.
        @inputdata = File.open("#{@pracnm_dir}").readlines
        @checkdata = File.open("#{@pracnq_dir}").readlines
        diffs = Diff::LCS.diff("#{@inputdata}", "#{@checkdata}")
        diffs.each do |diff|
          p diff
        end
      end
    end
  else
    # Stage already matches the question: nothing to do.
    p "ruby_#{n}/#{m}.rb is finished!"
  end
end
```
### インスタンス定数に格納されたパス
インスタンス定数に格納されているパスについての説明は上から順に以下の通り,
1. seq_dirはgithubでcloneした人が問題をコピーするときに使うパスである.
1. seqnm_dirはその名の通りseq_dirに引数であるnとmを代入したパスである.例として引数に1と1が代入された時は以下の通り,
1. editor_learner/sequential_check_question/ruby_1/1.rbとなる.
1. pracnm_dirはprac_dirに二つの引数nとmを代入したものである.実際に作業するところのパスとして使用する.例として引数として1と1が代入された時は以下の通り,
1. ホームディレクトリ/editor_learner/workshop/ruby_1/1.rbが格納される.
1. 同様にseqとpracの後についている文字はその後のruby_(数字)/(数字).rbの数字に入る文字を後につけている.
### 動作部分
まずgemでinstallした場合とgithubでinstallした場合による違いを条件分岐によりパスを変えている.さらに1.rbが終了していた場合2.rbに1.rbをコピーした状態から始まるように処理が行われている.その後は"check"が入力された時かつFileUtils.compareで正解していれば終了.間違っていればDiff::LCSで間違っている箇所を表示.もう一度修正し,"check"を入力,正解していれば終了.以上が一連のコードの解説である.
## 新しいターミナルを開くopen_terminal
新しいターミナルを開くメソッドである.コードは以下の通りである.
```
# Open a new macOS Terminal window in the practice directory
# (editor_learner/workshop) via AppleScript.
def open_terminal
  # Removed the unused local `pwd = Dir.pwd`; nothing read it.
  system "osascript -e 'tell application \"Terminal\" to do script \"cd #{@prac_dir} \" '"
end
```
新しく開かれたターミナルはprac_dir(editor_learner/workshop)のディレクトリからスタートするように設定されている.random_checkではeditor_learner/workshopでターミナルが開かれ,sequential_checkではeditor_learner/workshop/第1引数で入力されたファイルの場所が開かれるようになっている.
# 他のソフトとの比較
他のタイピングソフトとの比較を行った表が以下の通りである.

上記のタイピングソフトは自分もよく使っていたタイピングソフトであり,評価も高いソフトである.それぞれの特徴は以下の通り,
## PTYPING
PTYPINGは豊富なプログラム言語が入力可能である.しかし,コードを打つのではなく,コードに使われるintなどよく使われる単語が60秒の間にどれだけ打てるかというソフトです.
## e-typing
e-typingはインターネットで無料提供されているソフトである.ローマ字入力を基本として,単語,短文,長文の3部構成となっておりタイピングの資格取得の練習もできる.
## 寿司打
自分が一番利用したサイト,GUIベースによりローマ字入力を基本とし,打てば打つほど秒数が伸びていきどれだけ入力できるかをランキング形式で表示される.
## 考察
これら全てのソフトを利用した結果,editor_learnerはローマ字入力ができない点では他のソフトに遅れをとるが,実際にプログラムを書くようになってからコードを写経することで{}や()などといったローマ字入力ではあまり入力しないような記号の入力が非常に早くなった.さらに,editor_learnerは現段階ではRubyの学習のみだが,引数を変えて元となるプログラムを作成することで全てのプログラム言語を学ぶことができる.さらに,実際にコードを入力することができるソフトはたくさんあるが,実行可能なものは少ない(Webで行うものが大半を占めているから.)実際に西谷研究室でeditor_learnerで学習を行っていない学生と行った自分のrandom_check平均秒数は前者は200秒程なのに対して,自分は60秒程である.これらの結果からeditor_learnerによる学習により,Ruby言語の学習にもなり,タイピング速度,正確性の向上,CUI操作の適応による差が出たと考えた.
# 総括
実際に今までたくさんのタイピングソフトやプログラムコードの打てるタイピングソフトを数多く利用してきたが,editor操作の習熟が可能なソフトは見たことも聞いたこともなかった.実際にタイピングだけが早い学生はたくさんいるがeditor操作やキーバインドも使いこなせる学生は少なかった.本研究で開発したeditor_learnerによりそれらの技術も上達し,作業効率などの向上が見込める結果となった.
# 謝辞
本研究を行うにあたり,終始多大なるご指導,御鞭撻をいただいた西谷滋人教授に対し,深く御礼申し上げます.また,本研究の進行に伴い,様々な助力,知識の供給をいただきました西谷研究室の同輩,先輩方に心から感謝の意を示します.本当にありがとうございました.
# 付録
```
require 'fileutils'
require 'colorize'
require 'thor'
require "editor_learner/version"
require 'diff-lcs'
require "open3"
module EditorLearner
class CLI < Thor
# Thor CLI entry point; sets up the practice workspace on first run:
# creates ~/editor_learner/workshop with question/answer stubs and the
# six lesson directories ruby_1 .. ruby_6.
def initialize(*args)
  super
  @prac_dir = "#{ENV['HOME']}/editor_learner/workshop"
  # Locate the installed gem so question files can be copied from it.
  @lib_location = Open3.capture3("gem environment gemdir")
  @versions = Open3.capture3("gem list editor_learner")
  p @latest_version = @versions[0].chomp.gsub(' (', '-').gsub(')', '')
  @inject = File.join(@lib_location[0].chomp, "/gems/#{@latest_version}/lib")
  if File.exist?(@prac_dir) != true then
    FileUtils.mkdir_p(@prac_dir)
    FileUtils.touch("#{@prac_dir}/question.rb")
    FileUtils.touch("#{@prac_dir}/answer.rb")
    FileUtils.touch("#{@prac_dir}/random_h.rb")
    # Prefer the gem-installed helper; fall back to the cloned repo.
    # (Was a bare `elsif`, which evaluated the fallback copy as the
    # condition expression; `else` is what was intended.)
    if File.exist?("#{@inject}/random_h.rb") == true then
      FileUtils.cp("#{@inject}/random_h.rb", "#{@prac_dir}/random_h.rb")
    else
      FileUtils.cp("#{ENV['HOME']}/editor_learner/lib/random_h.rb", "#{@prac_dir}/random_h.rb")
    end
  end
  range = 1..6       # lesson directories ruby_1 .. ruby_6
  range_ruby = 1..3  # exercises 1.rb .. 3.rb per lesson
  range.each do |num|
    if File.exist?("#{@prac_dir}/ruby_#{num}") != true then
      FileUtils.mkdir("#{@prac_dir}/ruby_#{num}")
      FileUtils.touch("#{@prac_dir}/ruby_#{num}/q.rb")
      FileUtils.touch("#{@prac_dir}/ruby_#{num}/sequential_h.rb")
      if File.exist?("#{@inject}/sequential_h.rb") == true then
        FileUtils.cp("#{@inject}/sequential_h.rb", "#{@prac_dir}/ruby_#{num}/sequential_h.rb")
      else
        FileUtils.cp("#{ENV['HOME']}/editor_learner/lib/sequential_h.rb", "#{@prac_dir}/ruby_#{num}/sequential_h.rb")
      end
      range_ruby.each do |n|
        FileUtils.touch("#{@prac_dir}/ruby_#{num}/#{n}.rb")
      end
    end
  end
end
desc 'delete [number~number]', 'delete the ruby_file choose number to delete file'
# Remove lesson directories ruby_n .. ruby_m from the workspace.
def delete(n, m)
  # FileUtils.rm_rf instead of `system "rm -rf ..."`: no shell is involved,
  # so the path cannot be re-interpreted, and it is portable and consistent
  # with the FileUtils usage elsewhere in this class.
  (n..m).each do |num|
    dir = "#{@prac_dir}/ruby_#{num}"
    FileUtils.rm_rf(dir) if File.exist?(dir)
  end
end
desc 'sequential_check [lesson_number] [1~3number] ','sequential check your typing skill and edit skill choose number'
# Sequentially checks typing/editing skill on lesson n, exercise m.
# Copies the reference solution into q.rb, then loops until the user's
# file matches it, printing a diff of the mismatch on each failed "check".
def sequential_check(*argv, n, m)
  l = m.to_i - 1 # previous exercise number (0 when m == 1)
  @seq_dir = "lib/sequential_check_question"
  q_rb = "ruby_#{n}/#{m}.rb"
  @seqnm_dir = File.join(@seq_dir, q_rb)
  @pracnm_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/#{m}.rb"
  @seqnq_dir = "lib/sequential_check_question/ruby_#{n}/q.rb"
  @pracnq_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/q.rb"
  @seqnl_dir = "lib/sequential_check_question/ruby_#{n}/#{l}.rb"
  @pracnl_dir = "#{ENV['HOME']}/editor_learner/workshop/ruby_#{n}/#{l}.rb"
  puts "check starting ..."
  puts "type following commands on the terminal"
  src_dir = File.expand_path('../..', __FILE__)
  # Prefer the gem-installed question file; fall back to the cloned repo.
  # (Was a bare `elsif`, which evaluated the fallback copy as the condition
  # expression; `else` is what was intended.)
  if File.exist?("#{@inject}/sequential_check_question/ruby_#{n}/#{m}.rb") == true then
    FileUtils.cp("#{@inject}/sequential_check_question/ruby_#{n}/#{m}.rb", "#{@pracnq_dir}")
  else
    FileUtils.cp(File.join(src_dir, "#{@seqnm_dir}"), "#{@pracnq_dir}")
  end
  if l != 0 && FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") != true
    # NOTE(review): the result of this comparison is discarded; it looks
    # like it was meant to guard the copy below -- confirm intent.
    FileUtils.compare_file("#{@pracnl_dir}", (File.join(src_dir, "#{@seqnl_dir}"))) == true
    # Seed the current exercise with the previous (completed) one.
    FileUtils.cp("#{@pracnl_dir}", "#{@pracnm_dir}")
  end
  if FileUtils.compare_file(@pracnm_dir, @pracnq_dir) != true then
    # Open a new Terminal window in the lesson directory (macOS only).
    system "osascript -e 'tell application \"Terminal\" to do script \"cd #{@prac_dir}/ruby_#{n} \" '"
    loop do
      a = STDIN.gets.chomp # STDIN.gets so Thor's ARGV handling is not disturbed
      if a == "check" && FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") == true then
        puts "ruby_#{n}/#{m}.rb is done!"
        break
      elsif FileUtils.compare_file("#{@pracnm_dir}", "#{@pracnq_dir}") != true then
        @inputdata = File.open("#{@pracnm_dir}").readlines
        @checkdata = File.open("#{@pracnq_dir}").readlines
        # NOTE(review): interpolating the arrays diffs their string
        # representations character by character; passing the arrays
        # directly would diff line by line -- confirm desired output.
        diffs = Diff::LCS.diff("#{@inputdata}", "#{@checkdata}")
        diffs.each do |diff|
          p diff
        end
      end
    end
  else
    p "ruby_#{n}/#{m}.rb is finished!"
  end
end
desc 'random_check', 'random check your typing and edit skill.'
# Pick one of the 15 question files at random, open a terminal, and time
# the user until answer.rb matches question.rb.
# (Fixed "ramdom" -> "random" in the help text.)
def random_check(*argv)
  random = rand(1..15)
  p random
  s = "#{random}.rb"
  puts "check starting ..."
  puts "type following commands on the terminal"
  puts "> emacs question.rb answer.rb"
  src_dir = File.expand_path('../..', __FILE__) # e.g. ~/editor_learner
  # Prefer the gem-installed question file; fall back to the cloned repo.
  if File.exist?("#{@inject}/random_check_question/#{s}") == true then
    FileUtils.cp("#{@inject}/random_check_question/#{s}", "#{@prac_dir}/question.rb")
  else
    FileUtils.cp(File.join(src_dir, "lib/random_check_question/#{s}"), "#{@prac_dir}/question.rb")
  end
  open_terminal
  start_time = Time.now
  loop do
    a = STDIN.gets.chomp # STDIN.gets so Thor's ARGV handling is not disturbed
    if a == "check" && FileUtils.compare_file("#{@prac_dir}/question.rb", "#{@prac_dir}/answer.rb") == true then
      puts "It have been finished!"
      break
    elsif FileUtils.compare_file("#{@prac_dir}/question.rb", "#{@prac_dir}/answer.rb") != true then
      @inputdata = File.open("#{@prac_dir}/answer.rb").readlines
      @checkdata = File.open("#{@prac_dir}/question.rb").readlines
      # NOTE(review): interpolating the arrays diffs their string
      # representations character by character; passing the arrays
      # directly would diff line by line -- confirm desired output.
      diffs = Diff::LCS.diff("#{@inputdata}", "#{@checkdata}")
      diffs.each do |diff|
        p diff
      end
    end
  end
  end_time = Time.now
  # NOTE(review): one second is subtracted from the elapsed time,
  # presumably to offset input latency -- confirm this is intended.
  time = end_time - start_time - 1
  puts "#{time} sec"
end
no_commands do
# Open a new macOS Terminal window in the practice directory
# (editor_learner/workshop) via AppleScript.
def open_terminal
  # Removed the unused local `pwd = Dir.pwd`; nothing read it.
  system "osascript -e 'tell application \"Terminal\" to do script \"cd #{@prac_dir} \" '"
end
end
end
end
```
# 参考文献
Andrew Hunt著,「達人プログラマー」 (オーム社出版,2016).
| github_jupyter |
# Named Entity Recognition in Mandarin on a Weibo Social Media Dataset
---
[Github](https://github.com/eugenesiow/practical-ml/blob/master/notebooks/Named_Entity_Recognition_Mandarin_Weibo.ipynb) | More Notebooks @ [eugenesiow/practical-ml](https://github.com/eugenesiow/practical-ml)
---
Notebook to train a [flair](https://github.com/flairNLP/flair) model in mandarin using stacked embeddings (with word and BERT embeddings) to perform named entity recognition (NER).
The [dataset](https://github.com/hltcoe/golden-horse) used contains 1,890 Sina Weibo messages annotated with four entity types (person, organization, location and geo-political entity), including named and nominal mentions from the paper [Peng et al. (2015)](https://www.aclweb.org/anthology/D15-1064/) and with revised annotated data from [He et al. (2016)](https://arxiv.org/abs/1611.04234).
The current state-of-the-art model on this dataset is from [Peng et al. (2016)](https://www.aclweb.org/anthology/P16-2025/) with an average F1-score of **47.0%** (Table 1) and from [Peng et al. (2015)](https://www.aclweb.org/anthology/D15-1064.pdf) with an F1-score of **44.1%** (Table 2). The authors say that the poor results on the test set show the "difficulty of this task" - which is true in a sense because the dataset is really quite small for the NER task with 4 classes (x2 as they differentiate nominal and named entities) with a test set of only 270 sentences.
Our flair model is able to improve the state-of-the-art with an F1-score of **67.5%**, which is a cool 20+ absolute percentage points better than the current state-of-the-art performance.
The notebook is structured as follows:
* Setting up the GPU Environment
* Getting Data
* Training and Testing the Model
* Using the Model (Running Inference)
## Task Description
> Named entity recognition (NER) is the task of tagging entities in text with their corresponding type. Approaches typically use BIO notation, which differentiates the beginning (B) and the inside (I) of entities. O is used for non-entity tokens.
# Setting up the GPU Environment
#### Ensure we have a GPU runtime
If you're running this notebook in Google Colab, select `Runtime` > `Change Runtime Type` from the menubar. Ensure that `GPU` is selected as the `Hardware accelerator`. This will allow us to use the GPU to train the model subsequently.
#### Install Dependencies
```
pip install -q flair
```
# Getting Data
The dataset, including the train, test and dev sets, has just been included in the `0.7 release` of flair, hence, we just use the `flair.datasets` loader to load the `WEIBO_NER` dataset into the flair `Corpus`. The [raw datasets](https://github.com/87302380/WEIBO_NER) are also available on Github.
```
import flair.datasets
from flair.data import Corpus

# Load the Weibo NER dataset (1,890 annotated Sina Weibo messages) via
# flair's built-in loader; downloads on first use, then reads the cache.
corpus = flair.datasets.WEIBO_NER()
print(corpus)
```
We can see that the total 1,890 sentences have already been split into train (1,350), dev (270) and test (270) sets in a 5:1:1 ratio.
# Training and Testing the Model
#### Train the Model
To train the flair `SequenceTagger`, we use the `ModelTrainer` object with the corpus and the tagger to be trained. We use flair's sensible default options in the `.train()` method, while specifying the output folder for the `SequenceTagger` model to be `/content/model/`. We also set the `embeddings_storage_mode` to be `gpu` to utilise the GPU to store the embeddings for more speed. Note that if you run this with a larger dataset you might run out of GPU memory, so be sure to set this option to `cpu` - it will still use the GPU to train but the embeddings will be stored in CPU memory and there will be a transfer to the GPU each epoch.
Be prepared to allow the training to run for about 0.5 to 1 hour. We set the `max_epochs` to 50 so that the training will complete faster; for a higher F1-score you can increase this number to 100 or 150.
```
import flair
from typing import List
from flair.trainers import ModelTrainer
from flair.models import SequenceTagger
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, BertEmbeddings, BytePairEmbeddings

# Build the NER tag dictionary from the corpus loaded in the previous cell.
tag_type = 'ner'
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)

# For an even faster training time, you can comment out the BytePairEmbeddings
# Note: there will be a small drop in performance if you do so.
# Stacked embeddings: Chinese word vectors + subword BPE + Chinese BERT.
embedding_types: List[TokenEmbeddings] = [
    WordEmbeddings('zh-crawl'),
    BytePairEmbeddings('zh'),
    BertEmbeddings('bert-base-chinese'),
]
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)

# BiLSTM-CRF sequence tagger over the stacked embeddings.
tagger: SequenceTagger = SequenceTagger(hidden_size=256,
                                        embeddings=embeddings,
                                        tag_dictionary=tag_dictionary,
                                        tag_type=tag_type,
                                        use_crf=True)
trainer: ModelTrainer = ModelTrainer(tagger, corpus)

# embeddings_storage_mode='gpu' keeps embeddings in GPU memory for speed;
# switch to 'cpu' for larger corpora if GPU memory runs out.
trainer.train('/content/model/',
              learning_rate=0.1,
              mini_batch_size=32,
              max_epochs=50,
              embeddings_storage_mode='gpu')
```
We see that the output accuracy (F1-score) for our new model is **67.5%** (F1-score (micro) 0.6748). We use micro F1-score (rather than macro F1-score) as there are multiple entity classes in this setup with [class imbalance](https://datascience.stackexchange.com/questions/15989/micro-average-vs-macro-average-performance-in-a-multiclass-classification-settin).
> We have a new SOTA NER model in mandarin, over 20 percentage points (absolute) better than the previous SOTA for this Weibo dataset!
## Using the Model (Running Inference)
Running the model to do some predictions/inference is as simple as calling `tagger.predict(sentence)`. Do note that for mandarin each character needs to be split with spaces between each character (e.g. `一 节 课 的 时 间`) so that the tokenizer will work properly to split them to tokens (if you're processing them for input into the model when building an app). For more information on this, check out the [flair tutorial on tokenization](https://github.com/flairNLP/flair/blob/master/resources/docs/TUTORIAL_1_BASICS.md#tokenization).
```
# `flair.datasets` is used below but was not imported in this cell; the
# original relied on an earlier cell's import. Import it here so the cell
# is self-contained.
import flair.datasets
from flair.data import Sentence
from flair.models import SequenceTagger
from flair.data import Corpus

# Load the model that we trained, you can comment this out if you already have
# the model loaded (e.g. if you just ran the training)
tagger: SequenceTagger = SequenceTagger.load("/content/model/final-model.pt")

# Load the WEIBO corpus and tag the first 5 sentences of the test set.
corpus = flair.datasets.WEIBO_NER()
for idx in range(0, 5):
    sentence = corpus.test[idx]
    tagger.predict(sentence)
    print(sentence.to_tagged_string())
```
We can connect to Google Drive with the following code to save any files you want to persist. You can also click the `Files` icon on the left panel and click `Mount Drive` to mount your Google Drive.
The root of your Google Drive will be mounted to `/content/drive/My Drive/`. If you have problems mounting the drive, you can check out this [tutorial](https://towardsdatascience.com/downloading-datasets-into-google-drive-via-google-colab-bcb1b30b0166).
```
# Mount Google Drive at /content/drive so trained models can be persisted.
from google.colab import drive
drive.mount('/content/drive/')
```
You can move the model files from our local directory to your Google Drive.
```
import shutil
# Move the trained model directory into Google Drive for persistence.
shutil.move('/content/model/', "/content/drive/My Drive/model/")
```
More Notebooks @ [eugenesiow/practical-ml](https://github.com/eugenesiow/practical-ml) and do drop us some feedback on how to improve the notebooks on the [Github repo](https://github.com/eugenesiow/practical-ml/).
| github_jupyter |
```
%matplotlib inline
```
`파이토치(PyTorch) 기본 익히기 <intro.html>`_ ||
`빠른 시작 <quickstart_tutorial.html>`_ ||
`텐서(Tensor) <tensorqs_tutorial.html>`_ ||
`Dataset과 Dataloader <data_tutorial.html>`_ ||
`변형(Transform) <transforms_tutorial.html>`_ ||
`신경망 모델 구성하기 <buildmodel_tutorial.html>`_ ||
`Autograd <autogradqs_tutorial.html>`_ ||
**최적화(Optimization)** ||
`모델 저장하고 불러오기 <saveloadrun_tutorial.html>`_
모델 매개변수 최적화하기
==========================================================================
이제 모델과 데이터가 준비되었으니, 데이터에 매개변수를 최적화하여 모델을 학습하고, 검증하고, 테스트할 차례입니다.
모델을 학습하는 과정은 반복적인 과정을 거칩니다; (*에폭(epoch)*\ 이라고 부르는) 각 반복 단계에서 모델은 출력을 추측하고,
추측과 정답 사이의 오류(\ *손실(loss)*\ )를 계산하고, (`이전 장 <autograd_tutorial.html>`_\ 에서 본 것처럼)
매개변수에 대한 오류의 도함수(derivative)를 수집한 뒤, 경사하강법을 사용하여 이 파라매터들을 **최적화(optimize)**\ 합니다.
이 과정에 대한 자세한 설명은 `3Blue1Brown의 역전파 <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__ 영상을 참고하세요.
기본(Pre-requisite) 코드
------------------------------------------------------------------------------------------
이전 장인 `Dataset과 DataLoader <data_tutorial.html>`_\ 와 `신경망 모델 구성하기 <buildmodel_tutorial.html>`_\ 에서
코드를 가져왔습니다.
```
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda

# FashionMNIST training split; downloaded on first run, images -> tensors.
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)

# Held-out test split used for evaluation after each epoch.
test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)

# Mini-batch loaders (batch size 64, no shuffling in this tutorial).
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class NeuralNetwork(nn.Module):
    """Simple MLP classifier for 28x28 FashionMNIST images (10 classes)."""

    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        # The final layer outputs raw, unnormalized logits. The trailing
        # nn.ReLU() after the last Linear was removed: nn.CrossEntropyLoss
        # expects unnormalized logits, and clamping them to >= 0 discards
        # the negative half of the score range.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        # x: (N, 1, 28, 28) image batch -> (N, 784) -> (N, 10) logits.
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork()
```
하이퍼파라매터(Hyperparameter)
------------------------------------------------------------------------------------------
하이퍼파라매터(Hyperparameter)는 모델 최적화 과정을 제어할 수 있는 조절 가능한 매개변수입니다.
서로 다른 하이퍼파라매터 값은 모델 학습과 수렴율(convergence rate)에 영향을 미칠 수 있습니다.
(하이퍼파라매터 튜닝(tuning)에 대해 `더 알아보기 <https://tutorials.pytorch.kr/beginner/hyperparameter_tuning_tutorial.html>`__)
학습 시에는 다음과 같은 하이퍼파라매터를 정의합니다:
- **에폭(epoch) 수** - 데이터셋을 반복하는 횟수
- **배치 크기(batch size)** - 매개변수가 갱신되기 전 신경망을 통해 전파된 데이터 샘플의 수
- **학습률(learning rate)** - 각 배치/에폭에서 모델의 매개변수를 조절하는 비율. 값이 작을수록 학습 속도가 느려지고, 값이 크면 학습 중 예측할 수 없는 동작이 발생할 수 있습니다.
```
learning_rate = 1e-3  # step size used by the optimizer
batch_size = 64       # samples propagated before each parameter update
epochs = 5            # number of full passes over the dataset
```
최적화 단계(Optimization Loop)
------------------------------------------------------------------------------------------
하이퍼파라매터를 설정한 뒤에는 최적화 단계를 통해 모델을 학습하고 최적화할 수 있습니다.
최적화 단계의 각 반복(iteration)을 **에폭**\ 이라고 부릅니다.
하나의 에폭은 다음 두 부분으로 구성됩니다:
- **학습 단계(train loop)** - 학습용 데이터셋을 반복(iterate)하고 최적의 매개변수로 수렴합니다.
- **검증/테스트 단계(validation/test loop)** - 모델 성능이 개선되고 있는지를 확인하기 위해 테스트 데이터셋을 반복(iterate)합니다.
학습 단계(training loop)에서 일어나는 몇 가지 개념들을 간략히 살펴보겠습니다. 최적화 단계(optimization loop)를 보려면
`full-impl-label` 부분으로 건너뛰시면 됩니다.
손실 함수(loss function)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
학습용 데이터를 제공하면, 학습되지 않은 신경망은 정답을 제공하지 않을 확률이 높습니다. **손실 함수(loss function)**\ 는
획득한 결과와 실제 값 사이의 틀린 정도(degree of dissimilarity)를 측정하며, 학습 중에 이 값을 최소화하려고 합니다.
주어진 데이터 샘플을 입력으로 계산한 예측과 정답(label)을 비교하여 손실(loss)을 계산합니다.
일반적인 손실함수에는 회귀 문제(regression task)에 사용하는 `nn.MSELoss <https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html#torch.nn.MSELoss>`_\ (평균 제곱 오차(MSE; Mean Square Error))나
분류(classification)에 사용하는 `nn.NLLLoss <https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss>`_ (음의 로그 우도(Negative Log Likelihood)),
그리고 ``nn.LogSoftmax``\ 와 ``nn.NLLLoss``\ 를 합친 `nn.CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss>`_
등이 있습니다.
모델의 출력 로짓(logit)을 ``nn.CrossEntropyLoss``\ 에 전달하여 로짓(logit)을 정규화하고 예측 오류를 계산합니다.
```
# Initialize the loss function (cross-entropy over the class logits).
loss_fn = nn.CrossEntropyLoss()
```
옵티마이저(Optimizer)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
최적화는 각 학습 단계에서 모델의 오류를 줄이기 위해 모델 매개변수를 조정하는 과정입니다. **최적화 알고리즘**\ 은 이 과정이 수행되는 방식(여기에서는 확률적 경사하강법(SGD; Stochastic Gradient Descent))을 정의합니다.
모든 최적화 절차(logic)는 ``optimizer`` 객체에 캡슐화(encapsulate)됩니다. 여기서는 SGD 옵티마이저를 사용하고 있으며, PyTorch에는 ADAM이나 RMSProp과 같은 다른 종류의 모델과 데이터에서 더 잘 동작하는
`다양한 옵티마이저 <https://pytorch.org/docs/stable/optim.html>`_\ 가 있습니다.
학습하려는 모델의 매개변수와 학습률(learning rate) 하이퍼파라매터를 등록하여 옵티마이저를 초기화합니다.
```
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
```
학습 단계(loop)에서 최적화는 세단계로 이뤄집니다:
* ``optimizer.zero_grad()``\ 를 호출하여 모델 매개변수의 변화도를 재설정합니다. 기본적으로 변화도는 더해지기(add up) 때문에 중복 계산을 막기 위해 반복할 때마다 명시적으로 0으로 설정합니다.
* ``loss.backward()``\ 를 호출하여 예측 손실(prediction loss)을 역전파합니다. PyTorch는 각 매개변수에 대한 손실의 변화도를 저장합니다.
* 변화도를 계산한 뒤에는 ``optimizer.step()``\ 을 호출하여 역전파 단계에서 수집된 변화도로 매개변수를 조정합니다.
전체 구현
------------------------------------------------------------------------------------------
최적화 코드를 반복하여 수행하는 ``train_loop``\ 와 테스트 데이터로 모델의 성능을 측정하는 ``test_loop``\ 를 정의하였습니다.
```
def train_loop(dataloader, model, loss_fn, optimizer):
    """Run one training epoch: forward pass, loss, backprop, update.

    Every 100 batches, prints the current loss and how many samples of
    the dataset have been processed so far.
    """
    n_samples = len(dataloader.dataset)
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        # Forward pass and loss computation.
        batch_loss = loss_fn(model(inputs), targets)

        # Backpropagation: clear old gradients, accumulate new ones, step.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            seen = batch_idx * len(inputs)
            print(f"loss: {batch_loss.item():>7f} [{seen:>5d}/{n_samples:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
```
손실 함수와 옵티마이저를 초기화하고 ``train_loop``\ 와 ``test_loop``\ 에 전달합니다.
모델의 성능 향상을 알아보기 위해 자유롭게 에폭(epoch) 수를 증가시켜 볼 수 있습니다.
```
# Train for `epochs` passes, evaluating on the test set after each epoch.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
```
더 읽어보기
------------------------------------------------------------------------------------------
- `Loss Functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_
- `torch.optim <https://pytorch.org/docs/stable/optim.html>`_
- `Warmstart Training a Model <https://tutorials.pytorch.kr/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html>`_
| github_jupyter |
```
%matplotlib inline
```
Learning Hybrid Frontend Syntax Through Example
===============================================
**Author:** `Nathan Inkawhich <https://github.com/inkawhich>`_
This document is meant to highlight the syntax of the Hybrid Frontend
through a non-code intensive example. The Hybrid Frontend is one of the
new shiny features of Pytorch 1.0 and provides an avenue for developers
to transition their models from **eager** to **graph** mode. PyTorch
users are very familiar with eager mode as it provides the ease-of-use
and flexibility that we all enjoy as researchers. Caffe2 users are more
acquainted with graph mode which has the benefits of speed, optimization
opportunities, and functionality in C++ runtime environments. The hybrid
frontend bridges the gap between the two modes by allowing
researchers to develop and refine their models in eager mode (i.e.
PyTorch), then gradually transition the proven model to graph mode for
production, when speed and resource consumption become critical.
Hybrid Frontend Information
---------------------------
The process for transitioning a model to graph mode is as follows.
First, the developer constructs, trains, and tests the model in eager
mode. Then they incrementally **trace** and **script** each
function/module of the model with the Just-In-Time (JIT) compiler, at
each step verifying that the output is correct. Finally, when each of
the components of the top-level model have been traced and scripted, the
model itself is traced. At which point the model has been transitioned
to graph mode, and has a complete python-free representation. With this
representation, the model runtime can take advantage of high-performance
Caffe2 operators and graph based optimizations.
Before we continue, it is important to understand the idea of tracing
and scripting, and why they are separate. The goal of **trace** and
**script** is the same, and that is to create a graph representation of
the operations taking place in a given function. The discrepancy comes
from the flexibility of eager mode that allows for **data-dependent
control flows** within the model architecture. When a function does NOT
have a data-dependent control flow, it may be *traced* with
``torch.jit.trace``. However, when the function *has* a data-dependent
control flow it must be *scripted* with ``torch.jit.script``. We will
leave the details of the interworkings of the hybrid frontend for
another document, but the code example below will show the syntax of how
to trace and script different pure python functions and torch Modules.
Hopefully, you will find that using the hybrid frontend is non-intrusive
as it mostly involves adding decorators to the existing function and
class definitions.
Motivating Example
------------------
In this example we will implement a strange math function that may be
logically broken up into four parts that do, and do not contain
data-dependent control flows. The purpose here is to show a non-code
intensive example where the use of the JIT is highlighted. This example
is a stand-in representation of a useful model, whose implementation has
been divided into various pure python functions and modules.
The function we seek to implement, $Y(x)$, is defined for
$x \epsilon \mathbb{N}$ as
\begin{align}z(x) = \Biggl \lfloor \frac{\sqrt{\prod_{i=1}^{|2 x|}i}}{5} \Biggr \rfloor\end{align}
\begin{align}Y(x) = \begin{cases}
\frac{z(x)}{2} & \text{if } z(x)\%2 == 0, \\
z(x) & \text{otherwise}
\end{cases}\end{align}
\begin{align}\begin{array}{| r | r |} \hline
x &1 &2 &3 &4 &5 &6 &7 \\ \hline
Y(x) &0 &0 &-5 &20 &190 &-4377 &-59051 \\ \hline
\end{array}\end{align}
As mentioned, the computation is split into four parts. Part one is the
simple tensor calculation of $|2x|$, which can be traced. Part two
is the iterative product calculation that represents a data dependent
control flow to be scripted (the number of loop iteration depends on the
input at runtime). Part three is a trace-able
$\lfloor \sqrt{a/5} \rfloor$ calculation. Finally, part 4 handles
the output cases depending on the value of $z(x)$ and must be
scripted due to the data dependency. Now, let's see how this looks in
code.
Part 1 - Tracing a pure python function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We can implement part one as a pure python function as below. Notice, to
trace this function we call ``torch.jit.trace`` and pass in the function
to be traced. Since the trace requires a dummy input of the expected
runtime type and shape, we also include the ``torch.rand`` to generate a
single valued torch tensor.
```
import torch

def fn(x):
    """Part 1: absolute value of twice the input tensor (trace-able)."""
    doubled = x * 2
    return doubled.abs()

# This is how you define a traced function
# Pass in both the function to be traced and an example input to ``torch.jit.trace``
traced_fn = torch.jit.trace(fn, torch.rand(()))
```
Part 2 - Scripting a pure python function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We can also implement part 2 as a pure python function where we
iteratively compute the product. Since the number of iterations depends
on the value of the input, we have a data dependent control flow, so the
function must be scripted. We can script python functions simply with
the ``@torch.jit.script`` decorator.
```
# This is how you define a script function
# Apply this decorator directly to the function
@torch.jit.script
def script_fn(x):
    # Part 2: iterative product 1 * 2 * ... * int(x), i.e. int(x)!.
    # The loop count depends on the runtime value of x, so this
    # data-dependent control flow must be scripted, not traced.
    z = torch.ones([1], dtype=torch.int64)
    for i in range(int(x)):
        z = z * (i + 1)
    return z
```
Part 3 - Tracing a nn.Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Next, we will implement part 3 of the computation within the forward
function of a ``torch.nn.Module``. This module may be traced, but rather
than adding a decorator here, we will handle the tracing where the
Module is constructed. Thus, the class definition is not changed at all.
```
# This is a normal module that can be traced.
class TracedModule(torch.nn.Module):
    """Part 3: computes floor(sqrt(x) / 5) in float32 (trace-able)."""

    def forward(self, x):
        as_float = x.to(torch.float32)
        return (as_float.sqrt() / 5.0).floor()
```
Part 4 - Scripting a nn.Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the final part of the computation we have a ``torch.nn.Module`` that
must be scripted. To accommodate this, we inherit from
``torch.jit.ScriptModule`` and add the ``@torch.jit.script_method``
decorator to the forward function.
```
# This is how you define a scripted module.
# The module should inherit from ScriptModule and the forward should have the
# script_method decorator applied to it.
class ScriptModule(torch.jit.ScriptModule):
    @torch.jit.script_method
    def forward(self, x):
        # Part 4: halve x when even, negate it when odd. The branch taken
        # depends on the runtime value of x, so this must be scripted.
        r = -x
        if int(torch.fmod(x, 2.0)) == 0.0:
            r = x / 2.0
        return r
```
Top-Level Module
~~~~~~~~~~~~~~~~
Now we will put together the pieces of the computation via a top level
module called ``Net``. In the constructor, we will instantiate the
``TracedModule`` and ``ScriptModule`` as attributes. This must be done
because we ultimately want to trace/script the top level module, and
having the traced/scripted modules as attributes allows the Net to
inherit the required submodules' parameters. Notice, this is where we
actually trace the ``TracedModule`` by calling ``torch.jit.trace()`` and
providing the necessary dummy input. Also notice that the
``ScriptModule`` is constructed as normal because we handled the
scripting in the class definition.
Here we can also print the graphs created for each individual part of
the computation. The printed graphs allows us to see how the JIT
ultimately interpreted the functions as graph computations.
Finally, we define the ``forward`` function for the Net module where we
run the input data ``x`` through the four parts of the computation.
There is no strange syntax here and we call the traced and scripted
modules and functions as expected.
```
# This is a demonstration net that calls all of the different types of
# methods and functions
class Net(torch.nn.Module):
    """Chains parts 1-4: traced fn -> scripted fn -> traced module -> scripted module."""

    def __init__(self):
        super(Net, self).__init__()
        # Modules must be attributes on the Module because if you want to trace
        # or script this Module, we must be able to inherit the submodules'
        # params.
        self.traced_module = torch.jit.trace(TracedModule(), torch.rand(()))
        self.script_module = ScriptModule()

        # Print the JIT graphs so we can see how each part was compiled.
        print('traced_fn graph', traced_fn.graph)
        print('script_fn graph', script_fn.graph)
        print('TracedModule graph', self.traced_module.__getattr__('forward').graph)
        print('ScriptModule graph', self.script_module.__getattr__('forward').graph)

    def forward(self, x):
        # Call a traced function
        x = traced_fn(x)

        # Call a script function
        x = script_fn(x)

        # Call a traced submodule
        x = self.traced_module(x)

        # Call a scripted submodule
        x = self.script_module(x)
        return x
```
Running the Model
~~~~~~~~~~~~~~~~~
All that's left to do is construct the Net and compute the output
through the forward function. Here, we use $x=5$ as the test input
value and expect $Y(x)=190.$ Also, check out the graphs that were
printed during the construction of the Net.
```
# Instantiate the demo net and run one forward pass; with input 5 the four
# composed stages produce 190 (see the walkthrough above).
n = Net()
print(n(torch.tensor([5]))) # 190.
```
Tracing the Top-Level Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The last part of the example is to trace the top-level module, ``Net``.
As mentioned previously, since the traced/scripted modules are
attributes of Net, we are able to trace ``Net`` as it inherits the
parameters of the traced/scripted submodules. Note, the syntax for
tracing Net is identical to the syntax for tracing ``TracedModule``.
Also, check out the graph that is created.
```
# Trace the top-level Net itself; this works because the traced/scripted
# submodules are attributes, so their parameters are inherited by the trace.
n_traced = torch.jit.trace(n, torch.tensor([5]))
print(n_traced(torch.tensor([5])))
print('n_traced graph', n_traced.graph)
```
Hopefully, this document can serve as an introduction to the hybrid
frontend as well as a syntax reference guide for more experienced users.
Also, there are a few things to keep in mind when using the hybrid
frontend. There is a constraint that traced/scripted methods must be
written in a restricted subset of python, as features like generators,
defs, and Python data structures are not supported. As a workaround, the
scripting model *is* designed to work with both traced and non-traced
code which means you can call non-traced code from traced functions.
However, such a model may not be exported to ONNX.
| github_jupyter |
The **beta-binomial** can be written as
$$
y_i \sim Bin(\theta_i, n_i)
$$
$$
\theta_i \sim Beta(\alpha, \beta)
$$
The **joint posterior distribution** is proportional to
$$
p(\theta, \alpha, \beta|y) \propto p(\alpha, \beta) \times p(\theta | \alpha, \beta) \times p( y| \theta, \alpha, \beta)
$$
The **beta distribution** has the form
$$
p(\theta) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)\times \Gamma(\beta)}\theta^{\alpha -1}(1 - \theta)^{\beta - 1}
$$
The second half of the joint posterior distribution, $ p(y|\theta, \alpha, \beta)$, is a binomial distribution. We can ignore the **binomial coefficient** here since we are only approximating the posterior distribution, and it is a constant value.
In general, for a hierarchical Bayesian model with observed values $y$, distribution parameters $\theta$, and hyperparameters $\phi$, the posterior distribution of $\phi$ is
$$
p(\phi | y) = \int p(\theta, \phi|y) d\theta
$$
This is equivalent to
$$
p(\phi | y) = \frac{p(\theta, \phi | y)}{p(\theta| \phi, y)}
$$
We can compute the log-likelihood of N total observed experiments, each with $n$ trials and $y$ successes, and parameters $\alpha$ and $\beta$:
$$
p(\alpha, \beta|y) \propto p(\alpha, \beta) \prod_{i}^{N}
\frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \times \Gamma(\beta)}
\frac{\Gamma(\alpha + y_i)\Gamma(\beta + n_i - y_i)}{\Gamma(\alpha + \beta + n_i)}
$$
This can be converted into a log-likelihood so that we sum the individual experiment likelihoods, instead of multiplying:
$$
log \, p(\alpha, \beta|y) \propto log \, p(\alpha, \beta) + \sum_{i}^{N}{log\Gamma(\alpha + \beta) - log\Gamma(\alpha) - log\Gamma(\beta) + log\Gamma(\alpha + y_i) + log\Gamma(\beta + n_i - y_i) - log\Gamma(\alpha + \beta + n_i)}
$$
```
from typing import List
from scipy.special import gammaln
def log_likelihood(α: float, β: float, y: List[int], n: List[int]) -> float:
    """Beta-binomial log-likelihood of N experiments, up to the prior term.

    Each experiment i observed y[i] successes out of n[i] trials; the
    per-experiment term is the log of
    Γ(α+β)/(Γ(α)Γ(β)) · Γ(α+y_i)Γ(β+n_i−y_i)/Γ(α+β+n_i).

    Args:
        α: first shape parameter of the Beta prior.
        β: second shape parameter of the Beta prior.
        y: successes observed in each experiment.
        n: trials run in each experiment (same length as y).

    Returns:
        The sum of the per-experiment log-likelihood terms.
    """
    return sum(
        gammaln(α + β) - gammaln(α) - gammaln(β)
        + gammaln(α + successes)
        + gammaln(β + trials - successes)
        - gammaln(α + β + trials)
        for successes, trials in zip(y, n)
    )
# Three experiments: y successes out of n trials each.
y = [1,5,10]
n = [10, 51, 120]
# A flat Beta(1, 1) prior over θ.
α = 1
β = 1
# we intuitively expect that θ should be somewhere around ~ 0.10, so our
# likelihood should be relatively low
log_likelihood(α, β, y, n)
# Beta(2, 10) has mean 2/12 ≈ 0.17, closer to the observed success rates.
α = 2
β = 10
# should be a much better model
log_likelihood(α, β, y, n)
import numpy as np
# Exhaustive grid search over α, β ∈ {1, ..., 19}.
X, Z = np.meshgrid(np.arange(1,20), np.arange(1,20))
param_space = np.c_[X.ravel(), Z.ravel()]
surface = np.zeros(X.shape)
# Best (α, β) pair seen so far; LL starts at a sentinel that any real
# log-likelihood beats.
best_parameters = {
    "α": 1,
    "β": 1,
    "LL": -9e12
}
for parameters in param_space:
    α = parameters[0]
    β = parameters[1]
    LL = log_likelihood(α, β, y, n)
    if LL > best_parameters["LL"]:
        best_parameters["α"] = α
        best_parameters["β"] = β
        best_parameters["LL"] = LL
    # Row index is α - 1 and column index is β - 1, so in the contour plot
    # below β runs along the x-axis and α along the y-axis, matching the
    # axis labels.
    surface[α - 1, β - 1] = LL
print(best_parameters)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize = (5,5))
CS = ax.contourf(X,Z, surface,cmap=plt.cm.bone)
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('Log Likelihood')
# Add the contour line levels to the colorbar
plt.xlabel("β")
plt.ylabel("α")
plt.show()
```
| github_jupyter |
# Assignment 2: Deep N-grams
Welcome to the second assignment of course 3. In this assignment you will explore Recurrent Neural Networks `RNN`.
- You will be using the fundamentals of google's [trax](https://github.com/google/trax) package to implement any kind of deep learning model.
By completing this assignment, you will learn how to implement models from scratch:
- How to convert a line of text into a tensor
- Create an iterator to feed data to the model
- Define a GRU model using `trax`
- Train the model using `trax`
- Compute the accuracy of your model using the perplexity
- Predict using your own model
## Outline
- [Overview](#0)
- [Part 1: Importing the Data](#1)
- [1.1 Loading in the data](#1.1)
- [1.2 Convert a line to tensor](#1.2)
- [Exercise 01](#ex01)
- [1.3 Batch generator](#1.3)
- [Exercise 02](#ex02)
- [1.4 Repeating Batch generator](#1.4)
- [Part 2: Defining the GRU model](#2)
- [Exercise 03](#ex03)
- [Part 3: Training](#3)
- [3.1 Training the Model](#3.1)
- [Exercise 04](#ex04)
- [Part 4: Evaluation](#4)
- [4.1 Evaluating using the deep nets](#4.1)
- [Exercise 05](#ex05)
- [Part 5: Generating the language with your own model](#5)
- [Summary](#6)
<a name='0'></a>
### Overview
Your task will be to predict the next set of characters using the previous characters.
- Although this task sounds simple, it is pretty useful.
- You will start by converting a line of text into a tensor
- Then you will create a generator to feed data into the model
- You will train a neural network in order to predict the new set of characters of defined length.
- You will use embeddings for each character and feed them as inputs to your model.
- Many natural language tasks rely on using embeddings for predictions.
- Your model will convert each character to its embedding, run the embeddings through a Gated Recurrent Unit `GRU`, and run it through a linear layer to predict the next set of characters.
<img src = "model.png" style="width:600px;height:150px;"/>
The figure above gives you a summary of what you are about to implement.
- You will get the embeddings;
- Stack the embeddings on top of each other;
- Run them through two layers with a relu activation in the middle;
- Finally, you will compute the softmax.
To predict the next character:
- Use the softmax output and identify the word with the highest probability.
- The word with the highest probability is the prediction for the next word.
```
import os
import trax
import trax.fastmath.numpy as np
import pickle
import numpy
import random as rnd
from trax import fastmath
from trax import layers as tl
# set random seed
trax.supervised.trainer_lib.init_random_number_generators(32)
rnd.seed(32)
```
<a name='1'></a>
# Part 1: Importing the Data
<a name='1.1'></a>
### 1.1 Loading in the data
<img src = "shakespeare.png" style="width:250px;height:250px;"/>
Now import the dataset and do some processing.
- The dataset has one sentence per line.
- You will be doing character generation, so you have to process each sentence by converting each **character** (and not word) to a number.
- You will use the `ord` function to convert a unique character to a unique integer ID.
- Store each line in a list.
- Create a data generator that takes in the `batch_size` and the `max_length`.
- The `max_length` corresponds to the maximum length of the sentence.
```
# Read every file under data/ and collect its non-empty, stripped lines.
dirname = 'data/'
lines = [] # storing all the lines in a variable.
for filename in os.listdir(dirname):
    with open(os.path.join(dirname, filename)) as files:
        for line in files:
            # remove leading and trailing whitespace
            pure_line = line.strip()
            # if pure_line is not the empty string,
            if pure_line:
                # append it to the list
                lines.append(pure_line)
n_lines = len(lines)
print(f"Number of lines: {n_lines}")
print(f"Sample line at position 0 {lines[0]}")
print(f"Sample line at position 999 {lines[999]}")
```
Notice that the letters are both uppercase and lowercase. In order to reduce the complexity of the task, we will convert all characters to lowercase. This way, the model only needs to predict the likelihood that a letter is 'a' and not decide between uppercase 'A' and lowercase 'a'.
```
# go through each line, lowercasing in place so the model only has to learn
# one case per letter
for i, line in enumerate(lines):
    # convert to all lowercase
    lines[i] = line.lower()
print(f"Number of lines: {n_lines}")
print(f"Sample line at position 0 {lines[0]}")
print(f"Sample line at position 999 {lines[999]}")
# Hold out the last 1000 lines for validation; train on the rest.
eval_lines = lines[-1000:] # Create a holdout validation set
lines = lines[:-1000] # Leave the rest for training
print(f"Number of lines for training: {len(lines)}")
print(f"Number of lines for validation: {len(eval_lines)}")
```
<a name='1.2'></a>
### 1.2 Convert a line to tensor
Now that you have your list of lines, you will convert each character in that list to a number. You can use Python's `ord` function to do it.
Given a string representing one Unicode character, the `ord` function returns an integer representing the Unicode code point of that character.
```
# View the unique unicode integer associated with each character
print(f"ord('a'): {ord('a')}")
print(f"ord('b'): {ord('b')}")
print(f"ord('c'): {ord('c')}")
print(f"ord(' '): {ord(' ')}")
print(f"ord('x'): {ord('x')}")
print(f"ord('y'): {ord('y')}")
print(f"ord('z'): {ord('z')}")
print(f"ord('1'): {ord('1')}")
print(f"ord('2'): {ord('2')}")
print(f"ord('3'): {ord('3')}")
```
<a name='ex01'></a>
### Exercise 01
**Instructions:** Write a function that takes in a single line and transforms each character into its unicode integer. This returns a list of integers, which we'll refer to as a tensor.
- Use a special integer to represent the end of the sentence (the end of the line).
- This will be the EOS_int (end of sentence integer) parameter of the function.
- Include the EOS_int as the last integer of the tensor.
- For this exercise, you will use the number `1` to represent the end of a sentence.
```
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: line_to_tensor
def line_to_tensor(line, EOS_int=1):
    """Convert a line of text into a list of Unicode code points.

    Args:
        line (str): A single line of text.
        EOS_int (int, optional): End-of-sentence integer. Defaults to 1.

    Returns:
        list: the Unicode value of every character in `line`, followed by
            the end-of-sentence integer.
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # Map each character to its Unicode code point, then terminate the
    # sequence with the EOS marker.
    tensor = [ord(character) for character in line]
    tensor.append(EOS_int)
    ### END CODE HERE ###
    return tensor
# Testing your output
line_to_tensor('abc xyz')
```
##### Expected Output
```CPP
[97, 98, 99, 32, 120, 121, 122, 1]
```
<a name='1.3'></a>
### 1.3 Batch generator
Most of the time in Natural Language Processing, and AI in general we use batches when training our data sets. Here, you will build a data generator that takes in a text and returns a batch of text lines (lines are sentences).
- The generator converts text lines (sentences) into numpy arrays of integers padded by zeros so that all arrays have the same length, which is the length of the longest sentence in the entire data set.
Once you create the generator, you can iterate on it like this:
```
next(data_generator)
```
This generator returns the data in a format that you could directly use in your model when computing the feed-forward of your algorithm. This iterator returns a batch of lines and per token mask. The batch is a tuple of three parts: inputs, targets, mask. The inputs and targets are identical. The second column will be used to evaluate your predictions. Mask is 1 for non-padding tokens.
<a name='ex02'></a>
### Exercise 02
**Instructions:** Implement the data generator below. Here are some things you will need.
- While True loop: this will yield one batch at a time.
- if index >= num_lines, set index to 0.
- The generator should return shuffled batches of data. To achieve this without modifying the actual lines a list containing the indexes of `data_lines` is created. This list can be shuffled and used to get random batches everytime the index is reset.
- if len(line) < max_length append line to cur_batch.
- Note that a line that has length equal to max_length should not be appended to the batch.
- This is because when converting the characters into a tensor of integers, an additional end of sentence token id will be added.
- So if max_length is 5, and a line has 4 characters, the tensor representing those 4 characters plus the end of sentence character will be of length 5, which is the max length.
- if len(cur_batch) == batch_size, go over every line, convert it to an int and store it.
**Remember that when calling np you are really calling trax.fastmath.numpy which is trax’s version of numpy that is compatible with JAX. As a result of this, where you used to encounter the type numpy.ndarray now you will find the type jax.interpreters.xla.DeviceArray.**
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li>Use the line_to_tensor function above inside a list comprehension in order to pad lines with zeros.</li>
<li>Keep in mind that the length of the tensor is always 1 + the length of the original line of characters. Keep this in mind when setting the padding of zeros.</li>
</ul>
</p>
```
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: data_generator
def data_generator(batch_size, max_length, data_lines, line_to_tensor=line_to_tensor, shuffle=True):
    """Yield endless batches of zero-padded tensors built from `data_lines`.

    Args:
        batch_size (int): number of lines per batch.
        max_length (int): maximum tensor length, including the end-of-sentence
            token appended by `line_to_tensor`; lines whose character length
            is >= max_length are skipped.
        data_lines (list): sentences to group into batches.
        line_to_tensor (function, optional): converts a line into a list of
            integers. Defaults to line_to_tensor.
        shuffle (bool, optional): visit the lines in a random order.
            Defaults to True.

    Yields:
        tuple: two references to the same padded batch array plus a 0/1 mask
        marking the non-padding positions.
    """
    total = len(data_lines)
    # Visit order over data_lines; shuffled in place when requested so the
    # caller's list itself is never reordered.
    order = [*range(total)]
    if shuffle:
        rnd.shuffle(order)

    cursor = 0    # current position within `order`
    pending = []  # lines collected for the batch under construction
    while True:
        # Wrap around after a full pass, reshuffling so every epoch
        # produces different batches.
        if cursor >= total:
            cursor = 0
            if shuffle:
                rnd.shuffle(order)
        candidate = data_lines[order[cursor]]
        cursor += 1
        # Keep only lines strictly shorter than max_length: appending the
        # EOS token brings them up to at most max_length integers.
        if len(candidate) < max_length:
            pending.append(candidate)
        if len(pending) == batch_size:
            rows = []
            mask_rows = []
            for sentence in pending:
                as_ints = line_to_tensor(sentence)
                # Right-pad with zeros so every row is max_length long.
                padded = as_ints + [0] * (max_length - len(as_ints))
                rows.append(padded)
                # 1 for real tokens, 0 for padding.
                mask_rows.append([1 if value != 0 else 0 for value in padded])
            batch_arr = np.array(rows)
            # Inputs and targets are identical for language modeling.
            yield batch_arr, batch_arr, np.array(mask_rows)
            pending = []
# Try out your data generator; the first line is too long for
# max_length=10 and will be skipped.
tmp_lines = ['12345678901', # length 11
             '123456789', # length 9
             '234567890', # length 9
             '345678901'] # length 9
# Get a batch size of 2, max length 10
tmp_data_gen = data_generator(batch_size=2,
                              max_length=10,
                              data_lines=tmp_lines,
                              shuffle=False)
# get one batch: a (inputs, targets, mask) tuple
tmp_batch = next(tmp_data_gen)
# view the batch
tmp_batch
```
##### Expected output
```CPP
(DeviceArray([[49, 50, 51, 52, 53, 54, 55, 56, 57, 1],
[50, 51, 52, 53, 54, 55, 56, 57, 48, 1]], dtype=int32),
DeviceArray([[49, 50, 51, 52, 53, 54, 55, 56, 57, 1],
[50, 51, 52, 53, 54, 55, 56, 57, 48, 1]], dtype=int32),
DeviceArray([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32))
```
Now that you have your generator, you can just call them and they will return tensors which correspond to your lines in Shakespeare. The first column and the second column are identical. Now you can go ahead and start building your neural network.
<a name='1.4'></a>
### 1.4 Repeating Batch generator
The way the iterator is currently defined, it will keep providing batches forever.
Although it is not needed, we want to show you the `itertools.cycle` function which is really useful when the generator eventually stops
Notice that it is expected to use this function within the training function further below
Usually we want to cycle over the dataset multiple times during training (i.e. train for multiple *epochs*).
For small datasets we can use [`itertools.cycle`](https://docs.python.org/3.8/library/itertools.html#itertools.cycle) to achieve this easily.
```
import itertools
# Wrap the generator in itertools.cycle so iteration could continue even if
# the underlying generator were finite.
infinite_data_generator = itertools.cycle(
    data_generator(batch_size=2, max_length=10, data_lines=tmp_lines))
```
You can see that we can draw more batches than a single pass over the 4 lines in tmp_lines would provide using this.
```
# Each element is one (batch, batch, mask) tuple from the generator — more
# draws than a single pass over the 4 sample lines could supply.
ten_lines = [next(infinite_data_generator) for _ in range(10)]
print(len(ten_lines))
```
<a name='2'></a>
# Part 2: Defining the GRU model
Now that you have the input and output tensors, you will go ahead and initialize your model. You will be implementing the `GRULM`, gated recurrent unit model. To implement this model, you will be using google's `trax` package. Instead of making you implement the `GRU` from scratch, we will give you the necessary methods from a build in package. You can use the following packages when constructing the model:
- `tl.Serial`: Combinator that applies layers serially (by function composition). [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Serial) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/combinators.py#L26)
- You can pass in the layers as arguments to `Serial`, separated by commas.
- For example: `tl.Serial(tl.Embeddings(...), tl.Mean(...), tl.Dense(...), tl.LogSoftmax(...))`
___
- `tl.ShiftRight`: Allows the model to go right in the feed forward. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.ShiftRight) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/attention.py#L297)
- `ShiftRight(n_shifts=1, mode='train')` layer to shift the tensor to the right n_shift times
- Here in the exercise you only need to specify the mode and not worry about n_shifts
___
- `tl.Embedding`: Initializes the embedding. In this case it is the size of the vocabulary by the dimension of the model. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Embedding) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L113)
- `tl.Embedding(vocab_size, d_feature)`.
- `vocab_size` is the number of unique words in the given vocabulary.
- `d_feature` is the number of elements in the word embedding (some choices for a word embedding size range from 150 to 300, for example).
___
- `tl.GRU`: `Trax` GRU layer. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.rnn.GRU) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/rnn.py#L143)
- `GRU(n_units)` Builds a traditional GRU of n_cells with dense internal transformations.
- `GRU` paper: https://arxiv.org/abs/1412.3555
___
- `tl.Dense`: A dense layer. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L28)
- `tl.Dense(n_units)`: The parameter `n_units` is the number of units chosen for this dense layer.
___
- `tl.LogSoftmax`: Log of the output probabilities. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.LogSoftmax) / [source code](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L242)
- Here, you don't need to set any parameters for `LogSoftMax()`.
___
<a name='ex03'></a>
### Exercise 03
**Instructions:** Implement the `GRULM` class below. You should be using all the methods explained above.
```
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: GRULM
def GRULM(vocab_size=256, d_model=512, n_layers=2, mode='train'):
    """Return a GRU language model.

    Args:
        vocab_size (int, optional): Size of the vocabulary. Defaults to 256.
        d_model (int, optional): Embedding depth and GRU hidden size (n_units
            in each GRU cell). Defaults to 512.
        n_layers (int, optional): Number of stacked GRU layers. Defaults to 2.
        mode (str, optional): 'train', 'eval' or 'predict'; predict mode is
            for fast inference. Defaults to 'train'.

    Returns:
        trax.layers.combinators.Serial: a layer mapping token ids to
        log-probabilities over the vocabulary.
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # One GRU layer per requested depth, all d_model units wide.
    recurrent_stack = [tl.GRU(n_units=d_model) for _ in range(n_layers)]
    model = tl.Serial(
        tl.ShiftRight(mode=mode),                                # shift the input one step right
        tl.Embedding(vocab_size=vocab_size, d_feature=d_model),  # token id -> dense vector
        recurrent_stack,
        tl.Dense(n_units=vocab_size),                            # project back to vocabulary size
        tl.LogSoftmax()                                          # log-probabilities
    )
    ### END CODE HERE ###
    return model
# testing your model
model = GRULM()
print(model)
```
##### Expected output
```CPP
Serial[
ShiftRight(1)
Embedding_256_512
GRU_512
GRU_512
Dense_256
LogSoftmax
]
```
<a name='3'></a>
# Part 3: Training
Now you are going to train your model. As usual, you have to define the cost function, the optimizer, and decide whether you will be training it on a `gpu` or `cpu`. You also have to feed in a built model. Before, going into the training, we re-introduce the `TrainTask` and `EvalTask` abstractions from the last week's assignment.
To train a model on a task, Trax defines an abstraction `trax.supervised.training.TrainTask` which packages the train data, loss and optimizer (among other things) together into an object.
Similarly to evaluate a model, Trax defines an abstraction `trax.supervised.training.EvalTask` which packages the eval data and metrics (among other things) into another object.
The final piece tying things together is the `trax.supervised.training.Loop` abstraction that is a very simple and flexible way to put everything together and train the model, all the while evaluating it and saving checkpoints.
Using `training.Loop` will save you a lot of code compared to always writing the training loop by hand, like you did in courses 1 and 2. More importantly, you are less likely to have a bug in that code that would ruin your training.
```
batch_size = 32
max_length = 64
```
An `epoch` is traditionally defined as one pass through the dataset.
Since the dataset was divided in `batches` you need several `steps` (gradient evaluations) in order to complete an `epoch`. So, one `epoch` corresponds to the number of examples in a `batch` times the number of `steps`. In short, in each `epoch` you go over all the dataset.
The `max_length` variable defines the maximum length of lines to be used in training our data, lines longer that that length are discarded.
Below is a function and results that indicate how many lines conform to our criteria of maximum length of a sentence in the entire dataset and how many `steps` are required in order to cover the entire dataset which in turn corresponds to an `epoch`.
```
def n_used_lines(lines, max_length):
    '''Count how many lines are usable given a maximum length.

    Args:
        lines: list of text lines.
        max_length: longest line length (in characters) still counted.

    Return:
        Number of effective examples, i.e. lines with len <= max_length.
    '''
    return sum(1 for text in lines if len(text) <= max_length)
# NOTE(review): this counts lines of length <= 32, while training below uses
# max_length = 64 — confirm which threshold is intended.
num_used_lines = n_used_lines(lines, 32)
print('Number of used lines from the dataset:', num_used_lines)
print('Batch size (a power of 2):', int(batch_size))
# One epoch = enough steps to cover every usable line once.
steps_per_epoch = int(num_used_lines/batch_size)
print('Number of steps to cover one epoch:', steps_per_epoch)
```
**Expected output:**
Number of used lines from the dataset: 25881
Batch size (a power of 2): 32
Number of steps to cover one epoch: 808
<a name='3.1'></a>
### 3.1 Training the model
You will now write a function that takes in your model and trains it. To train your model you have to decide how many times you want to iterate over the entire data set.
<a name='ex04'></a>
### Exercise 04
**Instructions:** Implement the `train_model` program below to train the neural network above. Here is a list of things you should do:
- Create a `trax.supervised.trainer.TrainTask` object, this encapsulates the aspects of the dataset and the problem at hand:
- labeled_data = the labeled data that we want to *train* on.
- loss_fn = [tl.CrossEntropyLoss()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html?highlight=CrossEntropyLoss#trax.layers.metrics.CrossEntropyLoss)
- optimizer = [trax.optimizers.Adam()](https://trax-ml.readthedocs.io/en/latest/trax.optimizers.html?highlight=Adam#trax.optimizers.adam.Adam) with learning rate = 0.0005
- Create a `trax.supervised.trainer.EvalTask` object, this encapsulates aspects of evaluating the model:
- labeled_data = the labeled data that we want to *evaluate* on.
- metrics = [tl.CrossEntropyLoss()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.CrossEntropyLoss) and [tl.Accuracy()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.Accuracy)
- How frequently we want to evaluate and checkpoint the model.
- Create a `trax.supervised.trainer.Loop` object, this encapsulates the following:
- The previously created `TrainTask` and `EvalTask` objects.
- the training model = [GRULM](#ex03)
- optionally the evaluation model, if different from the training model. NOTE: in presence of Dropout etc we usually want the evaluation model to behave slightly differently than the training model.
You will be using a cross entropy loss, with Adam optimizer. Please read the [trax](https://trax-ml.readthedocs.io/en/latest/index.html) documentation to get a full understanding. Make sure you use the number of steps provided as a parameter to train for the desired number of steps.
**NOTE:** Don't forget to wrap the data generator in `itertools.cycle` to iterate on it for multiple epochs.
```
from trax.supervised import training
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(model, data_generator, batch_size=32, max_length=64, lines=lines, eval_lines=eval_lines, n_steps=1, output_dir='model/'):
    """Train the model and return the Trax training loop.

    Args:
        model (trax.layers.combinators.Serial): GRU model.
        data_generator (function): Data generator function.
        batch_size (int, optional): Number of lines per batch. Defaults to 32.
        max_length (int, optional): Maximum length allowed for a line to be processed. Defaults to 64.
        lines (list, optional): List of lines to use for training. Defaults to lines.
        eval_lines (list, optional): List of lines to use for evaluation. Defaults to eval_lines.
        n_steps (int, optional): Number of steps to train. Defaults to 1.
        output_dir (str, optional): Relative path of directory to save model. Defaults to "model/".

    Returns:
        trax.supervised.training.Loop: Training loop; it holds a handle to
        the model (weights, optimizer state, etc.).
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # Wrap both generators in itertools.cycle so training can iterate over
    # the data for an arbitrary number of steps / epochs.
    train_stream = itertools.cycle(
        data_generator(batch_size, max_length, data_lines=lines))
    eval_stream = itertools.cycle(
        data_generator(batch_size, max_length, data_lines=eval_lines))

    train_task = training.TrainTask(
        labeled_data=train_stream,
        loss_layer=tl.CrossEntropyLoss(),
        optimizer=trax.optimizers.Adam(0.0005),
    )
    eval_task = training.EvalTask(
        labeled_data=eval_stream,
        metrics=[tl.CrossEntropyLoss(), tl.Accuracy()],
        n_eval_batches=3,  # average a few batches for a steadier estimate
    )

    training_loop = training.Loop(model,
                                  train_task,
                                  eval_task=eval_task,
                                  output_dir=output_dir)
    training_loop.run(n_steps=n_steps)
    ### END CODE HERE ###
    # We return this because it contains a handle to the model, which has the weights etc.
    return training_loop
# Train the model 1 step and keep the `trax.supervised.training.Loop` object.
training_loop = train_model(GRULM(), data_generator)
```
The model was only trained for 1 step due to the constraints of this environment. Even on a GPU accelerated environment it will take many hours for it to achieve a good level of accuracy. For the rest of the assignment you will be using a pretrained model but now you should understand how the training can be done using Trax.
<a name='4'></a>
# Part 4: Evaluation
<a name='4.1'></a>
### 4.1 Evaluating using the deep nets
Now that you have learned how to train a model, you will learn how to evaluate it. To evaluate language models, we usually use perplexity which is a measure of how well a probability model predicts a sample. Note that perplexity is defined as:
$$P(W) = \sqrt[N]{\prod_{i=1}^{N} \frac{1}{P(w_i| w_1,...,w_{n-1})}}$$
As an implementation hack, you would usually take the log of that formula (to enable us to use the log probabilities we get as output of our `RNN`, convert exponents to products, and products into sums which makes computations less complicated and computationally more efficient). You should also take care of the padding, since you do not want to include the padding when calculating the perplexity (because we do not want to have a perplexity measure artificially good).
$$log P(W) = {log\big(\sqrt[N]{\prod_{i=1}^{N} \frac{1}{P(w_i| w_1,...,w_{n-1})}}\big)}$$
$$ = {log\big({\prod_{i=1}^{N} \frac{1}{P(w_i| w_1,...,w_{n-1})}}\big)^{\frac{1}{N}}}$$
$$ = {log\big({\prod_{i=1}^{N}{P(w_i| w_1,...,w_{n-1})}}\big)^{-\frac{1}{N}}} $$
$$ = -\frac{1}{N}{log\big({\prod_{i=1}^{N}{P(w_i| w_1,...,w_{n-1})}}\big)} $$
$$ = -\frac{1}{N}{\big({\sum_{i=1}^{N}{logP(w_i| w_1,...,w_{n-1})}}\big)} $$
<a name='ex05'></a>
### Exercise 05
**Instructions:** Write a program that will help evaluate your model. Implementation hack: your program takes in preds and target. Preds is a tensor of log probabilities. You can use [`tl.one_hot`](https://github.com/google/trax/blob/22765bb18608d376d8cd660f9865760e4ff489cd/trax/layers/metrics.py#L154) to transform the target into the same dimension. You then multiply them and sum.
You also have to create a mask to only get the non-padded probabilities. Good luck!
<details>
<summary>
<font size="3" color="darkgreen"><b>Hints</b></font>
</summary>
<p>
<ul>
<li>To convert the target into the same dimension as the predictions tensor use tl.one.hot with target and preds.shape[-1].</li>
<li>You will also need the np.equal function in order to unpad the data and properly compute perplexity.</li>
<li>Keep in mind while implementing the formula above that <em> w<sub>i</sub></em> represents a letter from our 256 letter alphabet.</li>
</ul>
</p>
```
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: test_model
def test_model(preds, target):
    """Compute the log perplexity of the model over one batch.

    Args:
        preds (jax.interpreters.xla.DeviceArray): Predictions of a list of batches of tensors corresponding to lines of text.
        target (jax.interpreters.xla.DeviceArray): Actual list of batches of tensors corresponding to lines of text.

    Returns:
        float: log_perplexity of the model.
    """
    # At each position, select the log probability the model assigned to the
    # actual target character by multiplying with a one-hot mask and summing.
    log_p = np.sum(preds * tl.one_hot(target, preds.shape[-1]), axis=-1)
    # Padding positions carry target id 0 and must not influence the score.
    non_pad = 1.0 - np.equal(target, 0)
    masked_log_p = log_p * non_pad
    # Average log probability over the real (non-padded) characters only.
    log_ppx = np.sum(masked_log_p) / np.sum(non_pad)
    return -log_ppx
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# Testing
# Build the GRU language model and restore the pre-trained weights.
model = GRULM()
model.init_from_file('model.pkl.gz')
# Take one batch; shuffle=False keeps the evaluation reproducible.
batch = next(data_generator(batch_size, max_length, lines, shuffle=False))
# batch[0] holds the inputs; batch[1] is the target passed to test_model.
preds = model(batch[0])
log_ppx = test_model(preds, batch[1])
print('The log perplexity and perplexity of your model are respectively', log_ppx, np.exp(log_ppx))
```
**Expected Output:** The log perplexity and perplexity of your model are respectively around 1.9 and 7.2.
<a name='5'></a>
# Part 5: Generating the language with your own model
We will now use your own language model to generate new sentences. For that, we need to make draws from a Gumbel distribution.
The Gumbel Probability Density Function (PDF) is defined as:
$$ f(z) = {1\over{\beta}}e^{-(z+e^{-z})} $$
where: $$ z = {(x - \mu)\over{\beta}}$$
The maximum value — which is what we choose as the prediction in the last step of the Recurrent Neural Network `RNN` we are using for text generation — of a sample of a random variable following an exponential distribution approaches the Gumbel distribution as the sample size increases. For that reason, the Gumbel distribution is used to sample from a categorical distribution.
```
# Run this cell to generate some new sentences
def gumbel_sample(log_probs, temperature=1.0):
    """Sample an index from a categorical distribution via the Gumbel-max trick."""
    noise = numpy.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape)
    # Transform uniform noise into standard Gumbel noise.
    gumbel = -np.log(-np.log(noise))
    perturbed = log_probs + temperature * gumbel
    # The argmax of the perturbed log probabilities is a categorical sample.
    return np.argmax(perturbed, axis=-1)
def predict(num_chars, prefix):
    """Autoregressively generate up to num_chars characters after prefix."""
    tokens = [ord(ch) for ch in prefix]
    generated = list(prefix)
    total_len = len(prefix) + num_chars
    for _ in range(num_chars):
        # Right-pad the current sequence with zeros up to the fixed length.
        padded = np.array(tokens + [0] * (total_len - len(tokens)))
        logits = model(padded[None, :])  # prepend the batch dimension
        # Sample the next character from the distribution at the next position.
        sampled = gumbel_sample(logits[0, len(tokens)])
        tokens.append(int(sampled))
        if tokens[-1] == 1:
            break  # EOS
        generated.append(chr(int(sampled)))
    return "".join(generated)
# Sample four sentences from the model with an empty prefix;
# each call draws fresh Gumbel noise, so the outputs differ.
print(predict(32, ""))
print(predict(32, ""))
print(predict(32, ""))
print(predict(32, ""))
```
In the generated text above, you can see that the model generates text that makes sense capturing dependencies between words and without any input. A simple n-gram model would have not been able to capture all of that in one sentence.
<a name='6'></a>
### <span style="color:blue"> On statistical methods </span>
Using a statistical method like the one you implemented in course 2 will not give you results that are as good. Your model will not be able to encode information seen previously in the data set and as a result, the perplexity will increase. Remember from course 2 that the higher the perplexity, the worse your model is. Furthermore, statistical ngram models take up too much space and memory. As a result, it will be inefficient and too slow. Conversely, with deepnets, you can get a better perplexity. Note, learning about n-gram language models is still important and allows you to better understand deepnets.
| github_jupyter |
# T81-558: Applications of Deep Neural Networks
**Module 7: Generative Adversarial Networks**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 7 Material
* Part 7.1: Introduction to GANS for Image and Data Generation [[Video]](https://www.youtube.com/watch?v=u8xn393mDPM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=21) [[Notebook]](t81_558_class_07_1_gan_intro.ipynb)
* Part 7.2: Implementing a GAN in Keras [[Video]](https://www.youtube.com/watch?v=cf6FDLFNWEk&index=22&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_2_Keras_gan.ipynb)
* **Part 7.3: Face Generation with StyleGAN and Python** [[Video]](https://www.youtube.com/watch?v=LSSH_NdXwhU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=23) [[Notebook]](t81_558_class_07_3_style_gan.ipynb)
* Part 7.4: GANS for Semi-Supervised Learning in Keras [[Video]](https://www.youtube.com/watch?v=LSSH_NdXwhU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=23) [[Notebook]](t81_558_class_07_4_gan_semi_supervised.ipynb)
* Part 7.5: An Overview of GAN Research [[Video]](https://www.youtube.com/watch?v=LSSH_NdXwhU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=23) [[Notebook]](t81_558_class_07_5_gan_research.ipynb)
# Part 7.3: Face Generation with StyleGAN and Python
GANs have appeared frequently in the media, showcasing their ability to generate extremely photorealistic faces. One significant step forward for realistic face generation was nVidia StyleGAN, which was introduced in the following paper.
* Karras, T., Laine, S., & Aila, T. (2018). [A style-based generator architecture for generative adversarial networks](https://arxiv.org/abs/1812.04948). *arXiv preprint arXiv:1812.04948*.
In this part we will make use of StyleGAN. We will also preload weights that nVidia trained on. This will allow us to generate high resolution photorealistic looking faces, such as this one.

The above image was generated with StyleGAN, using Google CoLab. Following the instructions in this section, you will be able to create faces like this of your own.
While the above image looks much more realistic than the previous set of images, it is not perfect. There are usually a number of tell-tail signs that you are looking at a computer generated image. One of the most obvious is usually the surreal, dream-like backgrounds. The background does not look obviously fake, at first glance; however, upon closer inspection you usually can't quite discern exactly what a GAN generated background actually is. Also look at the image character's left eye. It is slightly unrealistic looking, especially near the eyelashes.
Look at the following GAN face. Can you spot any imperfections?

Notice the earrings? GANs sometimes have problems with symmetry, particularly earrings.
### Keras Sequence vs Functional Model API
Most of the neural networks created in this course have made use of the Keras sequence object. You might have noticed that we briefly made use of another type of neural network object for the ResNet, the Model. These are the [two major means](https://keras.io/getting-started/functional-api-guide/) of constructing a neural network in Keras:
* [Sequential](https://keras.io/getting-started/sequential-model-guide/) - Simplified interface to Keras that supports most models where the flow of information is a simple sequence from input to output.
* [Keras Functional API](https://keras.io/getting-started/functional-api-guide/) - More complex interface that allows neural networks to be constructed of reused layers, multiple input layers, and supports building your own recurrent connections.
It is important to point out that these are not two specific types of neural network. Rather, they are two means of constructing neural networks in Keras. Some types of neural network can be implemented in either, such as dense feedforward neural networks (like we used for the Iris and MPG datasets). However, other types of neural network, like ResNet and GANs can only be used in the Functional Model API.
### Generating High Rez GAN Faces with Google CoLab
This notebook demonstrates how to run [NVidia StyleGAN](https://github.com/NVlabs/stylegan) inside of a Google CoLab notebook. I suggest you use this to generate GAN faces from a pretrained model. If you try to train your own, you will run into compute limitations of Google CoLab.
Make sure to run this code on a GPU instance. GPU is assumed.
First, map your G-Drive, this is where your GANs will be written to.
```
# Run this for Google CoLab
from google.colab import drive
# Mount Google Drive at /content/drive so generated images can be written there;
# force_remount=True re-mounts cleanly if Drive is already mounted.
drive.mount('/content/drive', force_remount=True)
```
Next, clone StyleGAN from GitHub.
```
!git clone https://github.com/NVlabs/stylegan.git
```
Verify that StyleGAN has been cloned.
```
!ls /content/stylegan/
```
Add the StyleGAN folder to Python so that you can import it.
```
import sys
# Put the cloned StyleGAN repo first on the module search path so that
# `import dnnlib` resolves to the repo's copy.
sys.path.insert(0, "/content/stylegan")
import dnnlib
```
The code below is based on code from NVidia. This actually generates your images.
```
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Minimal script for generating an image using pre-trained StyleGAN generator."""
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
def main():
    """Generate one face with the pre-trained StyleGAN generator and save it as a PNG."""
    # Initialize TensorFlow.
    tflib.init_tf()
    # Load pre-trained network.
    url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
    with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
        _G, _D, Gs = pickle.load(f)
        # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
        # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
        # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
    # Print network details.
    Gs.print_layers()
    # Pick latent vector. RandomState() is unseeded, so every run draws a new face.
    rnd = np.random.RandomState()
    latents = rnd.randn(1, Gs.input_shape[1])
    # Generate image.
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
    # Save image.
    os.makedirs(config.result_dir, exist_ok=True)
    # NOTE(review): the second argument is an absolute path, so os.path.join
    # discards config.result_dir entirely and the PNG is written to Drive;
    # the directory created above goes unused. Confirm which location is intended.
    png_filename = os.path.join(config.result_dir, f'/content/drive/My Drive/images/example1.png')
    PIL.Image.fromarray(images[0], 'RGB').save(png_filename)

if __name__ == "__main__":
    main()
```
# Part 7.4: GANS for Semi-Supervised Training in Keras
GANs can also be used to implement semi-supervised learning/training. Normally GANs implement un-supervised training. This is because there are no y's (expected outcomes) provided in the dataset. The y-values are usually called labels. For the face generating GANs, there is typically no y-value, only images. This is unsupervised training. Supervised training occurs when we are training a model to predict known labels (y-values) from the input data.

The following paper describes the application of GANs to semi-supervised training.
* [Odena, A. (2016). Semi-supervised learning with generative adversarial networks. *arXiv preprint* arXiv:1606.01583.](https://arxiv.org/abs/1606.01583)
As you can see, supervised learning is where all data have labels. Supervised learning attempts to learn the labels from the training data to predict these labels for new data. Un-supervised learning has no labels and usually simply clusters the data or in the case of a GAN, learns to produce new data that resembles the training data. Semi-supervised training has a small number of labels for mostly unlabeled data. Semi-supervised learning is usually similar to supervised learning in that the goal is ultimately to predict labels for new data.
Traditionally, unlabeled data would simply be discarded if the overall goal was to create a supervised model. However, the unlabeled data is not without value. Semi-supervised training attempts to use this unlabeled data to help learn additional insights about what labels we do have. There are limits, however. Even semi-supervised training cannot learn entirely new labels that were not in the training set. This would include new classes for classification or learning to predict values outside of the range of the y-values.
Semi-supervised GANs can perform either classification or regression. Previously, we made use of the generator and discarded the discriminator. We simply wanted to create new photo-realistic faces, so we just needed the generator. Semi-supervised learning flips this, as we now discard the generator and make use of the discriminator as our final model.
### Semi-Supervised Classification Training
The following diagram shows how to apply GANs for semi-supervised classification training.

Semi-supervised classification training is laid out exactly the same as a regular GAN. The only difference is that it is not a simple true/false classifier, as was the case for image GANs that simply classified whether the generated image was real or fake. The additional classes are also added. Later in this module I will provide a link to an example of [The Street View House Numbers (SVHN) Dataset](http://ufldl.stanford.edu/housenumbers/). This dataset contains house numbers, as seen in the following image.

Perhaps all of the digits are not labeled. The GAN is setup to classify a real or fake digit, just as we did with the faces. However, we also expand upon the real digits to include classes 0-9. The GAN discriminator is classifying between the 0-9 digits and also fake digits. A semi-supervised GAN classifier always classifies to the number of classes plus one. The additional class indicates a fake classification.
### Semi-Supervised Regression Training
The following diagram shows how to apply GANs for semi-supervised regression training.

Neural networks can perform both classification and regression simultaneously, it is simply a matter of how the output neurons are mapped. A hybrid classification-regression neural network simply maps groups of output neurons to be each of the groups of classes to be predicted, along with individual neurons to perform any regression predictions needed.
A regression semi-supervised GAN is one such hybrid. The discriminator has two output neurons. The first output neuron performs the requested regression prediction. The second predicts the probability that the input was fake.
### Application of Semi-Supervised Regression
An example of using Keras for Semi-Supervised classification is provided here.
* [Semi-supervised learning with Generative Adversarial Networks (GANs)](https://towardsdatascience.com/semi-supervised-learning-with-gans-9f3cb128c5e)
* [Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://arxiv.org/abs/1511.06434)
* [The Street View House Numbers (SVHN) Dataset](http://ufldl.stanford.edu/housenumbers/)
# Part 7.5: An Overview of GAN Research
* [Keras Implementations of Generative Adversarial Networks](https://github.com/eriklindernoren/Keras-GAN)
* [Curated List of Awesome GAN Applications and Demo](https://github.com/nashory/gans-awesome-applications)
### Select Projects
* [Few-Shot Adversarial Learning of Realistic Neural Talking Head Models](https://arxiv.org/abs/1905.08233v1), [YouTube of Talking Heads](https://www.youtube.com/watch?v=p1b5aiTrGzY)
* [Pose Guided Person Image Generation](https://papers.nips.cc/paper/6644-pose-guided-person-image-generation.pdf)
* [Deep Fake](https://www.youtube.com/watch?v=cQ54GDm1eL0)
# Module 7 Assignment
You can find the first assignment here: [assignment 7](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class7.ipynb)
| github_jupyter |
# Python Tutorial for Data Science
## Introduction to Machine Learning: Classification with k-Nearest Neighbors
#### (Adapted from Data 8 Fall 2017 Project 3)
#### Patrick Chao 1/21/18
# Introduction
The purpose of this notebook is to serve as an elementary python tutorial introducing fundamental data science concepts including data exploration, classification, hyper-parameter tuning, plotting, and loss functions.
The tutorial is centered around the third project from Data 8 Fall 2017. This project involves classifying a movie's genre as either action or romance. In this notebook, we will explore how to process and understand the data, create a model, tune, and test.
## How to Avoid Overfitting: Train/Validation/Test
A huge part of machine learning is ensuring that our model performs well. With current technology, we have access to massive datasets and information, but the difficulty is parsing through all the numbers to understand something. Models can require huge amounts of data and can take hours or days to train on the data to perform well.
The first phase of training a model is, well, *training*.
### Training
In this part, the model continually learns on the data and improves. We use a subset of the data known as the **training set**. Given some model $f$, assume we have input $\vec{x}$ and a true label/output $\vec y$. We would like $f(\vec x)\approx \vec y$, or alternatively we would like to minimize $\|f(\vec x)-\vec y\|$. This value $\|f(\vec x)-\vec y\|$ is known as the **error** or **loss**, how close our model is to the correct value. When training on the training set, the error is more specifically known as the **training error**.
Our model looks through each training data instance and will have some prediction $f(\vec x)$. Based on the value of $\|f(\vec x)-\vec y\|$, the model will change slightly and improve. The more incorrect the prediction was, the more it will change. One method of optimizing our model $f$ is **gradient descent**.
When training, the error follows this form of a curve.
<img src='train.png' width="400" height="400">
You may consider model order as the "complexity" of the model. This may be more parameters, higher dimensionality, or more training. As your model trains, the training error will continually decrease. An analogy would be predicting a line using a $10$ degree polynomial. Since any $10$ degree polynomial has linear degree terms, a $10$ degree polynomial should be at least as good as a linear model for the training data. However, we shall see that this may not always be the case for all data.
### Pitfalls of Training: Overfitting
One trap that models may run into is **overfitting**. This is where the model over-trained on the data and does not extrapolate to other real world examples. The model becomes overly complex and attempts to fit every nuance of the data, and fails to generalize. An analogy would be using a $10$ degree polynomial to fit a line. It may be able to fit the training data extremely well, better than a line would, but it may fail for other points.
The best way to understand this is an example.
Consider the line $y=2x+1$. Assume that for a given value of $x$, the ground-truth value of $y$ is $2x+1$. We would like find a model $f(x)$ that predicts $y$ as best as possible. To do this, we will have some slightly perturbed input data from the range $100$ to $110$, denoted by the dotted black lines. Mess around with the demo by using various parameters.
```
import numpy as np
import matplotlib.pyplot as plt
# Fix the RNG seed so the demo is reproducible across runs.
np.random.seed(3)
# x values for the training data are drawn from [100, 110].
trainDataRange = [100,110]
# Convert to an ndarray so expressions like trainDataRange*a+b broadcast.
trainDataRange = np.array(trainDataRange)
#Generate random training data within the trainDataRange
#Parameter for showing plots and number of samples
def generateTrainData(numSamples=500, showPlot=True):
    """Sample noisy (x, y) training pairs from the true line y = a*x + b.

    x is uniform over trainDataRange; y gets Gaussian noise of standard
    deviation 10. Returns the pair (inputX, noisyY).
    """
    lo, hi = trainDataRange[0], trainDataRange[1]
    inputX = np.random.random_sample((numSamples,)) * (hi - lo) + lo
    trueY = a * inputX + b
    # Additive Gaussian noise, standard deviation 10.
    noisyY = trueY + np.random.randn(numSamples) * 10
    if showPlot:
        # Scatter the noisy samples and overlay the true line in red.
        plt.plot(inputX, noisyY, 'bo')
        plt.plot(trainDataRange, trainDataRange * a + b, 'r-', lw=2)
        plt.ylabel("Noisy Y")
        plt.xlabel("Input X")
        plt.title("Clean X and Noisy Y from Linear Relationship")
        plt.show()
        plt.clf()
    return inputX, noisyY
#Validate the data on a larger range
#Default is 70-140
#Training region is denoted by dotted lines
def validate(model,numSamples=500,dataRange=[70,140],showPlot=True):
    """Evaluate `model` on numSamples fresh x values drawn from dataRange.

    Optionally plots predictions against the true line y = a*x + b, with the
    training interval marked by vertical dotted lines, and returns the error
    between true and predicted y values.
    NOTE: the mutable default dataRange is safe here — it is only rebound
    to an ndarray below, never mutated in place.
    """
    dataRange=np.array(dataRange)
    #Generate x values from the data range
    inputX = np.random.random_sample((numSamples,))*(dataRange[1]-dataRange[0])+dataRange[0]
    trueY = a*inputX+b
    predY=predict(model,inputX)
    #Plot graphs
    if showPlot:
        #A bit of math to determine where to draw the dotted lines
        # Two x coordinates per dotted line (bottom and top endpoints).
        coordX1 = [trainDataRange[0]]*2
        coordX2 = [trainDataRange[1]]*2
        # y extent that covers both the true line and all predictions.
        minY = min(min(dataRange*a+b),min(predY))
        maxY = max(max(dataRange*a+b),max(predY))
        plt.plot(coordX1, [minY,maxY], 'k-', lw=1,linestyle="--")
        plt.plot(coordX2, [minY,maxY], 'k-', lw=1,linestyle="--")
        plt.plot(inputX,predY,'bo')
        plt.plot(dataRange, dataRange*a+b, 'r-', lw=2)
        plt.ylabel("Predicted Y")
        plt.xlabel("Input X")
        plt.title("Degree " + str (len(model)-1)+ " Model")
        plt.show()
    return error(trueY,predY)
# Fit a polynomial to the training data by ordinary least squares.
def model(trainX, trainY, degree=1):
    """Return degree-`degree` polynomial coefficients, highest power first.

    Builds a Vandermonde design matrix
    (https://en.wikipedia.org/wiki/Vandermonde_matrix) and solves the
    normal equations (A^T A) w = A^T y.
    """
    A = np.vander(trainX, degree + 1)
    return np.linalg.solve(A.T @ A, A.T @ trainY)
# Evaluate a fitted polynomial model at the given x values.
def predict(model, x):
    """Return the polynomial's values at x (coefficients in np.vander order)."""
    deg = len(model) - 1
    return np.vander(x, deg + 1) @ model
# Scaled l2 error between the true and predicted values.
def error(trueY, predY):
    """Return ||trueY - predY||_2 divided by the number of samples."""
    residual = trueY - predY
    return np.linalg.norm(residual) / len(trueY)
#Generates graphs of different degree models
#Plots training error and test error
def overfittingDemo(maxDegree=6):
    """Fit polynomials of degree 0..maxDegree and plot training vs test error."""
    trainX,trainY = generateTrainData(showPlot=False)
    trainError = []
    testError = []
    #Iterate over all model orders
    for deg in range(maxDegree+1):
        currModel = model(trainX,trainY,degree=deg)
        predTrainY = predict(currModel,trainX)
        currTrainErr = error(trainY,predTrainY)
        # validate() also draws the prediction plot for this degree.
        currTestErr = validate(currModel,showPlot=True)
        trainError.append(currTrainErr)
        testError.append(currTestErr)
    #Plot the errors
    plt.figure(figsize=(15,4))
    plt.subplot(1,3,1)
    plt.plot(range(maxDegree+1),trainError)
    plt.ylabel("Error")
    plt.title("Training Error")
    # The middle panel drops the degree-0 model, whose large error
    # would squash the scale of the remaining points.
    plt.subplot(1,3,2)
    plt.plot(range(1,maxDegree+1),trainError[1:])
    plt.xlabel("Degree of Model")
    plt.title("Training Error w/o Deg 0")
    plt.subplot(1,3,3)
    plt.plot(range(maxDegree+1),testError)
    plt.title("Test Error")
    plt.show()
    # Uncomment if you are curious about the actual error values
    # print("Training Errors:",trainError)
    # print("Test Errors:",testError)
#True model: y=ax+b
# Ground-truth line parameters read as globals by generateTrainData and validate.
a = 2
b = 1
#To try your own examples
#Uncomment to test around yourself!
overfittingDemo()
#Comment the overfitting demo to try your own parameters
# #Create and Visualize Training Data
# trainX,trainY = generateTrainData()
#Degree 1 Model
# model1 = model(trainX,trainY,degree=1)
# err1 = validate(model1)
# #Degree 2 Model
# model2 = model(trainX,trainY,degree=2)
# err2 = validate(model2)
# #Model Parameters (how close is it to a and b?)
# print("Degree 1 parameters",model1)
# print("Degree 2 parameters",model2)
# #Error Values
# print(err1,err2)
```
### Overfitting Continued
Hopefully from the demo, it is clear that the best model is the linear model. The other higher order polynomials obtain slightly lower training errors from about $0.4405$ to $0.4385$, a $.45\%$ decrease in training error. One might think that just obtaining the lowest training error is best, but from the test error, we find that this results in drastically worse test errors, from $0.077$ for a linear polynomial to $282.6$ for a degree $6$ polynomial, a huge decrease in performance.
Overfitting is shown in the graph below. On the y-axis is true error, some undeterminable quantity, and the x-axis is how complex our model is.
<img src='trueError.png' width="400" height="400">
Another important note is **underfitting**! At the lefthand side of the graph, our model is not complex enough to properly grasp the data and does not perform well on the data. This may be seen through our degree $0$ model.
Overfitting and underfitting are major pitfalls in machine learning. It originally seems that we are doing great because our training error gets lower and lower, but we may have already crossed the threshold where we are overfitting to our data. How do we avoid this?
### Validation
Our savior is validation! The essence of validation is to set aside some data, called **validation data** that we do not train on, and we find the error of our model on this validation data. Now using this as a form of "safety check", we can determine when our model begins to overfit and stop training there. There are many methods of implementing this validation data, such as initially setting aside $20\%$ of your data from the start to serve as validation. Another method is known as **k-fold cross-validation**. I will not go into it here, but it is relatively straight forward so I encourage anyone interested to read [here](https://en.wikipedia.org/wiki/Cross-validation_(statistics%29).
This is one form of ensuring that we do not overfit. Another largely used method is known as **regularization**. This is applying some kind of prior belief on our model. If we believe that our model should rely on only a small number of features of small magnitude, then coefficients of the form $(0,1.99,1.01)$ make more sense than $(-0.0165,5.47, -181)$. One method of regularization is **ridge regression**, where we add a penalty **$\lambda$** that essentially adds the magnitude of the weight vector as part of the loss function. This is a heavily used method of preventing overfitting as it does not necesarily require you to watch over the model. Just for kicks, try the ridge demo below! If you are curious about ridge regression and how it works, read [here!](https://en.wikipedia.org/wiki/Tikhonov_regularization)
```
# Re-seed so the ridge demo draws a fresh, reproducible data set.
np.random.seed(4)

def ridgeDemo(lambdaCoeff, maxDegree=6):
    """Fit a ridge-regularized degree-maxDegree polynomial; print params and errors."""
    trainX, trainY = generateTrainData(showPlot=False)
    fitted = ridgeModel(trainX, trainY, lambdaCoeff=lambdaCoeff, degree=maxDegree)
    fittedTrainY = predict(fitted, trainX)
    trainErr = error(trainY, fittedTrainY)
    testErr = validate(fitted, showPlot=True)
    print("Model params with degree", maxDegree, ":", fitted)
    print("Training Error:", trainErr)
    print("Test Error:", testErr)
def ridgeModel(trainX, trainY, lambdaCoeff, degree=10):
    """Ridge regression over a Vandermonde basis.

    Solves (A^T A + lambda*I) w = A^T y; lambdaCoeff penalizes large
    coefficients, shrinking the fit toward simpler models.
    """
    A = np.vander(trainX, degree + 1)
    penalty = lambdaCoeff * np.eye(A.shape[1])
    return np.linalg.solve(A.T @ A + penalty, A.T @ trainY)
# Demonstrate ridge regularization on a degree-6 fit with lambda = 1.
ridgeDemo(lambdaCoeff = 1)
```
The curve is still not a great prediction, but the parameters are incredibly small. For the $x^6$ term, the coefficient is on the order of magnitude $10^{-8}$, and the test error is only $7.97$, significantly better than the $282$ from before. If there was some way to set the extremely small values to $0$, that would be fantastic! (See [lasso regression](https://en.wikipedia.org/wiki/Lasso_(statistics%29).)
## Classification vs Regression
In general, there are two major types of machine learning problems, classification and regression.
*Classification* is a problem where we would like to *classify* some sample input into a class or category. For example, we could classify the genre of a movie or classify a handwritten digit as $0-9$. The input may be a list of **features**, or *qualities* of a sample (for digits this would the individual pixels of the image), and the output is a class or label. Note that the bins are discrete and often categorical, and there are a finite number of classes.
For modeling classification problems, this may involve generating a probability for each class, and selecting the class with the highest probability. In this notebook, we will investigate a simpler model, using a method called *k-nearest neighbors*.
*Regression* does not depend on distinct classes for labels. The input is still a set of features, but the output is instead a continuous value. This may be predicting the population in $5$ years or the temperature tomorrow. In this situation, the "right answer" is more vague. If we predict the temperature to be $70$ degrees tomorrow but it is actually $71$, are we right? What if we predicted $70.5$ degrees? This adds a layer of complexity between regression and classification.
For modeling regression problems this may be done by creating some function approximation in terms of the input. For example, linear regression is the simplest model, and outputs a continuous value. There are more complex models such as *neural networks* that act as universal function approximators.
# k-Nearest Neighbors
The **k-Nearest Neighbors** (kNN) algorithm is one of the simplest models. The core idea is that a similar set of features should have the same label. For example if we receive an image $A$ as input where we would like to classify the digit, we could look at what other images look like $A$ in our training set. If we were doing $5$-nearest neighbors, we would find the $5$ images closest to $A$ in our data set, and return the most common digit among the $5$. In general, you may choose any value for $k$, $5$ may not be the best choice. Note, this has a $O(1)$ training time! This is the fastest algorithm for training, as there is no training!
However, some questions immediately arise. How do you determine how close two images are? Why choose $5$, not $10$ or $100$? There are other consequences as well; you need to look through your entire dataset each time to determine the $k$ closest images, which could take a long time if your training set is huge. The prediction time for kNN is $O(n)$, which is much slower than something like linear regression, where the prediction is $O(1)$.
We will address these questions and the shortcomings of kNN.
A few conceptual questions for understanding:
1. In binary classification (two classes), why is choosing an odd value for $k$ better than an even value?
2. Given two separate ordered pairs of two values, $(a,b)$ and $(x,y)$, what possibilities are there for calculating the "distance" between them? What are the differences between approaches?
3. Assume we are doing image classification. List any possible issues with image classification.
4. What does 1-NN mean? If we have $n$ training data, what is $n$-NN? What are some of the *tradeoffs* for varying $k$ between $1$ and $n$?
```
# Run this cell to set up the notebook, but please don't change it.
import numpy as np
import math
# These lines set up the plotting functionality and formatting.
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import pandas as pd
#You may need to pip install pandas/matplotlib
#Given a movie title, this returns the frequency of given words
def wordFreq(movieTitle, words):
    """Return the frequencies of `words` for the movie `movieTitle`.

    Matching is case-insensitive. Prints a message and returns "" when the
    title or any of the words cannot be found.
    """
    # Normalize title and words to lower case for case-insensitive matching.
    movieTitle = movieTitle.lower()
    words = [word.lower() for word in words]
    #Check if movie title is found
    try:
        #*Your Code Here*
        movie = None
    except Exception:  # narrowed from a bare except, which also swallowed KeyboardInterrupt
        print("Movie title not found!")
        return ""
    #Check if given words are not found
    try:
        # .to_numpy() replaces DataFrame.as_matrix(), which was deprecated
        # and removed in pandas 1.0.
        wordFrequencies = movie[words].to_numpy()[0]
    except Exception:
        print("Words not found")
        return ""
    return wordFrequencies
#Let's see what our dataset looks like!
# movies: DataFrame loaded from movies.csv — later cells read its "Title"
# and "Genre" columns plus per-word frequency columns.
movies = pd.read_csv('movies.csv')
movies.head()
#What type is movies?
print(type(movies))
#What is the frequency of the words "hey" and "i" in the matrix? Try some yourself!
print(wordFreq("batman",["Hey","i"]))
```
## kNN Classification and Feature Selection
Our goal is to be able to classify movies based on the frequency of various words in the script. However it is not feasible to use all the words as that is very calculation intensive. An alternative is to select certain features, but what features do we select? One method is to look at which words are often in romance movies but not action, and vice versa. This is called **feature selection**.
First, we will separate the data into training and validation data. Next, we may create some elementary functions such as the distance between movies, getting movies as pandas series, and finding the $k$ movies closest to some given movie.
```
#Split the data into 80% training and 20% validation
trainingPercentage = 80/100
numMovies = movies.shape[0]
numTraining = (int)(numMovies*trainingPercentage)
numValidation = numMovies - numTraining
#Training Set (first 80% of rows; assumes the CSV rows are not ordered by genre — TODO confirm)
trainingSet = movies[0:numTraining]
#Validation Set (remaining 20%)
validationSet = movies[numTraining:]
#Separate the training data into action and romance subsets
action = trainingSet[trainingSet["Genre"]=="action"]
romance = trainingSet[trainingSet["Genre"]=="romance"]
#Given two movie titles mov1,mov2, and a list of words
#distance returns the euclidean distance between the two movies using the words as features
def distance(mov1, mov2, words):
    """Exercise stub: Euclidean distance between two movies in word-frequency space.

    Intended implementation: fetch each movie's frequencies for `words`
    and return sqrt(sum of squared differences).
    """
    #*Your Code Here*
    mov1Freq = None  # word-frequency vector of mov1 (e.g. via wordFreq)
    mov2Freq = None  # word-frequency vector of mov2
    dist = None      # Euclidean distance between the two vectors
    return dist
#Given a movie title, this returns the row as a pandas series
def getMovie(title):
    """Exercise stub: return the row of `movies` whose Title matches `title`.

    Matching is case-insensitive (the title is lower-cased first).
    """
    #*Your Code Here*
    title = title.lower()
    movie = None  # TODO: look up the matching row in the `movies` DataFrame
    return movie
#Given a movie as a panda series, determines the k closest movies using words as features
#Returns the dataframe of movies
def kShortestDistance(k, movie, movieSet, words):
    """Exercise stub: brute-force k-nearest-neighbour lookup.

    Builds (distance, row-index) pairs for every movie in `movieSet`,
    sorts by distance, and returns rows 1..k (row 0 is assumed to be the
    query movie itself — TODO confirm the query is always in `movieSet`).
    """
    distances=[]
    #Iterate over all movies
    for i in range(movieSet.shape[0]):
        currMovieTitle = movieSet.iloc[i]["Title"]
        #Get the distance of two movies from two movies
        #*Your Code Here*
        # (append (distance, i) pairs to `distances` here)
    #Sort the array by distance (first tuple element)
    distances = sorted(distances,key=lambda x:x[0])
    #Get the indices of the movies in ascending-distance order
    indices = [x[1] for x in distances]
    # Skip index 0 (the query itself) and return the next k rows
    return movieSet.iloc[indices[1:k+1]]
#Vectorised kShortestDistance: one pandas expression instead of a Python loop
def kShortestDistanceFast(k, movie, movieSet, words):
    """Return the k movies in `movieSet` nearest to `movie`.

    Distances are squared Euclidean distances over the `words` feature
    columns. The closest row (the query movie itself) is skipped, so the
    returned DataFrame holds the next k rows. Assumes `movieSet` has a
    contiguous integer index (possibly offset, e.g. the validation set).
    """
    # Restrict both operands to the chosen feature columns
    feature_matrix = movieSet[words]
    target = movie[words].squeeze()
    # Squared Euclidean distance per row, then ascending sort
    squared_dists = ((feature_matrix - target) ** 2).sum(axis=1).sort_values()
    # Translate DataFrame labels back into 0-based positions for .iloc
    ordered_labels = squared_dists.index.tolist()
    offset = min(ordered_labels)
    positions = [label - offset for label in ordered_labels]
    # positions[0] is the query movie itself; return the next k
    return movieSet.iloc[positions[1:k + 1]]
#Given a list of movies, returns the majority genre
def getMajority(nearestMovies):
    """Return the majority genre ("action" or "romance") among `nearestMovies`.

    Action wins only with a strict majority; ties therefore go to romance.
    """
    total = nearestMovies.shape[0]
    # Frequency of each genre among the neighbours
    genre_counts = nearestMovies['Genre'].value_counts()
    # Only one genre present: return it directly
    if len(genre_counts) == 1:
        return genre_counts.index[0]
    return "action" if genre_counts["action"] > total / 2 else "romance"
#Given a dataset, a set of word features, and the value of k
#Returns the percentage correct (0-100)
def accuracy(dataset, features, k):
    """Leave-one-out kNN accuracy of genre prediction, as a percentage.

    For every movie in `dataset`, predicts its genre from the majority of
    its k nearest neighbours (the movie itself is excluded inside
    kShortestDistanceFast) and counts how often the prediction matches.
    """
    correct = 0
    total = dataset.shape[0]
    for row in range(total):
        candidate = dataset.iloc[row].squeeze()
        actual_genre = candidate["Genre"]
        # k nearest neighbours of this movie within the same dataset
        neighbours = kShortestDistanceFast(k, candidate, dataset, features)
        if getMajority(neighbours) == actual_genre:
            correct += 1
    #Return accuracy as percentage
    return correct * 1.0 / total * 100
```
The code below uses "power" and "love" as features to find the $5$ closest movies to "batman returns". Then we get the majority of the genres of those five movies, and we find that batman returns is predicted to be action based on those $5$ movies.
```
#Use "money" and "feel" as features
features = ["power","love"]
movie = getMovie("batman returns")
#Get the five closest movies to the "batman returns" using the training set
closest=kShortestDistance(5,movie,trainingSet,features)
#Given the closest movies, returns the majority
#Represents the kNN Prediction
getMajority(closest)
```
Use this word plot (courtesy of Data 8) to construct some of your own features!
<img src='wordplot.png' width="700" height="700">
```
#Try with some of your own features!
features = ["power","feel"]
k=5
accuracy(trainingSet,features,k)
```
With our own chosen features, we then use the training set to determine the optimal value for $k$. Afterwards, we use this value of $k$ to find the accuracy on the validation data.
```
#Modify this!
features = ["power","feel"]
#Determine the best value for k
trainAccuracies = []
numKValues = 30
for i in range(numKValues):
acc =accuracy(trainingSet,features,2*i+1)
trainAccuracies.append(acc)
xAxis = ([2*i+1 for i in range(numKValues)])
plt.plot(xAxis,trainAccuracies)
plt.show()
```
Using the previous information about overfitting and underfitting, explain the shape of the graph! Why is the accuracy low for $k=1$ and as $k$ increases past $15$?
```
#Determine best value for k
optimalK=xAxis[np.argmax(trainAccuracies)]
#Best kNN was found with k=11
print("Best k:",optimalK)
#Determine validation error with this value for k
optimalKNNVal = accuracy(validationSet,features,optimalK)
print("Test Accuracy:",optimalKNNVal)
```
Why is the accuracy for the validation set lower than the training accuracy (about $75\%$)?
| github_jupyter |
```
from config import (BATCH_SIZE, CLIP_REWARD, DISCOUNT_FACTOR, ENV_NAME,
EVAL_LENGTH, FRAMES_BETWEEN_EVAL, INPUT_SHAPE,
LEARNING_RATE, LOAD_FROM, LOAD_REPLAY_BUFFER,
MAX_EPISODE_LENGTH, MAX_NOOP_STEPS, MEM_SIZE,
MIN_REPLAY_BUFFER_SIZE, PRIORITY_SCALE, SAVE_PATH,
TENSORBOARD_DIR, TOTAL_FRAMES, UPDATE_FREQ, USE_PER,
WRITE_TENSORBOARD)
import numpy as np
import cv2
import random
import os
import json
import time
import gym
import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling
from tensorflow.keras.layers import (Add, Conv2D, Dense, Flatten, Input,
Lambda, Subtract)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, RMSprop
# This function can resize to any shape, but was built to resize to 84x84
def process_frame(frame, shape=(84, 84)):
"""Preprocesses a 210x160x3 frame to 84x84x1 grayscale
Arguments:
frame: The frame to process. Must have values ranging from 0-255
Returns:
The processed frame
"""
frame = frame.astype(np.uint8) # cv2 requires np.uint8, other dtypes will not work
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = frame[34:34+160, :160] # crop image
frame = cv2.resize(frame, shape, interpolation=cv2.INTER_NEAREST)
frame = frame.reshape((*shape, 1))
return frame
def build_q_network(n_actions, learning_rate=0.00001, input_shape=(84, 84), history_length=4):
    """Builds a dueling DQN as a Keras model
    Arguments:
        n_actions: Number of possible action the agent can take
        learning_rate: Learning rate
        input_shape: Shape of the preprocessed frame the model sees
        history_length: Number of historical frames the agent can see
    Returns:
        A compiled Keras model
    """
    model_input = Input(shape=(input_shape[0], input_shape[1], history_length))
    x = Lambda(lambda layer: layer / 255)(model_input)  # normalize by 255
    # Standard DQN convolutional trunk (no biases; He-style init via VarianceScaling)
    x = Conv2D(32, (8, 8), strides=4, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(64, (4, 4), strides=2, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(64, (3, 3), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    x = Conv2D(1024, (7, 7), strides=1, kernel_initializer=VarianceScaling(scale=2.), activation='relu', use_bias=False)(x)
    # Split into value and advantage streams (dueling architecture):
    # half the channels estimate V(s), half estimate A(s, a)
    val_stream, adv_stream = Lambda(lambda w: tf.split(w, 2, 3))(x)  # custom splitting layer
    val_stream = Flatten()(val_stream)
    val = Dense(1, kernel_initializer=VarianceScaling(scale=2.))(val_stream)
    adv_stream = Flatten()(adv_stream)
    adv = Dense(n_actions, kernel_initializer=VarianceScaling(scale=2.))(adv_stream)
    # Combine streams into Q-Values: Q = V + (A - mean(A)),
    # subtracting the mean keeps V and A identifiable
    reduce_mean = Lambda(lambda w: tf.reduce_mean(w, axis=1, keepdims=True))  # custom layer for reduce mean
    q_vals = Add()([val, Subtract()([adv, reduce_mean(adv)])])
    # Build model
    model = Model(model_input, q_vals)
    model.compile(Adam(learning_rate), loss=tf.keras.losses.Huber())
    return model
class GameWrapper:
    """Wrapper for the environment provided by Gym.

    Maintains a rolling stack of the last `history_length` processed frames
    in `self.state`, which is what the agent consumes.
    """
    def __init__(self, env_name, no_op_steps=10, history_length=4):
        self.env = gym.make(env_name)
        self.no_op_steps = no_op_steps
        # NOTE(review): hard-coded 4 here ignores the history_length argument —
        # confirm callers never pass a different value.
        self.history_length = 4
        self.state = None   # (H, W, history_length) stack of processed frames
        self.last_lives = 0  # lives remaining after the previous step
    def reset(self, evaluation=False):
        """Resets the environment
        Arguments:
            evaluation: Set to True when the agent is being evaluated. Takes a random number of no-op steps if True.
        """
        self.frame = self.env.reset()
        self.last_lives = 0
        # If evaluating, take a random number of no-op steps.
        # This adds an element of randomness, so that the each
        # evaluation is slightly different.
        # (Action 1 is used here; presumably 'FIRE' for Breakout — TODO confirm.)
        if evaluation:
            for _ in range(random.randint(0, self.no_op_steps)):
                self.env.step(1)
        # For the initial state, we stack the first frame four times
        self.state = np.repeat(process_frame(self.frame), self.history_length, axis=2)
    def step(self, action, render_mode=None):
        """Performs an action and observes the result
        Arguments:
            action: An integer describe action the agent chose
            render_mode: None doesn't render anything, 'human' renders the screen in a new window, 'rgb_array' returns an np.array with rgb values
        Returns:
            processed_frame: The processed new frame as a result of that action
            reward: The reward for taking that action
            terminal: Whether the game has ended
            life_lost: Whether a life has been lost
            new_frame: The raw new frame as a result of that action
            If render_mode is set to 'rgb_array' this also returns the rendered rgb_array
        """
        new_frame, reward, terminal, info = self.env.step(action)
        # In the commonly ignored 'info' or 'meta' data returned by env.step
        # we can get information such as the number of lives the agent has.
        # We use this here to find out when the agent loses a life, and
        # if so, we set life_lost to True.
        # We use life_lost to force the agent to start the game
        # and not sit around doing nothing.
        # NOTE(review): 'ale.lives' is ALE/Atari specific — this wrapper will
        # KeyError on non-Atari environments.
        if info['ale.lives'] < self.last_lives:
            life_lost = True
        else:
            life_lost = terminal
        self.last_lives = info['ale.lives']
        processed_frame = process_frame(new_frame)
        # Slide the frame stack: drop the oldest frame, append the newest
        self.state = np.append(self.state[:, :, 1:], processed_frame, axis=2)
        if render_mode == 'rgb_array':
            return processed_frame, reward, terminal, life_lost, self.env.render(render_mode)
        elif render_mode == 'human':
            self.env.render()
        return processed_frame, reward, terminal, life_lost
class ReplayBuffer:
    """Replay Buffer to store transitions.
    This implementation was heavily inspired by Fabio M. Graetz's replay buffer
    here: https://github.com/fg91/Deep-Q-Learning/blob/master/DQN.ipynb"""
    def __init__(self, size=1000000, input_shape=(84, 84), history_length=4, use_per=True):
        """
        Arguments:
            size: Integer, Number of stored transitions
            input_shape: Shape of the preprocessed frame
            history_length: Integer, Number of frames stacked together to create a state for the agent
            use_per: Use PER instead of classic experience replay
        """
        self.size = size
        self.input_shape = input_shape
        self.history_length = history_length
        self.count = 0  # total index of memory written to, always less than self.size
        self.current = 0  # index to write to
        # Pre-allocate memory
        self.actions = np.empty(self.size, dtype=np.int32)
        self.rewards = np.empty(self.size, dtype=np.float32)
        self.frames = np.empty((self.size, self.input_shape[0], self.input_shape[1]), dtype=np.uint8)
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement and yields the identical array dtype.
        self.terminal_flags = np.empty(self.size, dtype=bool)
        self.priorities = np.zeros(self.size, dtype=np.float32)
        self.use_per = use_per
    def add_experience(self, action, frame, reward, terminal, clip_reward=True):
        """Saves a transition to the replay buffer
        Arguments:
            action: An integer between 0 and env.action_space.n - 1
                determining the action the agent perfomed
            frame: A (84, 84, 1) frame of the game in grayscale
            reward: A float determining the reward the agend received for performing an action
            terminal: A bool stating whether the episode terminated
            clip_reward: If True, the reward is clipped to its sign (-1, 0, +1)
        Raises:
            ValueError: If the frame shape does not match input_shape
        """
        if frame.shape != self.input_shape:
            raise ValueError('Dimension of frame is wrong!')
        if clip_reward:
            reward = np.sign(reward)
        # Write memory (ring buffer: `current` wraps around `size`)
        self.actions[self.current] = action
        self.frames[self.current, ...] = frame
        self.rewards[self.current] = reward
        self.terminal_flags[self.current] = terminal
        self.priorities[self.current] = max(self.priorities.max(), 1)  # make the most recent experience important
        self.count = max(self.count, self.current+1)
        self.current = (self.current + 1) % self.size
    def get_minibatch(self, batch_size=32, priority_scale=0.0):
        """Returns a minibatch of self.batch_size = 32 transitions
        Arguments:
            batch_size: How many samples to return
            priority_scale: How much to weight priorities. 0 = completely random, 1 = completely based on priority
        Returns:
            A tuple of states, actions, rewards, new_states, and terminals
            If use_per is True:
                An array describing the importance of transition. Used for scaling gradient steps.
                An array of each index that was sampled
        Raises:
            ValueError: If fewer than history_length transitions are stored
        """
        if self.count < self.history_length:
            raise ValueError('Not enough memories to get a minibatch')
        # Get sampling probabilities from priority list
        if self.use_per:
            scaled_priorities = self.priorities[self.history_length:self.count-1] ** priority_scale
            sample_probabilities = scaled_priorities / sum(scaled_priorities)
        # Get a list of valid indices
        indices = []
        for i in range(batch_size):
            while True:
                # Get a random number from history_length to maximum frame written with probabilities based on priority weights
                if self.use_per:
                    index = np.random.choice(np.arange(self.history_length, self.count-1), p=sample_probabilities)
                else:
                    index = random.randint(self.history_length, self.count - 1)
                # We check that all frames are from same episode with the two following if statements.  If either are True, the index is invalid.
                if index >= self.current and index - self.history_length <= self.current:
                    continue
                if self.terminal_flags[index - self.history_length:index].any():
                    continue
                break
            indices.append(index)
        # Retrieve states from memory: each state is the history_length
        # frames ending just before `idx`; the new state is shifted by one
        states = []
        new_states = []
        for idx in indices:
            states.append(self.frames[idx-self.history_length:idx, ...])
            new_states.append(self.frames[idx-self.history_length+1:idx+1, ...])
        # (batch, frames, H, W) -> (batch, H, W, frames) as the network expects
        states = np.transpose(np.asarray(states), axes=(0, 2, 3, 1))
        new_states = np.transpose(np.asarray(new_states), axes=(0, 2, 3, 1))
        if self.use_per:
            # Get importance weights from probabilities calculated earlier
            importance = 1/self.count * 1/sample_probabilities[[index - self.history_length for index in indices]]
            importance = importance / importance.max()
            return (states, self.actions[indices], self.rewards[indices], new_states, self.terminal_flags[indices]), importance, indices
        else:
            return states, self.actions[indices], self.rewards[indices], new_states, self.terminal_flags[indices]
    def set_priorities(self, indices, errors, offset=0.1):
        """Update priorities for PER
        Arguments:
            indices: Indices to update
            errors: For each index, the error between the target Q-vals and the predicted Q-vals
            offset: Small constant added so no priority is ever zero
        """
        for i, e in zip(indices, errors):
            self.priorities[i] = abs(e) + offset
    def save(self, folder_name):
        """Save the replay buffer to a folder"""
        if not os.path.isdir(folder_name):
            os.mkdir(folder_name)
        np.save(folder_name + '/actions.npy', self.actions)
        np.save(folder_name + '/frames.npy', self.frames)
        np.save(folder_name + '/rewards.npy', self.rewards)
        np.save(folder_name + '/terminal_flags.npy', self.terminal_flags)
        # Persist priorities too; without them PER sampling degenerates
        # after a load (all restored priorities would be zero).
        np.save(folder_name + '/priorities.npy', self.priorities)
    def load(self, folder_name):
        """Loads the replay buffer from a folder"""
        self.actions = np.load(folder_name + '/actions.npy')
        self.frames = np.load(folder_name + '/frames.npy')
        self.rewards = np.load(folder_name + '/rewards.npy')
        self.terminal_flags = np.load(folder_name + '/terminal_flags.npy')
        # Backwards compatible: saves made before priorities were persisted
        # simply keep the buffer's current priority array.
        priorities_path = folder_name + '/priorities.npy'
        if os.path.isfile(priorities_path):
            self.priorities = np.load(priorities_path)
class Agent(object):
    """Implements a standard DDDQN (double dueling deep Q-network) agent"""
    def __init__(self,
                 dqn,
                 target_dqn,
                 replay_buffer,
                 n_actions,
                 input_shape=(84, 84),
                 batch_size=32,
                 history_length=4,
                 eps_initial=1,
                 eps_final=0.1,
                 eps_final_frame=0.01,
                 eps_evaluation=0.0,
                 eps_annealing_frames=1000000,
                 replay_buffer_start_size=50000,
                 max_frames=25000000,
                 use_per=True):
        """
        Arguments:
            dqn: A DQN (returned by the DQN function) to predict moves
            target_dqn: A DQN (returned by the DQN function) to predict target-q values.  This can be initialized in the same way as the dqn argument
            replay_buffer: A ReplayBuffer object for holding all previous experiences
            n_actions: Number of possible actions for the given environment
            input_shape: Tuple/list describing the shape of the pre-processed environment
            batch_size: Number of samples to draw from the replay memory every updating session
            history_length: Number of historical frames available to the agent
            eps_initial: Initial epsilon value.
            eps_final: The "half-way" epsilon value.  The epsilon value decreases more slowly after this
            eps_final_frame: The final epsilon value
            eps_evaluation: The epsilon value used during evaluation
            eps_annealing_frames: Number of frames during which epsilon will be annealed to eps_final, then eps_final_frame
            replay_buffer_start_size: Size of replay buffer before beginning to learn (after this many frames, epsilon is decreased more slowly)
            max_frames: Number of total frames the agent will be trained for
            use_per: Use PER instead of classic experience replay
        """
        self.n_actions = n_actions
        self.input_shape = input_shape
        self.history_length = history_length
        # Memory information
        self.replay_buffer_start_size = replay_buffer_start_size
        self.max_frames = max_frames
        self.batch_size = batch_size
        self.replay_buffer = replay_buffer
        self.use_per = use_per
        # Epsilon information
        self.eps_initial = eps_initial
        self.eps_final = eps_final
        self.eps_final_frame = eps_final_frame
        self.eps_evaluation = eps_evaluation
        self.eps_annealing_frames = eps_annealing_frames
        # Slopes and intercepts for exploration decrease
        # (Credit to Fabio M. Graetz for this and calculating epsilon based on frame number)
        # Two linear segments: fast anneal eps_initial -> eps_final, then a
        # slower anneal eps_final -> eps_final_frame until max_frames.
        self.slope = -(self.eps_initial - self.eps_final) / self.eps_annealing_frames
        self.intercept = self.eps_initial - self.slope*self.replay_buffer_start_size
        self.slope_2 = -(self.eps_final - self.eps_final_frame) / (self.max_frames - self.eps_annealing_frames - self.replay_buffer_start_size)
        self.intercept_2 = self.eps_final_frame - self.slope_2*self.max_frames
        # DQN
        self.DQN = dqn
        self.target_dqn = target_dqn
    def calc_epsilon(self, frame_number, evaluation=False):
        """Get the appropriate epsilon value from a given frame number
        Arguments:
            frame_number: Global frame number (used for epsilon)
            evaluation: True if the model is evaluating, False otherwise (uses eps_evaluation instead of default epsilon value)
        Returns:
            The appropriate epsilon value
        """
        if evaluation:
            return self.eps_evaluation
        elif frame_number < self.replay_buffer_start_size:
            return self.eps_initial
        elif frame_number >= self.replay_buffer_start_size and frame_number < self.replay_buffer_start_size + self.eps_annealing_frames:
            return self.slope*frame_number + self.intercept
        elif frame_number >= self.replay_buffer_start_size + self.eps_annealing_frames:
            return self.slope_2*frame_number + self.intercept_2
    def get_action(self, frame_number, state, evaluation=False):
        """Query the DQN for an action given a state
        Arguments:
            frame_number: Global frame number (used for epsilon)
            state: State to give an action for
            evaluation: True if the model is evaluating, False otherwise (uses eps_evaluation instead of default epsilon value)
        Returns:
            An integer as the predicted move
        """
        # Calculate epsilon based on the frame number
        eps = self.calc_epsilon(frame_number, evaluation)
        # With chance epsilon, take a random action
        if np.random.rand(1) < eps:
            return np.random.randint(0, self.n_actions)
        # Otherwise, query the DQN for an action (greedy w.r.t. Q-values)
        q_vals = self.DQN.predict(state.reshape((-1, self.input_shape[0], self.input_shape[1], self.history_length)))[0]
        return q_vals.argmax()
    def get_intermediate_representation(self, state, layer_names=None, stack_state=True):
        """
        Get the output of a hidden layer inside the model.  This will be/is used for visualizing model
        Arguments:
            state: The input to the model to get outputs for hidden layers from
            layer_names: Names of the layers to get outputs from.  This can be a list of multiple names, or a single name
            stack_state: Stack `state` four times so the model can take input on a single (84, 84, 1) frame
        Returns:
            Outputs to the hidden layers specified, in the order they were specified.
        """
        # Prepare list of layers
        if isinstance(layer_names, list) or isinstance(layer_names, tuple):
            layers = [self.DQN.get_layer(name=layer_name).output for layer_name in layer_names]
        else:
            layers = self.DQN.get_layer(name=layer_names).output
        # Model for getting intermediate output
        temp_model = tf.keras.Model(self.DQN.inputs, layers)
        # Stack state 4 times
        if stack_state:
            if len(state.shape) == 2:
                state = state[:, :, np.newaxis]
            state = np.repeat(state, self.history_length, axis=2)
        # Put it all together
        return temp_model.predict(state.reshape((-1, self.input_shape[0], self.input_shape[1], self.history_length)))
    def update_target_network(self):
        """Update the target Q network"""
        self.target_dqn.set_weights(self.DQN.get_weights())
    def add_experience(self, action, frame, reward, terminal, clip_reward=True):
        """Wrapper function for adding an experience to the Agent's replay buffer"""
        self.replay_buffer.add_experience(action, frame, reward, terminal, clip_reward)
    def learn(self, batch_size, gamma, frame_number, priority_scale=1.0):
        """Sample a batch and use it to improve the DQN
        Arguments:
            batch_size: How many samples to draw for an update
            gamma: Reward discount
            frame_number: Global frame number (used for calculating importances)
            priority_scale: How much to weight priorities when sampling the replay buffer. 0 = completely random, 1 = completely based on priority
        Returns:
            The loss between the predicted and target Q as a float
        """
        if self.use_per:
            (states, actions, rewards, new_states, terminal_flags), importance, indices = self.replay_buffer.get_minibatch(batch_size=self.batch_size, priority_scale=priority_scale)
            # Anneal the importance-sampling exponent with epsilon
            importance = importance ** (1-self.calc_epsilon(frame_number))
        else:
            states, actions, rewards, new_states, terminal_flags = self.replay_buffer.get_minibatch(batch_size=self.batch_size, priority_scale=priority_scale)
        # Main DQN estimates best action in new states (double-DQN action selection)
        arg_q_max = self.DQN.predict(new_states).argmax(axis=1)
        # Target DQN estimates q-vals for new states
        future_q_vals = self.target_dqn.predict(new_states)
        double_q = future_q_vals[range(batch_size), arg_q_max]
        # Calculate targets (bellman equation); terminal states have no bootstrap term
        target_q = rewards + (gamma*double_q * (1-terminal_flags))
        # Use targets to calculate loss (and use loss to calculate gradients)
        with tf.GradientTape() as tape:
            q_values = self.DQN(states)
            one_hot_actions = tf.keras.utils.to_categorical(actions, self.n_actions, dtype=np.float32)  # using tf.one_hot causes strange errors
            Q = tf.reduce_sum(tf.multiply(q_values, one_hot_actions), axis=1)
            error = Q - target_q
            loss = tf.keras.losses.Huber()(target_q, Q)
            if self.use_per:
                # Multiply the loss by importance, so that the gradient is also scaled.
                # The importance scale reduces bias against situataions that are sampled
                # more frequently.
                # NOTE(review): Huber() with its default reduction already averages
                # over the batch, so `loss` is a scalar here; multiplying by the
                # per-sample `importance` vector and reducing again may not weight
                # individual samples as intended — confirm whether
                # reduction=Reduction.NONE was meant.
                loss = tf.reduce_mean(loss * importance)
        model_gradients = tape.gradient(loss, self.DQN.trainable_variables)
        self.DQN.optimizer.apply_gradients(zip(model_gradients, self.DQN.trainable_variables))
        if self.use_per:
            # Feed TD errors back into the buffer as new priorities
            self.replay_buffer.set_priorities(indices, error)
        return float(loss.numpy()), error
    def save(self, folder_name, **kwargs):
        """Saves the Agent and all corresponding properties into a folder
        Arguments:
            folder_name: Folder in which to save the Agent
            **kwargs: Agent.save will also save any keyword arguments passed.  This is used for saving the frame_number
        """
        # Create the folder for saving the agent
        if not os.path.isdir(folder_name):
            os.makedirs(folder_name)
        # Save DQN and target DQN
        self.DQN.save(folder_name + '/dqn.h5')
        self.target_dqn.save(folder_name + '/target_dqn.h5')
        # Save replay buffer
        self.replay_buffer.save(folder_name + '/replay-buffer')
        # Save meta
        with open(folder_name + '/meta.json', 'w+') as f:
            f.write(json.dumps({**{'buff_count': self.replay_buffer.count, 'buff_curr': self.replay_buffer.current}, **kwargs}))  # save replay_buffer information and any other information
    def load(self, folder_name, load_replay_buffer=True):
        """Load a previously saved Agent from a folder
        Arguments:
            folder_name: Folder from which to load the Agent
            load_replay_buffer: If True, also restore the replay buffer contents
        Returns:
            All other saved attributes, e.g., frame number
        Raises:
            ValueError: If folder_name is not a directory
        """
        if not os.path.isdir(folder_name):
            raise ValueError(f'{folder_name} is not a valid directory')
        # Load DQNs
        self.DQN = tf.keras.models.load_model(folder_name + '/dqn.h5')
        self.target_dqn = tf.keras.models.load_model(folder_name + '/target_dqn.h5')
        # Keep a handle to the restored optimizer (with its state)
        self.optimizer = self.DQN.optimizer
        # Load replay buffer
        if load_replay_buffer:
            self.replay_buffer.load(folder_name + '/replay-buffer')
        # Load meta
        with open(folder_name + '/meta.json', 'r') as f:
            meta = json.load(f)
        if load_replay_buffer:
            self.replay_buffer.count = meta['buff_count']
            self.replay_buffer.current = meta['buff_curr']
        del meta['buff_count'], meta['buff_curr']  # we don't want to return this information
        return meta
# Create environment
game_wrapper = GameWrapper(ENV_NAME, MAX_NOOP_STEPS)
print("The environment has the following {} actions: {}".format(game_wrapper.env.action_space.n, game_wrapper.env.unwrapped.get_action_meanings()))
# TensorBoard writer
writer = tf.summary.create_file_writer(TENSORBOARD_DIR)
# Build main and target networks (target starts with independent random weights
# until the first update_target_network call)
MAIN_DQN = build_q_network(game_wrapper.env.action_space.n, LEARNING_RATE, input_shape=INPUT_SHAPE)
TARGET_DQN = build_q_network(game_wrapper.env.action_space.n, input_shape=INPUT_SHAPE)
replay_buffer = ReplayBuffer(size=MEM_SIZE, input_shape=INPUT_SHAPE, use_per=USE_PER)
agent = Agent(MAIN_DQN, TARGET_DQN, replay_buffer, game_wrapper.env.action_space.n, input_shape=INPUT_SHAPE, batch_size=BATCH_SIZE, use_per=USE_PER)
# Training and evaluation
if LOAD_FROM is None:
    frame_number = 0
    rewards = []
    loss_list = []
else:
    print('Loading from', LOAD_FROM)
    meta = agent.load(LOAD_FROM, LOAD_REPLAY_BUFFER)
    # Apply information loaded from meta
    frame_number = meta['frame_number']
    rewards = meta['rewards']
    loss_list = meta['loss_list']
# Main loop
try:
    with writer.as_default():
        while frame_number < TOTAL_FRAMES:
            # Training
            epoch_frame = 0
            while epoch_frame < FRAMES_BETWEEN_EVAL:
                start_time = time.time()
                game_wrapper.reset()
                life_lost = True
                episode_reward_sum = 0
                for _ in range(MAX_EPISODE_LENGTH):
                    # Get action
                    action = agent.get_action(frame_number, game_wrapper.state)
                    # Take step
                    processed_frame, reward, terminal, life_lost = game_wrapper.step(action)
                    frame_number += 1
                    epoch_frame += 1
                    episode_reward_sum += reward
                    # Add experience to replay memory (life loss is treated as terminal)
                    agent.add_experience(action=action,
                                         frame=processed_frame[:, :, 0],
                                         reward=reward, clip_reward=CLIP_REWARD,
                                         terminal=life_lost)
                    # Update agent
                    if frame_number % UPDATE_FREQ == 0 and agent.replay_buffer.count > MIN_REPLAY_BUFFER_SIZE:
                        loss, _ = agent.learn(BATCH_SIZE, gamma=DISCOUNT_FACTOR, frame_number=frame_number, priority_scale=PRIORITY_SCALE)
                        loss_list.append(loss)
                    # Update target network
                    # NOTE(review): the target network syncs on the same UPDATE_FREQ
                    # cadence as learning — confirm a separate, larger target-update
                    # frequency wasn't intended.
                    if frame_number % UPDATE_FREQ == 0 and frame_number > MIN_REPLAY_BUFFER_SIZE:
                        agent.update_target_network()
                    # Break the loop when the game is over
                    if terminal:
                        terminal = False
                        break
                rewards.append(episode_reward_sum)
                # Output the progress every 10 games
                if len(rewards) % 10 == 0:
                    # Write to TensorBoard
                    if WRITE_TENSORBOARD:
                        tf.summary.scalar('Reward', np.mean(rewards[-10:]), frame_number)
                        tf.summary.scalar('Loss', np.mean(loss_list[-100:]), frame_number)
                        writer.flush()
                    print(f'Game number: {str(len(rewards)).zfill(6)}  Frame number: {str(frame_number).zfill(8)}  Average reward: {np.mean(rewards[-10:]):0.1f}  Time taken: {(time.time() - start_time):.1f}s')
            # Evaluation every `FRAMES_BETWEEN_EVAL` frames
            terminal = True
            eval_rewards = []
            evaluate_frame_number = 0
            for _ in range(EVAL_LENGTH):
                if terminal:
                    game_wrapper.reset(evaluation=True)
                    life_lost = True
                    episode_reward_sum = 0
                    terminal = False
                # Breakout requires a "fire" action (action #1) to start the
                # game each time a life is lost.
                # Otherwise, the agent would sit around doing nothing.
                action = 1 if life_lost else agent.get_action(frame_number, game_wrapper.state, evaluation=True)
                # Step action
                _, reward, terminal, life_lost = game_wrapper.step(action)
                evaluate_frame_number += 1
                episode_reward_sum += reward
                # On game-over
                if terminal:
                    eval_rewards.append(episode_reward_sum)
            if len(eval_rewards) > 0:
                final_score = np.mean(eval_rewards)
            else:
                # In case the game is longer than the number of frames allowed
                final_score = episode_reward_sum
            # Print score and write to tensorboard
            print('Evaluation score:', final_score)
            if WRITE_TENSORBOARD:
                tf.summary.scalar('Evaluation score', final_score, frame_number)
                writer.flush()
            # Save model
            if len(rewards) > 300 and SAVE_PATH is not None:
                agent.save(f'{SAVE_PATH}/save-{str(frame_number).zfill(8)}', frame_number=frame_number, rewards=rewards, loss_list=loss_list)
except KeyboardInterrupt:
    print('\nTraining exited early.')
    writer.close()
    if SAVE_PATH is None:
        try:
            SAVE_PATH = input('Would you like to save the trained model? If so, type in a save path, otherwise, interrupt with ctrl+c. ')
        except KeyboardInterrupt:
            print('\nExiting...')
    if SAVE_PATH is not None:
        print('Saving...')
        agent.save(f'{SAVE_PATH}/save-{str(frame_number).zfill(8)}', frame_number=frame_number, rewards=rewards, loss_list=loss_list)
        print('Saved.')
# To watch the agent learn to play, head on over to evaluation.py and
# change `RESTORE_PATH` to the location your model is saved.
```
| github_jupyter |
<div style="text-align: right">NEU Skunkworks AI workshop at Northeastern with EM Lyon Business School</div>
## Predicting Ad Lift with a Neural Network
### What is lift?
When one serves ads one has a choice of various channels to place ads. For an individual that choice might be to place ads on facebook, Twitter, Instagram, etc. For large ad companies they place ads with large Digital Service Providers (DSPs) such as Google’s Walled Garden, Open Web, and others which allow for an ad to be placed across many thousands of digital properties.
The effectiveness of an ad is usually measured through a metric called "lift." Different digital properties have different measures of effectiveness such as views, clicks, likes, and shares; these measures are combined into a single metric, _lift_, which can be used to estimate the increase in sales over no advertising.
_Supervised prediction of lift_
We use historical lift data to predict the lift that occurred in the past. When what we want to predict is known, this is called _supervised learning_. Supervised learning is the machine learning task of learning a function that maps an input to an output based on example input-output pairs. It infers a function from labeled training data consisting of a set of training examples. The idea is to create a function that can predict known data well, under the assumption that it will continue to predict accurately with new unknown data.
### Neural networks
[Artificial neural networks](https://en.wikipedia.org/wiki/Artificial_neural_network) (**ANNs**) or **[connectionist] systems** are computing systems inspired by the biological neural networks that constitute animal brains. Such systems learn (progressively improve
performance) to do tasks by considering examples, generally without task-specific programming. For example, in image recognition, they might learn to identify images that contain cats by analyzing example images that have been manually labeled as “cat” or “no cat” and using the
analytic results to identify cats in other images. They have found most use in applications difficult to express in a traditional computer algorithm using rule-based programming.
An ANN is based on a collection of connected units called artificial neurons (analogous to neurons in a biological brain). Each connection (synapse) between neurons can transmit a signal to another neuron. The receiving (postsynaptic) neuron can process the signal(s) and then signal downstream neurons connected to it. Neurons may have state, generally represented by real numbers, typically between 0 and 1.
Neurons and synapses may also have a weight that varies as learning proceeds, which can increase or decrease the strength of the signal that it sends downstream. Further, they may have a threshold such that only if the aggregate signal is below (or above) that level is the downstream signal sent.
Typically, neurons are organized in layers. Different layers may perform different kinds of transformations on their inputs. Signals travel from the first (input), to the last (output) layer, possibly after traversing the layers multiple times.
_Multilayer perceptron (MLP)_
We will be using a multilayer perceptron (MLP) to predict lift.
Like most other techniques for training classifiers, the perceptron generalizes naturally to multiclass classification. Here, the input $x$ and the output $y$ are drawn from arbitrary sets. A feature representation function $f(x,y)$ maps each possible input/output pair to a finite-dimensional real-valued feature vector. As before, the feature vector is multiplied by a weight vector $w$, but now the resulting score is used to choose among many possible outputs:
$$\hat y = \operatorname{argmax}_y f(x,y) \cdot w.$$ Learning again iterates over the examples, predicting an output for each, leaving the weights unchanged when the predicted output matches the target, and changing them when it does not. The update becomes:
$$w_{t+1} = w_t + f(x, y) - f(x,\hat y).$$
This multiclass feedback formulation reduces to the original perceptron when $x$ is a real-valued vector, $y$ is chosen from $\{0,1\}$, and $f(x,y) = y x$.
For certain problems, input/output representations and features can be chosen so that $\mathrm{argmax}_y f(x,y) \cdot w$ can be found efficiently even though $y$ is chosen from a very large or even infinite set.
### Backpropagation Algorithm
**Backpropagation** is a method used in artificial neural networks to calculate a gradient that is needed in the calculation of the weights to be used in the network. It is commonly used to train deep neural networks, a term referring to neural networks with more than one hidden layer.
Backpropagation is a special case of an older and more general technique called automatic differentiation. In the context of learning, backpropagation is commonly used by the gradient descent optimization algorithm to adjust the weight of neurons by calculating the gradient
of the loss function. This technique is also sometimes called **backward propagation of errors**, because the error is calculated at the output and distributed back through the network layers.
These videos are great for learning more about backpropagation:
* [What is backpropagation really doing?](https://youtu.be/Ilg3gGewQ5U)
* [Gradient descent, how neural networks learn?](https://youtu.be/IHZwWFHWa-w)
* [Backpropagation calculus](https://youtu.be/tIeHLnjs5U8)
**Loss function**
Sometimes referred to as the **cost function** or **error function**, the loss function
is a function that maps values of one or more variables onto a real number intuitively representing some \"cost\" associated with those values. For backpropagation, the loss function calculates the difference between the network output and its expected output, after a case propagates through the network.
The cost function will be the root-mean-square error (RMSE). The **root-mean-square deviation (RMSD)** or **root-mean-square error (RMSE)** (or sometimes **root-mean-square*d* error**) is a frequently used measure of the differences between values (sample or population
values) predicted by a model or an estimator and the values observed.
The RMSD represents the square root of the second sample moment of the differences between predicted values and observed values or the quadratic mean of these differences. These deviations are called *residuals* when the calculations are performed over the data sample
that was used for estimation and are called *errors* (or prediction errors) when computed out-of-sample. The RMSD serves to aggregate the magnitudes of the errors in predictions for various times into a single measure of predictive power. RMSD is a measure of accuracy, to compare forecasting errors of different models for a particular dataset and not
between datasets, as it is scale-dependent.
RMSD is always non-negative, and a value of 0 (almost never achieved in practice) would indicate a perfect fit to the data. In general, a lower RMSD is better than a higher one. However, comparisons across different types of data would be invalid because the measure is dependent on the scale of the numbers used.
_Formula_
The RMSD of an estimator $\hat{\theta}$ with respect to an estimated parameter $\theta$ is defined as the square root of the mean square error:
$$\operatorname{RMSD}(\hat{\theta}) = \sqrt{\operatorname{MSE}(\hat{\theta})} = \sqrt{\operatorname{E}((\hat{\theta}-\theta)^2)}.$$
For an unbiased estimator, the RMSD is the square root of the variance, known as the standard deviation. The RMSD of predicted values $\hat y_t$ for times *t* of a regression\'s dependent variable $y_t,$ with variables observed over *T* times, is computed for *T* different predictions as the square root of the mean of the squares of the deviations:
$$\operatorname{RMSD}=\sqrt{\frac{\sum_{t=1}^T (\hat y_t - y_t)^2}{T}}.$$
(For regressions on cross-sectional data, the subscript *t* is replaced by *i* and *T* is replaced by *n*.)
In some disciplines, the RMSD is used to compare differences between two things that may vary, neither of which is accepted as the \"standard\". For example, when measuring the average difference between two time series $x_{1,t}$ and $x_{2,t}$, the formula becomes
$$\operatorname{RMSD}= \sqrt{\frac{\sum_{t=1}^T (x_{1,t} - x_{2,t})^2}{T}}.$$
_Example loss function_
Let $y,y'$ be vectors in $\mathbb{R}^n$.
Select an error function $E(y,y')$ measuring the difference between two
outputs.
The standard choice is $E(y,y') = \tfrac{1}{2} \lVert y-y'\rVert^2$,
the square of the Euclidean distance between the vectors $y$ and $y'$.
The factor of $\tfrac{1}{2}$ conveniently cancels the exponent when the
error function is subsequently differentiated.
The error function over $n$ training examples can be written as an average $$E=\frac{1}{2n}\sum_x\lVert (y(x)-y'(x)) \rVert^2$$And the partial derivative with respect to the
outputs $$\frac{\partial E}{\partial y'} = y'-y$$
_Cross entropy_
In our case we are doing a regression on the value of lift so RSME makes sense. If we were measuring the differnce between two probability distributions we may choose a loss function like cross entropy or KL-divergence. In information theory, the cross entropy (https://en.wikipedia.org/wiki/Cross_entropy) between two probability distributions $p$ and $q$ over the same underlying set of events measures the average number of bits needed to identify an event drawn from the set, if a coding scheme is used that is optimized for an
“unnatural” probability distribution $q$, rather than the “true” distribution $p$.
The cross entropy for the distributions $p$ and $q$ over a given set is
defined as follows:
$$H(p, q) = \operatorname{E}_p[-\log q] = H(p) + D_{\mathrm{KL}}(p \| q),\!$$
where $H(p)$ is the entropy of $p$, and $D_{\mathrm{KL}}(p \| q)$ is
the [Kullback–Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) of $q$ from $p$ (also known as the *relative entropy* of *p* with respect to *q* — note the reversal of
emphasis).
For discrete $p$ and $q$ this means
$$H(p, q) = -\sum_x p(x)\, \log q(x). \!$$
The situation for continuous distributions is analogous. We have to
assume that $p$ and $q$ are absolutely continuous with respect to some
reference measure $r$ (usually $r$ is a Lebesgue measure on a
Borel σ-algebra). Let $P$ and $Q$ be probability density functions
of $p$ and $q$ with respect to $r$. Then
$$-\int_X P(x)\, \log Q(x)\, dr(x) = \operatorname{E}_p[-\log Q]. \!$$
NB: The notation $H(p,q)$ is also used for a different concept, the joint entropy of $p$ and $q$.
### Assumptions
Two assumptions must be made about the form of the error function. The first is that it can be written as an average $E=\frac{1}{n}\sum_xE_x$ over error functions $E_x$, for $n$ individual
training examples, $x$. The reason for this assumption is that the backpropagation algorithm calculates the gradient of the error function for a single training example, which needs to be generalized to the overall error function. The second assumption is that it can be written
as a function of the outputs from the neural network.
**Algorithm**
Let $N$ be a neural network with $e$ connections, $m$ inputs, and $n$
outputs.
Below, $x_1,x_2,\dots$ will denote vectors in $\mathbb{R}^m$, $y_1,y_2,\dots$ vectors in $\mathbb{R}^n$, and $w_0, w_1, w_2, \ldots$ vectors in $\mathbb{R}^e$. These are called *inputs*, *outputs* and *weights* respectively.
The neural network corresponds to a function $y = f_N(w, x)$ which, given a weight $w$, maps an input $x$ to an output $y$.
The optimization takes as input a sequence of *training examples* $(x_1,y_1), \dots, (x_p, y_p)$ and produces a sequence of weights $w_0, w_1, \dots, w_p$ starting from some initial weight $w_0$, usually chosen at random.
These weights are computed in turn: first compute $w_i$ using only $(x_i, y_i, w_{i-1})$ for $i = 1, \dots, p$. The output of the algorithm is then $w_p$, giving us a new function $x \mapsto f_N(w_p, x)$. The computation is the same in each step, hence only the case $i = 1$ is
described.
Calculating $w_1$ from $(x_1, y_1, w_0)$ is done by considering a variable weight $w$ and applying gradient descent to the function $w\mapsto E(f_N(w, x_1), y_1)$ to find a local minimum, starting at $w = w_0$.
This makes $w_1$ the minimizing weight found by gradient descent.
**Algorithm in code**
To implement the algorithm above, explicit formulas are required for the gradient of the function $w \mapsto E(f_N(w, x), y)$ where the function is $E(y,y')= |y-y'|^2$.
The learning algorithm can be divided into two phases: propagation and weight update.
### Phase 1: propagation
Each propagation involves the following steps:
1. Propagation forward through the network to generate the output
value(s)
2. Calculation of the cost (error term)
3. Propagation of the output activations back through the network using
the training pattern target in order to generate the deltas (the
difference between the targeted and actual output values) of all
output and hidden neurons.
### Phase 2: weight update
For each weight, the following steps must be followed:
1. The weight\'s output delta and input activation are multiplied to
find the gradient of the weight.
2. A ratio (percentage) of the weight\'s gradient is subtracted from
the weight.
This ratio (percentage) influences the speed and quality of learning; it is called the *learning rate*. The greater the ratio, the faster the neuron trains, but the lower the ratio, the more accurate the training is. The sign of the gradient of a weight indicates whether the error varies directly with, or inversely to, the weight. Therefore, the weight
must be updated in the opposite direction, \"descending\" the gradient.
Learning is repeated (on new batches) until the network performs adequately.
### Pseudocode
The following is pseudocode for a stochastic gradient descent
algorithm for training a three-layer network (only one hidden layer):
```text
initialize network weights (often small random values)\
**do**\
**forEach** training example named ex\
prediction = _neural-net-output_(network, ex) *// forward pass*\
actual = _teacher-output_(ex)\
compute error (prediction - actual) at the output units\
*// backward pass*\
*// backward pass continued*\
update network weights *// input layer not modified by error estimate*\
**until** all examples classified correctly or another stopping criterion satisfied\
**return** the network
```
The lines labeled \"backward pass\" can be implemented using the backpropagation algorithm, which calculates the gradient of the error of the network regarding the network\'s modifiable weights.
To learn more about MLPS and neural networks:
```
# Import the libraries used throughout this notebook.
# Use pip install or conda install if any library is missing.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import warnings
# Fix the RNG seed so the run is reproducible.
random.seed(5)
# Suppress deprecation/future warnings that would clutter the notebook output.
warnings.filterwarnings('ignore')
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
```
## Load Lift Data
Let's load the lift data plot it and look at desriptive statistics.
```
# Load the simulated ad-lift time series: treat the literal string 'null' as
# NaN and parse the 'timestamp' column into a DatetimeIndex.
df = pd.read_csv("data/Ad_Lift_Channels.csv",na_values=['null'],index_col='timestamp',parse_dates=True,infer_datetime_format=True)
# Visually check the data to make sure it loaded properly
df.head(5)
# Make a TRUE copy of the data called df_ma to add some moving average features.
# FIX: the original `df_ma = df` only created an alias to the same DataFrame,
# so every feature column added to df_ma silently mutated df as well.
# It is a good idea to play with copies of the data until you are sure of an analysis.
df_ma = df.copy()
df_ma.head(5)
df_ma.tail(5)
# Plot lift for channels A through D (columns 0, 2, 4, 6), then lift_A alone.
df_ma.iloc[:,[0,2,4,6]].plot()
df_ma.loc[:,['lift_A']].plot()
```
### What supervised learning algorithms to choose?
This simulated lift data looks like stock data, trending up and down based on many complex external factors that end up being reflected in a single price. If the data looked closer to a straight line we might use some form of linear regression to predict it. Linear regression predicts the response variable $y$ assuming it has a linear relationship with predictor variable(s) $x$ or $x_1, x_2, \dots, x_n$.
$$y = \beta_0 + \beta_1 x + \varepsilon .$$
**Note: Linear regression IS NOT our first choice as the data does not look linear.**
We could also use traditional time series models such as an Autoregressive integrated moving average (ARIMA). An [autoregressive integrated moving average (ARIMA or ARMA)](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average) model is a generalization of an autoregressive moving average (ARMA) model. These models are fitted to time series data either to better understand the data or to predict future points in the series (forecasting). They are applied in some cases where data show evidence of non-stationarity, where an initial differencing step (corresponding to the "integrated" part of the model) can be applied to reduce the non-stationarity.
Non-seasonal ARIMA models are generally denoted $ARIMA(p, d, q)$ where the parameters $p$, $d$, and $q$ are non-negative integers, $p$ is the order of the Autoregressive model, $d$ is the degree of differencing, and $q$ is the order of the Moving-average model. The number of differences $d$ is determined using repeated statistical tests. The values of $p$ and $q$ are then chosen by minimizing the AIC after differencing the data $d$ times.
* AR: Autoregression. A dependent relationship between an observation and some number of lagged observations.
* I: Integrated. The use of differencing of raw observations in order to make the time series stationary.
* MA: Moving Average. A model that uses the dependency between an observation and a residual error from a moving average model applied to lagged observations.
A non-seasonal ARIMA model can be (almost) completely summarized by three numbers:
* $p$ = the number of autoregressive terms. The number of lag observations included in the model.
* $d$ = the number of nonseasonal differences. The number of times that the raw observations are differenced, also called the degree of differencing.
* $q$ = the number of moving-average terms. The size of the moving average window, also called the order of moving average.
When a value of 0 is used as a parameter, it means that parameter is not used in the model.
The ARIMA model uses an iterative three-stage modeling approach:
Model identification and model selection: making sure that the variables are stationary, identifying seasonality in the dependent series (seasonally differencing it if necessary), and using plots of the autocorrelation and partial autocorrelation functions of the dependent time series to decide which (if any) autoregressive or moving average component should be used in the model.
Parameter estimation using computation algorithms to arrive at coefficients that best fit the selected ARIMA model. The most common methods use maximum likelihood estimation or non-linear least-squares estimation.
Model checking by testing whether the estimated model conforms to the specifications of a stationary univariate process. In particular, the residuals should be independent of each other and constant in mean and variance over time. (Plotting the mean and variance of residuals over time and performing a Ljung-Box test or plotting autocorrelation and partial autocorrelation of the residuals are helpful to identify misspecification.) If the estimation is inadequate, we have to return to step one and attempt to build a better model.
**Note: ARIMA IS NOT our first choice, in spite of the data being time-series data, as eventually we would like to incorporate features other than previous measures of lift that are specific to an advertiser, their campaign, and their target demographic. The mathematics of ARIMA make customization and personalization to advertisers, campaigns and consumers difficult.**
A neural network can fit data like this simulated lift data, unlike linear regression, and unlike the ARIMA, it can take any number of input varibles called independent variables. If we start with a simple model and get a pretty good fit we can iteratiively improve the model by finding and feeding it more and more data relevant to making a good prediction of lift.
### Create lift indicators (Feature Engineering)
Note that we only have lift and volume and we are trying to predict lift. Creating new predictive variables from the raw data is called feature engineering. We will start by creating the following features:
* The momentum difference between the 2-day moving volume average and 5-day volume moving average
* The momentum difference between the 2-day moving lift average and 5-day moving lift average
* 3-day moving lift average
* 7-day moving lift average
* 15-day moving lift average
* Average lift standard deviation within 15
_Naive & Simple Averaging_
The idea is to use past measures of lift to predict future lift. Naive or "simple" averaging is just the mean (or median) of past data. Of course, the "simple" average or mean of all past observations is only a useful estimate for forecasting when there are no trends. Note that we can detrend these data but that is beyond the scope of this workshop.
For example, if we have prices are $p_M, p_{M-1},\dots,p_{M-(n-1)}$ then the formula is
$$
\textit{SMA} = { p_M + p_{M-1} + \cdots + p_{M-(n-1)} \over n }
$$
_Moving Averages_
A moving average (rolling average or running average) is an average that is updated for a window or history of n events. This is sometimes called the arithmetic moving average of the most recent n data values. For an equally weighted average of the sequence of n values $x_1, \ldots, x_n$ up to the current time:
$$
\textit{MA}_n = {{x_1 + \cdots + x_n} \over n}\,.
$$
There are other kinds of averaging such as weighted moving averages, exponential moving average (also called Single Exponential Smoothing). and many others that won't be discussed.
**What about dates and time?**
We also have a timestamp of the form _6/1/2019 4:00:00 AM_. We could easily add features like the month, the season, the year, the hour, morning or night, etc.. In fact, this will be a suggested exercise for the lab. This is simulated lift data but in real lift data, the preferred use of various digital properties on the weekend, versus weekday, and day or night shows strong association with various demographic groups.
_One hot encoding_
While it is easy to extract the month as the numbers 1 though 12, when we keep them in that form it tells a mathematical model that December (12) is somehow 12 times that of January (1). This doesn't really make sense. But we can always use a boolean yes/no December. This is called one hot encoding. In machine learning, one-hot is a group of bits among which the legal combinations of values are only those with a single high (1) bit and all the others low (0).
```bash
Binary Gray code One-hot
-------- ----------- ----------
000 000 00000001
001 001 00000010
010 011 00000100
011 010 00001000
100 110 00010000
101 111 00100000
110 101 01000000
111 100 10000000
```
It is a suggested exercise that you feature extract some time information from the timestamp field and check how it affects the model. Do a google search something like "How To Convert Timestamp To Date and Time in Python" to learn how to extract something like "Summer" from a string like _6/1/2019 4:00:00 AM_.
```
# Extract some Naive & Simple Averaging features
df_ma['volume_A_Momentum'] = df_ma['volume_A'].shift(1).rolling(window = 2).mean() - df_ma['volume_A'].shift(1).rolling(window = 5).mean()
df_ma['lift_A_Momentum'] = df_ma['lift_A'].shift(1).rolling(window = 2).mean() - df_ma['lift_A'].shift(1).rolling(window = 5).mean()
df_ma['3step_MA_lift_A'] = df_ma['lift_A'].shift(1).rolling(window = 3).mean()
df_ma['7step_MA_lift_A'] = df_ma['lift_A'].shift(1).rolling(window = 7).mean()
df_ma['15step_MA_lift_A'] = df_ma['lift_A'].shift(1).rolling(window = 15).mean()
df_ma['Std_Dev_lift_A']= df_ma['lift_A'].rolling(15).std()
df_ma['volume_B_Momentum'] = df_ma['volume_B'].shift(1).rolling(window = 2).mean() - df_ma['volume_B'].shift(1).rolling(window = 5).mean()
df_ma['lift_B_Momentum'] = df_ma['lift_B'].shift(1).rolling(window = 2).mean() - df_ma['lift_B'].shift(1).rolling(window = 5).mean()
df_ma['3step_MA_lift_B'] = df_ma['lift_B'].shift(1).rolling(window = 3).mean()
df_ma['7step_MA_lift_B'] = df_ma['lift_B'].shift(1).rolling(window = 7).mean()
df_ma['15step_MA_lift_B'] = df_ma['lift_B'].shift(1).rolling(window = 15).mean()
df_ma['Std_Dev_lift_B']= df_ma['lift_B'].rolling(15).std()
df_ma['volume_C_Momentum'] = df_ma['volume_C'].shift(1).rolling(window = 2).mean() - df_ma['volume_C'].shift(1).rolling(window = 5).mean()
df_ma['lift_C_Momentum'] = df_ma['lift_C'].shift(1).rolling(window = 2).mean() - df_ma['lift_C'].shift(1).rolling(window = 5).mean()
df_ma['3step_MA_lift_C'] = df_ma['lift_C'].shift(1).rolling(window = 3).mean()
df_ma['7step_MA_lift_C'] = df_ma['lift_C'].shift(1).rolling(window = 7).mean()
df_ma['15step_MA_lift_C'] = df_ma['lift_C'].shift(1).rolling(window = 15).mean()
df_ma['Std_Dev_lift_C']= df_ma['lift_C'].rolling(15).std()
df_ma['volume_D_Momentum'] = df_ma['volume_D'].shift(1).rolling(window = 2).mean() - df_ma['volume_D'].shift(1).rolling(window = 5).mean()
df_ma['lift_D_Momentum'] = df_ma['lift_D'].shift(1).rolling(window = 2).mean() - df_ma['lift_D'].shift(1).rolling(window = 5).mean()
df_ma['3step_MA_lift_D'] = df_ma['lift_D'].shift(1).rolling(window = 3).mean()
df_ma['7step_MA_lift_D'] = df_ma['lift_D'].shift(1).rolling(window = 7).mean()
df_ma['15step_MA_lift_D'] = df_ma['lift_D'].shift(1).rolling(window = 15).mean()
df_ma['Std_Dev_lift_D']= df_ma['lift_D'].rolling(15).std()
# Check the size
df_ma.shape
# Check the transformed data
df_ma.head()
```
### The moving average created NaNs
Note that we can't create a 15-step moving average until we have 15 time steps. The simplest way to deal with this is to drop the first 15 rows of transformed data.
```
# Inspect the last rows; unlike the head, these have complete rolling features.
df_ma.tail()
```
## Drop all NaN
```
# Drop the early rows whose rolling-window features are NaN (the first 15
# steps cannot have a full 15-step moving average).
df_ma = df_ma.dropna()
# Confirm how many rows remain and eyeball the cleaned data.
df_ma.shape
df_ma.head()
# Let's save the transformed data for the other analysis
df_ma.to_csv("data/Ad_Lift_Channels_MA.csv")
```
## Deep Learning Model
We will be using a multilayer perceptron (MLP) to predict lift. An MLP is a very simple neural network. There are a few things we must decide before running the model. These are called hyperparameters. In machine learning, a hyperparameter is a parameter whose value is set before the learning process begins. By contrast, the values of other parameters are derived via training.
### Hyperparameters
The input and output are defined for us.
X is the inputs. The data indicators we calculated before. There will be one neuron for each feature/independent variable. One great strength of these models is the we can have many, many neurons in our input layer, each representing a feature.
y is output the lift. We are predicting a single value which is typical of regression. If we wanted to predict several classes then we might have a neuron in the output layer for each class that we wanted to represent.
_Network Architecture_
For an MLP, our choice relates to the "hidden layers." We will make the arbitrary choice of 128x64x64 nodes for our hidden layers. This choice is a starting point based on the observation that the data doesn't seem that complex. To prevent overfitting and for speed one may reduce the network to as small as possible that still creates a great fit, but the first step is to determine whether a great fit is possible.
_Cost function_
For our cost function RMSE is a common one used for regression. Mean residual deviance, MSE, MAE, RMSLE and Coefficient of Determination (R Squared) are common choices for regression. To understand the difference please read [Metrics to Understand Regression Models in Plain English](https://towardsdatascience.com/metrics-to-understand-regression-models-in-plain-english-part-1-c902b2f4156f).
_Activation function_
RELU is a very common activation function used in teaching neural networks so we will use it. However leaky RELU and others are probably better (for reason beyond the focus of this workshop). You are encouraged in the lab to try other activation functions and look at their effect on the model.
## Activation functions
In computational networks, the [activation function](https://en.wikipedia.org/wiki/Activation_function) of a node defines the output of that node given an input or set of inputs. A standard computer chip circuit can be seen as a digital network of activation functions that can be “ON” (1) or “OFF” (0), depending on input. This is similar to the behavior of the linear perceptron in neural networks. However, only *nonlinear* activation functions allow such networks to compute nontrivial problems using only a small number of nodes. In artificial neural networks this function is also called the **transfer function**.
_Functions_
In biologically inspired neural networks, the activation function is usually an abstraction representing the rate of action potential firing in the cell. In its simplest form, this function is binary—that is, either the neuron is firing or not. The function looks like
$\phi(v_i)=U(v_i)$, where $U$ is the Heaviside step function. In this case many neurons must be used in computation beyond linear separation of categories.
A line of positive slope may be used to reflect the increase in firing rate that occurs as input current increases. Such a function would be of the form $\phi(v_i)=\mu v_i$, where $\mu$ is the slope. This activation function is linear, and therefore has the same problems as the binary function. In addition, networks constructed using this model have unstable convergence because neuron inputs along favored paths tend to increase without bound, as this function is not normalizable.
All problems mentioned above can be handled by using a normalizable sigmoid activation function. One realistic model stays at zero until input current is received, at which point the firing frequency increases quickly at first, but gradually approaches an asymptote at 100% firing rate. Mathematically, this looks like $\phi(v_i)=U(v_i)\tanh(v_i)$, where the hyperbolic tangent function can be replaced by any sigmoid function. This behavior is realistically reflected in the neuron, as neurons cannot physically fire faster than a certain rate. This model runs into problems, however, in computational networks as it is not differentiable, a requirement to calculate backpropagation.
The final model, then, that is used in multilayer perceptrons is a sigmoidal activation function in the form of a hyperbolic tangent. Two forms of this function are commonly used: $\phi(v_i)=\tanh(v_i)$ whose range is normalized from -1 to 1, and $\phi(v_i) = (1+\exp(-v_i))^{-1}$ is vertically translated to normalize from 0 to 1. The latter model is often considered more biologically realistic, but it runs into theoretical and experimental difficulties with certain types.
## Comparison of activation functions
Some desirable properties in an activation function include:
- Nonlinear – When the activation function is non-linear, then a
two-layer neural network can be proven to be a universal function
approximator. The identity activation function does not satisfy
this property. When multiple layers use the identity activation
function, the entire network is equivalent to a single-layer model.
- Continuously differentiable – This property is necessary for
enabling gradient-based optimization methods. The binary step
activation function is not differentiable at 0, and it
differentiates to 0 for all other values, so gradient-based methods
can make no progress with it.
- Range – When the range of the activation function is finite,
gradient-based training methods tend to be more stable, because
pattern presentations significantly affect only limited weights.
When the range is infinite, training is generally more efficient
because pattern presentations significantly affect most of the
weights. In the latter case, smaller learning rates are typically
necessary.
- Monotonic – When the activation function is monotonic, the error
surface associated with a single-layer model is guaranteed to be
convex.
- Smooth Functions with a Monotonic derivative – These have been shown
to generalize better in some cases. The argument for these
properties suggests that such activation functions are more
consistent with Occam's razor.
- Approximates identity near the origin – When activation functions
have this property, the neural network will learn efficiently when
its weights are initialized with small random values. When the
activation function does not approximate identity near the origin,
special care must be used when initializing the weights.
_Rectified linear unit (ReLU) transfer function_
Rectified linear unit (ReLU)
Activation identity
$f(x)=x$
$f'(x)=1$
$(-\infty,\infty)$
$C^\infty$

Logistic (a.k.a. Soft step)
$f(x)=\frac{1}{1+e^{-x}}$
$f'(x)=f(x)(1-f(x))$
$(0,1)$
$C^\infty$

TanH
$f(x)=\tanh(x)=\frac{2}{1+e^{-2x}}-1$
$f'(x)=1-f(x)^2$
$(-1,1)$
$C^\infty$

Rectified linear unit (ReLU)
$f(x) = \begin{cases}
0 & \text{for } x < 0\\
x & \text{for } x \ge 0\end{cases}$
$f'(x) = \begin{cases}
0 & \text{for } x < 0\\
1 & \text{for } x \ge 0\end{cases}$
$[0,\infty)$
$C^0$

```
# List available columns to pick features from.
df_ma.columns
# Specify our feature columns and the prediction target for channel A.
# FIX: the original reused the name X for both the list of column names and
# the resulting DataFrame (shadowing), and selected y positionally with
# df_ma.iloc[:, 0] — fragile if the CSV column order ever changes. Select
# both by NAME instead.
feature_cols = ['volume_A','volume_A_Momentum','lift_A_Momentum','3step_MA_lift_A','7step_MA_lift_A','15step_MA_lift_A','Std_Dev_lift_A']
target_col = 'lift_A'
print(feature_cols)
print(target_col)
# Subset the data for only the desired features / target.
X = df_ma.loc[:, feature_cols]
y = df_ma.loc[:, target_col]
X.head()
y.head()
```
### Split dataset we used
Part of the dataset will be used for training (80%); the rest will be used for testing (20%).
The model uses the training data set to obtain weights, and the test data set to see how well the model performs on new data
Note that this split IS NOT random. We are using older data to train and more recent data to test. This kind of test-training split is common when we are forecasting as we are using the past to predict the future.
```
# Chronological 80/20 split: train on the oldest 80% of rows, test on the
# most recent 20%. No shuffling — we forecast the future from the past.
split = int(0.8 * len(df_ma))
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
```
### Feature scaling
Another important step in preprocessing the data is to normalize the data set. This step will average all input features to 0 and convert their variance to 1. This can ensure that the model will not be biased due to different input features when training the model. If this step is not handled properly, the model may be confused and give higher weight to those input features with higher averages. This is particularly important in neural networks, as one neuron maps to one feature in the input layer, and the neurons should learn their importance from training and not from implicit bias due to different feature scales.
```
# Normalize our training and test data
# Standardize every feature to zero mean / unit variance. The scaler is
# fitted on the training split only and then reused on the test split, so
# no test-set statistics leak into training.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train, X_test = sc.fit_transform(X_train), sc.transform(X_test)
```
### Build the Simple MLP
You will need tensorflow library in your environment first. Created by the Google Brain team, TensorFlow is an open source library for numerical computation and large-scale machine learning. TensorFlow bundles together a slew of machine learning and deep learning (aka neural networking) models and algorithms. It is very commonly used to build neural network models.
_Really Awesome TensorFlow Tutorials_
TensorFlow 101 (Really Awesome Intro Into TensorFlow) [https://youtu.be/arl8O22aa6I](https://youtu.be/arl8O22aa6I)
Getting Started with TensorFlow and Deep Learning | SciPy 2018 Tutorial ... [https://youtu.be/tYYVSEHq-io](https://youtu.be/tYYVSEHq-io)
Hands-on TensorBoard (TensorFlow Dev Summit 2017) [https://youtu.be/eBbEDRsCmv4](https://youtu.be/eBbEDRsCmv4)
```
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# Sequential: a linear stack of layers, built up one `add()` call at a time.
DLmodel = Sequential()
```
_Input Layer_
* Units: It defines the number of nodes or neurons in a layer. We set the value here to 128, which means that there will be 128 neurons in our hidden layer.
* Kernel_initializer: It defines the starting values of different neuron weights in the hidden layer. We define it here as "uniform", which means that the weight is initialized with evenly distributed values.
* Activation: It is the activation function of neurons in the hidden layer. Here we define the activation function as a modified linear unit function called RELU.
* Input_dim: It defines the number of inputs to the hidden layer. We define the value of the number of inputs as equal to the number of columns in the input feature data frame. However, this argument is no longer needed in subsequent layers because the model knows how much output was generated in the previous layer.
```
# Input + first hidden layer: 128 ReLU units with uniform weight init;
# input_dim must equal the number of feature columns in X.
DLmodel.add(
    Dense(
        units = 128,
        kernel_initializer = 'uniform',
        activation = 'relu',
        input_dim = X.shape[1]))
```
_Hidden Layer_
```
# Two further identical hidden layers: 64 ReLU units each, uniform init.
for _ in range(2):
    DLmodel.add(
        Dense(units=64,
              kernel_initializer='uniform',
              activation='relu'))
```
_Output Layer_
```
# Single-unit output layer for the regression target.
# NOTE(review): 'relu' on the output clamps predictions to >= 0 —
# confirm the target (lift) is never negative; otherwise 'linear'
# would be the safer activation here.
DLmodel.add(
    Dense(
        units = 1,
        kernel_initializer = 'uniform',
        activation = 'relu'))
```
Add an output layer
```
# Compile with Adam and MSE loss.  FIX: 'accuracy' is a classification
# metric and is meaningless for a regression loss; track mean absolute
# error instead (loss and predictions are unchanged).
DLmodel.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = ['mae'])
```
Finally, the following arguments are passed to compile the DLmodel:
* Optimizer: Choose optimizer as "Adam", which is an extended form of the stochastic gradient descent algorithm.
* Loss: It will define the loss that needs to be optimized during the training process. We define loss as the mean square error.
* Metrics: It defines the metrics that the model evaluates during training and testing. We choose accuracy as the evaluation metric (note that accuracy is a classification metric, so it is not very informative for a regression loss such as mean squared error).
### Train the network
The model is trained for a number of passes over the data called epochs. We usually visualize the training runs using a tool called TensorBoard, as well as tell training to stop when the model no longer improves, a technique called early stopping. We will show early stopping in the third notebook. It is recommended that you look at the TensorBoard tutorial below to understand how to visualize the network's learning.
Hands-on TensorBoard (TensorFlow Dev Summit 2017) [https://youtu.be/eBbEDRsCmv4](https://youtu.be/eBbEDRsCmv4)
```
# Train for 55 epochs, updating weights after every mini-batch of 11 samples.
DLmodel.fit(X_train, y_train, batch_size = 11, epochs = 55)
```
### Make Prediction
```
# Generate some predictions from our test data
y_pred = DLmodel.predict(X_test)
(y_pred, y_test.tolist())
# Score with root-mean-squared error.  FIX: the result variable was
# previously named `accuracy`, which was misleading — RMSE measures
# error, not accuracy (the old name is not referenced anywhere later).
from sklearn import metrics
rmse = np.sqrt(metrics.mean_squared_error(y_test.tolist(), y_pred))
print(rmse)
```
## Create a Null Model
Another way to predict the value of lift is just the take the average value of lift and use this as a prediction. This gives one a base model for comparison.
If one can get as good or better a prediction from taking the mean value than from an MLP, then the MLP isn't very useful.
```
# Null model: predict the training-set mean for every test observation.
y_train_mean = np.mean(y_train.tolist())
print(y_train_mean)
# One constant prediction per test point (same shape/dtype as before).
y_null = np.full_like(np.asarray(y_test.tolist(), dtype=float), y_train_mean)
y_null[0:5]
null_accuracy = np.sqrt(metrics.mean_squared_error(y_test.tolist(), y_null))
print(null_accuracy)
```
## Visualize the difference
The null model gives an RMSE of around 0.092 and our MLP gives an RMSE of around 0.017, so the MLP seems much better than the null model.
```
import matplotlib.pyplot as plt
# Compare predicted, actual, and null-model lift over the test period.
plt.figure(figsize=(10,5))
plt.plot(y_pred, color='r', label='Predicted lift Channel A')
plt.plot(y_test.tolist(), color='g', label='Actual lift Channel A')
plt.plot(y_null, color='k', label='Null model lift Channel A')
plt.legend()
plt.show()
# Plot the second column of the smoothed dataframe for reference.
df_ma.iloc[:,[1]].plot()
```
## Weird spikes?
There are some weird spikes in our MLP prediction. Can we get rid of those and further improve our model. The volume feature also has some large spikes. Is it possible that including extra features is actually hurting our model.
In machine learning, models that we will discuss in the second notebook will automatically downweight the importance of features and weight poorly predictive features to close to zero.
MLPs don't do this so let's remove the volume features as predictors and see if that helps?
```
# Rerun model without volume features
X=['lift_A_Momentum','3step_MA_lift_A','7step_MA_lift_A','15step_MA_lift_A','Std_Dev_lift_A']
X = df_ma.loc[:,X]
X.head()
# Reuse the train/test boundary from the earlier cell; the scaler `sc`
# is re-fit here on the reduced feature set.
X_train, X_test = X[:split], X[split:]
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Same architecture as DLmodel, but with only five input features.
DLmodel2 = Sequential()
DLmodel2.add(
    Dense(
        units = 128,
        kernel_initializer = 'uniform',
        activation = 'relu',
        input_dim = X.shape[1]))
DLmodel2.add(
    Dense(
        units = 64,
        kernel_initializer = 'uniform',
        activation = 'relu'))
DLmodel2.add(
    Dense(
        units = 64,
        kernel_initializer = 'uniform',
        activation = 'relu'))
DLmodel2.add(
    Dense(
        units = 1,
        kernel_initializer = 'uniform',
        activation = 'relu'))
DLmodel2.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = ['accuracy'])
DLmodel2.fit(X_train, y_train, batch_size = 11, epochs = 55)
# Generate some predictions from our test data
y_pred = DLmodel2.predict(X_test)
(y_pred,y_test.tolist())
# NOTE(review): despite its name, this value is an RMSE (an error metric).
accuracy=np.sqrt(metrics.mean_squared_error(y_test.tolist(), y_pred))
print(accuracy)
y_pred = DLmodel2.predict(X_test)
# Compare the reduced-feature model against actual and null predictions.
plt.figure(figsize=(10,5))
plt.plot(y_pred, color='r', label='Predicted lift Channel A')
plt.plot(y_test.tolist(), color='g', label='Actual lift Channel A')
plt.plot(y_null, color='k', label='Null model lift Channel A')
plt.legend()
plt.show()
```
## Removing a feature improved the model
Removing a feature improved the model from an RMSE of 0.017 to 0.010, and we can see the weird spikes are gone.
## Todo
The above code should provide you with a starting framework for incorporating more complex features into a model. Here are a few things you can try out:
- Try the analysis on different channels.
- Experiment with different network architectures, cost functions, activation functions.
- Use more realistic features such as buying seasons, weekend versus weekday
<div style="text-align: right">NEU Skunkworks AI workshop at Northeastern with EM Lyon Business School</div>
<div style="text-align: right">Contributors</div>
<div style="text-align: right">Srijoni Biswas, Zixiao Wang, Abhishek Dabas, Kailash Dhondoo Nadkar,Abhi Patodi
</div>
<div style="text-align: right"> 3 December 2019</div>
Copyright 2019 NEU AI Skunkworks
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| github_jupyter |
### Packages
```
import numpy as np
import pandas as pd
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from statsmodels.tsa.arima_model import ARIMA
from arch import arch_model
import seaborn as sns
import yfinance
import warnings
warnings.filterwarnings("ignore")
sns.set()
```
### Loading the data
```
# Download daily prices for the four indices; auto_adjust applies
# split/dividend adjustments.
# FIX: the keyword is `threads` — the original `treads = True` typo was
# silently swallowed and parallel downloads were never enabled.
raw_data = yfinance.download (tickers = "^GSPC ^FTSE ^N225 ^GDAXI", start = "1994-01-07", end = "2018-01-29",
                              interval = "1d", group_by = 'ticker', auto_adjust = True, threads = True)
df_comp = raw_data.copy()
# Flatten each index's close into its own lower-case column.
df_comp['spx'] = df_comp['^GSPC'].Close[:]
df_comp['dax'] = df_comp['^GDAXI'].Close[:]
df_comp['ftse'] = df_comp['^FTSE'].Close[:]
df_comp['nikkei'] = df_comp['^N225'].Close[:]
df_comp = df_comp.iloc[1:]   # drop the first row
del df_comp['^N225']
del df_comp['^GSPC']
del df_comp['^GDAXI']
del df_comp['^FTSE']
df_comp = df_comp.asfreq('b')   # business-day frequency
df_comp = df_comp.ffill()       # FIX: fillna(method='ffill') is deprecated in pandas
```
### Creating Returns
```
# Daily percentage returns for each index (same column creation order).
for market in ('spx', 'ftse', 'dax', 'nikkei'):
    df_comp['ret_' + market] = df_comp[market].pct_change(1) * 100
```
### Splitting the Data
```
# Hold out the most recent 20% of observations for testing.
size = int(0.8 * len(df_comp))
df = df_comp.iloc[:size]
df_test = df_comp.iloc[size:]
```
### Fitting a Model
```
from pmdarima.arima import auto_arima
# Default auto-ARIMA order search on FTSE returns (skip the first NaN return).
model_auto = auto_arima(df.ret_ftse[1:])
model_auto
model_auto.summary()
```
### Important Arguments
```
# Full auto-ARIMA search with exogenous regressors and seasonal terms.
model_auto = auto_arima(df_comp.ret_ftse[1:], exogenous = df_comp[['ret_spx', 'ret_dax', 'ret_nikkei']][1:], m = 5,
                        max_order = None, max_p = 7, max_q = 7, max_d = 2, max_P = 4, max_Q = 4, max_D = 2,
                        maxiter = 50, alpha = 0.05, n_jobs = -1, trend = 'ct', information_criterion = 'oob',
                        out_of_sample = int(len(df_comp)*0.2))
# !!! Important Note: In pdmarima v1.5.2, out_of_sample_size is replaced with out_of_sample, so make sure to use the latter!
# exogenous -> outside factors (e.g other time series)
# m -> seasonal cycle length
# max_order -> maximum amount of variables to be used in the regression (p + q)
# max_p -> maximum AR components
# max_q -> maximum MA components
# max_d -> maximum Integrations
# maxiter -> maximum iterations we're giving the model to converge the coefficients (becomes harder as the order increases)
# alpha -> level of significance, default is 5%, which we should be using most of the time
# n_jobs -> how many models to fit at a time (-1 indicates "as many as possible")
# trend -> "ct" usually
# information_criterion -> 'aic', 'aicc', 'bic', 'hqic', 'oob' (out of bag)
#        (Akaike Information Criterion, Corrected Akaike Information Criterion,
#        Bayesian Information Criterion, Hannan-Quinn Information Criterion, or
#        "out of bag"--for validation scoring--respectively)
# out_of_sample -> validates the model selection (pass the entire dataset, and set 20% to be the out_of_sample_size)
model_auto.summary()
```
| github_jupyter |
# PCA for Algorithmic Trading: Eigen Portfolios
## Imports & Settings
```
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
sns.set_style('white')
# Fix the RNG seed so the notebook's results are reproducible.
np.random.seed(42)
```
## Eigenportfolios
Another application of PCA involves the covariance matrix of the normalized returns. The principal components of the correlation matrix capture most of the covariation among assets in descending order and are mutually uncorrelated. Moreover, we can use standardized the principal components as portfolio weights.
Let’s use the 30 largest stocks with data for the 2010-2018 period to facilitate the exposition:
### Data Preparation
```
idx = pd.IndexSlice
# Load the 30 largest US stocks by market cap and their 2010-2018
# adjusted-close daily returns from the local HDF5 store.
with pd.HDFStore('../../data/assets.h5') as store:
    stocks = store['us_equities/stocks'].marketcap.nlargest(30)
    returns = (store['quandl/wiki/prices']
               .loc[idx['2010': '2018', stocks.index], 'adj_close']
               .unstack('ticker')
               .pct_change())
```
We again winsorize and also normalize the returns:
```
# Winsorize returns at the 2.5%/97.5% quantiles, standardize each column,
# then scale once more.
normed_returns = scale(returns
                       .clip(lower=returns.quantile(q=.025),
                             upper=returns.quantile(q=.975),
                             axis=1)
                       .apply(lambda x: x.sub(x.mean()).div(x.std())))
# NOTE(review): `normed_returns` is not used below — the covariance is
# computed from the raw (dropna-filtered) returns; confirm this is intended.
returns = returns.dropna(thresh=int(returns.shape[0] * .95), axis=1)
returns = returns.dropna(thresh=int(returns.shape[1] * .95))
returns.info()
cov = returns.cov()
sns.clustermap(cov);
```
### Run PCA
After dropping assets and trading days as in the previous example, we are left with 23 assets and over 2,000 trading days. We estimate all principal components and find that the two largest explain 57.6% and 12.4% of the covariation, respectively:
```
# Fit PCA on the asset covariance matrix; components are mutually
# uncorrelated directions ordered by explained covariation.
pca = PCA()
pca.fit(cov)
pd.Series(pca.explained_variance_ratio_).to_frame('Explained Variance').head().style.format('{:,.2%}'.format)
```
### Create PF weights from principal components
Next, we select and normalize the four largest components so that they sum to 1 and we can use them as weights for portfolios that we can compare to an equal-weighted portfolio formed from all stocks::
```
# Normalize the four leading components so each row sums to one and can
# be interpreted as portfolio weights.
top4 = pd.DataFrame(pca.components_[:4], columns=cov.columns)
row_totals = top4.sum(1)
eigen_portfolios = top4.div(row_totals, axis=0)
eigen_portfolios.index = [f'Portfolio {i}' for i in range(1, 5)]
```
### Eigenportfolio Weights
The weights show distinct emphasis, e.g., portfolio 3 puts large weights on Mastercard and Visa, the two payment processors in the sample, whereas portfolio 2 has more exposure to some technology companies:
```
# One bar chart of asset weights per eigenportfolio.
axes = eigen_portfolios.T.plot.bar(subplots=True,
                                   layout=(2, 2),
                                   figsize=(14, 8),
                                   legend=False)
for ax in axes.flatten():
    ax.set_ylabel('Portfolio Weight')
    ax.set_xlabel('')
sns.despine()
plt.tight_layout()
```
### Eigenportfolio Performance
When comparing the performance of each portfolio over the sample period to ‘the market’ consisting of our small sample, we find that portfolio 1 performs very similarly, whereas the other portfolios capture different return patterns.
```
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 6), sharex=True)
axes = axes.flatten()
# 'The market': equal-weighted cumulative return over the sample.
returns.mean(1).add(1).cumprod().sub(1).plot(title='The Market', ax=axes[0])
for i in range(3):
    # Cumulative return of the i-th eigenportfolio.
    rc = returns.mul(eigen_portfolios.iloc[i]).sum(1).add(1).cumprod().sub(1)
    rc.plot(title=f'Portfolio {i+1}', ax=axes[i+1], lw=1, rot=0)
for i in range(4):
    axes[i].set_xlabel('')
sns.despine()
fig.tight_layout()
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#Data Loading
# FIX: the np.float alias was removed in NumPy 1.24 — the builtin float
# produces the identical float64 dtype.
data = np.genfromtxt('sgd_data.txt', delimiter=',')
x = np.zeros((40, 1), dtype=float)
y = np.zeros((40, 1), dtype=float)
# Column 0 is the input, column 1 the target.
for i in range(data.shape[0]):
    x[i] = data[i][0]
    y[i] = data[i][1]
print("Input data shape = {}".format(x.shape))
print("Output data shape = {}".format(y.shape))
#Helper Functions
def f(x, w, b):
    """Logistic sigmoid of the affine score w*x + b."""
    z = w * x + b
    return 1.0 / (1.0 + np.exp(-z))
def mse(x, y, w, b):
    """Mean squared loss: 0.5 * sum of squared residuals over all samples."""
    total = 0.0
    for xi, yi in zip(x, y):
        residual = yi - f(xi, w, b)
        total += 0.5 * residual ** 2
    return total
def cross_entropy(x, y, w, b):
    """Cross-entropy loss: -sum_i y_i * log(f(x_i))."""
    total = 0.0
    for xi, yi in zip(x, y):
        total += -(yi * np.log(f(xi, w, b)))
    return total
def grad_w_mse(x, y, w, b):
    """d(MSE)/dw for a single sample under the sigmoid model."""
    pred = f(x, w, b)
    return (pred - y) * pred * (1 - pred) * x
def grad_b_mse(x, y, w, b):
    """d(MSE)/db for a single sample under the sigmoid model."""
    pred = f(x, w, b)
    return (pred - y) * pred * (1 - pred)
def grad_w_cross(x, y, w, b):
    """d(cross-entropy)/dw for a single sample under the sigmoid model."""
    pred = f(x, w, b)
    return -y * (1 - pred) * x
def grad_b_cross(x, y, w, b):
    """d(cross-entropy)/db for a single sample under the sigmoid model."""
    pred = f(x, w, b)
    return -y * (1 - pred)
#Mini Batch Nesterov accelerated Gradient Discent
def Nesterov_GD(x,y,epochs,batch_size,loss,eta,lr):
    """Mini-batch Nesterov accelerated gradient descent for the 1-D sigmoid model.

    Parameters:
        x, y       : (n, 1) input and target arrays.
        epochs     : number of passes over the data (runs epochs+1 iterations).
        batch_size : parameters update after every `batch_size` samples.
        loss       : 'mse' or 'cross_entropy' -- selects loss and gradients.
        eta        : gradient step size.
        lr         : momentum coefficient (gamma) on the previous update.

    Returns:
        (w_list, b_list): parameter values recorded after each epoch.

    Bug fix: the bias update previously subtracted `updated_w` (the
    weight update) instead of `updated_b`, so the bias never followed
    its own gradient.
    """
    w = np.random.randn()
    b = np.random.randn()
    prev_w_look_ahead, prev_b_look_ahead = 0, 0
    points = 0
    l_list = []
    w_list = []
    b_list = []
    ep = [i for i in range(epochs+1)]
    dw_look_ahead, db_look_ahead = 0, 0
    #First Look Ahead Point
    w_look_ahead = w - lr*prev_w_look_ahead #W_look_ahead = w_t - lr*w_update_t-1
    b_look_ahead = b - lr*prev_b_look_ahead #B_look_ahead = b_t - lr*b_update_t-1
    for i in range(epochs+1):
        dw_look_ahead, db_look_ahead = 0, 0
        for j in range(x.shape[0]):
            #Gradients w.r.t Look Ahead Points
            if (loss == 'mse'):
                dw_look_ahead += grad_w_mse(x[j], y[j], w_look_ahead, b_look_ahead)
                db_look_ahead += grad_b_mse(x[j], y[j], w_look_ahead, b_look_ahead)
            elif (loss == 'cross_entropy'):
                dw_look_ahead += grad_w_cross(x[j], y[j], w_look_ahead, b_look_ahead)
                db_look_ahead += grad_b_cross(x[j], y[j], w_look_ahead, b_look_ahead)
            points += 1
            if (points % batch_size == 0):
                updated_w = lr*prev_w_look_ahead + eta*dw_look_ahead #w_update_t = lr*w_update_t-1 + eta*gradient(w_look_ahead)
                updated_b = lr*prev_b_look_ahead + eta*db_look_ahead #b_update_t = lr*b_update_t-1 + eta*gradient(b_look_ahead)
                w = w - updated_w #W_(t+1) = w_t - w_update_t
                # BUG FIX: was `b = b - updated_w`, which applied the
                # weight update to the bias.
                b = b - updated_b #B_(t+1) = b_t - b_update_t
                prev_w_look_ahead = updated_w
                prev_b_look_ahead = updated_b
                #New Look Ahead point after mini batch parameter update
                w_look_ahead = w - lr*prev_w_look_ahead
                b_look_ahead = b - lr*prev_b_look_ahead
                dw_look_ahead, db_look_ahead = 0, 0
        if (loss == 'mse'):
            print('Loss after {}th epoch = {}\n'.format(i, mse(x, y, w, b)[0]))
            l_list.append(mse(x, y, w, b)[0])
        elif (loss == 'cross_entropy'):
            print('Loss after {}th epoch = {}\n'.format(i, cross_entropy(x, y, w, b)[0]))
            l_list.append(cross_entropy(x, y, w, b)[0])
        w_list.append(w[0])
        b_list.append(b[0])
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.title('Loss vs Epoch Curve\nAlgotithm : Nesterov Accelerated Gradient Decent\nBatch Size = {}\nLearning Rate(Gamma) = {}\nEta = {}\nLoss Function = {}'.format(batch_size,lr,eta,loss))
    plt.plot(ep, l_list)
    plt.show()
    return w_list, b_list
# Train with both losses and record the per-epoch parameter trajectories.
W,B = Nesterov_GD(x,y,500,10,'mse',0.2,0.01)
print('Weight list = \n{}'.format(W))
print('\n\nBias list = \n{}'.format(B))
W,B = Nesterov_GD(x,y,500,10,'cross_entropy',0.2,0.01)
print('Weight list = \n{}'.format(W))
print('\n\nBias list = \n{}'.format(B))
#Error Surface MSE
# FIX: np.float was removed in NumPy 1.24; the builtin float is the
# identical float64 dtype.
w = np.linspace(-5, 5, num=500, dtype=float)
b = np.linspace(-5, 5, num=500, dtype=float)
w, b = np.meshgrid(w, b)
mse_list = []
for i in range(w.shape[0]):
    Loss = mse(x, y, w[i], b[i])
    mse_list.append(Loss)
fig = plt.figure()
# FIX: Figure.gca(projection=...) was removed in Matplotlib 3.6;
# add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(w, b, mse_list, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.title('MSE Error Suface')
plt.show()
#Error Surface Cross Entropy
cross_list = []
for i in range(w.shape[0]):
    Loss = cross_entropy(x, y, w[i], b[i])
    cross_list.append(Loss)
fig = plt.figure()
# FIX: Figure.gca(projection=...) was removed in Matplotlib 3.6;
# add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(w, b, cross_list, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.title('Cross Entropy Error Suface')
plt.show()
```
| github_jupyter |
```
import pyprob
from pyprob import Model
from pyprob.distributions import Normal
from pyprob.distributions import Uniform
from pyprob.distributions import Categorical
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure();
def plotDist(dist, min_vals=-10, max_vals=10, step_size=0.02, label=None, **kwargs):
    """Plot the density of `dist` over [min_vals, max_vals) by exponentiating log_prob."""
    xvals = np.arange(min_vals, max_vals, step_size)
    plt.plot(xvals, [torch.exp(dist.log_prob(x)) for x in xvals], label=label, **kwargs)
def density(dist, x, M=1):
    """Probability density of `dist` at `x`, optionally scaled by `M`."""
    p = torch.exp(dist.log_prob(x)).item()
    return M * p
def computeGamma(dist, r, min_vals=-20, max_vals=20, step_width=0.01):
    """Riemann-sum approximation of the integral of p(x) * (1 - r(x)).

    Fixes: the original hard-coded the integration range to [-20, 20),
    ignoring min_vals/max_vals, and its max_vals default was -20
    (an empty range had it been honored).
    """
    x_vals = torch.arange(min_vals, max_vals, step_width)
    gamma = torch.sum(torch.exp(dist.log_prob(x_vals)) * (1 - r(x_vals))) * step_width
    return gamma
def computeC(dist, r, min_vals=-20, max_vals=20, step_width=0.01):
    """Riemann-sum approximation of the integral of p(x) * r(x).

    Fixes: the original ignored min_vals/max_vals (hard-coded [-20, 20)),
    defaulted max_vals to -20, and accumulated in a Python loop; this
    uses the vectorized form the author had left commented out.
    """
    x_vals = torch.arange(min_vals, max_vals, step_width)
    c = torch.sum(torch.exp(dist.log_prob(x_vals)) * r(x_vals)) * step_width
    return c
class CorrectDistributions:
    """Analytic prior and posterior for the Gaussian-with-unknown-mean model."""
    def __init__(self, model):
        self.prior_mean = model.prior_mean
        self.prior_std = model.prior_std
        self.likelihood_std = model.likelihood_std
        self.prior_dist = Normal(self.prior_mean, self.prior_std)

    @property
    def observed_list(self):
        return self.__observed_list

    @observed_list.setter
    def observed_list(self, new_observed_list):
        # Setting observations immediately refreshes the closed-form posterior.
        self.__observed_list = new_observed_list
        self.construct_correct_posterior()

    def construct_correct_posterior(self):
        # Conjugate normal-normal update: precision-weighted combination
        # of the prior mean and the sample mean.
        n = len(self.observed_list)
        posterior_var = 1/(n/self.likelihood_std**2 + 1/self.prior_std**2)
        posterior_mu = posterior_var * (self.prior_mean/self.prior_std**2 + n*np.mean(self.observed_list)/self.likelihood_std**2)
        self.posterior_dist = Normal(posterior_mu, math.sqrt(posterior_var))

    def prior_pdf(self, model, x):
        # BUG FIX: `model.prior_stdd` was a typo (AttributeError at
        # runtime); the attribute is `prior_std`.
        p = Normal(model.prior_mean, model.prior_std)
        return math.exp(p.log_prob(x))

    def plot_posterior(self, min_val, max_val):
        if not hasattr(self, 'posterior_dist'):
            raise AttributeError('observed values are not set yet, and posterior is not defined.')
        # NOTE(review): `plot_function` / `get_dist_pdf` are not defined in
        # this notebook — confirm they exist elsewhere before calling.
        plot_function(min_val, max_val, get_dist_pdf(self.posterior_dist), label='correct posterior', color='orange')

    def plot_prior(self, min_val, max_val):
        plot_function(min_val, max_val, get_dist_pdf(self.prior_dist), label='prior', color='green')
def marsaglia(mean, stddev):
    """Marsaglia polar method: one N(mean, stddev) draw built from uniforms.

    Rejection loop: keep drawing (x, y) from the unit square until the
    point falls inside the unit circle, then apply the polar transform.
    `replace=True` presumably marks the draws as part of a rejection loop
    for pyprob's trace addressing — confirm against pyprob docs.
    """
    uniform = Uniform(-1, 1)
    s = 1
    while float(s) >= 1:
        x = pyprob.sample(uniform, replace=True)
        y = pyprob.sample(uniform, replace=True)
        s = x*x + y*y
    return mean + stddev * (x * torch.sqrt(-2 * torch.log(s) / s))
class GaussianUnknownMean(Model):
    """Pyprob model: Gaussian likelihood with unknown mean, whose prior is
    sampled through a deliberately skewed rejection-sampling scheme."""
    def __init__(self):
        super().__init__(name='Gaussian with unknown mean')
        self.prior_mean = 0
        self.prior_std = 1
        self.likelihood_std = math.sqrt(0.2)  # fixed observation noise
        self.dist_pi = Normal(self.prior_mean, self.prior_std)

    def rejectionSampling(self):
        # With prob 0.5: wide-proposal (4x std) rejection sampler accepting
        # only x < 0; otherwise: direct sampling accepting only x >= 0.
        # Together these appear intended to recover the N(0,1) prior —
        # confirm against the accompanying notebook text.
        u = pyprob.sample(Uniform(0, 1))
        if u > 0.5:
            while True:
                #x = marsaglia(self.prior_mean, self.prior_std*2)
                x = pyprob.sample(Normal(self.prior_mean, self.prior_std*4), replace=True)
                u2 = pyprob.sample(Uniform(0, 1), control=False)
                # Accept with ratio target/(M * proposal); factor 1/4 from M.
                if x < 0 and u2 < 1/4 * torch.exp(Normal(self.prior_mean, self.prior_std).log_prob(x) - Normal(self.prior_mean, self.prior_std*4).log_prob(x)):
                    return x
        else:
            while True:
                x = pyprob.sample(Normal(self.prior_mean, self.prior_std), replace=True)
                if x >= 0:
                    return x

    def forward(self, importance=False, obs=None, exhaustive=False):
        # mu ~ N(0, 1), involving skewed rejection sampling loops
        mu = self.rejectionSampling()
        likelihood = Normal(mu, self.likelihood_std)
        pyprob.observe(likelihood, name='obs0')
        return mu
# Build the model and its analytic counterpart; condition on a single
# observation at 0.
model = GaussianUnknownMean()
correct_dists = CorrectDistributions(model)
correct_dists.observed_list = [0]
# Empirical prior samples vs. the true prior density.
samples = model.prior_results(num_traces=1000)
min_vals = min(-10, min(samples))
max_vals = max(10, max(samples))
samples.plot_histogram(show=False, alpha=0.75, label='Empirical prior', bins=100)
plotDist(model.dist_pi, min_vals=min_vals, max_vals=max_vals, label='True prior')
plt.legend();
# Importance sampling with the prior as proposal.
samples = model.posterior_results(num_traces=1000,
                                  inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING,
                                  observe={'obs0': correct_dists.observed_list[0]}
                                  )
# Unweighted samples show the proposal distribution...
min_vals = min(-5, min(samples))
max_vals = max(5, max(samples))
samples.unweighted().plot_histogram(show=False, alpha=0.75, label='Proposal', bins=100)
plotDist(correct_dists.posterior_dist, min_vals=min_vals, max_vals=max_vals, label='Posterior')
plt.legend();
# ...while the weighted histogram approximates the true posterior.
min_vals = min(-5, min(samples))
max_vals = max(5, max(samples))
samples.plot_histogram(show=False, alpha=0.75, label='Inferred posterior', bins=100)
plotDist(correct_dists.posterior_dist, min_vals=min_vals, max_vals=max_vals, label='Posterior')
plt.legend();
# Train an LSTM inference network, then use it as the proposal (IW0).
model.learn_inference_network(num_traces=1000,
                              observe_embeddings={'obs0' : {'dim' : 32}},
                              inference_network=pyprob.InferenceNetwork.LSTM)
samples = model.posterior_results(num_traces=1000,
                                  inference_engine=pyprob.InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK,
                                  observe={'obs0': correct_dists.observed_list[0]},
                                  importance_weighting=pyprob.ImportanceWeighting.IW0)
min_vals = min(-5, min(samples))
max_vals = max(5, max(samples))
samples.unweighted().plot_histogram(show=False, alpha=0.75, label='Proposal', bins=100)
plotDist(correct_dists.posterior_dist, min_vals=min_vals, max_vals=max_vals, label='Posterior')
plt.legend();
min_vals = min(-5, min(samples))
max_vals = max(5, max(samples))
samples.plot_histogram(show=False, alpha=0.75, label='Inferred posterior', bins=100)
plotDist(correct_dists.posterior_dist, min_vals=min_vals, max_vals=max_vals, label='Posterior')
plt.legend();
```
| github_jupyter |
# Statistics
:label:`sec_statistics`
Undoubtedly, to be a top deep learning practitioner, the ability to train state-of-the-art, highly accurate models is crucial. However, it is often unclear whether improvements are significant, or only the result of random fluctuations in the training process. To be able to discuss uncertainty in estimated values, we must learn some statistics.
The earliest reference of *statistics* can be traced back to an Arab scholar Al-Kindi in the $9^{\mathrm{th}}$-century, who gave a detailed description of how to use statistics and frequency analysis to decipher encrypted messages. After 800 years, the modern statistics arose from Germany in 1700s, when the researchers focused on the demographic and economic data collection and analysis. Today, statistics is the science subject that concerns the collection, processing, analysis, interpretation and visualization of data. What is more, the core theory of statistics has been widely used in the research within academia, industry, and government.
More specifically, statistics can be divided to *descriptive statistics* and *statistical inference*. The former focus on summarizing and illustrating the features of a collection of observed data, which is referred to as a *sample*. The sample is drawn from a *population*, denotes the total set of similar individuals, items, or events of our experiment interests. Contrary to descriptive statistics, *statistical inference* further deduces the characteristics of a population from the given *samples*, based on the assumptions that the sample distribution can replicate the population distribution at some degree.
You may wonder: “What is the essential difference between machine learning and statistics?” Fundamentally speaking, statistics focuses on the inference problem. This type of problems includes modeling the relationship between the variables, such as causal inference, and testing the statistically significance of model parameters, such as A/B testing. In contrast, machine learning emphasizes on making accurate predictions, without explicitly programming and understanding each parameter's functionality.
In this section, we will introduce three types of statistics inference methods: evaluating and comparing estimators, conducting hypothesis tests, and constructing confidence intervals. These methods can help us infer the characteristics of a given population, i.e., the true parameter $\theta$. For brevity, we assume that the true parameter $\theta$ of a given population is a scalar value. It is straightforward to extend to the case where $\theta$ is a vector or a tensor, thus we omit it in our discussion.
## Evaluating and Comparing Estimators
In statistics, an *estimator* is a function of given samples used to estimate the true parameter $\theta$. We will write $\hat{\theta}_n = \hat{f}(x_1, \ldots, x_n)$ for the estimate of $\theta$ after observing the samples {$x_1, x_2, \ldots, x_n$}.
We have seen simple examples of estimators before in section :numref:`sec_maximum_likelihood`. If you have a number of samples from a Bernoulli random variable, then the maximum likelihood estimate for the probability the random variable is one can be obtained by counting the number of ones observed and dividing by the total number of samples. Similarly, an exercise asked you to show that the maximum likelihood estimate of the mean of a Gaussian given a number of samples is given by the average value of all the samples. These estimators will almost never give the true value of the parameter, but ideally for a large number of samples the estimate will be close.
As an example, we show below the true density of a Gaussian random variable with mean zero and variance one, along with a collection samples from that Gaussian. We constructed the $y$ coordinate so every point is visible and the relationship to the original density is clearer.
```
import random
from mxnet import np, npx
from d2l import mxnet as d2l
npx.set_np()
# Sample datapoints and create y coordinate
epsilon = 0.1
random.seed(8675309)
xs = np.random.normal(loc=0, scale=1, size=(300,))
# y-coordinate: a small kernel-density value for each point (computed
# from the preceding samples) so points are spread vertically and
# visually follow the density curve.
ys = [np.sum(np.exp(-(xs[:i] - xs[i])**2 / (2 * epsilon**2))
             / np.sqrt(2*np.pi*epsilon**2)) / len(xs) for i in range(len(xs))]
# Compute true density
xd = np.arange(np.min(xs), np.max(xs), 0.01)
yd = np.exp(-xd**2/2) / np.sqrt(2 * np.pi)
# Plot the results
d2l.plot(xd, yd, 'x', 'density')
d2l.plt.scatter(xs, ys)
d2l.plt.axvline(x=0)
# Dashed purple line: the sample-mean estimate of the true mean (0).
d2l.plt.axvline(x=np.mean(xs), linestyle='--', color='purple')
d2l.plt.title(f'sample mean: {float(np.mean(xs)):.2f}')
d2l.plt.show()
```
There can be many ways to compute an estimator of a parameter $\hat{\theta}_n$. In this section, we introduce three common methods to evaluate and compare estimators: the mean squared error, the standard deviation, and statistical bias.
### Mean Squared Error
Perhaps the simplest metric used to evaluate estimators is the *mean squared error (MSE)* (or $l_2$ loss) of an estimator can be defined as
$$\mathrm{MSE} (\hat{\theta}_n, \theta) = E[(\hat{\theta}_n - \theta)^2].$$
:eqlabel:`eq_mse_est`
This allows us to quantify the average squared deviation from the true value. MSE is always non-negative. If you have read :numref:`sec_linear_regression`, you will recognize it as the most commonly used regression loss function. As a measure to evaluate an estimator, the closer its value to zero, the closer the estimator is close to the true parameter $\theta$.
### Statistical Bias
The MSE provides a natural metric, but we can easily imagine multiple different phenomena that might make it large. Two fundamentally important are fluctuation in the estimator due to randomness in the dataset, and systematic error in the estimator due to the estimation procedure.
First, let us measure the systematic error. For an estimator $\hat{\theta}_n$, the mathematical illustration of *statistical bias* can be defined as
$$\mathrm{bias}(\hat{\theta}_n) = E(\hat{\theta}_n - \theta) = E(\hat{\theta}_n) - \theta.$$
:eqlabel:`eq_bias`
Note that when $\mathrm{bias}(\hat{\theta}_n) = 0$, the expectation of the estimator $\hat{\theta}_n$ is equal to the true value of parameter. In this case, we say $\hat{\theta}_n$ is an unbiased estimator. In general, an unbiased estimator is better than a biased estimator since its expectation is the same as the true parameter.
It is worth being aware, however, that biased estimators are frequently used in practice. There are cases where unbiased estimators do not exist without further assumptions, or are intractable to compute. This may seem like a significant flaw in an estimator, however the majority of estimators encountered in practice are at least asymptotically unbiased in the sense that the bias tends to zero as the number of available samples tends to infinity: $\lim_{n \rightarrow \infty} \mathrm{bias}(\hat{\theta}_n) = 0$.
### Variance and Standard Deviation
Second, let us measure the randomness in the estimator. Recall from :numref:`sec_random_variables`, the *standard deviation* (or *standard error*) is defined as the squared root of the variance. We may measure the degree of fluctuation of an estimator by measuring the standard deviation or variance of that estimator.
$$\sigma_{\hat{\theta}_n} = \sqrt{\mathrm{Var} (\hat{\theta}_n )} = \sqrt{E[(\hat{\theta}_n - E(\hat{\theta}_n))^2]}.$$
:eqlabel:`eq_var_est`
It is important to compare :eqref:`eq_var_est` to :eqref:`eq_mse_est`. In this equation we do not compare to the true population value $\theta$, but instead to $E(\hat{\theta}_n)$, the expected sample mean. Thus we are not measuring how far the estimator tends to be from the true value, but instead we measuring the fluctuation of the estimator itself.
### The Bias-Variance Trade-off
It is intuitively clear that these two main components contribute to the mean squared error. What is somewhat shocking is that we can show that this is actually a *decomposition* of the mean squared error into these two contributions plus a third one. That is to say that we can write the mean squared error as the sum of the square of the bias, the variance and the irreducible error.
$$
\begin{aligned}
\mathrm{MSE} (\hat{\theta}_n, \theta) &= E[(\hat{\theta}_n - \theta)^2] \\
&= E[(\hat{\theta}_n)^2] + E[\theta^2] - 2E[\hat{\theta}_n\theta] \\
&= \mathrm{Var} [\hat{\theta}_n] + E[\hat{\theta}_n]^2 + \mathrm{Var} [\theta] + E[\theta]^2 - 2E[\hat{\theta}_n]E[\theta] \\
&= (E[\hat{\theta}_n] - E[\theta])^2 + \mathrm{Var} [\hat{\theta}_n] + \mathrm{Var} [\theta] \\
&= (E[\hat{\theta}_n - \theta])^2 + \mathrm{Var} [\hat{\theta}_n] + \mathrm{Var} [\theta] \\
&= (\mathrm{bias} [\hat{\theta}_n])^2 + \mathrm{Var} (\hat{\theta}_n) + \mathrm{Var} [\theta].\\
\end{aligned}
$$
We refer the above formula as *bias-variance trade-off*. The mean squared error can be divided into three sources of error: the error from high bias, the error from high variance and the irreducible error. The bias error is commonly seen in a simple model (such as a linear regression model), which cannot extract high dimensional relations between the features and the outputs. If a model suffers from high bias error, we often say it is *underfitting* or lack of *flexibilty* as introduced in (:numref:`sec_model_selection`). The high variance usually results from a too complex model, which overfits the training data. As a result, an *overfitting* model is sensitive to small fluctuations in the data. If a model suffers from high variance, we often say it is *overfitting* and lack of *generalization* as introduced in (:numref:`sec_model_selection`). The irreducible error is the result from noise in the $\theta$ itself.
### Evaluating Estimators in Code
Since the standard deviation of an estimator can be computed by simply calling `a.std()` for a tensor `a`, we will skip it and instead implement the statistical bias and the mean squared error.
```
# Statistical bias
def stat_bias(true_theta, est_theta):
    """Bias of an estimator: E[est_theta] - true_theta."""
    return np.mean(est_theta) - true_theta
# Mean squared error: average squared deviation of the data from the
# true parameter.
def mse(data, true_theta):
    """Return the mean of (data - true_theta)**2."""
    deviations = data - true_theta
    return np.mean(deviations ** 2)
```
To illustrate the equation of the bias-variance trade-off, let us simulate a normal distribution $\mathcal{N}(\theta, \sigma^2)$ with $10,000$ samples. Here, we use $\theta = 1$ and $\sigma = 4$. As the estimator is a function of the given samples, we use the mean of the samples as an estimator for the true $\theta$ of this normal distribution $\mathcal{N}(\theta, \sigma^2)$.
```
theta_true = 1  # true mean of the simulated normal distribution
sigma = 4  # true standard deviation
sample_len = 10000  # number of i.i.d. samples to draw
# Draw samples from N(theta_true, sigma^2).
samples = np.random.normal(theta_true, sigma, sample_len)
# Use the sample mean as the estimator of theta_true.
theta_est = np.mean(samples)
theta_est
```
Let us validate the trade-off equation by calculating the summation of the squared bias and the variance of our estimator. First, calculate the MSE of our estimator.
```
mse(samples, theta_true)
```
Next, we calculate $\mathrm{Var} (\hat{\theta}_n) + [\mathrm{bias} (\hat{\theta}_n)]^2$ as below. As you can see, the two values agree to numerical precision.
```
# Squared bias plus variance (the right-hand side of the trade-off
# equation); this should match mse(samples, theta_true) up to
# numerical precision.
bias = stat_bias(theta_true, theta_est)
np.square(samples.std()) + np.square(bias)
```
## Conducting Hypothesis Tests
The most commonly encountered topic in statistical inference is hypothesis testing. While hypothesis testing was popularized in the early 20th century, the first use can be traced back to John Arbuthnot in the 1700s. John tracked 80 years of birth records in London and concluded that more men were born than women each year. Following that, modern significance testing is the intellectual heritage of Karl Pearson, who invented the $p$-value and Pearson's chi-squared test; William Gosset, who is the father of Student's t-distribution; and Ronald Fisher, who initiated the null hypothesis and the significance test.
A *hypothesis test* is a way of evaluating some evidence against the default statement about a population. We refer to the default statement as the *null hypothesis* $H_0$, which we try to reject using the observed data. Here, we use $H_0$ as a starting point for the statistical significance testing. The *alternative hypothesis* $H_A$ (or $H_1$) is a statement that is contrary to the null hypothesis. A null hypothesis is often stated in a declarative form which posits a relationship between variables. It should reflect the belief as explicitly as possible, and be testable by statistical theory.
Imagine you are a chemist. After spending thousands of hours in the lab, you develop a new medicine which can dramatically improve one's ability to understand math. To show its magic power, you need to test it. Naturally, you may need some volunteers to take the medicine and see whether it can help them learn math better. How do you get started?
First, you will need two carefully and randomly selected groups of volunteers, so that there is no difference in their math understanding ability as measured by some metrics. The two groups are commonly referred to as the test group and the control group. The *test group* (or *treatment group*) is the group of individuals who will experience the medicine, while the *control group* represents the group of users who are set aside as a benchmark, i.e., identical environment setups except for taking this medicine. In this way, the influence of all the variables is minimized, except the impact of the independent variable in the treatment.
Second, after a period of taking the medicine, you will need to measure the two groups' math understanding by the same metrics, such as letting the volunteers do the same tests after learning a new math formula. Then, you can collect their performance and compare the results. In this case, our null hypothesis will be that there is no difference between the two groups, and our alternate will be that there is.
This is still not fully formal. There are many details you have to think of carefully. For example, what is the suitable metrics to test their math understanding ability? How many volunteers for your test so you can be confident to claim the effectiveness of your medicine? How long should you run the test? How do you decide if there is a difference between the two groups? Do you care about the average performance only, or also the range of variation of the scores? And so on.
In this way, hypothesis testing provides a framework for experimental design and reasoning about certainty in observed results. If we can now show that the null hypothesis is very unlikely to be true, we may reject it with confidence.
To complete the story of how to work with hypothesis testing, we need to now introduce some additional terminology and make some of our concepts above formal.
### Statistical Significance
The *statistical significance* measures the probability of erroneously rejecting the null hypothesis, $H_0$, when it should not be rejected, i.e.,
$$ \text{statistical significance }= 1 - \alpha = 1 - P(\text{reject } H_0 \mid H_0 \text{ is true} ).$$
Erroneously rejecting a true null hypothesis is referred to as a *type I error* or *false positive*. The $\alpha$ is called the *significance level* and its commonly used value is $5\%$, i.e., $1-\alpha = 95\%$. The significance level can be interpreted as the level of risk that we are willing to take when we reject a true null hypothesis.
:numref:`fig_statistical_significance` shows the observations' values and probability of a given normal distribution in a two-sample hypothesis test. If the observation data example is located outsides the $95\%$ threshold, it will be a very unlikely observation under the null hypothesis assumption. Hence, there might be something wrong with the null hypothesis and we will reject it.

:label:`fig_statistical_significance`
### Statistical Power
The *statistical power* (or *sensitivity*) measures the probability of rejecting the null hypothesis, $H_0$, when it should be rejected, i.e.,
$$ \text{statistical power }= 1 - \beta = 1 - P(\text{ fail to reject } H_0 \mid H_0 \text{ is false} ).$$
Recall that a *type I error* is an error caused by rejecting the null hypothesis when it is true, whereas a *type II error* results from failing to reject the null hypothesis when it is false. A type II error is usually denoted as $\beta$, and hence the corresponding statistical power is $1-\beta$.
Intuitively, statistical power can be interpreted as how likely our test will detect a real discrepancy of some minimum magnitude at a desired statistical significance level. $80\%$ is a commonly used statistical power threshold. The higher the statistical power, the more likely we are to detect true differences.
One of the most common uses of statistical power is in determining the number of samples needed. The probability you reject the null hypothesis when it is false depends on the degree to which it is false (known as the *effect size*) and the number of samples you have. As you might expect, small effect sizes will require a very large number of samples to be detectable with high probability. While beyond the scope of this brief appendix to derive in detail, as an example, want to be able to reject a null hypothesis that our sample came from a mean zero variance one Gaussian, and we believe that our sample's mean is actually close to one, we can do so with acceptable error rates with a sample size of only $8$. However, if we think our sample population true mean is close to $0.01$, then we'd need a sample size of nearly $80000$ to detect the difference.
We can imagine statistical power as a water filter. In this analogy, a high power hypothesis test is like a high quality water filtration system that will reduce harmful substances in the water as much as possible. On the other hand, a lower power test is like a low quality water filter, where some relatively small substances may easily escape through the gaps. Similarly, if the statistical power is not high enough, then the test may not catch smaller discrepancies.
### Test Statistic
A *test statistic* $T(x)$ is a scalar which summarizes some characteristic of the sample data. The goal of defining such a statistic is that it should allow us to distinguish between different distributions and conduct our hypothesis test. Thinking back to our chemist example, if we wish to show that one population performs better than the other, it could be reasonable to take the mean as the test statistic. Different choices of test statistic can lead to statistical test with drastically different statistical power.
Often, $T(X)$ (the distribution of the test statistic under our null hypothesis) will follow, at least approximately, a common probability distribution such as a normal distribution when considered under the null hypothesis. If we can derive explicitly such a distribution, and then measure our test statistic on our dataset, we can safely reject the null hypothesis if our statistic is far outside the range that we would expect. Making this quantitative leads us to the notion of $p$-values.
### $p$-value
The $p$-value (or the *probability value*) is the probability that $T(X)$ is at least as extreme as the observed test statistic $T(x)$ assuming that the null hypothesis is *true*, i.e.,
$$ p\text{-value} = P_{H_0}(T(X) \geq T(x)).$$
If the $p$-value is smaller than or equal to a predefined and fixed statistical significance level $\alpha$, we may reject the null hypothesis. Otherwise, we will conclude that we lack evidence to reject the null hypothesis. For a given population distribution, the *region of rejection* will be the interval containing all the points which have a $p$-value smaller than the statistical significance level $\alpha$.
### One-side Test and Two-sided Test
Normally there are two kinds of significance test: the one-sided test and the two-sided test. The *one-sided test* (or *one-tailed test*) is applicable when the null hypothesis and the alternative hypothesis only have one direction. For example, the null hypothesis may state that the true parameter $\theta$ is less than or equal to a value $c$. The alternative hypothesis would be that $\theta$ is greater than $c$. That is, the region of rejection is on only one side of the sampling distribution. Contrary to the one-sided test, the *two-sided test* (or *two-tailed test*) is applicable when the region of rejection is on both sides of the sampling distribution. An example in this case may have a null hypothesis state that the true parameter $\theta$ is equal to a value $c$. The alternative hypothesis would be that $\theta$ is not equal to $c$.
### General Steps of Hypothesis Testing
After getting familiar with the above concepts, let us go through the general steps of hypothesis testing.
1. State the question and establish a null hypothesis $H_0$.
2. Set the statistical significance level $\alpha$ and a statistical power ($1 - \beta$).
3. Obtain samples through experiments. The number of samples needed will depend on the statistical power, and the expected effect size.
4. Calculate the test statistic and the $p$-value.
5. Make the decision to keep or reject the null hypothesis based on the $p$-value and the statistical significance level $\alpha$.
To conduct a hypothesis test, we start by defining a null hypothesis and a level of risk that we are willing to take. Then we calculate the test statistic of the sample, taking an extreme value of the test statistic as evidence against the null hypothesis. If the test statistic falls within the reject region, we may reject the null hypothesis in favor of the alternative.
Hypothesis testing is applicable in a variety of scenarios such as the clinical trails and A/B testing.
## Constructing Confidence Intervals
When estimating the value of a parameter $\theta$, point estimators like $\hat \theta$ are of limited utility since they contain no notion of uncertainty. Rather, it would be far better if we could produce an interval that would contain the true parameter $\theta$ with high probability. If you were interested in such ideas a century ago, then you would have been excited to read "Outline of a Theory of Statistical Estimation Based on the Classical Theory of Probability" by Jerzy Neyman :cite:`Neyman.1937`, who first introduced the concept of confidence interval in 1937.
To be useful, a confidence interval should be as small as possible for a given degree of certainty. Let us see how to derive it.
### Definition
Mathematically, a *confidence interval* for the true parameter $\theta$ is an interval $C_n$ that computed from the sample data such that
$$P_{\theta} (C_n \ni \theta) \geq 1 - \alpha, \forall \theta.$$
:eqlabel:`eq_confidence`
Here $\alpha \in (0, 1)$, and $1 - \alpha$ is called the *confidence level* or *coverage* of the interval. This is the same $\alpha$ as the significance level as we discussed about above.
Note that :eqref:`eq_confidence` is about variable $C_n$, not about the fixed $\theta$. To emphasize this, we write $P_{\theta} (C_n \ni \theta)$ rather than $P_{\theta} (\theta \in C_n)$.
### Interpretation
It is very tempting to interpret a $95\%$ confidence interval as an interval where you can be $95\%$ sure the true parameter lies, however this is sadly not true. The true parameter is fixed, and it is the interval that is random. Thus a better interpretation would be to say that if you generated a large number of confidence intervals by this procedure, $95\%$ of the generated intervals would contain the true parameter.
This may seem pedantic, but it can have real implications for the interpretation of the results. In particular, we may satisfy :eqref:`eq_confidence` by constructing intervals that we are *almost certain* do not contain the true value, as long as we only do so rarely enough. We close this section by providing three tempting but false statements. An in-depth discussion of these points can be found in :cite:`Morey.Hoekstra.Rouder.ea.2016`.
* **Fallacy 1**. Narrow confidence intervals mean we can estimate the parameter precisely.
* **Fallacy 2**. The values inside the confidence interval are more likely to be the true value than those outside the interval.
* **Fallacy 3**. The probability that a particular observed $95\%$ confidence interval contains the true value is $95\%$.
Suffice it to say, confidence intervals are subtle objects. However, if you keep the interpretation clear, they can be powerful tools.
### A Gaussian Example
Let us discuss the most classical example, the confidence interval for the mean of a Gaussian of unknown mean and variance. Suppose we collect $n$ samples $\{x_i\}_{i=1}^n$ from our Gaussian $\mathcal{N}(\mu, \sigma^2)$. We can compute estimators for the mean and standard deviation by taking
$$\hat\mu_n = \frac{1}{n}\sum_{i=1}^n x_i \;\text{and}\; \hat\sigma^2_n = \frac{1}{n-1}\sum_{i=1}^n (x_i - \hat\mu_n)^2.$$
If we now consider the random variable
$$
T = \frac{\hat\mu_n - \mu}{\hat\sigma_n/\sqrt{n}},
$$
we obtain a random variable following a well-known distribution called the *Student's t-distribution on* $n-1$ *degrees of freedom*.
This distribution is very well studied, and it is known, for instance, that as $n\rightarrow \infty$, it is approximately a standard Gaussian, and thus by looking up values of the Gaussian c.d.f. in a table, we may conclude that the value of $T$ is in the interval $[-1.96, 1.96]$ at least $95\%$ of the time. For finite values of $n$, the interval needs to be somewhat larger, but are well known and precomputed in tables.
Thus, we may conclude that for large $n$,
$$
P\left(\frac{\hat\mu_n - \mu}{\hat\sigma_n/\sqrt{n}} \in [-1.96, 1.96]\right) \ge 0.95.
$$
Rearranging this by multiplying both sides by $\hat\sigma_n/\sqrt{n}$ and then adding $\hat\mu_n$, we obtain
$$
P\left(\mu \in \left[\hat\mu_n - 1.96\frac{\hat\sigma_n}{\sqrt{n}}, \hat\mu_n + 1.96\frac{\hat\sigma_n}{\sqrt{n}}\right]\right) \ge 0.95.
$$
Thus we know that we have found our $95\%$ confidence interval:
$$\left[\hat\mu_n - 1.96\frac{\hat\sigma_n}{\sqrt{n}}, \hat\mu_n + 1.96\frac{\hat\sigma_n}{\sqrt{n}}\right].$$
:eqlabel:`eq_gauss_confidence`
It is safe to say that :eqref:`eq_gauss_confidence` is one of the most used formulas in statistics. Let us close our discussion of statistics by implementing it. For simplicity, we assume we are in the asymptotic regime. For small values of $N$, you should instead use the correct value of `t_star` obtained either programmatically or from a $t$-table.
```
# Number of samples
N = 1000
# Draw N standard-normal samples (true mean 0, true std 1)
samples = np.random.normal(loc=0, scale=1, size=(N,))
# Asymptotic 97.5% quantile of Student's t-distribution (the standard
# Gaussian limit); for small N, look up the exact value in a t-table.
t_star = 1.96
# Construct the 95% confidence interval around the sample mean
mu_hat = np.mean(samples)
sigma_hat = samples.std(ddof=1)  # ddof=1 gives the unbiased sample std
(mu_hat - t_star*sigma_hat/np.sqrt(N), mu_hat + t_star*sigma_hat/np.sqrt(N))
```
## Summary
* Statistics focuses on inference problems, whereas deep learning emphasizes on making accurate predictions without explicitly programming and understanding.
* There are three common statistics inference methods: evaluating and comparing estimators, conducting hypothesis tests, and constructing confidence intervals.
* There are three most common estimators: statistical bias, standard deviation, and mean square error.
* A confidence interval is an estimated range of a true population parameter that we can construct by given the samples.
* Hypothesis testing is a way of evaluating some evidence against the default statement about a population.
## Exercises
1. Let $X_1, X_2, \ldots, X_n \overset{\text{iid}}{\sim} \mathrm{Unif}(0, \theta)$, where "iid" stands for *independent and identically distributed*. Consider the following estimators of $\theta$:
$$\hat{\theta} = \max \{X_1, X_2, \ldots, X_n \};$$
$$\tilde{\theta} = 2 \bar{X_n} = \frac{2}{n} \sum_{i=1}^n X_i.$$
* Find the statistical bias, standard deviation, and mean square error of $\hat{\theta}.$
* Find the statistical bias, standard deviation, and mean square error of $\tilde{\theta}.$
* Which estimator is better?
1. For our chemist example in introduction, can you derive the 5 steps to conduct a two-sided hypothesis testing? Given the statistical significance level $\alpha = 0.05$ and the statistical power $1 - \beta = 0.8$.
1. Run the confidence interval code with $N=2$ and $\alpha = 0.5$ for $100$ independently generated dataset, and plot the resulting intervals (in this case `t_star = 1.0`). You will see several very short intervals which are very far from containing the true mean $0$. Does this contradict the interpretation of the confidence interval? Do you feel comfortable using short intervals to indicate high precision estimates?
[Discussions](https://discuss.d2l.ai/t/419)
| github_jupyter |
# Build and train your first deep learning model
This notebook describes how to build a basic neural network with CNTK. We'll train a model on [the iris data set](https://archive.ics.uci.edu/ml/datasets/iris) to classify iris flowers. This dataset contains 4 features that describe an iris flower belonging to one of three possible species contained in the dataset.
# Step 1: Building the network structure
We're setting the random seed for this notebook to a fixed value. This ensures that you get the same results each time you run the sample code.
```
import cntk
# Fix CNTK's random seed so repeated runs of this notebook produce
# identical results.
cntk.cntk_py.set_fixed_random_seed(1337)
```
## Define the layers
This step defines the layer structure for the neural network. We're using a default `relu` activation function for each of the neurons in the hidden layers. The output layer gets a `log_softmax` activation function.
```
from cntk import default_options, input_variable
from cntk.layers import Dense, Sequential
from cntk.ops import log_softmax, sigmoid
# Two-layer feed-forward classifier: a 4-unit sigmoid hidden layer
# followed by a 3-unit output layer (one unit per iris species) with a
# log_softmax activation.
model = Sequential([
    Dense(4, activation=sigmoid),
    Dense(3, activation=log_softmax)
])
```
## Define the input for the neural network
The input for the model is a vector with four features:
- Sepal length
- Sepal width
- Petal length
- Petal width
In order for the model to work we need to define its input as an `input_variable`. This variable should have the same size as the number of features that we want to use for making a prediction. In this case it should be 4, because we have 4 different features in our dataset.
```
# Input placeholder: one 4-element feature vector
# (sepal length/width, petal length/width).
features = input_variable(4)
```
## Finalize the neural network structure
The last step is to finalize the neural network structure. We define a new variable `z` and invoke the model function with the input variable to bind it as the input for our model.
```
# Bind the feature input to the model, producing the network function z.
z = model(features)
```
# Train the model
After we've defined the model we need to setup the training logic. This is done in three steps:
1. Load the dataset and prepare it for use
2. Define the loss for the model.
3. Set up the trainer and learner for the model.
3. Use the trainer to train the model with the loaded data.
## Loading the data
Before we can actually train the model, we need to load the data from disk. We will use pandas for this.
Pandas is a widely used Python library for working with data. It contains functions to load and process data
as well as a large number of functions to perform statistical operations.
```
import pandas as pd
# Load the iris dataset from disk; the CSV has no header row, so the
# column names are supplied explicitly.
df_source = pd.read_csv('iris.csv',
    names=['sepal_length', 'sepal_width','petal_length','petal_width', 'species'],
    index_col=False)
# Show column info and basic descriptive statistics of the dataset.
df_source.info()
df_source.describe()
```
We split the dataset into features `X` and labels `y`. We need to feed these separately to the trainer later on to train the model. We convert the features and labels to numpy arrays as this is what CNTK expects as input.
```
import numpy as np
# Features: the four numeric measurement columns as a NumPy array.
X = df_source.iloc[:, :4].values
# Labels: the species column as an array of strings
# (converted to a numeric encoding below).
y = df_source['species'].values
```
Our model doesn't take strings as values. It needs floating point values to do its job. So we need to encode the strings into a floating point representation. We can do this using a standard label encoder which is available in the `scikit-learn` python package.
```
def one_hot(x, length):
    """Return a float vector of `length` zeros with a one at index `x`."""
    # Row `x` of the identity matrix is exactly the one-hot encoding of `x`.
    return np.eye(length)[x]
# Map each iris species name to its numeric class index (0, 1, 2).
label_mapping = dict(
    zip(('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'), range(3))
)
# Replace each species string with its one-hot encoded class vector.
y = np.array([one_hot(label_mapping[v], 3) for v in y])
```
CNTK is configured to use 32-bit floats by default. Right now, the features are stored as 64-bit floats and the labels are stored as integers. In order to help CNTK make sense of this, we will have to convert our data to 32-bit floats.
```
# CNTK expects 32-bit floats; cast both features and one-hot labels.
X = X.astype(np.float32)
y = y.astype(np.float32)
```
One of the challenges with machine learning is the fact that your model will try to memorize every bit of data it saw. This is called overfitting and it is bad for your model, as it is no longer able to predict the outcome correctly for samples it didn't see before. We want our model to learn a set of rules that predict the correct class of flower.
In order for us to detect overfitting we need to split the dataset into a training and test set. This is done using a utility function found in the scikit-learn python package which is included with your standard anaconda installation.
```
from sklearn.model_selection import train_test_split
# Hold out 20% of the data for testing; stratify on y so the class
# proportions are the same in both splits.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, stratify=y)
```
## Defining the target and loss
Let's define a target for our model and a loss function. The loss function measures the distance between the actual and predicted value. The loss is later used by the learner to optimize the parameters in the model.
```
from cntk.losses import cross_entropy_with_softmax
from cntk.metrics import classification_error
# Placeholder for the one-hot target vector (3 classes).
label = input_variable(3)
# Training loss and evaluation metric, both comparing the model output
# z against the target label.
loss = cross_entropy_with_softmax(z, label)
error_rate = classification_error(z, label)
```
## Setting up the learner and trainer
When we have a model and loss we can setup the learner and trainer to train the model.
We first define the learner, which is going to use the loss function and target to optimize the model.
```
from cntk.learners import sgd
from cntk.train.trainer import Trainer
# Plain stochastic gradient descent over the model parameters with a
# fixed learning rate of 0.001.
learner = sgd(z.parameters, 0.001)
# The trainer minimizes `loss` and also tracks `error_rate`.
trainer = Trainer(z, (loss, error_rate), [learner])
```
# Train the model
To train the model you can use different methods on the trainer. The `train_minibatch` method can be used to manually feed data into the model as minibatches. You typically use this method when you have a dataset that you've loaded manually using Pandas or numpy.
We're going to train our model by running our dataset 5 times through the trainer. Each time we perform a full pass over the dataset we perform one training epoch.
At the end of the training process we have a fully trained model that we can use to make predictions.
```
# Each iteration feeds the entire training set as one minibatch, so each
# pass of this loop is one training epoch.
for _ in range(5):
    trainer.train_minibatch({ features: X_train, label: y_train })
    # Report the loss and evaluation metric of the minibatch just processed.
    print('Loss: {}, Acc: {}'.format(
        trainer.previous_minibatch_loss_average,
        trainer.previous_minibatch_evaluation_average))
```
# Evaluate the model
After we've trained the model using the training set we can measure the model's performance using a call to the test_minibatch method on the trainer instance we used earlier. This outputs the average classification error, a value between 0 and 1. A value closer to 0 indicates a better-performing classifier.
Please note that at this point the model performance may be a little underwhelming. You can try running all the cells in the notebook again and it will most likely improve. This happens because the weights are initialized using a random number which changes every time you rerun all the cells in this notebook. You may get lucky!
```
# Average of the evaluation metric (classification_error) over the test
# set, so lower values indicate a better classifier.
trainer.test_minibatch( {features: X_test, label: y_test })
```
# Make a prediction with the trained model
Once trained we can make predictions with our model by simply invoking the model. This produces a vector with the activation values of the output layer of our model. We can then use the `argmax` function from numpy to determine the neuron with the highest activation, which is the species the flower was classified as.
```
# Pick one random test sample and classify it with the trained network.
sample_index = np.random.choice(X_test.shape[0])
sample = X_test[sample_index]
# Inverse of label_mapping: class index -> species name.
inverted_mapping = {
    0: 'Iris-setosa',
    1: 'Iris-versicolor',
    2: 'Iris-virginica'
}
# Evaluate the network on the sample, then take the index of the most
# activated output neuron as the predicted class.
prediction = z(sample)
predicted_label = inverted_mapping[np.argmax(prediction)]
print(predicted_label)
```
| github_jupyter |
# Use Your Own Inference Code with Amazon SageMaker XGBoost Algorithm
_**Customized inference for computing SHAP values with Amazon SageMaker XGBoost script mode**_
---
## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Training the XGBoost model](#Training-the-XGBoost-model)
4. [Deploying the XGBoost endpoint](#Deploying-the-XGBoost-endpoint)
---
## Introduction
This notebook shows how you can configure the SageMaker XGBoost model server by defining the following three functions in the Python source file you pass to the XGBoost constructor in the SageMaker Python SDK:
- `input_fn`: Takes request data and deserializes the data into an object for prediction,
- `predict_fn`: Takes the deserialized request object and performs inference against the loaded model, and
- `output_fn`: Takes the result of prediction and serializes this according to the response content type.
We will write a customized inference script that is designed to illustrate how [SHAP](https://github.com/slundberg/shap) values enable the interpretion of XGBoost models.
We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In this libsvm converted version, the nominal feature (Male/Female/Infant) has been converted into a real valued feature as required by XGBoost. Age of abalone is to be predicted from eight physical measurements.
This notebook uses the Abalone dataset to deploy a model server that returns SHAP values, which enable us to create model explanation such as the following plots that show each features contributing to push the model output from the base value.
<table><tr>
<td> <img src="images/shap_young_abalone.png" alt="Drawing"/> </td>
<td> <img src="images/shap_old_abalone.png" alt="Drawing"/> </td>
</tr></table>
---
## Setup
This notebook was tested in Amazon SageMaker Studio on a ml.t3.medium instance with Python 3 (Data Science) kernel.
Let's start by specifying:
1. The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
2. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regex with a the appropriate full IAM role arn string(s).
```
%%time
import io
import os
import boto3
import sagemaker
import time
# IAM role that SageMaker assumes for training and hosting access.
role = sagemaker.get_execution_role()
# AWS region of the current boto3 session.
region = boto3.Session().region_name
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket here if you wish.
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/DEMO-xgboost-inference-script-mode"
```
### Fetching the dataset
The following methods download the Abalone dataset and upload files to S3.
```
%%time
s3 = boto3.client("s3")
# Download the LibSVM-formatted Abalone dataset from the public
# SageMaker sample-files bucket to the local file FILE_DATA.
FILE_DATA = "abalone"
s3.download_file(
    "sagemaker-sample-files", f"datasets/tabular/uci_abalone/abalone.libsvm", FILE_DATA
)
# Upload the file to our own bucket under <prefix>/train so it can be
# used as the training input channel.
sagemaker.Session().upload_data(FILE_DATA, bucket=bucket, key_prefix=prefix + "/train")
```
## Training the XGBoost model
SageMaker can now run an XGboost script using the XGBoost estimator. A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. In this notebook, we use the same training script [abalone.py](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/abalone.py) from [Regression with Amazon SageMaker XGBoost algorithm](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb). Refer to [Regression with Amazon SageMaker XGBoost algorithm](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb) for details on the training script.
After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between few minutes.
To run our training script on SageMaker, we construct a `sagemaker.xgboost.estimator.XGBoost` estimator, which accepts several constructor arguments:
* __entry_point__: The path to the Python script SageMaker runs for training and prediction.
* __role__: Role ARN
* __framework_version__: SageMaker XGBoost version you want to use for executing your model training code, e.g., `0.90-1`, `0.90-2`, `1.0-1`, or `1.2-1`.
* __train_instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* __sagemaker_session__ *(optional)*: The session used to train on Sagemaker.
* __hyperparameters__ *(optional)*: A dictionary passed to the train function as hyperparameters.
```
from sagemaker.inputs import TrainingInput
from sagemaker.xgboost.estimator import XGBoost
# Unique, timestamped name for the training job.
job_name = "DEMO-xgboost-inference-script-mode-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print("Training job", job_name)
# XGBoost hyperparameters, passed through to the training script.
hyperparameters = {
    "max_depth": "5",
    "eta": "0.2",
    "gamma": "4",
    "min_child_weight": "6",
    "subsample": "0.7",
    "objective": "reg:squarederror",
    "num_round": "50",
    "verbosity": "2",
}
instance_type = "ml.c5.xlarge"
# Script-mode estimator: runs abalone.py on a single CPU instance and
# writes model artifacts to the given S3 output path.
xgb_script_mode_estimator = XGBoost(
    entry_point="abalone.py",
    hyperparameters=hyperparameters,
    role=role,
    instance_count=1,
    instance_type=instance_type,
    framework_version="1.2-1",
    output_path="s3://{}/{}/{}/output".format(bucket, prefix, job_name),
)
content_type = "text/libsvm"
# Training channel pointing at the LibSVM data uploaded to S3 above.
train_input = TrainingInput(
    "s3://{}/{}/{}/".format(bucket, prefix, "train"), content_type=content_type
)
```
### Train XGBoost Estimator on Abalone Data
Training is as simple as calling `fit` on the Estimator. This will start a SageMaker Training job that will download the data, invoke the entry point code (in the provided script file), and save any model artifacts that the script creates. In this case, the script requires a `train` and a `validation` channel. Since we only created a `train` channel, we re-use it for validation.
```
# Launch the training job; the train channel is reused as the
# validation channel since only one dataset was uploaded.
xgb_script_mode_estimator.fit({"train": train_input, "validation": train_input}, job_name=job_name)
```
## Deploying the XGBoost endpoint
After training, we can host the newly created model in SageMaker, and create an Amazon SageMaker endpoint – a hosted and managed prediction service that we can use to perform inference. If you call `deploy` after you call `fit` on an XGBoost estimator, it will create a SageMaker endpoint using the training script (i.e., `entry_point`). You can also optionally specify other functions to customize the behavior of deserialization of the input request (`input_fn()`), serialization of the predictions (`output_fn()`), and how predictions are made (`predict_fn()`). If any of these functions are not specified, the endpoint will use the default functions in the SageMaker XGBoost container. See the [SageMaker Python SDK documentation](https://sagemaker.readthedocs.io/en/stable/frameworks/xgboost/using_xgboost.html#sagemaker-xgboost-model-server) for details.
In this notebook, we will run a separate inference script and customize the endpoint to return [SHAP](https://github.com/slundberg/shap) values in addition to predictions. The inference script that we will run in this notebook is provided as the accompanying file (`inference.py`) and also shown below:
```python
import json
import os
import pickle as pkl
import numpy as np
import sagemaker_xgboost_container.encoder as xgb_encoders
def model_fn(model_dir):
    """
    Deserialize and return the fitted XGBoost booster.

    The SageMaker XGBoost container calls this once at startup, passing the
    directory into which the training artifact was extracted.
    """
    model_file = "xgboost-model"
    # Fix: the original called pkl.load(open(...)) and never closed the file;
    # a context manager releases the handle deterministically.
    with open(os.path.join(model_dir, model_file), "rb") as f:
        booster = pkl.load(f)
    return booster
def input_fn(request_body, request_content_type):
    """
    Deserialize an inference request body into a DMatrix.

    Only "text/libsvm" payloads are accepted; any other content type is
    rejected with a ValueError. The returned DMatrix is what the model
    server hands to predict_fn.
    """
    if request_content_type != "text/libsvm":
        raise ValueError(
            "Content type {} is not supported.".format(request_content_type)
        )
    return xgb_encoders.libsvm_to_dmatrix(request_body)
def predict_fn(input_data, model):
    """
    Predict and compute SHAP feature contributions for input_data.

    Returns a two-dimensional NumPy array whose first column holds the
    predictions and whose remaining columns hold the per-feature
    contributions (SHAP values) for each prediction.
    """
    preds = model.predict(input_data)
    contribs = model.predict(input_data, pred_contribs=True, validate_features=False)
    return np.hstack((preds[:, np.newaxis], contribs))
def output_fn(predictions, content_type):
    """
    Serialize the first prediction row as a CSV string.

    Only "text/csv" responses are supported; any other accept type is
    rejected with a ValueError.
    """
    if content_type != "text/csv":
        raise ValueError("Content type {} is not supported.".format(content_type))
    return ','.join(str(x) for x in predictions[0])
```
### transform_fn
If you would rather not structure your code around the three methods described above, you can instead define your own `transform_fn` to handle inference requests. An error is thrown if a `transform_fn` is present in conjunction with any `input_fn`, `predict_fn`, and/or `output_fn`. In our case, the `transform_fn` would look as follows:
```python
def transform_fn(model, request_body, content_type, accept_type):
    """
    Single-entry-point alternative to input_fn / predict_fn / output_fn.

    Decodes a libsvm request, predicts, computes SHAP contributions, and
    returns the first row (prediction followed by contributions) as CSV.
    """
    dmatrix = xgb_encoders.libsvm_to_dmatrix(request_body)
    prediction = model.predict(dmatrix)
    feature_contribs = model.predict(dmatrix, pred_contribs=True, validate_features=False)
    output = np.hstack((prediction[:, np.newaxis], feature_contribs))
    # Fix: the original referenced an undefined name `predictions` (NameError
    # at runtime); serialize the locally computed `output` instead.
    return ','.join(str(x) for x in output[0])
where `model` is the model object loaded by `model_fn`, `request_body` is the data from the inference request, `content_type` is the content type of the request, and `accept_type` is the request content type for the response.
### Deploy to an endpoint
Since the inference script is separate from the training script, here we use `XGBoostModel` to create a model from s3 artifacts and specify `inference.py` as the `entry_point`.
```
from sagemaker.xgboost.model import XGBoostModel
model_data = xgb_script_mode_estimator.model_data
print(model_data)
xgb_inference_model = XGBoostModel(
model_data=model_data,
role=role,
entry_point="inference.py",
framework_version="1.2-1",
)
predictor = xgb_inference_model.deploy(
initial_instance_count=1,
instance_type="ml.c5.xlarge",
)
```
### Explain the model's predictions on each data point
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot_feature_contributions(prediction):
    """
    Draw a waterfall-style horizontal bar chart of SHAP feature contributions.

    `prediction` is one flat row returned by the endpoint: the model output
    first, per-feature SHAP values in the middle, and the bias (expected
    value) as the last entry.
    """
    # Abalone attribute order; must match the libsvm feature indices 1..8.
    attribute_names = [
        "Sex",  # nominal / -- / M, F, and I (infant)
        "Length",  # continuous / mm / Longest shell measurement
        "Diameter",  # continuous / mm / perpendicular to length
        "Height",  # continuous / mm / with meat in shell
        "Whole weight",  # continuous / grams / whole abalone
        "Shucked weight",  # continuous / grams / weight of meat
        "Viscera weight",  # continuous / grams / gut weight (after bleeding)
        "Shell weight",  # continuous / grams / after being dried
    ]
    # Unpack: model output first, bias last, SHAP values in between.
    # NOTE(review): the element at index 1 is discarded (`_`) — confirm the
    # serialized response really carries an extra value in that position.
    prediction, _, *shap_values, bias = prediction
    if len(shap_values) != len(attribute_names):
        raise ValueError("Length mismatch between shap values and attribute names.")
    df = pd.DataFrame(data=[shap_values], index=["SHAP"], columns=attribute_names).T
    df.sort_values(by="SHAP", inplace=True)
    # Each bar starts where the cumulative sum of the previous SHAP values ends,
    # offset from the bias (base value).
    df["bar_start"] = bias + df.SHAP.cumsum().shift().fillna(0.0)
    df["bar_end"] = df.bar_start + df.SHAP
    # Ensure bar_start <= bar_end per row so bar widths are non-negative.
    df[["bar_start", "bar_end"]] = np.sort(df[["bar_start", "bar_end"]].values)
    df["hue"] = df.SHAP.apply(lambda x: 0 if x > 0 else 1)  # sign of contribution
    sns.set(style="white")
    # Foreground bars span from the axis origin to bar_end.
    ax1 = sns.barplot(x=df.bar_end, y=df.index, data=df, orient="h", palette="vlag")
    for idx, patch in enumerate(ax1.patches):
        # Annotate each bar with its signed SHAP value.
        x_val = patch.get_x() + patch.get_width() + 0.8
        y_val = patch.get_y() + patch.get_height() / 2
        shap_value = df.SHAP.values[idx]
        value = "{0}{1:.2f}".format("+" if shap_value > 0 else "-", shap_value)
        ax1.annotate(value, (x_val, y_val), ha="right", va="center")
    # White overlay bars mask everything before bar_start, leaving only the
    # delta (bar_start -> bar_end) visible: the waterfall effect.
    ax2 = sns.barplot(x=df.bar_start, y=df.index, data=df, orient="h", color="#FFFFFF")
    ax2.set_xlim(
        df[["bar_start", "bar_end"]].values.min() - 1, df[["bar_start", "bar_end"]].values.max() + 1
    )
    # Dashed reference line at the base (expected) value.
    ax2.axvline(x=bias, color="#000000", alpha=0.2, linestyle="--", linewidth=1)
    ax2.set_title("base value: {0:.1f} → model output: {1:.1f}".format(bias, prediction))
    ax2.set_xlabel("Abalone age")
    sns.despine(left=True, bottom=True)
    plt.tight_layout()
    plt.show()
def predict_and_plot(predictor, libsvm_str):
    """
    Query the endpoint with one libsvm record and plot its SHAP breakdown.

    The record's true label is stripped and replaced with a dummy (-99)
    before the request is sent, since inference does not need it.
    """
    _true_label, *features = libsvm_str.strip().split()
    response = predictor.predict(" ".join(["-99"] + features))  # use dummy label -99
    values = np.array([float(v) for v in response[0]])
    plot_feature_contributions(values)
```
The below figure shows features each contributing to push the model output from the base value (9.9 rings) to the model output (6.9 rings). The primary indicator for a young abalone according to the model is low shell weight, which decreases the prediction by 3.0 rings from the base value of 9.9 rings. Whole weight and shucked weight are also powerful indicators. The whole weight pushes the prediction lower by 0.84 rings, while shucked weight pushes the prediction higher by 1.6 rings.
```
a_young_abalone = "6 1:3 2:0.37 3:0.29 4:0.095 5:0.249 6:0.1045 7:0.058 8:0.067"
predict_and_plot(predictor, a_young_abalone)
```
The second example shows feature contributions for another sample, an old abalone. We again see that the primary indicator for the age of abalone according to the model is shell weight, which increases the model prediction by 2.36 rings. Whole weight and shucked weight also contribute significantly, and they both push the model's prediction higher.
```
an_old_abalone = "15 1:1 2:0.655 3:0.53 4:0.175 5:1.2635 6:0.486 7:0.2635 8:0.415"
predict_and_plot(predictor, an_old_abalone)
```
### (Optional) Delete the Endpoint
If you're done with this exercise, please run the `delete_endpoint` line in the cell below. This will remove the hosted endpoint and avoid any charges from a stray instance being left on.
```
predictor.delete_endpoint()
```
| github_jupyter |
## Dictionaries
- [Download the lecture notes](https://philchodrow.github.io/PIC16A/content/basics/dictionaries.ipynb).
Dictionaries (or `dict`s) are iterables, like lists, tuples, and sets. We've given them their own lecture notes because they are exceptionally useful and also somewhat more complicated than other common iterables.
A `dict` is a set of key-value pairs, and is typically used to indicate a relationship between different types of objects. Like sets, `dict`s are enclosed in `{}` curly braces. A `:` colon separates keys from values, and key-value pairs are separated by commas.
For example, here's a `dict` that assigns the commanding officer to each starship:
```
command_dict = {
"Enterprise A" : "Kirk",
"Enterprise D" : "Picard",
"DS9" : "Sisko",
"Voyager" : "Janeway"
}
```
One can "look up" the name of the commander by passing the name of the vessel as a subscript:
```
command_dict["DS9"]
```
The *keys* of a dict should be immutable and distinct.
```
# can't use mutable keys
bad_one = {
[1, 2] : "list"
}
# tuples are immutable, so this is ok
good_one = {
(1, 2) : "tuple"
}
good_one[(1, 2)]
# don't duplicate keys -- keys will be dropped.
bad_two = {
"key1" : "value1",
"key1" : "value2"
}
bad_two
```
In many cases, it's useful to build dictionaries incrementally, one key-value pair at a time:
```
d = {}
d["TNG"] = "pretty good"
d["DS9"] = "the best"
d["ENT"] = "not great"
d
```
## Dictionary Methods
Dictionaries come with a number of useful methods (functions) for retrieving and manipulating their values.
### Getting Data
A common problem when working with `dict`s comes when we try to access a key that doesn't exist yet.
```
d["TOS"]
# ---
for key in ["TNG", "TOS", "DS9", "ENT"]:
print(d[key])
# ---
```
Our entire code fails because we tried to access one nonexistent key. To avoid this, we can use the `get()` method, which allows us to specify a default value to return in case a key is not found.
```
for key in ["TNG", "TOS", "DS9", "ENT"]:
print(d.get(key, "unknown"))
# ---
```
We can also get keys and values from a `dict`. These objects are returned as special `dict_keys` and `dict_values` objects, but they can easily be converted into sets or lists.
```
d.keys()
set(d.keys())
list(d.values())
```
When iterating over key-value pairs, use the `items()` method:
```
for key, val in d.items():
print(key + " is " + val)
# ---
```
### Modifying Dicts
To remove keys from a `dict`, use the `pop()` method. This method returns the value associated to the supplied key, and then removes both from the `dict`.
```
d.pop("ENT")
d
```
To "fuse" two dicts, use `update()`:
```
d.update({"DIS" : "new", "the movies" : "bad"})
d
```
If any keys supplied to `update()` are already present, the old values will be overwritten:
```
d.update({"the movies" : "cringe"})
d
```
| github_jupyter |
# Import statements
```
from google.colab import drive
drive.mount('/content/drive')
from my_ml_lib import MetricTools, PlotTools
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import json
import datetime
import copy
from PIL import Image as im
import joblib
from sklearn.model_selection import train_test_split
# import math as Math
import random
import torch.optim
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torchvision
import cv2
```
# Saving and Loading code
```
# Saving and Loading models using joblib
def save(filename, obj):
    """Serialize `obj` to `filename` via joblib using the highest pickle protocol."""
    with open(filename, 'wb') as out:
        joblib.dump(obj, out, protocol=pickle.HIGHEST_PROTOCOL)
def load(filename):
    """Deserialize and return the object stored in `filename` via joblib."""
    # Fix: the original opened the file but then ignored the handle and passed
    # the *path* to joblib.load, opening the file a second time for no reason.
    with open(filename, 'rb') as handle:
        return joblib.load(handle)
```
# Importing Dataset
```
data_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/dataset/train.pkl"
x = load(data_path)
# save_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/dataset/"
# # saving the images and labels array
# save(save_path + "data_image.pkl",data_image)
# save(save_path + "data_label.pkl",data_label)
# # dict with labels as keys and lists of image arrays as values
# save(save_path + "my_dict.pkl",my_dict)
save_path = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/dataset/"
# saving the images and labels array
data_image = load(save_path + "data_image.pkl")
data_label = load(save_path + "data_label.pkl")
# dict with labels as keys and lists of image arrays as values
my_dict = load(save_path + "my_dict.pkl")
len(data_image) , len(data_label), my_dict.keys()
```
# Data Class and Data Loaders and Data transforms
```
len(x['names']) ,x['names'][4999] , data_image[0].shape
```
## Splitting the data into train and val
```
X_train, X_test, y_train, y_test = train_test_split(data_image, data_label, test_size=0.10, random_state=42,stratify=data_label )
len(X_train) , len(y_train) , len(X_test) ,len(y_test)
pd.DataFrame(y_test).value_counts()
```
## Data Class
```
class myDataClass(Dataset):
    """Custom dataset over parallel sequences of images and labels.

    Args:
        images: sequence of image arrays.
        labels: corresponding label for each image.
        transform: optional callable applied to the image tensor.
    """
    def __init__(self, images, labels, transform=None):
        self.images = images
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # to_tensor scales pixel values into [0, 1] and returns a (C, H, W) tensor.
        sample = torchvision.transforms.functional.to_tensor(self.images[idx])
        label = self.labels[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label
```
## Data Loaders
```
batch = 64
train_dataset = myDataClass(X_train, y_train)
test_dataset = myDataClass(X_test, y_test)
train_dataloader = DataLoader(train_dataset, batch_size= batch, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size= batch, shuffle=True)
# next(iter(train_dataloader))[0].shape
len(train_dataloader) , len(test_dataloader)
```
# Train and Test functions
```
def load_best(all_models, model_test):
    """
    Restore `model_test` from the most recently saved checkpoint.

    Takes the last path in `all_models`, loads its model weights into
    `model_test`, restores the matching optimizer state, and returns the
    model switched to eval mode.
    """
    checkpoint_path = all_models[-1]
    checkpoint = torch.load(checkpoint_path)
    model_test.load_state_dict(checkpoint['model_state'])
    # The criterion/optimizer are rebuilt only so the saved optimizer state
    # can be restored alongside the weights; neither is returned.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model_test.parameters(), lr=0)
    optimizer.load_state_dict(checkpoint['optim_state'])
    epoch = checkpoint['epoch']
    model_test.eval()
    return model_test
def train(save_path,epochs,train_dataloader,model,test_dataloader,optimizer,criterion,basic_name):
    """
    Train `model` for `epochs` epochs with per-epoch validation and checkpointing.

    After every epoch the model is evaluated on `test_dataloader`; whenever the
    validation loss reaches a new minimum, a checkpoint (epoch, model state,
    optimizer state, loss history) is written under `save_path` and its file
    name appended to `all_models`, which is also persisted via the module-level
    `save` helper.

    Returns (model, train_losses, val_losses, all_models).

    NOTE(review): depends on module-level `device`, `np`, `torch`, and `save`.
    """
    model_no = 1  # suffix for checkpoint file names
    c = 1  # unused; kept as-is
    all_models = []  # paths of every checkpoint written, oldest first
    valid_loss_min = np.Inf  # best (lowest) validation loss seen so far
    train_losses = []
    val_losses = []
    for e in range(epochs):
        train_loss = 0.0
        valid_loss = 0.0
        model.train()
        for idx, (images,labels) in enumerate(train_dataloader):
            images, labels = images.to(device) , labels.to(device)
            optimizer.zero_grad()
            log_ps= model(images)
            loss = criterion(log_ps, labels)
            loss.backward()
            optimizer.step()
            # incremental running mean of the batch losses over this epoch
            train_loss += ((1 / (idx + 1)) * (loss.data - train_loss))
        else:
            # for/else: this branch runs once after the training loop completes.
            accuracy = 0
            correct = 0  # unused; kept as-is
            model.eval()
            with torch.no_grad():
                for idx, (images,labels) in enumerate(test_dataloader):
                    images, labels = images.to(device) , labels.to(device)
                    log_ps = model(images)
                    _, predicted = torch.max(log_ps.data, 1)
                    loss = criterion(log_ps, labels)
                    # correct += (predicted == labels).sum().item()
                    equals = predicted == labels.view(*predicted.shape)
                    # mean per-batch accuracy, averaged over batches when printed
                    accuracy += torch.mean(equals.type(torch.FloatTensor))
                    # incremental running mean of the validation batch losses
                    valid_loss += ((1 / (idx + 1)) * (loss.data - valid_loss))
            print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
                e+1,
                train_loss,
                valid_loss
            ), "Test Accuracy: {:.3f}".format(accuracy/len(test_dataloader)))
            train_losses.append(train_loss)
            val_losses.append(valid_loss)
            if valid_loss < valid_loss_min:
                # new best validation loss: persist a checkpoint
                print('Saving model..' + str(model_no))
                valid_loss_min = valid_loss
                checkpoint = {
                    "epoch": e+1,
                    "model_state": model.state_dict(),
                    "optim_state": optimizer.state_dict(),
                    "train_losses": train_losses,
                    "test_losses": val_losses,
                }
                FILE = save_path + basic_name +"_epoch_" + str(e+1) + "_model_" + str(model_no)
                all_models.append(FILE)
                torch.save(checkpoint, FILE)
                model_no = model_no + 1
    # persist the full list of checkpoint paths for later reloading
    save(save_path + basic_name + "_all_models.pkl", all_models)
    return model, train_losses, val_losses, all_models
def plot(train_losses,val_losses,title='Training Validation Loss with CNN'):
    """Plot training and validation loss curves on one labelled figure."""
    for series, label in ((train_losses, 'Training loss'), (val_losses, 'Validation loss')):
        plt.plot(series, label=label)
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.legend()
    _ = plt.ylim()
    plt.title(title)
    plt.show()
def test(loader, model, criterion, device, name):
    """
    Evaluate `model` over `loader`, printing average loss and accuracy.

    `name` only labels the printed output (e.g. "TRAIN" or "TEST").
    Returns (y, y_hat): flat NumPy arrays of the true and predicted labels
    accumulated across all batches, suitable for a confusion matrix.

    NOTE(review): runs without torch.no_grad(), so autograd graphs are built
    during evaluation — consider wrapping the loop if memory matters.
    """
    test_loss = 0.
    correct = 0.
    total = 0.
    y = None  # accumulated true labels (None until the first batch)
    y_hat = None  # accumulated predicted labels
    model.eval()
    for batch_idx, (images, labels) in enumerate(loader):
        # move to GPU or CPU
        images, labels = images.to(device) , labels.to(device)
        target = labels
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(images)
        # calculate the loss
        loss = criterion(output,labels)
        # update average test loss (incremental running mean over batches)
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class (argmax over dim 1)
        pred = output.data.max(1, keepdim=True)[1]
        if y is None:
            # first batch: initialize the accumulators
            y = target.cpu().numpy()
            y_hat = pred.data.cpu().view_as(target).numpy()
        else:
            y = np.append(y, target.cpu().numpy())
            y_hat = np.append(y_hat, pred.data.cpu().view_as(target).numpy())
        correct += np.sum(pred.view_as(labels).cpu().numpy() == labels.cpu().numpy())
        total = total + images.size(0)
        # if batch_idx % 20 == 0:
        # print("done till batch" , batch_idx+1)
    print(name + ' Loss: {:.6f}\n'.format(test_loss))
    print(name + ' Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))
    return y, y_hat
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# def train(save_path,epochs,train_dataloader,model,test_dataloader,optimizer,criterion,basic_name)
# def plot(train_losses,val_losses,title='Training Validation Loss with CNN')
# def test(loader, model, criterion, device)
```
# Relu [ X=2 Y=3 Z=1 ]
## CNN - Block-1
### model
```
cfg1 = {
'B1': [16,16,'M'],
}
def make_layers1(cfg, batch_norm=True):
    """Build the block-1 feature extractor from a layer config list.

    Each int entry adds a 3x3 conv (padding 1) with optional BatchNorm and
    ReLU; 'M' adds MaxPool(kernel 4, stride 3); 'M1' adds MaxPool(kernel 2,
    stride (1, 2), padding (0, 1)). Input is assumed to have 3 channels.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=4, stride=3))
        elif spec == 'M1':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=(1,2), padding=(0,1)))
        else:
            layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(spec))
            layers.append(nn.ReLU(inplace=True))
            channels = spec
    return nn.Sequential(*layers)
class Model_B1(nn.Module):
    """Single-block CNN classifier: a `features` extractor + 4-layer MLP head."""

    def __init__(self, features):
        super(Model_B1, self).__init__()
        self.features = features
        # MLP head: 1600 -> 512 -> 256 -> 64 -> 10 with ReLU + Dropout(0.5)
        # between the linear layers.
        head = [
            nn.Linear(1600, 512), nn.ReLU(True), nn.Dropout(p=0.5),
            nn.Linear(512, 256), nn.ReLU(True), nn.Dropout(p=0.5),
            nn.Linear(256, 64), nn.ReLU(True), nn.Dropout(p=0.5),
            nn.Linear(64, 10),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten per sample
        return self.classifier(flat)
# m = Model_B1(make_layers1(cfg1['B1']))
# for i,l in train_dataloader:
# o = m(i)
model1 = Model_B1(make_layers1(cfg1['B1'])).to(device)
learning_rate = 0.01
criterion1 = nn.CrossEntropyLoss()
optimizer1 = optim.Adam(model1.parameters(), lr=learning_rate)
print(model1)
```
###train
```
# !rm '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block1/'*
# !ls '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block1/'
save_path1 = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block1/"
# /content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_2/cnn_block123
# /content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_2/cnn_block12
# /content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_2/cnn_block1
m, train_losses, val_losses,m_all_models = train(save_path1,100,train_dataloader,model1,test_dataloader,optimizer1,criterion1,"cnn_b1")
```
### Plots and test
```
plot(train_losses,val_losses,'Training Validation Loss with CNN-block1')
all_models1 = load(save_path1 + "cnn_b1_all_models.pkl")
FILE = all_models1[-1]
m = Model_B1(make_layers1(cfg1['B1'])).to(device)
m = load_best(all_models1,m)
train_y, train_y_hat = test(train_dataloader, m, criterion1, device, "TRAIN")
cm = MetricTools.confusion_matrix(train_y, train_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
test_y, test_y_hat = test(test_dataloader, m, criterion1, device,"TEST")
cm = MetricTools.confusion_matrix(test_y, test_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
```
## CNN-Block12
### model
```
cfg2 = {
'B12': [16,16,'M', 32, 32, 32,'M'],
}
def make_layers2(cfg, batch_norm=True):
    """Build the block-1+2 feature extractor from a layer config list.

    Each int entry adds an unpadded 3x3 conv with optional BatchNorm and
    ReLU; 'M' adds MaxPool(kernel 2, stride 2); 'M1' adds MaxPool(kernel 2,
    stride 1, padding 1). Input is assumed to have 3 channels.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif spec == 'M1':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=1, padding=1))
        else:
            layers.append(nn.Conv2d(channels, spec, kernel_size=3))
            if batch_norm:
                layers.append(nn.BatchNorm2d(spec))
            layers.append(nn.ReLU(inplace=True))
            channels = spec
    return nn.Sequential(*layers)
class Model_B12(nn.Module):
    """Two-block CNN classifier: a `features` extractor + 3-layer MLP head."""

    def __init__(self, features):
        super(Model_B12, self).__init__()
        self.features = features
        # MLP head: 512 -> 256 -> 64 -> 10 with ReLU + Dropout(0.5) between.
        head = [
            nn.Linear(512, 256), nn.ReLU(True), nn.Dropout(p=0.5),
            nn.Linear(256, 64), nn.ReLU(True), nn.Dropout(p=0.5),
            nn.Linear(64, 10),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten per sample
        return self.classifier(flat)
# m = Model_B12(make_layers2(cfg2['B12']))
# for i,l in train_dataloader:
# o = m(i)
model2 = Model_B12(make_layers2(cfg2['B12'])).to(device)
learning_rate = 0.01
criterion2 = nn.CrossEntropyLoss()
optimizer2 = optim.Adam(model2.parameters(), lr=learning_rate)
print(model2)
```
### Train
```
# !rm '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block12/'*
# !ls '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block12/'
save_path2 = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block12/"
m, train_losses, val_losses,m_all_models = train(save_path2,100,train_dataloader,model2,test_dataloader,optimizer2,criterion2,"cnn_b12")
```
### Test and Plots
```
plot(train_losses,val_losses,'Training Validation Loss with CNN-block1')
all_models2 = load(save_path2 + "cnn_b12_all_models.pkl")
FILE = all_models2[-1]
m2 = Model_B12(make_layers2(cfg2['B12'])).to(device)
m2 = load_best(all_models2,m2)
train_y, train_y_hat = test(train_dataloader, m2, criterion2, device, "TRAIN")
cm = MetricTools.confusion_matrix(train_y, train_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
test_y, test_y_hat = test(test_dataloader, m2, criterion2, device,"TEST")
cm = MetricTools.confusion_matrix(test_y, test_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
```
## CNN-Block-123
```
```
### model
```
cfg3 = {
'B123': [16,16,'M',32,32,32,'M',64,'M'],
}
def make_layers3(cfg, batch_norm=True):
    """Build the block-1+2+3 feature extractor from a layer config list.

    Each int entry adds an unpadded 3x3 conv with optional BatchNorm and
    ReLU; 'M' adds MaxPool(kernel 2, stride 2); 'M1' adds MaxPool(kernel 2,
    stride 1). Input is assumed to have 3 channels.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif spec == 'M1':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
        else:
            layers.append(nn.Conv2d(channels, spec, kernel_size=3))
            if batch_norm:
                layers.append(nn.BatchNorm2d(spec))
            layers.append(nn.ReLU(inplace=True))
            channels = spec
    return nn.Sequential(*layers)
class Model_B123(nn.Module):
    """Three-block CNN classifier: a `features` extractor + dropout/linear head."""

    def __init__(self, features):
        super(Model_B123, self).__init__()
        self.features = features
        # Head: Dropout(0.5) then a single 64 -> 10 linear layer.
        self.classifier = nn.Sequential(nn.Dropout(0.5), nn.Linear(64, 10))

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten per sample
        return self.classifier(flat)
# m = Model_B123(make_layers3(cfg3['B123']))
# for i,l in train_dataloader:
# o = m(i)
model3 = Model_B123(make_layers3(cfg3['B123'])).to(device)
learning_rate = 0.01
criterion3 = nn.CrossEntropyLoss()
optimizer3 = optim.Adam(model3.parameters(), lr=learning_rate)
print(model3)
```
### train
```
# !rm '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block123/'*
# !ls '/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block123/'
save_path3 = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A3/models_saved_Q1/1_3/bw_fc/Dropout(0.5)/cnn_block123/"
m, train_losses, val_losses,m_all_models = train(save_path3,100,train_dataloader,model3,test_dataloader,optimizer3,criterion3,"cnn_b123")
```
### Tests and Plots
```
plot(train_losses,val_losses,'Training Validation Loss with CNN-block1')
all_models3 = load(save_path3 + "cnn_b123_all_models.pkl")
FILE = all_models3[-1]
m3 = Model_B123(make_layers3(cfg3['B123'])).to(device)
m3 = load_best(all_models3,m3)
train_y, train_y_hat = test(train_dataloader, m3, criterion3, device, "TRAIN")
cm = MetricTools.confusion_matrix(train_y, train_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
test_y, test_y_hat = test(test_dataloader, m3, criterion3, device,"TEST")
cm = MetricTools.confusion_matrix(test_y, test_y_hat, nclasses=10)
PlotTools.confusion_matrix(cm, [i for i in range(10)], title='',
filename='Confusion Matrix with CNN', figsize=(6,6))
```
| github_jupyter |
<div style="text-align: center; line-height: 0; padding-top: 2px;">
<img src="https://www.quantiaconsulting.com/logos/quantia_logo_orizz.png" alt="Quantia Consulting" style="width: 600px; height: 250px">
</div>
# Prequential Error - Solution
```
import numpy as np
from sklearn import datasets as skdatasets
from river.stream import iter_sklearn_dataset
from river.naive_bayes import GaussianNB
from river.metrics import Accuracy
iris = skdatasets.load_iris()
```
## The `progressive_val_score` method
The [progressive_val_score](https://riverml.xyz/latest/api/evaluate/progressive-val-score/) method implements the prequential evaluation. It requires three components:
- a data stream
- a model
- a metric
```
from river.evaluate import progressive_val_score
# Setup stream and estimators
stream = iter_sklearn_dataset(iris, shuffle=True, seed=42)
model = GaussianNB()
metric = Accuracy()
# Setup evaluator
progressive_val_score(dataset=stream, model=model, metric=metric, print_every=10)
```
#### The [progressive_val_score](https://riverml.xyz/latest/api/evaluate/progressive-val-score/) method saves all the predictions in memory and uses all of them to compute the metrics
## Sliding Window Prequential Error
---
The [Rolling](https://riverml.xyz/latest/api/metrics/Rolling/) method is a wrapper that allows you to apply a metric over a window of observations. Under the hood, a buffer with the window_size most recent pairs of (y_true, y_pred) is memorised. When the buffer is full, the oldest pair is removed and the revert method of the metric is called with said pair.
```
from river.metrics import Rolling
# Sliding-window prequential evaluation: accuracy over the last 50 predictions.
metric = Rolling(Accuracy(), window_size=50)
model = GaussianNB()
for x, y in iter_sklearn_dataset(iris, shuffle=True, seed=42):
    y_p = model.predict_one(x)  # Predict class (None until the model has seen data)
    if y_p is not None:
        # test-then-train: score the prediction before learning from the label
        print(metric.update(y_true=y, y_pred=y_p))
    model.learn_one(x, y)  # Train the model
```
## Fading Factor Prequential Error
---
It gives more importance to the new predictions and less importance to the oldest ones.
The estimation is updated in this way:
- correct_pred = $\alpha$ * correct_pred + value
- total_seen = $\alpha$ * total_seen + 1.0
with $0 < \alpha \leq 1$. The larger $\alpha$ is, the greater the old predictions importance is.
```
# Fading-factor prequential accuracy: new = alpha * old + current observation,
# so recent predictions dominate and old ones decay geometrically.
correct_pred = 0.0  # faded count of correct predictions
total = 0.0  # faded count of predictions seen
alpha = 0.95  # fading factor: larger alpha gives old predictions more weight
print_every = 10  # print the running accuracy every 10 instances
i = 1
instance = 1  # 1-based index of the current stream instance
wrong = 0  # unused; kept as-is
model = GaussianNB()
for x, y in iter_sklearn_dataset(iris, shuffle=True, seed=42):
    y_p = model.predict_one(x)  # Predict class (None until the model has seen data)
    if y_p is not None:
        to_add = 0.0
        if y == y_p:
            to_add = 1.0
        # fading-factor update of both the numerator and the denominator
        correct_pred = (alpha * correct_pred) + to_add
        total = (alpha * total) + 1.0
    if instance == print_every * i:
        if total > 0:
            print('[' + str(instance) + '] Accuracy: ' + str(np.round((correct_pred / total * 100),2)) + '%')
        else:
            print('[' + str(instance) + '] Accuracy: 0.00%')
        i += 1
    model.learn_one(x, y)  # Train the model
    instance += 1
```
##### © Quantia Consulting, srl. All rights reserved.
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.