text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
## Example 1: `Hello world`
```
# Print the character string `Hello world!`
print('Hello world!')
```
## Example 2: Make a plot of the US unemployment rate
Download unemployment rate data from FRED (https://fred.stlouisfed.org/series/UNRATE/) and make a well-labeled plot.
```
# Import the pandas library as pd
import pandas as pd
# Import the plotting library matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Download the unemployment rate data (PROVIDED)
# Fix: sep uses a raw string — '\s' in a plain literal is an invalid escape
# sequence (SyntaxWarning on modern Python).
unemployment = pd.read_csv('https://fred.stlouisfed.org/data/UNRATE.txt', skiprows=24, sep=r'\s+', index_col=0, parse_dates=True)
# Print first 5 rows of unemployment variable
unemployment.head()
# Create a well-labeled plot of the US unemployment rate
unemployment.plot(legend=False, lw=3, alpha=0.75)
plt.ylabel('Percent')
plt.xlabel('Date')
plt.title('US Unemployment Rate')
# Save the figure to the current working directory at 120 dots per inch resolution
plt.savefig('unemployment.png', dpi=120)
```
## Example 2: Date and time
Use the `datetime` module to print the current date and time
```
# Import the datetime module
import datetime
# Create a variable called `today` that stores the current date and time
today = datetime.datetime.today()
# Print the value of `today`
print(today)
# Print today's date formatted as: month day, year
print(today.strftime('%B %d, %Y'))
# Print the time component of `today` as: HH:MM:SS
# Bug fix: %I is the 12-hour clock (no AM/PM marker, so times were ambiguous);
# %H is the zero-padded 24-hour HH the comment promises.
print(today.strftime('%H:%M:%S'))
```
## Example 3: Approximate a geometric series
Consider the infinite geometric series:
\begin{align}
\sum_{k=0}^{\infty} r^k & = 1 + r + r^2 + r^3 + \cdots
\end{align}
If $|r|<1$, then the series converges to:
\begin{align}
\frac{1}{1-r}
\end{align}
We can verify this fact numerically by computing the *truncated* series, i.e., compute the series to a finite number of terms. We will do this for $r=0.2$. The idea is to create a variable $s$ that is initially equal to zero. Then we let $k$ increment from 0, 1, up to $N$ and each time add $r^k$ to $s$.
```
# Common ratio of the geometric series
r = 0.2
# Number of terms (k = 0..N) in the truncated series
N = 25
# Running total of the summation, initialized to zero
s = 0
# For each k, add r**k to the sum and print the running value
# (indentation reconstructed from the flattened notebook export)
for k in range(N + 1):
    s += r ** k
    print(s)
```
## Example 4: Draw random values from a normal probability distribution
The `numpy` library has a bunch of powerful tools for working with numbers. Here we'll use `numpy` to draw a random sample from the normal probability distribution. Specifically, 100 values from $\mathcal{N}(2,0.2^2)$.
```
# Import the numpy numerical tools library as np
import numpy as np
# Set the seed or state of the numpy random number generator
# (fixed seed -> the 100 draws below are reproducible)
np.random.seed(126)
# Create a variable called 'x' that contains 100 draws from the normal(2,0.2^2) distribution
x = np.random.normal(loc=2,scale=0.2,size=100)
# NOTE(review): plt is not imported in this cell — it relies on the
# matplotlib import from the earlier example cell of this notebook.
plt.plot(x)
# Print the mean of the values in the variable 'x'
print(np.mean(x))
# Print the standard deviation of the values in the variable 'x'
print(np.std(x))
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import datetime
import warnings
warnings.filterwarnings('ignore')

# Load the per-patient coronavirus records.
coronavirus = pd.read_csv('patient2.csv')
coronavirus.head()
coronavirus.shape

# Keep only the features used by the classifier and drop unusable rows.
coronavirus = coronavirus[['gender', 'age', 'death', 'symptom']]
coronavirus = coronavirus[coronavirus['gender'].notna()]
coronavirus['symptom'] = coronavirus['symptom'].fillna(0)
coronavirus = coronavirus[coronavirus['age'].notna()]
# Encode gender numerically: male -> 0, female -> 1.
coronavirus['gender'].replace(['male', 'female'], [0, 1], inplace=True)
coronavirus.head()

# Binarize the symptom column: any non-zero symptom description -> 1.
# Bug fix: the original chained assignment
#     coronavirus['symptom'] = symptom[symptom != 0] = 1
# assigned the scalar 1 to the WHOLE column, erasing the "no symptom" rows.
coronavirus.loc[coronavirus['symptom'] != 0, 'symptom'] = 1
coronavirus.tail()

# Binarize the death column: rows holding a date of death become 1.
# Generalizes the original 21 near-duplicate .replace() calls, which only
# covered a hard-coded list of dates and would miss any new date value.
death_is_date = coronavirus['death'].astype(str).str.contains('/', na=False)
coronavirus.loc[death_is_date, 'death'] = 1

from sklearn.neighbors import KNeighborsClassifier

# Target: death flag; features: gender, age, symptom.
y = coronavirus['death'].astype('int')
X = coronavirus.drop('death', axis=1).astype('int')
coronavirus.shape

model = KNeighborsClassifier()
model.fit(X, y)
model.score(X, y)

def survie(model):
    """Interactively ask for sex / birth year / symptoms and print the
    model's survival prediction and class probabilities.
    (Indentation reconstructed from the flattened notebook export.)
    """
    gender = input('What is your sex?: ')
    age = input('What is your birth year?: ')
    symptom = input('Do you have any symptom?: ')
    x = np.array([gender, age, symptom]).reshape(1, 3)
    test = model.predict_proba(x).T
    print(model.predict(x))
    print(model.predict_proba(x))
    print(model.predict_proba(x).shape)
    print(test[0])
    print(test[1])

survie(model)

# NOTE(review): full_table is loaded but never used in this chunk.
full_table = pd.read_csv('covid_19_clean_complete.csv', parse_dates=['Date'])

# Visualize the feature space in 2D via PCA, colored by outcome.
from sklearn.decomposition import PCA
model = PCA(n_components=3)
X_reduced = model.fit_transform(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y)
plt.colorbar()
```
| github_jupyter |
## Libraries
```
#Import All Dependencies
# import cv2, os, bz2, json, csv, difflib, requests, socket, whois, urllib.request, urllib.parse, urllib.error, re, OpenSSL, ssl
import numpy as np
from datetime import datetime
from urllib.parse import urlparse
from urllib.request import Request, urlopen
# from selenium import webdriver
from matplotlib import pyplot as plt
from bs4 import BeautifulSoup
# from timeout import timeout
import requests
import numpy as np
import urllib
import cv2
import re
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options
from PIL import Image
from io import BytesIO
import time
import os
import os.path
from os import path
import io
from difflib import SequenceMatcher
import json
import contextlib
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import sys
```
### TinyURL
```
#Taken from https://www.geeksforgeeks.org/python-url-shortener-using-tinyurl-api/
#Returns the short-code part of the TinyURL the API hands back
#So, http://tinyurl.com/y5bffkh2 ---becomes---> y5bffkh2
def getTinyURL(URL):
    """Shorten URL via the TinyURL API and return only the short code.

    Fixes: the codec name had a stray trailing space ('utf-8 '); the short
    code is now taken after the last '/' instead of the hard-coded [19:]
    slice, so a response with a different scheme/host prefix (e.g. https)
    no longer yields a corrupted code.
    """
    request_url = 'http://tinyurl.com/api-create.php?' + urlencode({'url': URL})
    with contextlib.closing(urlopen(request_url)) as response:
        tiny = response.read().decode('utf-8')
        return tiny.rsplit('/', 1)[-1]
#Returns Beautiful Soup object
def getHTML(URL):
    """Fetch URL and parse it; return a BeautifulSoup document, or None on any failure."""
    try:
        # Browser-like User-Agent so the request is not rejected as a bot.
        hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'}
        response = requests.get(URL, headers=hdr)
        document = BeautifulSoup(response.text, 'html.parser')
        print("Built Soup")
        return document
    except Exception:
        # Any network or parse failure is reported to the caller as None.
        return None
```
### XPath
```
from UnitTestingOnXPath import getMyStackXPath
```
### Similarity
```
# Similarity tiers used by brainOfElementExtractor: elements below a tier's
# threshold are discarded when any element clears that tier.
highlySimilarValue = 0.40 #Denotes high chances. If occured, remove the less elements
moderatelySimilarValue = 0.15 #Denotes moderate chances. If occured, remove the less elements
bareMinimumSimilarValue = 0.05 #Denotes elements which have barely any similarity, so discard the elements below them

def similar(a, b):
    """Return the difflib similarity ratio of the two strings, whitespace-trimmed."""
    return SequenceMatcher(None, a.strip(), b.strip()).ratio()
```
### Element Extractor
1. Try different variations of price text.
2. Check if the size of the element is more than 1x1 //Walmart Page cause problem using hidden element
3.
```
#Returns the elements for the specified regex which will match the tag content/string
def getTheElementListForSpecificRegexContent(regexContent, soup):
    """Return the parent tag of every text node in soup whose text matches regexContent.

    On any failure (e.g. an invalid regex) the error is printed and whatever
    was collected so far is returned.
    """
    matches = []
    try:
        pattern = re.compile(regexContent)
        for textNode in soup(text=pattern):
            matches.append(textNode.parent)
    except Exception as error:
        print(error)
    return matches
#Find scores for set of element
def defineScoreForElementList(elementList, comparatorValue, highProbailityTags, elementType):
    """Assign a similarity score to every element in elementList.

    The base score is the string similarity between comparatorValue and the
    element's text; a fixed bonus (the module-level weightFactorForTheTag,
    defined further down this notebook) is added once per matching
    high-probability tag name and again when elementType appears anywhere in
    the element's attributes.
    """
    scores = []
    for element in elementList:
        score = similar(comparatorValue, element.string)
        # Bonus for elements whose tag name matches an expected tag.
        for expectedTag in highProbailityTags:
            if expectedTag in element.name:
                score += weightFactorForTheTag
        # Bonus for elements whose attributes mention the element type.
        if elementType in json.dumps(element.attrs):
            score += weightFactorForTheTag
        scores.append(score)
    print("defineScoreForElementList() len(scoreList):", len(scores))
    print("defineScoreForElementList() len(elementList):", len(elementList))
    return scores
"""
Find top elements which maps to the comparator:
typeOfComparators is set of values which a element's string/content should contain
highProbailityTags are tags which will likely have those typeOfComparators values so give them more weight
elementType is useful because the attribute of the tag usually contain such value
"""
def brainOfElementExtractor(typeOfComparators, soup, highProbailityTags, elementType):
elementList = []
for comparatorValue in typeOfComparators:
print("brainOfElementExtractor() comparatorValue",comparatorValue)
elementList = elementList + getTheElementListForSpecificRegexContent(comparatorValue, soup)
print("brainOfElementExtractor() len(elementList):", len(elementList))
# print(elementList) #For Debugging
# print(defineScoreForElementList(elementList, comparatorValue, highProbailityTags, elementType)) #For Debugging
#Remove Duplicate Elements from list
elementListCurated = []
for i in elementList:
if i not in elementListCurated:
elementListCurated.append(i)
scoreList = defineScoreForElementList(elementListCurated, comparatorValue, highProbailityTags, elementType)
print("brainOfElementExtractor() len(scoreList):", len(scoreList))
#Check if list is empty
if len(scoreList) == 0:
return None
returnListOfElements = []
returnListOfScores = []
if max(scoreList)>highlySimilarValue: #Check if even one value has 'High' similarity score. If yes, then only send those elements
for i in range(0, len(scoreList)):
if scoreList[i]>highlySimilarValue:
returnListOfElements.append(elementListCurated[i])
returnListOfScores.append(scoreList[i])
elif max(scoreList)>moderatleySimilarValue: #Check if it passes 'Moderately'
for i in range(0, len(scoreList)):
if scoreList[i]>moderatelySimilarValue:
returnListOfElements.append(elementListCurated[i])
returnListOfScores.append(scoreList[i])
else:
for i in range(0, len(scoreList)): #Check if it passes bare minimum
if scoreList[i]>bareMinimumSimilarValue:
returnListOfElements.append(elementListCurated[i])
returnListOfScores.append(scoreList[i])
return returnListOfElements, returnListOfScores
```
##### Price
```
# Price is supposed to be defined in the following tags, so give higher weight
priceHighProbailityTags = ["h", "span"]
weightFactorForTheTag = 0.3

#Try different variations of Price Text
#Order: \$379.00, 379.00, \$379, 379
def findPriceElementTagList(gShopPrice, soup):
    """Return regex-ready variants of a Google-Shopping price string.

    For "$379.00" the variants, in order, are: "\\$379.00", "379.00",
    "\\$379", "379". A string without a '$' yields no variants.
    """
    # GShop sometimes gives prices with a "now" suffix like "$0.00 now".
    gShopPrice = gShopPrice.replace("now", "").strip()
    variants = []
    if "$" in gShopPrice:
        # '$' is a regex metacharacter, so one variant set escapes it.
        escaped = gShopPrice.replace("$", "\\$")
        plain = gShopPrice.replace("$", "")
        variants.append(escaped)
        variants.append(plain)
        variants.append(escaped.split(".")[0])
        variants.append(plain.split(".")[0])
    return variants
```
##### Heading
```
# Heading is supposed to be defined in the following tags, so give higher weight
headingHighProbailityTags = ["h", "span"]
weightFactorForTheTag = 0.3

#Try different variations of Heading Text
def findHeadingElementTagList(gShopHeading, soup):
    """Return search variants of a product heading, longest first.

    Bug fix: the original loop body was only `return typeOfPrices`, which
    referenced an undefined name (NameError) and never built any variants.
    Variants are now progressively shorter prefixes of the heading (dropping
    one trailing word per step), mirroring findPriceElementTagList's
    most-specific-first ordering. TODO(review): confirm this matches the
    author's intent — the original implementation was unfinished.
    """
    gShopHeading = gShopHeading.strip()
    words = gShopHeading.split(' ')
    typeOfHeadings = [' '.join(words[:i]) for i in range(len(words), 0, -1)]
    return typeOfHeadings
```
### Testing
```
# Walmart product page used as the live test target for getHTML() and the
# extractor functions defined above.
URL = "https://www.walmart.com/ip/Farberware-3-2-Quart-Digital-Oil-Less-Fryer-White/264698854?athcpid=264698854&athpgid=athenaHomepage&athcgid=null&athznid=BestInDeals&athieid=v1&athstid=CS020&athguid=466001f5-46cfa622-5eb821569a18a716&athancid=null&athena=true"
soup = getHTML(URL)
```
##### Price
```
gShopPrice = "$39.00"
typeOfPrices = findPriceElementTagList(gShopPrice, soup)
brainOfElementExtractor(typeOfPrices, soup, priceHighProbailityTags, "price")
```
##### Heading
```
gShopHeading = "White"
typeOfHeadings = [gShopHeading]
brainOfElementExtractor(typeOfHeadings, soup, headingHighProbailityTags, "heading")
def getTheTagElementListForPrice(gShopPriceUpdated, soup):
    """Return the tag most likely holding the given price text, or None.

    Walks every text node matching the price regex: the first match's parent
    is the provisional answer; a heading-like ("h*") or "span" parent wins
    immediately. (Indentation reconstructed from the flattened notebook
    export.)
    """
    returnPriceElementTag = None
    try:
        matches = soup(text=re.compile(gShopPriceUpdated))
        for elem in matches:
            parent = elem.parent
            if returnPriceElementTag is None:
                # Provisional: first matching element, kept unless a better tag appears.
                returnPriceElementTag = parent
            if "h" in parent.name:
                # Heading tag found — return it and stop searching.
                return parent
            if "span" in parent.name:
                # Span tag found — dump it for debugging, then return it.
                print("elem: ", elem)
                print("elem.parent: ", parent)
                print("elem.parent.name: ", parent.name)
                print(str(parent))
                print("" + json.dumps(parent.attrs))
                return parent
    except Exception:
        print("Error in getTheTagElementForPrice(gShopPriceUpdated, soup)")
    return returnPriceElementTag
#Try different variations of Price Text
#Order: \$379.00, 379.00, \$379, 379
def findPriceElementTag(gShopPrice, soup):
    """Locate the page element holding the given price (e.g. "$379.00").

    Tries four regex-ready variants of the price in order — escaped with
    cents, unescaped with cents, escaped without cents, unescaped without
    cents — and returns the first reasonably small candidate tag, or None.
    Refactors the original four copy-pasted attempt blocks into one loop.
    """
    # GShop sometimes gives prices with a "now" suffix like "$0.00 now".
    gShopPrice = gShopPrice.replace("now", "").strip()
    print("Hi", gShopPrice)
    if "$" not in gShopPrice:
        return None
    escaped = gShopPrice.replace("$", "\\$")  # '$' is a regex metacharacter
    plain = gShopPrice.replace("$", "")
    for gShopPriceUpdated in (escaped, plain, escaped.split(".")[0], plain.split(".")[0]):
        print(gShopPriceUpdated)
        candidate = getTheTagElementListForPrice(gShopPriceUpdated, soup)
        # Skip bloated container tags: a genuine price tag renders in < 400 chars.
        if candidate is not None and len(str(candidate)) < 400:
            return candidate
    return None
# NOTE(review): `priceOfCurrentScreenshot` is not defined anywhere in this
# chunk — presumably set in an earlier notebook cell; confirm before running.
returnPriceElementTag = findPriceElementTag(priceOfCurrentScreenshot, soup)
getMyStackXPath(returnPriceElementTag)
```
| github_jupyter |
___
<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
___
# Python Crash Course Exercises
This is an optional exercise to test your understanding of Python Basics. If you find this extremely challenging, then you probably are not ready for the rest of this course yet and don't have enough programming experience to continue. I would suggest you take another course more geared towards complete beginners, such as [Complete Python Bootcamp](https://www.udemy.com/complete-python-bootcamp/?couponCode=PY20)
## Exercises
Answer the questions or complete the tasks outlined in bold below, use the specific method described if applicable.
** What is 7 to the power of 4?**
```
7**4
```
** Split this string:**
s = "Hi there Sam!"
**into a list. **
```
s = "Hi there Sam!"
s = s.split(' ')
print(s)
```
** Given the variables:**
planet = "Earth"
diameter = 12742
** Use .format() to print the following string: **
The diameter of Earth is 12742 kilometers.
```
planet = "Earth"
diameter = 12742
print("The diameter of {} is {} kilometers.".format(planet,diameter))
```
** Given this nested list, use indexing to grab the word "hello" **
```
lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]
print(lst[3][1][2][0])
```
** Given this nested dictionary grab the word "hello". Be prepared, this will be annoying/tricky **
```
d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]}
print(d['k1'][3]['tricky'][3]['target'][3])
```
** What is the main difference between a tuple and a list? **
```
# Tuple is immutable (cant change)
# List is mutable (can change)
```
** Create a function that grabs the email website domain from a string in the form: **
user@domain.com
**So for example, passing "user@domain.com" would return: domain.com**
```
def domainGet(email):
    """Return the domain part of an email address (the token after the first '@')."""
    parts = email.split('@')
    return parts[1]

domainGet('user@domain.com')
```
** Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization. **
```
def findDog(s):
    """Return True when 'dog' appears as a standalone word in s, ignoring case."""
    words = s.lower().split()
    return 'dog' in words

findDog('Is there a dog here?')
```
** Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases. **
```
def countDog(st):
    """Count how many whitespace-separated words in st equal 'dog', ignoring case."""
    return sum(1 for word in st.lower().split() if word == 'dog')

countDog('This dog runs faster than the other dog dude!')
```
** Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example:**
seq = ['soup','dog','salad','cat','great']
**should be filtered down to:**
['soup','salad']
```
seq = ['soup','dog','salad','cat','great']
list(filter(lambda word: word[0] == 's', seq))
```
### Final Problem
**You are driving a little too fast, and a police officer stops you. Write a function
to return one of 3 possible results: "No ticket", "Small ticket", or "Big Ticket".
If your speed is 60 or less, the result is "No Ticket". If speed is between 61
and 80 inclusive, the result is "Small Ticket". If speed is 81 or more, the result is "Big Ticket". Unless it is your birthday (encoded as a boolean value in the parameters of the function) -- on your birthday, your speed can be 5 higher in all
cases. **
```
def caught_speeding(speed, is_birthday):
    """Return the ticket size for a given speed.

    <=60 -> 'No Ticket'; 61-80 -> 'Small Ticket'; 81+ -> 'Big Ticket'.
    On your birthday every limit shifts up by 5.
    """
    effective = speed - 5 if is_birthday else speed
    if effective <= 60:
        return 'No Ticket'
    if effective <= 80:
        return 'Small Ticket'
    return 'Big Ticket'

caught_speeding(81, True)
caught_speeding(81, False)
```
# Great job!
| github_jupyter |
## 内容检索
1. 简单绘图 --- plot函数、title函数、subplot函数
2. 绘制百度的全年股票价格 --- figure函数、add_subplot函数、一些设置x轴刻度和标签的函数
3. 绘制直方图 --- hist函数
4. 对数坐标图 --- semilogx函数等
5. 散点图 --- scatter函数
6. 着色 --- fill_between函数
7. 图例和注释 --- legend函数、annotate函数
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
## 1. 简单绘图
matplotlib.pyplot包中包含了简单绘图功能。调用的相关绘图函数都会改变当前的绘图,直到我们将绘图存入文件或使用show函数显示出来。
###1.1 绘制多项式函数
```
# Cubic polynomial x^3 + 2x^2 + 3x + 4 and its second derivative.
coefficients = np.array([1, 2, 3, 4]).astype(float)
func = np.poly1d(coefficients)
func2 = func.deriv(m=2)
# 30 evenly spaced sample points on [-10, 10].
x = np.linspace(-10, 10, 30)
y = func(x)
y2 = func2(x)
```
其中,linspace函数创建x轴的数值,在-10和10之间产生30个均匀分布的值。
```
plt.plot(x, y)
plt.plot(x, y2, 'r>')
plt.xlabel('x')
plt.ylabel('y(x)')
plt.show()
```
plot函数可以接受任意个数的参数,我们可以使用可选的**格式字符串参数指定线条的颜色和风格**,默认为`'b-'`即蓝色视线。你可以指定其他风格。
[plot文档](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot)
###1.2 绘制多项式函数及其导函数
```
func1 = func.deriv(m=1)
y1 = func1(x)
plt.plot(x, y, 'ro', x, y1, 'g--')
plt.show()
```
###1.3 分组绘制多幅图
```
plt.subplot(311)
plt.plot(x, y, 'r-')
plt.title("Polynomial")
plt.subplot(312)
plt.plot(x, y1, 'b^')
plt.title("First Derivative")
plt.subplot(313)
plt.plot(x, y2, 'go')
plt.title("Second Derivative")
plt.show()
```
## 2. 绘制百度的全年股票价格
Matplotlib可以帮助我们监控股票投资。使用matplotlib.finance包中的函数可以从雅虎财经频道下载股价数据,并绘制K线图(candlestick)。
股市及期货市场中的K线图的画法包含四个数据,即开盘价、最高价、最低价、收盘价,所有的k线都是围绕这四个数据展开,反映大势的状况和价格信息。
```
# 将当前的日期减去1年作为起始日期
from matplotlib.dates import DateFormatter
from matplotlib.dates import DayLocator
from matplotlib.dates import MonthLocator
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.finance import candlestick_ochl
import sys
from datetime import date
today = date.today()
start = (today.year - 1, today.month, today.day)
```
创建定位器(locator),使用来自matplotlib.dates的对象在x轴定位月份和日期
```
alldays = DayLocator()
months = MonthLocator()
```
创建日期格式化器(date formatter),以格式化x轴的日期。该格式化器创建一个字符串,包含简写的月份和年份。
```
month_formatter = DateFormatter("%b %Y")
# 从财经频道下载股价数据
symbol = 'BIDU' # 百度的股票代码
quotes = quotes_historical_yahoo_ochl(symbol, start, today)
# 创建figure对象,这是绘图组件的顶层容器
fig = plt.figure()
# 增加一个子图
ax = fig.add_subplot(111)
# x轴上的主定位器设置为月定位器,该定位器负责x轴上较粗的刻度
ax.xaxis.set_major_locator(months)
# x轴上的次定位器设置为日定位器,该定位器负责x轴上较细的刻度
ax.xaxis.set_minor_locator(alldays)
# x轴上的主格式化器设置为月格式化器,该格式化器负责x轴上较粗刻度的标签
ax.xaxis.set_major_formatter(month_formatter)
# 使用matplotlib.finance包的candlestick函数绘制k线图
candlestick_ochl(ax, quotes)
# 将x轴上的标签格式化为日期
fig.autofmt_xdate()
plt.title('Baidu, Inc. (BIDU)')
plt.show()
```
##3. 绘制直方图
直方图(histogram)可以将数据的分布可视化。Matplotlib中有便捷的hist函数可以绘制直方图。
```
# 使用百度一年的股票价格数据来绘制分布直方图
# 将股价数据从python列表转化为numpy数组并提取收盘价数据
quotes = np.array(quotes)
cp = quotes.T[4]
#指定合理数量的柱形,绘制分布直方图
plt.hist(cp, np.sqrt(len(cp)))
plt.show()
```
##4. 对数坐标图
当数据的变化范围很大时,对数坐标图很有用。Matplotlib中又semilogx函数(对x轴取对数)、semilogy(对y轴取对数)和loglog函数(同时对x轴y轴取对数)。
```
# 下载百度的历史数据,从中提取出日期和成交量数据
dates = quotes.T[0]
volume = quotes.T[5]
fig = plt.figure()
ax = fig.add_subplot(111)
plt.semilogy(dates, volume)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(month_formatter)
fig.autofmt_xdate()
plt.show()
```
## 5. 散点图
散点图(scatter plot)用于绘制同一数据集中的两种数值变量。Matplotlib的scatter函数可以创建散点图。
*我们可以指定数据点的颜色和大小,以及图像的alpha透明度。*[scatter文档](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter)
接下来绘制股票收益率和成交量的散点图。
```
# 计算股票收益率和成交量的变化值
ret = np.diff(cp) / cp[:-1]
volchange = np.diff(volume) / volume[:-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(ret, volchange, c=ret*100, s=volchange*100, alpha=0.5)
#c is color,s is size
#alpha is between 0 (transparent) and 1 (opaque)
ax.set_title('Closing price and volume returns')
ax.grid(True)
plt.show()
```
##6. 着色
fill_between函数使用指定的颜色填充图像中的区域。我们也可以选择alpha通道的取值。该函数的where参数可以指定着色的条件。[fiil_between文档](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.fill_between)
下面我们要对股票曲线图进行着色,并将低于均值和高于均值的收盘价填充为不同的颜色。
```
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(dates, cp)
# 对收盘价下方的区域进行着色,依据低于或高于平均收盘价使用不同的颜色填充
# fill_between(x, y1, y2=0, where=None, **kwargs)
# filling the regions between y1 and y2 where where==True
plt.fill_between(dates, cp.min(), cp, where=cp>cp.mean(), facecolor="green", alpha=0.4)
plt.fill_between(dates, cp.min(), cp, where=cp<cp.mean(), facecolor="red", alpha=0.6)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(month_formatter)
plt.grid(True)
fig.autofmt_xdate()
plt.show()
```
##7. 图例和注释
对于高质量的绘图,图例和注释是至关重要的。我们可以用legend函数创建透明的图例,并有Matplotlib自动确定其摆放位置。同时,我们可以用annotate函数在图像上精确地添加注释,并有很多可选的注释和箭头风格。
[legend文档](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend)
[annotate文档](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate)
```
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
ax.plot([3, 2, 2])
ax.plot([2, 2, 1])
ax.legend(('line1','line2','line3'),loc='best')
```
我们就上面的收盘价例子添加图例
```
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot(dates, cp)
plt.fill_between(dates, cp.min(), cp, where=cp>cp.mean(), facecolor="green", alpha=0.4)
plt.fill_between(dates, cp.min(), cp, where=cp<cp.mean(), facecolor="red", alpha=0.6)
line.set_label("Closing Price")
leg = ax.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(month_formatter)
plt.grid(True)
fig.autofmt_xdate()
plt.show()
```
**注释示例**
**s** : string
标注
**xy** : (x, y)
添加注释的位置点
**xytext** : (x, y) , optional, default: None
注释的位置
**xycoords** : string, optional, default: “data”
string that indicates what type of coordinates xy is. Examples: “figure points”, “figure pixels”, “figure fraction”, “axes points”, .... See matplotlib.text.Annotation for more details.
**textcoords** : string, optional
string that indicates what type of coordinates text is. Examples: “figure points”, “figure pixels”, “figure fraction”, “axes points”, .... See matplotlib.text.Annotation for more details. Default is None.
**arrowprops** : matplotlib.lines.Line2D properties, optional
Dictionary of line properties for the arrow that connects the annotation to the point. If the dictionnary has a key arrowstyle, a FancyArrowPatch instance is created and drawn. See matplotlib.text.Annotation for more details on valid options. Default is None.
```
fig = plt.figure(1,figsize=(8,5))
ax = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate(s='arrowstyle', xy=(0, 1), xycoords='data',
xytext=(0, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->")
)
ax.annotate('arc3', xy=(0.5, -1), xycoords='data',
xytext=(-30, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",connectionstyle="arc3,rad=.2")
)
ax.annotate('arc', xy=(1., 1), xycoords='data',
xytext=(-40, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=30,rad=10"),
)
ax.annotate('arc', xy=(1.5, -1), xycoords='data',
xytext=(-40, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=20,angleB=-90,armB=15,rad=7"),
)
ax.annotate('angle', xy=(2., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ax.annotate('angle3', xy=(2.5, -1), xycoords='data',
xytext=(-50, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3,angleA=0,angleB=-90"),
)
plt.show()
```
| github_jupyter |
```
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
!ls "/content/drive/My Drive/SkinDataset"
!cp "/content/drive/My Drive/SkinDataset/train_skin.zip" "train_skin.zip"
!cp "test.zip" "/content/drive/My Drive/SkinDataset/test.zip"
!unzip train_skin.zip
!unzip test.zip
import os
len(os.listdir('test'))
classes = [str(i) for i in range(23)]
classes_dict = {}
for i, label in enumerate(sorted(os.listdir('train'))):
classes_dict[i] = label
classes_dict
!pwd
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import resnet
%matplotlib inline
classes = ['Acne and Rosacea Photos',
'Actinic Keratosis Basal Cell Carcinoma and other Malignant Lesions',
'Eczema Photos',
'Melanoma Skin Cancer Nevi and Moles',
'Psoriasis pictures Lichen Planus and related diseases',
'Tinea Ringworm Candidiasis and other Fungal Infections',
'Urticaria Hives',
'Nail Fungus and other Nail Disease']
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
model_chkpt = ModelCheckpoint('resnet_50epochs.h5',
monitor='val_loss', verbose=1,
save_best_only=True)
batch_size = 32
nb_classes = 8
nb_epoch = 50
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3
train_path='/content/train'
test_path='/content/test'
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_data=train_datagen.flow_from_directory(train_path,classes=classes,target_size=(32,32),batch_size=62)
test_data=test_datagen.flow_from_directory(test_path,classes=classes,target_size=(32,32),batch_size=62)
model = resnet.ResnetBuilder.build_resnet_18((img_channels, img_rows, img_cols), nb_classes)
# model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history=model.fit_generator(train_data,steps_per_epoch=124,validation_data=test_data,validation_steps=33,epochs=50,verbose=1, callbacks=[lr_reducer, model_chkpt])
model.save('final_resnet50epochs.h5')
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
from keras.models import load_model
model = load_model('final_resnet50epochs.h5', compile=True)
history=model.fit_generator(train_data,steps_per_epoch=124,validation_data=test_data,validation_steps=33,epochs=20,verbose=1, callbacks=[lr_reducer, model_chkpt])
model.save('final_resnet70epochs.h5')
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
| github_jupyter |
```
from utils import GensimModels
from nltk.stem import WordNetLemmatizer
import Config
import numpy as np
import random
import csv
# NOTE: this script is Python 2 (print statements, csv file opened in 'wb' mode)
train = Config.path_culture          # CSV: culture label followed by composer tokens
feat_dim = 10                        # dimension of random fallback vectors for unknown tokens
gensimLoader = GensimModels.GensimModels()
model_loaded = gensimLoader.load_word2vec(path=Config.path_embeddings_ingredients)
# vocabulary maps; index 0 is reserved for the 'Nan' padding token
cult2id = {}
id2cult = []
comp2id = {'Nan':0}
id2comp = ['Nan']
comp2cnt = {'Nan':0}
train_cult = []
train_comp = []
train_comp_len = []
comp_thr = 5                         # skip lines with fewer than comp_thr composers
max_comp_cnt = 0
filtred_comp = 0                     # number of lines filtered out by comp_thr
train_f = open(train, 'r')
lines = train_f.readlines()[4:]      # skip 4 header lines
random.shuffle(lines)
train_thr = int(len(lines) * 0.8)    # 80/20 split point (not used further below)
print "Build composer dictionary..."
for i, line in enumerate(lines):
    tokens = line.strip().split(',')
    culture = tokens[0]
    composers = tokens[1:]
    if cult2id.get(culture) is None:
        cult2id[culture] = len(cult2id)
        id2cult.append(culture)
    if comp_thr > len(composers):
        filtred_comp += 1
        continue
    #if max_comp_cnt < len(composers):
    #    max_comp_cnt = len(composers)
    for composer in composers:
        if comp2id.get(composer) is None:
            comp2id[composer] = len(comp2id)
            id2comp.append(composer)
            comp2cnt[composer] = 0.
        comp2cnt[composer] += 1
    train_cult.append(cult2id.get(culture))
    train_comp.append([comp2id.get(composer) for composer in composers])
# NOTE(review): max_comp_cnt is never updated (the update is commented out above),
# so this padding loop is a no-op -- confirm whether zero-padding was intended
for comp in train_comp:
    train_comp_len.append(len(comp))
    if len(comp) < max_comp_cnt:
        comp += [0]*(max_comp_cnt - len(comp))
f = open('ingr_engine.csv', 'wb')
wr = csv.writer(f)
wnl = WordNetLemmatizer()
wv = model_loaded.wv
w = model_loaded.index2word
#print [model_loaded[idx] for idx in w]
# flag which embedding-vocabulary words appear in the composer dictionary
for i, idx in enumerate(w):
    if idx not in id2comp:
        wr.writerow([i, idx, False])
    else:
        wr.writerow([i, idx, True])
# total occurrence count over all composers
total_comp = 0.
for cnt in comp2cnt.values():
    total_comp += cnt
print total_comp
mu, sigma = 0, 1                     # normal-distribution parameters for fallback vectors
compid2vec = []
unk_cnt = 0                          # tokens with no embedding found at all
add_cnt = 0                          # tokens matched only after lemmatizing/splitting
call_cnt = 0.                        # occurrence mass covered by an embedding
# look up an embedding for each composer token, trying progressively looser matches:
# exact -> lemmatized -> last '_'-component -> lemmatized last '_'-component -> random
for idx, comp in enumerate(id2comp):
    if comp in wv:
        wr.writerow([idx, comp, comp2cnt[comp]/total_comp*100, True])
        compid2vec.append(model_loaded[comp])
        call_cnt += comp2cnt[comp]
    elif wnl.lemmatize(comp) in wv:
        wr.writerow([idx, comp, comp2cnt[comp]/total_comp*100, 'Modified'])
        compid2vec.append(model_loaded[wnl.lemmatize(comp)])
        call_cnt += comp2cnt[comp]
        add_cnt += 1
    elif comp.rstrip().split('_')[-1] in wv:
        wr.writerow([idx, comp, comp2cnt[comp]/total_comp*100, 'Modified'])
        compid2vec.append(model_loaded[comp.rstrip().split('_')[-1]])
        call_cnt += comp2cnt[comp]
        add_cnt += 1
    elif wnl.lemmatize(comp.rstrip().split('_')[-1]) in wv:
        wr.writerow([idx, comp, comp2cnt[comp]/total_comp*100, 'Modified'])
        compid2vec.append(model_loaded[wnl.lemmatize(comp.rstrip().split('_')[-1])])
        call_cnt += comp2cnt[comp]
        add_cnt += 1
    else:
        wr.writerow([idx, comp, comp2cnt[comp]/total_comp*100, False])
        compid2vec.append(np.random.normal(mu, sigma, feat_dim))
        unk_cnt += 1
f.close()
print "added cnt :", add_cnt
print "unk cnt :", unk_cnt, "in", len(id2comp)
print "call cnt :", call_cnt, "in", total_comp
print "filtered composer count is", filtred_comp
```
| github_jupyter |
# Chapter 5 - Community Detection
In this notebook, we explore several algorithms to find communities in graphs.
In some cells, we use the ABCD benchmark to generate synthetic graphs with communities.
ABCD is written in Julia.
### Installing Julia and ABCD
We use the command line interface option to run ABCD below.
The following steps are required:
* install Julia (we used version 1.4.2) from https://julialang.org/downloads/
* download ABCD from https://github.com/bkamins/ABCDGraphGenerator.jl
* adjust the 'abcd_path' in the next cell to the location of the 'utils' subdirectory of ABCD
* run 'julia abcd_path/install.jl' to install the required packages
Also set the path(s) in the cell below. For Windows, you may need to use "\\" or "\\\\" as delimiters, for example 'C:\ABCD\utils\\\\'
### Directories
* Set the directories accordingly in the next cell
```
## set those accordingly
datadir = '../Datasets/'              # location of the datasets used in this notebook
abcd_path = '~/ABCD/utils/'           # path to the ABCD benchmark 'utils' directory (Julia)
import igraph as ig
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
from collections import Counter
import os
import umap
import pickle
import partition_igraph
import subprocess
from sklearn.metrics import adjusted_mutual_info_score as AMI
## we used those for the book, but you can change to other colors
cls_edges = 'gainsboro'               # edge color in plots
cls = ['silver','dimgray','black']    # palette for up to 3 communities
```
# Zachary (karate) graph
A small graph with 34 nodes and two "ground-truth" communities.
Modularity-based algorithms will typically find 4 or 5 communities.
In the next cells, we look at this small graph from several different angles.
```
## build the Zachary karate-club graph and style it for plotting
z = ig.Graph.Famous('zachary')
z.vs['size'] = 12
z.vs['name'] = [str(i) for i in range(z.vcount())]
z.vs['label'] = [str(i) for i in range(z.vcount())]
z.vs['label_size'] = 8
z.es['color'] = cls_edges
## ground-truth two-faction split (node 0 = instructor's side, node 33 = president's side)
z.vs['comm'] = [0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1]
z.vs['color'] = [cls[i] for i in z.vs['comm']]
#ig.plot(z, 'zachary_gt.eps', bbox=(0,0,300,200))
ig.plot(z, bbox=(0,0,350,250))
```
## Node Roles
We compute z(v) (normalized within module degree) and p(v) (participation coefficients) as defined in section 5.2 of the book for the Zachary graph. We identify 3 types of nodes, as described in the book.
* provincial hubs
* peripheral nodes (non-hubs)
* ultra peripheral nodes (non-hubs)
```
## compute internal degrees
## degree of each node restricted to edges with both endpoints in community 0 (resp. 1)
in_deg_0 = z.subgraph_edges([e for e in z.es if z.vs['comm'][e.tuple[0]]==0 and z.vs['comm'][e.tuple[1]]==0],
                            delete_vertices=False).degree()
in_deg_1 = z.subgraph_edges([e for e in z.es if z.vs['comm'][e.tuple[0]]==1 and z.vs['comm'][e.tuple[1]]==1],
                            delete_vertices=False).degree()
## compute z (normalized within-module degree)
## each node's internal degree, z-scored against its own community's mean/std
z.vs['in_deg'] = [in_deg_0[i] + in_deg_1[i] for i in range(z.vcount())]
mu = [np.mean([x for x in in_deg_0 if x>0]),np.mean([x for x in in_deg_1 if x>0])]
sig = [np.std([x for x in in_deg_0 if x>0],ddof=1),np.std([x for x in in_deg_1 if x>0],ddof=1)]
z.vs['z'] = [(v['in_deg']-mu[v['comm']])/sig[v['comm']] for v in z.vs]
## computing p (participation coefficient): 1 - sum over modules of (k_module/k)^2
z.vs['deg'] = z.degree()
z.vs['out_deg'] = [v['deg'] - v['in_deg'] for v in z.vs]
z.vs['p'] = [1-(v['in_deg']/v['deg'])**2-(v['out_deg']/v['deg'])**2 for v in z.vs]
## table of (z, p) sorted by z, largest first
D = pd.DataFrame(np.array([z.vs['z'],z.vs['p']]).transpose(),columns=['z','p']).sort_values(by='z',ascending=False)
D.head()
```
Below, we plot the Zachary graph w.r.t. z where z>2.5 are hubs, which we show as square nodes.
The largest values are for node 0 (instructor), node 33 (president) and node 32.
Nodes 0 and 33 are the key nodes for the division of the group into two factions.
```
## Zachary graph w.r.t. roles
## color/shape nodes by role: hubs (z >= 2.5) drawn as squares
z.vs['color'] = 'black'
z.vs['shape'] = 'circle'
for v in z.vs:
    if v['z']<2.5: ## non-hub
        if v['p'] < .62 and v['p'] >= .05: ## peripheral
            v['color'] = 'dimgrey'
        if v['p'] < .05: ## ultra-peripheral
            v['color'] = 'gainsboro'
    if v['z']>=2.5 and v['p'] < .3: ## hubs (all provincial here)
        v['color'] = 'silver'
        v['shape'] = 'square'
#ig.plot(z, 'zachary_roles_1.eps', bbox=(0,0,350,250))
ig.plot(z, bbox=(0,0,350,250))
```
Code below is to generate Figure 5.3(b) in the book, again comparing node roles in the Zachary graph.
```
## Figure 5.3(b) -- comparing the roles
fig, ax = plt.subplots(figsize=(12,9))
ax.scatter(z.vs['p'],z.vs['z'],marker='o',s=75, color='k')
## boundary lines: z = 2.5 separates hubs, p = .05 separates ultra-peripheral nodes
plt.plot([0, .5], [2.5, 2.5], color='k', linestyle='-', linewidth=2)
plt.plot([.05, .05], [-.5, 2.4], color='k', linestyle='-', linewidth=2)
## annotate a few notable nodes
ax.annotate('node 0', (z.vs['p'][0],z.vs['z'][0]-.05), xytext=(z.vs['p'][0]+.01,z.vs['z'][0]-.3),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
ax.annotate('node 33', (z.vs['p'][33],z.vs['z'][33]-.05), xytext=(z.vs['p'][33]-.07,z.vs['z'][33]-.3),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
ax.annotate('node 32', (z.vs['p'][32]-.005,z.vs['z'][32]), xytext=(z.vs['p'][32]-.07,z.vs['z'][32]),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
ax.annotate('node 1', (z.vs['p'][1],z.vs['z'][1]-.05), xytext=(z.vs['p'][1]-.07,z.vs['z'][1]-.3),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
ax.annotate('node 3', (z.vs['p'][3],z.vs['z'][3]-.05), xytext=(z.vs['p'][3]+.07,z.vs['z'][3]-.3),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
ax.annotate('node 2', (z.vs['p'][2],z.vs['z'][2]-.05), xytext=(z.vs['p'][2]-.07,z.vs['z'][2]-.3),
            fontsize=14,
            arrowprops = dict( arrowstyle="-",connectionstyle="angle3,angleA=0,angleB=-90"))
## region labels
ax.annotate('provincial hubs',(.3,3), fontsize=18)
ax.annotate('peripheral non-hubs',(.3,1.8), fontsize=18)
ax.annotate('ultra peripheral non-hubs',(0.025,0.0),xytext=(.1,0), fontsize=18,
            arrowprops = dict( arrowstyle="->", connectionstyle="angle3,angleA=0,angleB=-90"))
plt.xlabel('participation coefficient (p)',fontsize=16)
plt.ylabel('normalized within module degree (z)',fontsize=16);
#plt.savefig('zachary_roles_2.eps')
```
## Strong and weak communities
Communities are defined as strong or weak as per (5.1) and (5.2) in the book.
For the Zachary graph, we verify if nodes within communities satisfy the strong criterion, then we verify whether the two communities satisfy the weak definition.
For the strong definition (internal degree larger than external degree for each node), only two nodes do not qualify.
For the weak definition (total community internal degree > total community external degree), both communities satisfy this criterion.
```
## strong criterion: every node should have internal degree > external degree;
## report the nodes that violate it
for node in range(z.vcount()):
    comm = z.vs[node]['comm']
    same = [z.vs[nb]['comm'] == comm for nb in z.neighbors(node)]
    internal = sum(same)
    external = len(same) - internal
    if internal <= external:
        print('node', node, 'has internal degree', internal, 'external degree', external)
## weak criterion: aggregate internal vs external degree per community
I = [0, 0]
E = [0, 0]
for node in range(z.vcount()):
    comm = z.vs[node]['comm']
    same = [z.vs[nb]['comm'] == comm for nb in z.neighbors(node)]
    I[comm] += sum(same)
    E[comm] += len(same) - sum(same)
print('community 0 internal degree', I[0], 'external degree', E[0])
print('community 1 internal degree', I[1], 'external degree', E[1])
```
## Hierarchical clustering and dendrogram
Girvan-Newman algorithm is described in section 5.5 of the book. We apply it to the Zachary graph and show the results of this divisive algorithm as a dendrogram.
```
## Girvan-Newman algorithm (divisive, removes highest edge-betweenness edges);
## returns a dendrogram we can cut at any number of clusters
gn = z.community_edge_betweenness()
#ig.plot(gn,'zachary_dendrogram.eps',bbox=(0,0,300,300))
ig.plot(gn,bbox=(0,0,300,300))
```
This is an example of a hierarchical clustering. In the next plot, we compute modularity for each possible cut of the dendrogram.
We see that we get strong modularity with 2 clusters, but maximal value is obtained with 5.
```
## modularity for every possible cut of the dendrogram (1..n clusters), then plot
n_nodes = z.vcount()
q = [z.modularity(gn.as_clustering(n=k + 1)) for k in np.arange(n_nodes)]
plt.plot(np.arange(1, 1 + n_nodes), q, 'o-', color='black')
plt.xlabel('number of clusters', fontsize=14)
plt.ylabel('modularity', fontsize=14);
#plt.savefig('zachary_modularity.eps');
```
How are the nodes partitioned if we pick only 2 communities? How does this compare to the underlying ground truth?
From the plot below, we see that only 1 node is misclassified.
We also report the modularity of this partition, $q = 0.35996$. We also compare the partition with ground truth via AMI (adjusted mutual information), as defined in section 5.3 of the book; we got a high value AMI = 0.83276 showing strong concordance.
```
## show result with 2 clusters --
z.vs['gn'] = gn.as_clustering(n=2).membership         # cut the dendrogram at 2 clusters
print('AMI:',AMI(z.vs['comm'],z.vs['gn'])) ## adjusted mutual information
print('q:',z.modularity(z.vs['gn'])) ## modularity
## reset plotting attributes
z.vs['size'] = 10
z.vs['name'] = [str(i) for i in range(z.vcount())]
z.vs['label'] = [str(i) for i in range(z.vcount())]
z.vs['label_size'] = 8
z.es['color'] = cls_edges
z.vs['comm'] = [0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1]
#z.vs['color'] = [cls[i] for i in z.vs['comm']]
z.vs['color'] = [cls[i] for i in z.vs['gn']]          # color by detected clusters
#ig.plot(z, 'zachary_2.eps',bbox=(0,0,300,200))
ig.plot(z,bbox=(0,0,300,200))
```
Same as above with 5 communities. We see higher modularity, but weaker AMI value.
```
## show result with optimal modularity (5 clusters)
z.vs['label'] = gn.as_clustering(n=5).membership      # node labels show detected cluster id
print('AMI:',AMI(z.vs['comm'],z.vs['label']))
print('q:',z.modularity(z.vs['label']))
z.vs['color'] = [cls[i] for i in z.vs['comm']]        # colors still show ground truth
z.vs['size'] = 10
z.vs['label_size'] = 8
#ig.plot(z, 'zachary_5.eps',bbox=(0,0,300,200))
ig.plot(z,bbox=(0,0,300,200))
```
# ABCD graph with 100 nodes
Next we look at a slightly larger graph generated with the ABCD benchmark model, which is described in section 5.3 of the book. This graph has 3 communities.
Using hierarchical clustering, we compare modularity and AMI for each possible cut.
ABCD parameters used to generate this graph are: $\gamma=3, \tau=2$, degree range [5,15], community size range [25,50], $\xi=.2$.
```
## read graph and communities; plot
g = ig.Graph.Read_Ncol(datadir+'ABCD/abcd_100.dat',directed=False)
c = np.loadtxt(datadir+'ABCD/abcd_100_comms.dat',dtype='uint16',usecols=(1))
## community file is 1-based and indexed by vertex name; shift to 0-based
g.vs['comm'] = [c[int(x['name'])-1]-1 for x in g.vs]
# NOTE(review): 'comm' is already shifted to 0-based above; the extra -1 here makes
# gt values start at -1 -- confirm whether gt is meant to be 0-based
gt = {k:(v-1) for k,v in enumerate(g.vs['comm'])}
## map between int(name) to key
n2k = {int(v):k for k,v in enumerate(g.vs['name'])}
g.vs['size'] = 7
g.es['color'] = cls_edges
g.vs['color'] = [cls[i] for i in g.vs['comm']]
ig.plot(g, bbox=(0,0,300,200))
```
Girvan-Newman algorithm -- Modularity and AMI for each cut
In this case, both modularity and AMI are maximized with 3 communities.
```
## Girvan-Newman on the ABCD graph: modularity and AMI for every possible cut
q = []
a = []
gn = g.community_edge_betweenness()
for i in np.arange(g.vcount()):
    q.append(g.modularity(gn.as_clustering(n=i+1)))
    a.append(AMI(g.vs['comm'],gn.as_clustering(n=i+1).membership))
plt.plot(np.arange(1,1+g.vcount()),q,'.-',color='black',label='modularity')
plt.plot(np.arange(1,1+g.vcount()),a,'.-',color='grey',label='AMI')
plt.xlabel('number of clusters',fontsize=14)
plt.ylabel('modularity or AMI',fontsize=14)
plt.legend();
#plt.savefig('abcd_dendrogram.eps');
```
We see that with 3 communities, $q=0.502$ and AMI=1, so perfect recovery.
```
## tabulate modularity and AMI for the first few cuts
n_comm = np.arange(1,g.vcount()+1)
D = pd.DataFrame(np.array([n_comm,q,a]).transpose(),columns=['n_comm','q','AMI'])
df = D.head()
df
```
What would we get with 4 clusters, for which AMI = 0.95?
We see below that we have a few nodes split off from one community.
```
## 4 communities
g.vs['gn'] = gn.as_clustering(n=4).membership
## NOTE: this overwrites the 3-color palette defined at the top with a 4-color one
cls = ['silver','dimgray','black','white']
g.vs['color'] = [cls[i] for i in g.vs['gn']]
#ig.plot(g, 'abcd_4.eps', bbox=(0,0,300,200))
ig.plot(g, bbox=(0,0,300,200))
```
Those nodes form a triangle
```
## extract and plot the extra (4th) cluster
sg = g.subgraph([v for v in g.vs() if v['gn']==3])
ig.plot(sg, bbox=(0,0,100,100))
```
# ABCD with varying $\xi$
Here we show a typical way to compare graph clustering algorithms using benchmark graphs.
We pick some model, here ABCD, and we vary the noise parameter $\xi$.
With ABCD, the larger $\xi$ is, the closer we are to a random Chung-Lu or configuration model graph (i.e. where only the degree distribution matters). For $\xi=0$, we get pure communities (all edges are internal).
For each choice of $\xi$, we generate 30 graphs, apply several different clustering algorithms,
and compute AMI for each algorithm, comparing with ground-truth communities.
The code below is commented out as it can take a while to run; a pickle file with results is included in the Data directory. To re-run from scratch, uncomment the cell below.
Parameters for the ABCD benchmark graphs are:
$\gamma=2.5, \tau=1.5$, degree range [10,50], community size range [50,100], $0.1 \le \xi \le 0.8$.
```
## load precomputed (algo, xi, AMI) results from the pickle file
with open(datadir+"ABCD/abcd_study.pkl","rb") as f:
    L = pickle.load(f)
## store in dataframe and take averages
D = pd.DataFrame(L,columns=['algo','xi','AMI'])
## take average over 30 runs for each algorithm and every choice of xi
X = D.groupby(by=['algo','xi']).mean()
```
We plot the results in the following 2 cells.
We see good results with Louvain and Infomap, and even better results with ECG.
Label propagation is a fast algorithm, but it does collapse with moderate to high level of noise.
From the standard deviation plot, we see high variability around the value(s) for $\xi$ where the different
algorithms start to collapse. We see that this happens later and at a smaller scale with ECG, which is known to have better stability.
Such studies are useful to compare algorithms; using benchmarks, we can directly control parameters such as the noise level.
```
## plot average results for each algorithm over range of xi
a = ['ECG','Louvain','Infomap','Label Prop.']
lt = ['-','--',':','-.','-.']          # one line style per algorithm
cl = ['blue','green','purple','red']   # one color per algorithm
for i in range(len(a)):
    ## pick one - color or greyscale
    plt.plot(X.loc[(a[i])].index,X.loc[(a[i])],lt[i],label=a[i],color=cl[i])
    #plt.plot(X.loc[(a[i])].index,X.loc[(a[i])],lt[i],label=a[i],color='black')
plt.xlabel(r'ABCD noise ($\xi$)',fontsize=14)
plt.ylabel('AMI',fontsize=14)
plt.legend();
#plt.savefig('abcd_study.eps');
## Look at standard deviations over the 30 runs
S = D.groupby(by=['algo','xi']).std()
a = ['ECG','Louvain','Infomap','Label Prop.']
#a = ['ECG','Louvain','Infomap','Label Prop.','Leiden','CNM']
lt = ['-','--',':','-.','--',':']
cl = ['blue','green','purple','red','red','blue']
for i in range(len(a)):
    ## pick one - color or greyscale
    plt.plot(S.loc[(a[i])].index,S.loc[(a[i])],lt[i],label=a[i],color=cl[i])
    #plt.plot(S.loc[(a[i])].index,S.loc[(a[i])],lt[i],label=a[i],color='black')
plt.xlabel(r'ABCD noise ($\xi$)',fontsize=14)
plt.ylabel('Standard Deviation (AMI)',fontsize=14)
plt.legend();
#plt.savefig('abcd_study_stdv.eps');
```
## Compare stability
This study is similar to the previous one, but we compare pairs of partitions for each algorithm on the same graph instead of comparing with the ground truth, so we look at the stability of algorithms. Note that an algorithm can be stable, but still be bad (ex: always cluster all nodes in a single community).
The code below can take a while to run; a pickle file with results is included in the Data directory. To re-run from scratch, uncomment the cell below.
```
## load precomputed stability results (AMI between successive runs per algorithm/xi)
with open(datadir+"ABCD/abcd_study_stability.pkl","rb") as f:
    Ls = pickle.load(f)
## store in dataframe
D = pd.DataFrame(Ls,columns=['algo','xi','AMI'])
## take averages for each algorithm and each noise value xi
X = D.groupby(by=['algo','xi']).mean()
```
We plot the results below. The behaviour of algorithms can be clustered in two groups:
* For Louvain and ECG, stability is excellent and degrades gradually for high noise level, with ECG being the more stable algorithm.
* For Infomap and Label Propagation, stability is also good until the noise value where the results start to degrade, as we saw in the previous study. We see near perfect stability for very high noise values; those are values where the results were very bad in the previous study; this typically happens when the algorithm can't get any good clustering and returns some trivial parititon, such as putting all nodes together in the same community, thus a stable but bad result.
```
## plot mean run-to-run AMI for each algorithm over the noise range
a = ['ECG','Louvain','Infomap','Label Prop.']
lt = ['-','--',':','-.']
for i in range(len(a)):
    plt.plot(X.loc[(a[i])].index,X.loc[(a[i])],lt[i],label=a[i],color='black')
plt.xlabel(r'ABCD noise ($\xi$)',fontsize=14)
plt.ylabel('AMI between successive runs',fontsize=14)
plt.legend();
#plt.savefig('abcd_study_stability.eps');
```
# Modularity, resolution limit and rings of cliques
We illustrate issues with modularity with the famous ring of cliques examples.
In the example below, we have a ring of 3-cliques connected by a single (inter-clique) edge.
```
## n cliques of size s
def ringOfCliques(n, s):
    """Build a ring of n cliques of size s, consecutive cliques joined by one edge."""
    graph = ig.Graph.Erdos_Renyi(n=n * s, p=0)   # n*s isolated vertices
    ## fully connect each block of s consecutive vertices (the cliques)
    for block in range(n):
        lo, hi = s * block, s * (block + 1)
        for u in np.arange(lo, hi):
            for w in np.arange(u + 1, hi):
                graph.add_edge(u, w)
    ## close the ring: one edge between each pair of consecutive cliques
    for block in range(n):
        if block == 0:
            graph.add_edge(n * s - 1, 0)
        else:
            graph.add_edge(s * block - 1, s * block)
    ## styling for plots
    graph.vs['size'] = 8
    graph.vs['color'] = cls[2]
    graph.es['color'] = cls_edges
    return graph
## Ex: 10 3-cliques
roc = ringOfCliques(10,3)
#ig.plot(roc,'ring_3.eps',bbox=(0,0,300,300))
ig.plot(roc,bbox=(0,0,300,300))
```
We compare the number of cliques (the natural parts in a partition) with the actual number of communities found via 3 modularity based algorithms (Louvain, CNM, ECG).
We see that both Louvain and CNM return a smaller number of communities than the number of cliques; this is a known problem with modularity: merging cliques into the same community often leads to higher modularity.
A consensus algorithm like ECG can help a lot in such cases; here we see that the cliques are correctly recovered with ECG.
```
## Compare number of cliques and number of clusters found
L = []
s = 3                          # clique size
for n in np.arange(3,50,3):    # number of cliques
    roc = ringOfCliques(n,s)
    ## number of clusters found by each algorithm (max label + 1)
    ml = np.max(roc.community_multilevel().membership)+1
    ec = np.max(roc.community_ecg().membership)+1
    cnm = np.max(roc.community_fastgreedy().as_clustering().membership)+1
    L.append([n,ml,ec,cnm])
D = pd.DataFrame(L,columns=['n','Louvain','ECG','CNM'])
plt.figure(figsize=(8,6))
plt.plot(D['n'],D['Louvain'],'--o',color='black',label='Louvain')
plt.plot(D['n'],D['ECG'],'-o',color='black',label='ECG')
plt.plot(D['n'],D['CNM'],':o',color='black',label='CNM')
plt.xlabel('number of '+str(s)+'-cliques',fontsize=14)
plt.ylabel('number of clusters found',fontsize=14)
plt.legend(fontsize=14);
#plt.savefig('rings.eps');
```
Let us look at a specific example: 10 cliques of size 3. Below we plot the communities found with Louvain; we clearly see that pairs of communities are systematically grouped into clusters.
```
## Louvain communities with 10 3-cliques
roc = ringOfCliques(n=10,s=3)
roc.vs['ml'] = roc.community_multilevel().membership
roc.vs['color'] = [cls[x%3] for x in roc.vs['ml']]   # cycle through 3 colors
#ig.plot(roc,'ring_3_q.eps', bbox=(0,0,300,300))
ig.plot(roc,bbox=(0,0,300,300))
```
Why is ECG solving this problem? It is due to the first step, where we run an ensemble of level-1 Louvain and assign new weights to edges based on the proportion of times those edges are internal to a community.
We see below that there are exactly 30 edges with maximal edge weight of 1 (edges within cliques) and 10 edges with default minimal weight of 0.05 (edges between cliques).
With those new weights, the last clustering in ECG can easily recover the cliques as communities.
```
## ECG weights in this case: all 30 clique edges have max score
roc.es['W'] = roc.community_ecg().W   # ensemble 'vote' weight per edge
Counter(roc.es['W'])                  # frequency of each weight value
```
# Ego nets and more
Suppose we want to look at nodes "near" some seed node $v$. One common way to do this is to look at its ego-net, i.e. the subgraph consisting of node $v$ and all other nodes that can be reached from $v$ in $k$ hops or less, where $k$ is small, typically 1 or 2.
Such subgraphs can become large quickly as we increase $k$. In the cells below, we look at ego-nets and compare with another approach to extract subgraph(s) around $v$ via clustering.
We consider the airport graph we already saw several times. We consider a simple, undirected version (no loops, directions or edge weights).
We compare ego-nets (1 and 2-hops subgraphs from a given node) with clusters obtained via graph clustering for some vertex $v$ with degree 11 (you can try other vertices).
```
## read edges and build simple undirected graph
D = pd.read_csv(datadir+'Airports/connections.csv')
g = ig.Graph.TupleList([tuple(x) for x in D.values], directed=True, edge_attrs=['weight'])
#df = D.head()
g = g.as_undirected()
g = g.simplify()          # drop loops and multi-edges
## read vertex attributes and add to graph
A = pd.read_csv(datadir+'Airports/airports_loc.csv')
lookup = {k:v for v,k in enumerate(A['airport'])}   # airport code -> row in A
l = [lookup[x] for x in g.vs()['name']]
g.vs()['layout'] = [(A['lon'][i],A['lat'][i]) for i in l]
g.vs()['state'] = [A['state'][i] for i in l]
g.vs()['city'] = [A['city'][i] for i in l]
## add a few more attributes for visualization
g.vs()['size'] = 6
g.vs()['color'] = cls[0]
g.es()['color'] = cls_edges
df = A.head()
## pick a vertex v
v = 207
print(g.vs[v])
print('degree:',g.degree()[v])
g.vs[v]['color'] = 'black'    # mark the seed vertex in black
## show its ego-net for k=1 (vertex v in black)
sg = g.subgraph([i for i in g.neighborhood(v,order=1)])
print(sg.vcount(),'nodes')
#ig.plot(sg,'airport_ego_1.eps',bbox=(0,0,300,300))
ig.plot(sg,bbox=(0,0,300,300))
## show its 2-hops ego-net ... this is already quite large!
sg = g.subgraph([i for i in g.neighborhood(v,order=2)])
sg.vs()['core'] = sg.coreness()
sg.delete_vertices([v for v in sg.vs if v['core']<2])   # keep the 2-core only
print(sg.vcount(),'nodes')
#ig.plot(sg,'airport_ego_2.eps',bbox=(0,0,300,300))
ig.plot(sg,bbox=(0,0,300,300))
## apply clustering and show the cluster containing the selected vertex
## recall that we ignore edge weights
## This result can vary somehow between runs
ec = g.community_ecg(ens_size=16)
g.es['W'] = ec.W              # keep the ECG edge weights for the next cell
m = ec.membership[v]          # cluster id of the seed vertex
sg = g.subgraph([i for i in range(g.vcount()) if ec.membership[i]==m])
sg.vs()['core'] = sg.coreness()
## display the 2-core
sg.delete_vertices([v for v in sg.vs if v['core']<2])
print(sg.vcount(),'nodes')
#ig.plot(sg,'airport_ecg.eps',bbox=(0,0,300,300))
ig.plot(sg,bbox=(0,0,300,300))
```
We see above that the cluster containing $v$ is smaller than the 2-hop ego-net, and several nodes are tightly connected.
Below we go further and look at the ECG edge weights, which we can use to prune the graph above, so we can look at the nodes most tightly connected to node $v$.
You can adjust the threshold below to get different zoomings.
```
## filter edges w.r.t. ECG votes (weights)
thresh = .85
tmp = sg.subgraph_edges([e for e in sg.es if e['W'] > thresh])
## find the seed vertex again (it was colored black earlier)
n = [i for i in range(tmp.vcount()) if tmp.vs[i]['color']=='black'][0]
tmp.vs['cl'] = tmp.clusters().membership   # connected components after filtering
cl = tmp.vs[n]['cl']
ssg = tmp.subgraph([i for i in tmp.vs if i['cl']==cl])   # component containing the seed
ssg.vs()['core'] = ssg.coreness()
ssg.delete_vertices([v for v in ssg.vs if v['core']<2])  # keep the 2-core
print(ssg.vcount(),'nodes')
#ig.plot(ssg,'airport_ecg_focus.eps',bbox=(0,0,300,300))
ig.plot(ssg,bbox=(0,0,300,300))
```
Most nodes in this subgraph are from the same state as node $v$ (MI) or nearby state (WI).
```
## states in the above subgraph (frequency of each state among remaining airports)
Counter(ssg.vs['state'])
```
# EXTRA CODE
The code below requires that Julia and ABCD are installed.
This is extra material not in the book.
# ABCD Properties
The cells below are for illustration purpose only, to show some ABCD graphs with different $\xi$ (noise) parameters,
and to show how you can run ABCD with Julia installed.
* notice the density of edges between communities as $\xi$ increases.
* most runs should yield 3 communities
Natural layouts for noisy graphs make it hard to distinguish communities, as the nodes will overlap a lot.
We use an ad-hoc method to "push away" nodes from the 3 different clusters to allow for better visualization.
```
## just for visualization -- push the layout apart given 3 communities
## adjust the 'push' factor with d
def push_layout(d=0):
    """Shift the 3 communities of the module-level graph `g` apart in its layout.

    Relies on `g` having a 0/1/2 vertex attribute 'comm'. Returns the shifted
    per-vertex coordinates, or -1 if there are more than 3 communities.
    """
    if np.max(g.vs['comm'])>2:
        return -1
    ly = g.layout()
    g.vs['ly'] = ly
    ## sum of x (resp. y) coordinates per community
    x = [0,0,0]
    y = [0,0,0]
    for v in g.vs:
        c = v['comm']
        x[c] += v['ly'][0]
        y[c] += v['ly'][1]
    ## shift each community by one of {-d, 0, +d} based on the ordering of the sums
    # NOTE(review): dx[j] = delta[argsort(x)[j]] permutes the deltas by the rank
    # vector rather than assigning -d to the leftmost community -- confirm intent
    delta = [-d,0,d]
    dx = [delta[i] for i in np.argsort(x)]
    dy = [delta[i] for i in np.argsort(y)]
    for v in g.vs:
        c = v['comm']
        v['ly'][0] += dx[c]
        v['ly'][1] += dy[c]
    return g.vs['ly']
## ABCD with very strong communities (xi = 0.05)
## results will vary, but we see 3 communities in most runs.
xi = 0.05
mc = 0
while mc != 3: ## run until we get 3 communities
    ## generate degree and community size values
    cmd = 'julia '+abcd_path+'deg_sampler.jl deg.dat 2.5 5 15 100 1000'
    os.system(cmd+' >/dev/null 2>&1')
    cmd = 'julia '+abcd_path+'com_sampler.jl cs.dat 1.5 30 50 100 1000'
    os.system(cmd+' >/dev/null 2>&1');
    ## generate the graph and the community assignment
    cmd = 'julia '+abcd_path+'graph_sampler.jl net.dat comm.dat deg.dat cs.dat xi '\
        +str(xi)+' false false'
    os.system(cmd+' >/dev/null 2>&1')
    g = ig.Graph.Read_Ncol('net.dat',directed=False)
    c = np.loadtxt('comm.dat',dtype='uint16',usecols=(1))
    mc = max(c)
## plot
g.vs['comm'] = [c[int(x['name'])-1]-1 for x in g.vs]   # 0-based community per vertex
g.vs['color'] = [cls[i] for i in g.vs['comm']]
g.vs['size'] = 5
g.es['color'] = 'lightgrey'
ly = push_layout(d=0) ## d=0, no need to push, communities are clear
ig.plot(g, layout=ly, bbox=(0,0,300,300))
## viz: ABCD with strong communities (xi = 0.15)
xi = 0.15
mc = 0
while mc != 3: ## run until we get 3 communities
    ## generate degree and community size values
    cmd = 'julia '+abcd_path+'deg_sampler.jl deg.dat 2.5 5 15 100 1000'
    os.system(cmd+' >/dev/null 2>&1')
    cmd = 'julia '+abcd_path+'com_sampler.jl cs.dat 1.5 30 50 100 1000'
    os.system(cmd+' >/dev/null 2>&1');
    ## generate the graph and the community assignment
    cmd = 'julia '+abcd_path+'graph_sampler.jl net.dat comm.dat deg.dat cs.dat xi '\
        +str(xi)+' false false'
    os.system(cmd+' >/dev/null 2>&1')
    ## read the generated graph and communities
    g = ig.Graph.Read_Ncol('net.dat',directed=False)
    c = np.loadtxt('comm.dat',dtype='uint16',usecols=(1))
    mc = max(c)
## plot
g.vs['comm'] = [c[int(x['name'])-1]-1 for x in g.vs]
g.vs['color'] = [cls[i] for i in g.vs['comm']]
g.vs['size'] = 5
g.es['color'] = 'lightgrey'
ly = push_layout(d=1) ## slightly push clusters apart for viz
ig.plot(g, layout=ly, bbox=(0,0,300,300))
## viz: ABCD with weak communities
## lots of edges between communities as expected
xi = 0.33
mc = 0
while mc != 3: ## run until we get 3 communities
    ## generate degree and community size values
    cmd = 'julia '+abcd_path+'deg_sampler.jl deg.dat 2.5 5 15 100 1000'
    os.system(cmd+' >/dev/null 2>&1')
    cmd = 'julia '+abcd_path+'com_sampler.jl cs.dat 1.5 30 50 100 1000'
    os.system(cmd+' >/dev/null 2>&1');
    ## generate the graph and the community assignment
    cmd = 'julia '+abcd_path+'graph_sampler.jl net.dat comm.dat deg.dat cs.dat xi '\
        +str(xi)+' false false'
    os.system(cmd+' >/dev/null 2>&1')
    ## read the generated graph and communities
    g = ig.Graph.Read_Ncol('net.dat',directed=False)
    c = np.loadtxt('comm.dat',dtype='uint16',usecols=(1))
    mc = max(c)
## plot
g.vs['comm'] = [c[int(x['name'])-1]-1 for x in g.vs]
g.vs['color'] = [cls[i] for i in g.vs['comm']]
g.vs['size'] = 5
g.es['color'] = 'lightgrey'
ly = push_layout(d=3) ## need to push more -- with d=0, communities can't be seen clearly
ig.plot(g, layout=ly, bbox=(0,0,300,300))
## viz: ABCD with very weak communities
xi = 0.5
mc = 0
while mc != 3: ## run until we get 3 communities
    ## generate degree and community size values
    cmd = 'julia '+abcd_path+'deg_sampler.jl deg.dat 2.5 5 15 100 1000'
    os.system(cmd+' >/dev/null 2>&1')
    cmd = 'julia '+abcd_path+'com_sampler.jl cs.dat 1.5 30 50 100 1000'
    os.system(cmd+' >/dev/null 2>&1');
    ## generate the graph and the community assignment
    cmd = 'julia '+abcd_path+'graph_sampler.jl net.dat comm.dat deg.dat cs.dat xi '\
        +str(xi)+' false false'
    os.system(cmd+' >/dev/null 2>&1')
    ## read the generated graph and communities
    g = ig.Graph.Read_Ncol('net.dat',directed=False)
    c = np.loadtxt('comm.dat',dtype='uint16',usecols=(1))
    mc = max(c)
## plot
g.vs['comm'] = [c[int(x['name'])-1]-1 for x in g.vs]
g.vs['color'] = [cls[i] for i in g.vs['comm']]
g.vs['size'] = 5
g.es['color'] = 'lightgrey'
ly = push_layout(5) ## need to push more -- with d=0, communities can't be seen clearly
ig.plot(g, layout=ly, bbox=(0,0,300,300))
```
## Measures to compare partitions
* We illustrate the importance of using proper adjusted measures when comparing partitions; this is why we use AMI (adjusted mutual information) or ARI (adjusted Rand index) in our experiments
* We generate some ABCD graph and compare ground truth with **random** partitions of different sizes
* Scores for random partitions should be close to 0 regardless of the number of parts
```
## RAND Index: given two clusterings u and v
def RI(u, v):
    """Compute the (unadjusted) Rand index between two flat clusterings.

    Parameters
    ----------
    u, v : sequences of int
        Cluster labels (0-based, consecutive integers) for the same n items.

    Returns
    -------
    float
        Rand index in [0, 1]; 1.0 means identical partitions.

    Raises
    ------
    ValueError
        If u and v do not have the same length.
    """
    a = np.max(u) + 1          # number of clusters in u
    b = np.max(v) + 1          # number of clusters in v
    n = len(u)
    if n != len(v):
        # original code had `exit -1`, which merely evaluates an expression
        # (raising an unintended TypeError); raise an explicit error instead
        raise ValueError('clusterings u and v must have the same length')
    ## build sets of member indices for each cluster of u (A) and of v (B)
    A = [set() for i in range(a)]
    B = [set() for i in range(b)]
    for i in range(n):
        A[u[i]].add(i)
        B[v[i]].add(i)
    ## RAND index step by step:
    ## RI = [C(n,2) + 2*sum_ij C(nij,2) - sum_i C(ai,2) - sum_j C(bj,2)] / C(n,2)
    R = 0
    for i in range(a):
        for j in range(b):
            s = len(A[i].intersection(B[j]))
            if s > 1:
                R += s * (s - 1) / 2
    R *= 2
    for i in range(a):
        s = len(A[i])
        if s > 1:
            R -= s * (s - 1) / 2
    for i in range(b):
        s = len(B[i])
        if s > 1:
            R -= s * (s - 1) / 2
    R += n * (n - 1) / 2
    R /= n * (n - 1) / 2
    return R
## generate new degree and community size values
cmd = 'julia '+abcd_path+'deg_sampler.jl deg.dat 2.5 5 50 1000 1000'
os.system(cmd+' >/dev/null 2>&1')
cmd = 'julia '+abcd_path+'com_sampler.jl cs.dat 1.5 75 150 1000 1000'
os.system(cmd+' >/dev/null 2>&1')
## generate an ABCD graph with low noise
xi = .1
cmd = 'julia '+abcd_path+'graph_sampler.jl net.dat comm.dat deg.dat cs.dat xi '\
    +str(xi)+' false false'
os.system(cmd+' >/dev/null 2>&1')
g = ig.Graph.Read_Ncol('net.dat',directed=False)
c = np.loadtxt('comm.dat',dtype='uint16',usecols=(1))
## ground-truth communities
gt = [c[int(x['name'])-1]-1 for x in g.vs]
print('number of communities:',np.max(gt)+1)
## generate random clusterings and compute various measures w.r.t. ground truth
## this can take a few minutes to run
from sklearn.metrics import mutual_info_score as MI
from sklearn.metrics import adjusted_rand_score as ARI
from sklearn.metrics import normalized_mutual_info_score as NMI
L = []
n = g.vcount()
tc = {idx:part for idx,part in enumerate(gt)}   # ground truth as node:community dict (for gam)
ar = np.arange(2,21)                            # number of random clusters to try
for s in ar:
    for i in range(100):                        # 100 random clusterings per size
        r = np.random.choice(s, size=n)
        rd = {idx:part for idx,part in enumerate(r)}
        L.append([s,MI(gt,r),NMI(gt,r),AMI(gt,r),RI(gt,r),ARI(gt,r),g.gam(tc,rd,adjusted=False),g.gam(tc,rd)])
D = pd.DataFrame(L,columns=['size','MI','NMI','AMI','RI','ARI','GRI','AGRI'])
R = D.groupby(by='size').mean()
```
Below we show results for 3 measures:
* Mutual information (MI) as-is has a strong bias w.r.t. the number of clusters
* Normalized MI is better
* AMI is best, no bias w.r.t. number of clusters.
```
## Mutual information (MI), normalized MI (NMI) and adjusted MI (AMI)
## draw one black curve per measure against the number of random clusters
for measure, linestyle in [('MI', ':'), ('NMI', '--'), ('AMI', '-')]:
    plt.plot(ar, R[measure], linestyle, color='black', label=measure)
plt.xlabel('number of random clusters', fontsize=14)
plt.legend();
#plt.savefig('MI.eps');
```
Same below for Rand index (RI) and adjusted version.
GRI (graph RI) and AGRI (adjusted GRI) are variations of RI specifically for graph data.
```
## Rand index (RI) and its adjusted version (ARI)
## Also: graph-aware Rand index (GRI) and adjusted version (AGRI)
## these measures are included in partition-igraph; inputs are partitions of
## type 'igraph.clustering.VertexClustering' or dictionaries of node:community
for measure, linestyle, lbl in [('RI', ':', 'RI'),
                                ('GRI', '--', 'GRI'),
                                ('ARI', '-', 'ARI/AGRI'),
                                ('AGRI', '-', None)]:
    if lbl is None:
        plt.plot(ar, R[measure], linestyle, color='black')
    else:
        plt.plot(ar, R[measure], linestyle, color='black', label=lbl)
plt.xlabel('number of random clusters', fontsize=14)
plt.legend();
#plt.savefig('RI.eps');
```
| github_jupyter |
# Values and Variables
**CS1302 Introduction to Computer Programming**
___
```
%reload_ext mytutor
```
## Integers
**How to enter an [integer](https://docs.python.org/3/reference/lexical_analysis.html#integer-literals) in a program?**
```
15 # an integer in decimal
0b1111 # a binary number
0xF # hexadecimal (base 16) with possible digits 0, 1,2,3,4,5,6,7,8,9,A,B,C,D,E,F
```
**Why all outputs are the same?**
- What you have entered are *integer literals*, which are integers written out literally.
- All the literals have the same integer value in decimal.
- By default, if the last line of a code cell has a value, the jupyter notebook (*IPython*) will store and display the value as an output.
```
3 # not the output of this cell
4 + 5 + 6
```
- The last line above also has the same value, `15`.
- It is an *expression* (but not a literal) that *evaluates* to the integer value.
**Exercise** Enter an expression that evaluates to an integer value, as big as possible.
(You may need to interrupt the kernel if the expression takes too long to evaluate.)
```
# There is no maximum for an integer for Python3.
# See https://docs.python.org/3.1/whatsnew/3.0.html#integers
11 ** 100000
```
## Strings
**How to enter a [string](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals) in a program?**
```
'\U0001f600: I am a string.' # a sequence of characters delimited by single quotes.
"\N{grinning face}: I am a string." # delimited by double quotes.
"""\N{grinning face}: I am a string.""" # delimited by triple single/double quotes.
```
- `\` is called the *escape symbol*.
- `\U0001f600` and `\N{grinning face}` are *escape sequences*.
- These sequences represent the same grinning face emoji by its Unicode in hexadecimal and its name.
**Why use different quotes?**
```
print('I\'m line #1.\nI\'m line #2.') # \n is a control code for line feed
print("I'm line #3.\nI'm line #4.") # no need to escape single quote.
print('''I'm line #5.
I'm line #6.''') # multi-line string
```
Note that:
- The escape sequence `\n` does not represent any symbol.
- It is a *control code* that creates a new line when printing the string.
- Another common control code is `\t` for tab.
Using double quotes, we need not escape the single quote in `I'm`.
Triple quotes delimit a multi-line string, so there is no need to use `\n`.
(You can copy and paste a multi-line string from elsewhere.)
In programming, there are often many ways to do the same thing.
The following is a one-line code ([one-liner](https://en.wikipedia.org/wiki/One-liner_program)) that prints multiple lines of strings without using `\n`:
```
print("I'm line #1", "I'm line #2", "I'm line #3", sep='\n') # one liner
```
- `sep='\n'` is a *keyword argument* that specifies the separator of the list of strings.
- By default, `sep=' '`, a single space character.
In IPython, we can get the *docstring* (documentation) of a function conveniently using the symbol `?`.
```
?print
print?
```
**Exercise** Print a cool multi-line string below.
```
print('''
(ง •̀_•́)ง
╰(●’◡’●)╮
(..•˘_˘•..)
(づ ̄ 3 ̄)づ
''')
# See also https://github.com/glamp/bashplotlib
# Star Wars via Telnet http://asciimation.co.nz/
```
## Variables and Assignment
It is useful to store a value and retrieve it later.
To do so, we assign the value to a variable:
```
x = 15
x # output the value of x
```
**Is assignment the same as equality?**
No because:
- you cannot write `15 = x`, but
- you can write `x = x + 1`, which increases the value of `x` by `1`.
**Exercise** Try out the above code yourself.
```
x = x + 1
x
```
Let's see the effect of assignment step-by-step:
1. Run the following cell.
1. Click `Next >` to see the next step of the execution.
```
%%mytutor -h 200
x = 15
x = x + 1
```
The following *tuple assignment* syntax can assign multiple variables in one line.
```
%%mytutor -h 200
x, y, z = '15', '30', 15
```
One can also use *chained assignment* to set different variables to the same value.
```
%%mytutor -h 250
x = y = z = 0
```
Variables can be deleted using `del`. Accessing a variable before assignment raises a Name error.
```
del x, y
x, y
```
## Identifiers
*Identifiers* such as variable names are case sensitive and follow certain rules.
**What is the syntax for variable names?**
1. Must start with a letter or `_` (an underscore) followed by letters, digits, or `_`.
1. Must not be a [keyword](https://docs.python.org/3.7/reference/lexical_analysis.html#keywords) (identifier reserved by Python):
<pre>False await else import pass
None break except in raise
True class finally is return
and continue for lambda try
as def from nonlocal while
assert del global not with
async elif if or yield</pre>
**Exercise** Evaluate the following cell and check if any of the rules above is violated.
```
from ipywidgets import interact
# Interactive widget: pick an assignment statement from the dropdown and run
# it.  Invalid identifiers raise an error; valid ones print 'Ok.'.
@interact
def identifier_syntax(assignment=['a-number = 15',
                        'a_number = 15',
                        '15 = 15',
                        '_15 = 15',
                        'del = 15',
                        'Del = 15',
                        'type = print',
                        'print = type',
                        'input = print']):
    # exec runs the selected statement; safe here because the choices are
    # fixed strings, but never exec untrusted input.
    exec(assignment)
    print('Ok.')
```
1. `a-number = 15` violates Rule 1 because `-` is not allowed. `-` is interpreted as an operator.
1. `15 = 15` violates Rule 1 because `15` starts with a digit instead of letter or _.
1. `del = 15` violates Rule 2 because `del` is a keyword.
What can we learn from the above examples?
- `del` is a keyword and `Del` is not because identifiers are case sensitive.
- Function/method/type names `print`/`input`/`type` are not keywords and can be reassigned.
This can be useful if you want to modify the default implementations without changing their source code.
To help make code more readable, additional style guides such as [PEP 8](https://www.python.org/dev/peps/pep-0008/#function-and-variable-names) are available:
- Function names should be lowercase, with words separated by underscores as necessary to improve readability.
- Variable names follow the same convention as function names.
## User Input
**How to let the user input a value at *runtime*,
i.e., as the program executes?**
We can use the method `input`:
- There is no need to delimit the input string by quotation marks.
- Simply press `enter` after typing a string.
```
print('Your name is', input('Please input your name: '))
```
- The `input` method prints its argument, if any, as a [prompt](https://en.wikipedia.org/wiki/Command-line_interface#Command_prompt).
- It takes user's input and *return* it as its value. `print` takes in that value and prints it.
**Exercise** Explain whether the following code prints `'My name is Python'`. Does `print` return a value?
```
print('My name is', print('Python'))
```
- Unlike `input`, the function `print` does not return the string it is trying to print. Printing a string is, therefore, different from returning a string.
- `print` actually returns a `None` object that gets printed as `None`.
## Type Conversion
The following program tries to compute the sum of two numbers from user inputs:
```
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
print(num1, '+', num2, 'is equal to', num1 + num2)
```
**Exercise** There is a [bug](https://en.wikipedia.org/wiki/Software_bug) in the above code. Can you locate the error?
The two numbers are concatenated instead of added together.
`input` *returns* user input as a string.
E.g., if the user enters `12`, the input is
- not treated as the integer twelve, but rather
- treated as a string containing two characters, one followed by two.
To see this, we can use `type` to return the data type of an expression.
```
num1 = input('Please input an integer: ')
print('Your input is', num1, 'with type', type(num1))
```
**Exercise** `type` applies to any expressions. Try it out below on `15`, `print`, `print()`, `input`, and even `type` itself and `type(type)`.
```
type(15), type(print), type(print()), type(input), type(type), type(type(type))
```
**So what happens when we add strings together?**
```
'4' + '5' + '6'
```
**How to fix the bug then?**
We can convert a string to an integer using `int`.
```
int('4') + int('5') + int('6')
```
We can also convert an integer to a string using `str`.
```
str(4) + str(5) + str(6)
```
**Exercise** Fix the bug in the following cell.
```
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
# print(num1, '+', num2, 'is equal to', num1 + num2) # fix this line below
### BEGIN SOLUTION
print(num1, '+', num2, 'is equal to', int(num1) + int(num2))
### END SOLUTION
```
## Error
In addition to writing code, a programmer spends significant time in *debugging* code that contains errors.
**Can an error be automatically detected by the computer?**
- You have just seen an example of *logical error*, which is due to an error in the logic.
- The ability to debug or even detect such error is, unfortunately, beyond Python's intelligence.
Other kinds of error may be detected automatically.
As an example, note that we can omit `+` for string concatenation, but we cannot omit it for integer summation:
```
print('Skipping + for string concatenation')
'4' '5' '6'
print('Skipping + for integer summation')
4 5 6
```
Python interpreter detects the bug and raises a *syntax* error.
**Why can a syntax error be detected automatically?
Why is the print statement before the error not executed?**
- The Python interpreter can easily detect syntax error even before executing the code simply because
- the interpreter fails to interpret the code, i.e., translates the code to lower-level executable code.
The following code raises a different kind of error.
```
print("Evaluating '4' + '5' + 6")
'4' + '5' + 6 # summing string with integer
```
**Why does Python throw a TypeError when evaluating `'4' + '5' + 6`?**
There is no default implementation of `+` operation on a value of type `str` and a value of type `int`.
- Unlike syntax error, the Python interpreter can only detect type error at runtime (when executing the code.)
- Hence, such error is called a *runtime error*.
**Why is TypeError a runtime error?**
The short answer is that Python is a [strongly-and-dynamically-typed](https://en.wikipedia.org/wiki/Strong_and_weak_typing) language:
- Strongly-typed: Python does not force a type conversion to avoid a type error.
- Dynamically-typed: Python allows data types to change at runtime.
The underlying details are more complicated than required for this course. It helps if you already know the following languages:
- JavaScript, which is a *weakly-typed* language that forces a type conversion to avoid a type error.
- C, which is a *statically-typed* language that does not allow data type to change at runtime.
```
%%javascript
alert('4' + '5' + 6) // no error because 6 is converted to a str automatically
```
A weakly-typed language may seem more robust, but it can lead to [more logical errors](https://www.oreilly.com/library/view/fluent-conference-javascript/9781449339203/oreillyvideos1220106.html).
To improve readability, [typescript](https://www.typescriptlang.org/) is a strongly-typed replacement of javascript.
**Exercise** Not all the strings can be converted into integers. Try breaking the following code by providing invalid inputs and record them in the subsequent cell. Explain whether the errors are runtime errors.
```
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
print(num1, '+', num2, 'is equal to', int(num1) + int(num2))
```
The possible invalid inputs are:
> `4 + 5 + 6`, `15.0`, `fifteen`
It raises a value error, which is a runtime error detected during execution.
Note that the followings are okay
> int('-1'), eval('4 + 5 + 6')
## Floating Point Numbers
Not all numbers are integers. In Engineering, we often need to use fractions.
**How to enter fractions in a program?**
```
x = -0.1 # decimal number
y = -1.0e-1 # scientific notation
z = -1/10 # fraction
x, y, z, type(x), type(y), type(z)
```
**What is the type `float`?**
- `float` corresponds to the [*floating point* representation](https://en.wikipedia.org/wiki/Floating-point_arithmetic#Floating-point_numbers).
- A `float` is stored exactly the way we write it in scientific notation:
$$
\overbrace{-}^{\text{sign}} \underbrace{1.0}_{\text{mantissa}\kern-1em}e\overbrace{-1}^{\text{exponent}\kern-1em}=-1\times 10^{-1}
$$
- The [truth](https://www.h-schmidt.net/FloatConverter/IEEE754.html) is more complicated than required for the course.
Integers in mathematics may be regarded as a `float` instead of `int`:
```
type(1.0), type(1e2)
```
You can also convert an `int` or a `str` to a `float`.
```
float(1), float('1')
```
**Is it better to store an integer as `float`?**
Python stores a [floating point](https://docs.python.org/3/library/sys.html#sys.float_info) with finite precision (usually as a 64bit binary fraction):
```
import sys
sys.float_info
```
It cannot represent a number larger than the `max`:
```
sys.float_info.max * 2
```
The precision also affects the check for equality.
```
(1.0 == 1.0 + sys.float_info.epsilon * 0.5, # returns true if equal
1.0 == 1.0 + sys.float_info.epsilon * 0.6, sys.float_info.max + 1 == sys.float_info.max)
```
Another issue with float is that it may keep more decimal places than desired.
```
1/3
```
**How to [round](https://docs.python.org/3/library/functions.html#round) a floating point number to the desired number of decimal places?**
```
round(2.665,2), round(2.675,2)
```
**Why does 2.675 round to 2.67 instead of 2.68?**
- A `float` is actually represented in binary.
- A decimal fraction [may not be represented exactly in binary](https://docs.python.org/3/tutorial/floatingpoint.html#tut-fp-issues).
The `round` function can also be applied to an integer.
```
round(150,-2), round(250,-2)
```
**Why does 250 round to 200 instead of 300?**
- Python 3 implements the default rounding method in [IEEE 754](https://en.wikipedia.org/w/index.php?title=IEEE_754#Rounding_rules).
## String Formatting
**Can we round a `float` or `int` for printing but not calculation?**
This is possible with [*format specifications*](https://docs.python.org/3/library/string.html#format-specification-mini-language).
```
x = 10000/3
print('x ≈ {:.2f} (rounded to 2 decimal places)'.format(x))
x
```
- `{:.2f}` is a *format specification*
- that gets replaced by a string
- that represents the argument `x` of `format`
- as a decimal floating point number rounded to 2 decimal places.
**Exercise** Play with the following widget to learn the effect of different format specifications. In particular, print `10000/3` as `3,333.33`.
```
from ipywidgets import interact
# Interactive demo of float format specifications: the widgets choose the
# alignment, sign, minimum width, grouping and precision fields, and the
# resulting spec is applied to the evaluated expression x.
@interact(x='10000/3',
          align={'None':'','<':'<','>':'>','=':'=','^':'^'},
          sign={'None':'','+':'+','-':'-','SPACE':' '},
          width=(0,20),
          grouping={'None':'','_':'_',',':','},
          precision=(0,20))
def print_float(x,sign,align,grouping,width=0,precision=2):
    # Build a spec such as '{:>+10,.2f}'; width 0 means "no minimum width"
    # and is therefore omitted from the spec.
    format_spec = f"{{:{align}{sign}{'' if width==0 else width}{grouping}.{precision}f}}"
    print("Format spec:",format_spec)
    # NOTE(review): eval runs the user-typed expression -- acceptable for a
    # teaching widget, but never eval untrusted input.
    print("x ≈",format_spec.format(eval(x)))
# Exercise answer: thousands separator plus 2 decimal places gives 3,333.33
print('{:,.2f}'.format(10000/3))
```
String formatting is useful for different data types other than `float`.
E.g., consider the following program that prints a time specified by some variables.
```
# Some specified time
hour = 12
minute = 34
second = 56
print("The time is " + str(hour) + ":" + str(minute) + ":" + str(second)+".")
```
Imagine you have to show also the date in different formats.
The code can become very hard to read/write because
- the message is a concatenation of multiple strings and
- the integer variables need to be converted to strings.
Omitting `+` leads to syntax error. Removing `str` as follows also does not give the desired format.
```
print("The time is ", hour, ":", minute, ":", second, ".") # note the extra spaces
```
To make the code more readable, we can use the `format` function as follows.
```
message = "The time is {}:{}:{}."
print(message.format(hour,minute,second))
```
- We can have multiple *place-holders* `{}` inside a string.
- We can then provide the contents (any type: numbers, strings..) using the `format` function, which
- substitutes the place-holders by the function arguments from left to right.
According to the [string formatting syntax](https://docs.python.org/3/library/string.html#format-string-syntax), we can change the order of substitution using
- indices *(0 is the first item)* or
- names inside the placeholder `{}`:
```
print("You should {0} {1} what I say instead of what I {0}.".format("do", "only"))
print("The surname of {first} {last} is {last}.".format(first="John", last="Doe"))
```
You can even put variables inside the format specification directly and have a nested string formatting.
```
align, width = "^", 5
print(f"{{:*{align}{width}}}".format(x)) # note the syntax f"..."
```
**Exercise** Play with the following widget to learn more about the formating specification.
1. What happens when `align` is none but `fill` is `*`?
1. What happens when the `expression` is a multi-line string?
```
from ipywidgets import interact
# Interactive demo of fill/align/width formatting applied to any evaluated
# expression (default: the string 'ABC').
@interact(expression=r"'ABC'",
          fill='*',
          align={'None':'','<':'<','>':'>','=':'=','^':'^'},
          width=(0,20))
def print_objectt(expression,fill,align='^',width=10):
    # Build a spec such as '{:*^10}'; width 0 omits the minimum-width field.
    format_spec = f"{{:{fill}{align}{'' if width==0 else width}}}"
    print("Format spec:",format_spec)
    # NOTE(review): eval of the typed expression -- teaching use only.
    print("Print:",format_spec.format(eval(expression)))
```
1. It returns a ValueError because align must be specified when fill is.
1. The newline character is simply regarded as a character. The formatting is not applied line-by-line. E.g., try 'ABC\nDEF'.
| github_jupyter |
```
%%html
<!-- Stylesheets and fonts for rendering MathBook/AIM content in this notebook. -->
<link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" />
<link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" />
<style>.subtitle {font-size:medium; display:block}</style>
<link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" />
<link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. -->
<script>
// Collapse this cell's input area and add a "Toggle hidden code" button
// (uses the classic notebook's jQuery).  The length check makes the script
// idempotent if the cell is re-run.
var cell = $(".container .cell").eq(0), ia = cell.find(".input_area")
if (cell.find(".toggle-button").length == 0) {
    ia.after(
        $('<button class="toggle-button">Toggle hidden code</button>').click(
            function (){ ia.toggle() }
        )
    )
    ia.hide()
}
</script>
```
**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.
$\newcommand{\identity}{\mathrm{id}}
\newcommand{\notdivide}{\nmid}
\newcommand{\notsubset}{\not\subset}
\newcommand{\lcm}{\operatorname{lcm}}
\newcommand{\gf}{\operatorname{GF}}
\newcommand{\inn}{\operatorname{Inn}}
\newcommand{\aut}{\operatorname{Aut}}
\newcommand{\Hom}{\operatorname{Hom}}
\newcommand{\cis}{\operatorname{cis}}
\newcommand{\chr}{\operatorname{char}}
\newcommand{\Null}{\operatorname{Null}}
\newcommand{\lt}{<}
\newcommand{\gt}{>}
\newcommand{\amp}{&}
$
<div class="mathbook-content"><h1 class="heading hide-type" alt="Index Index"><span class="type">Index</span><span class="codenumber" /><span class="title">Index</span></h1></div>
<div class="mathbook-content"><div class="indexletter" id="indexletter-$"><div class="indexitem">$G$-equivalent<span class="indexknowl">, <a knowl="./knowl/p-2038.html">Paragraph</a></span></div><div class="indexitem">$G$-set<span class="indexknowl">, <a knowl="./knowl/p-2029.html">Paragraph</a></span></div><div class="indexitem">$n$th root of unity<span class="indexknowl">, <a knowl="./knowl/p-604.html">Paragraph</a> <a knowl="./knowl/p-3654.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-a"><div class="indexitem">Abel, Niels Henrik<span class="indexknowl">, <a knowl="./knowl/p-3648.html">Paragraph</a></span></div><div class="indexitem">Abelian group<span class="indexknowl">, <a knowl="./knowl/p-381.html">Paragraph</a></span></div><div class="indexitem">Ackermann's function<span class="indexknowl">, <a knowl="./knowl/exercise-63.html">Exercise</a></span></div><div class="indexitem">Adleman, L.<span class="indexknowl">, <a knowl="./knowl/p-1109.html">Paragraph</a></span></div><div class="indexitem">Algebraic closure<span class="indexknowl">, <a knowl="./knowl/p-3272.html">Paragraph</a></span></div><div class="indexitem">Algebraic extension<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="indexitem">Algebraic number<span class="indexknowl">, <a knowl="./knowl/p-3229.html">Paragraph</a></span></div><div class="indexitem">Algorithm</div><div class="subindexitem">division<span class="indexknowl">, <a knowl="./knowl/theorem-90.html">Theorem</a></span></div><div class="subindexitem">Euclidean<span class="indexknowl">, <a knowl="./knowl/p-262.html">Paragraph</a></span></div><div class="indexitem">Ascending chain condition<span class="indexknowl">, <a knowl="./knowl/p-2793.html">Paragraph</a></span></div><div class="indexitem">Associate elements<span class="indexknowl">, <a knowl="./knowl/p-2768.html">Paragraph</a></span></div><div class="indexitem">Atom<span class="indexknowl">, <a 
knowl="./knowl/p-2959.html">Paragraph</a></span></div><div class="indexitem">Automorphism</div><div class="subindexitem">inner<span class="indexknowl">, <a knowl="./knowl/exercise-407.html">Exercise</a></span></div></div><div class="indexletter" id="indexletter-b"><div class="indexitem">Basis of a lattice<span class="indexknowl">, <a knowl="./knowl/p-1863.html">Paragraph</a></span></div><div class="indexitem">Bieberbach, L.<span class="indexknowl">, <a knowl="./knowl/p-1874.html">Paragraph</a></span></div><div class="indexitem">Binary operation<span class="indexknowl">, <a knowl="./knowl/p-377.html">Paragraph</a></span></div><div class="indexitem">Binary symmetric channel<span class="indexknowl">, <a knowl="./knowl/p-1209.html">Paragraph</a></span></div><div class="indexitem">Boole, George<span class="indexknowl">, <a knowl="./knowl/p-2992.html">Paragraph</a></span></div><div class="indexitem">Boolean algebra</div><div class="subindexitem">atom in a<span class="indexknowl">, <a knowl="./knowl/p-2959.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2938.html">Paragraph</a></span></div><div class="subindexitem">finite<span class="indexknowl">, <a knowl="./knowl/p-2957.html">Paragraph</a></span></div><div class="subindexitem">isomorphism<span class="indexknowl">, <a knowl="./knowl/p-2958.html">Paragraph</a></span></div><div class="indexitem">Boolean function<span class="indexknowl">, <a knowl="./knowl/p-2084.html">Paragraph</a> <a knowl="./knowl/exercise-668.html">Exercise</a></span></div><div class="indexitem">Burnside's Counting Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-69.html">Theorem</a></span></div><div class="indexitem">Burnside, William<span class="indexknowl">, <a knowl="./knowl/p-420.html">Paragraph</a> <a knowl="./knowl/p-1639.html">Paragraph</a> <a knowl="./knowl/p-2091.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-c"><div 
class="indexitem">Cancellation law</div><div class="subindexitem">for groups<span class="indexknowl">, <a knowl="./knowl/p-407.html">Paragraph</a></span></div><div class="subindexitem">for integral domains<span class="indexknowl">, <a knowl="./knowl/proposition-33.html">Proposition</a></span></div><div class="indexitem">Cardano, Gerolamo<span class="indexknowl">, <a knowl="./knowl/p-2627.html">Paragraph</a></span></div><div class="indexitem">Carmichael numbers<span class="indexknowl">, <a knowl="./knowl/exercise-273.html">Exercise</a></span></div><div class="indexitem">Cauchy's Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-cauchy.html">Theorem</a></span></div><div class="indexitem">Cauchy, Augustin-Louis<span class="indexknowl">, <a knowl="./knowl/p-822.html">Paragraph</a></span></div><div class="indexitem">Cayley table<span class="indexknowl">, <a knowl="./knowl/p-384.html">Paragraph</a></span></div><div class="indexitem">Cayley's Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-cayleys.html">Theorem</a></span></div><div class="indexitem">Cayley, Arthur<span class="indexknowl">, <a knowl="./knowl/p-1444.html">Paragraph</a></span></div><div class="indexitem">Centralizer</div><div class="subindexitem">of a subgroup<span class="indexknowl">, <a knowl="./knowl/p-2052.html">Paragraph</a></span></div><div class="indexitem">Characteristic of a ring<span class="indexknowl">, <a knowl="./knowl/p-2346.html">Paragraph</a></span></div><div class="indexitem">Chinese Remainder Theorem</div><div class="subindexitem">for integers<span class="indexknowl">, <a knowl="./knowl/theorem-87.html">Theorem</a></span></div><div class="indexitem">Cipher<span class="indexknowl">, <a knowl="./knowl/p-1095.html">Paragraph</a></span></div><div class="indexitem">Ciphertext<span class="indexknowl">, <a knowl="./knowl/p-1095.html">Paragraph</a></span></div><div class="indexitem">Circuit</div><div class="subindexitem">parallel<span class="indexknowl">, <a 
knowl="./knowl/p-2984.html">Paragraph</a></span></div><div class="subindexitem">series<span class="indexknowl">, <a knowl="./knowl/p-2984.html">Paragraph</a></span></div><div class="subindexitem">series-parallel<span class="indexknowl">, <a knowl="./knowl/p-2985.html">Paragraph</a></span></div><div class="indexitem">Class equation<span class="indexknowl">, <a knowl="./knowl/p-2052.html">Paragraph</a></span></div><div class="indexitem">Code</div><div class="subindexitem"><abbr class="acronym">BCH</abbr><span class="indexknowl">, <a knowl="./knowl/p-3487.html">Paragraph</a></span></div><div class="subindexitem">cyclic<span class="indexknowl">, <a knowl="./knowl/p-3464.html">Paragraph</a></span></div><div class="subindexitem">group<span class="indexknowl">, <a knowl="./knowl/p-1232.html">Paragraph</a></span></div><div class="subindexitem">linear<span class="indexknowl">, <a knowl="./knowl/p-1246.html">Paragraph</a></span></div><div class="subindexitem">minimum distance of<span class="indexknowl">, <a knowl="./knowl/p-1215.html">Paragraph</a></span></div><div class="subindexitem">polynomial<span class="indexknowl">, <a knowl="./knowl/p-3467.html">Paragraph</a></span></div><div class="indexitem">Commutative diagrams<span class="indexknowl">, <a knowl="./knowl/p-1733.html">Paragraph</a></span></div><div class="indexitem">Commutative rings<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="indexitem">Composite integer<span class="indexknowl">, <a knowl="./knowl/p-263.html">Paragraph</a></span></div><div class="indexitem">Composition series<span class="indexknowl">, <a knowl="./knowl/p-1964.html">Paragraph</a></span></div><div class="indexitem">Congruence modulo $n$<span class="indexknowl">, <a knowl="./knowl/example-sets-congruent-integers.html">Example</a></span></div><div class="indexitem">Conjugacy classes<span class="indexknowl">, <a knowl="./knowl/p-2052.html">Paragraph</a></span></div><div class="indexitem">Conjugate 
elements<span class="indexknowl">, <a knowl="./knowl/p-3593.html">Paragraph</a></span></div><div class="indexitem">Conjugate, complex<span class="indexknowl">, <a knowl="./knowl/p-590.html">Paragraph</a></span></div><div class="indexitem">Conjugation<span class="indexknowl">, <a knowl="./knowl/example-actions-conjugation.html">Example</a></span></div><div class="indexitem">Constructible number<span class="indexknowl">, <a knowl="./knowl/p-3303.html">Paragraph</a></span></div><div class="indexitem">Correspondence Theorem</div><div class="subindexitem">for groups<span class="indexknowl">, <a knowl="./knowl/theorem-correspondence.html">Theorem</a></span></div><div class="subindexitem">for rings<span class="indexknowl">, <a knowl="./knowl/theorem-correspondence-rings.html">Theorem</a></span></div><div class="indexitem">Coset</div><div class="subindexitem">leader<span class="indexknowl">, <a knowl="./knowl/p-1286.html">Paragraph</a></span></div><div class="subindexitem">left<span class="indexknowl">, <a knowl="./knowl/p-968.html">Paragraph</a></span></div><div class="subindexitem">representative<span class="indexknowl">, <a knowl="./knowl/p-968.html">Paragraph</a></span></div><div class="subindexitem">right<span class="indexknowl">, <a knowl="./knowl/p-968.html">Paragraph</a></span></div><div class="indexitem">Coset decoding<span class="indexknowl">, <a knowl="./knowl/p-1284.html">Paragraph</a></span></div><div class="indexitem">Cryptanalysis<span class="indexknowl">, <a knowl="./knowl/p-1100.html">Paragraph</a></span></div><div class="indexitem">Cryptosystem</div><div class="subindexitem">affine<span class="indexknowl">, <a knowl="./knowl/p-1103.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-1095.html">Paragraph</a></span></div><div class="subindexitem">monoalphabetic<span class="indexknowl">, <a knowl="./knowl/p-1102.html">Paragraph</a></span></div><div class="subindexitem">polyalphabetic<span 
class="indexknowl">, <a knowl="./knowl/p-1105.html">Paragraph</a></span></div><div class="subindexitem">private key<span class="indexknowl">, <a knowl="./knowl/p-1098.html">Paragraph</a></span></div><div class="subindexitem">public key<span class="indexknowl">, <a knowl="./knowl/p-1097.html">Paragraph</a></span></div><div class="subindexitem"><abbr class="acronym">RSA</abbr><span class="indexknowl">, <a knowl="./knowl/p-1110.html">Paragraph</a></span></div><div class="subindexitem">single key<span class="indexknowl">, <a knowl="./knowl/p-1098.html">Paragraph</a></span></div><div class="indexitem">Cycle</div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-793.html">Paragraph</a></span></div><div class="subindexitem">disjoint<span class="indexknowl">, <a knowl="./knowl/p-797.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-d"><div class="indexitem">De Morgan's laws</div><div class="subindexitem">for Boolean algebras<span class="indexknowl">, <a knowl="./knowl/li-665.html">Item</a></span></div><div class="subindexitem">for sets<span class="indexknowl">, <a knowl="./knowl/theorem-1.html">Theorem</a></span></div><div class="indexitem">De Morgan, Augustus<span class="indexknowl">, <a knowl="./knowl/p-2992.html">Paragraph</a></span></div><div class="indexitem">Decoding table<span class="indexknowl">, <a knowl="./knowl/p-1288.html">Paragraph</a></span></div><div class="indexitem">Deligne, Pierre<span class="indexknowl">, <a knowl="./knowl/p-3332.html">Paragraph</a></span></div><div class="indexitem">DeMoivre's Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-15.html">Theorem</a></span></div><div class="indexitem">Derivative<span class="indexknowl">, <a knowl="./knowl/p-3444.html">Paragraph</a></span></div><div class="indexitem">Determinant, Vandermonde<span class="indexknowl">, <a knowl="./knowl/p-3480.html">Paragraph</a></span></div><div class="indexitem">Dickson, L. 
E.<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a></span></div><div class="indexitem">Diffie, W.<span class="indexknowl">, <a knowl="./knowl/p-1108.html">Paragraph</a></span></div><div class="indexitem">Direct product of groups</div><div class="subindexitem">external<span class="indexknowl">, <a knowl="./knowl/p-1451.html">Paragraph</a></span></div><div class="subindexitem">internal<span class="indexknowl">, <a knowl="./knowl/p-1467.html">Paragraph</a></span></div><div class="indexitem">Discriminant</div><div class="subindexitem">of the cubic equation<span class="indexknowl">, <a knowl="./knowl/exercise-614.html">Exercise</a></span></div><div class="subindexitem">of the quadratic equation<span class="indexknowl">, <a knowl="./knowl/exercise-608.html">Exercise</a></span></div><div class="indexitem">Division algorithm</div><div class="subindexitem">for integers<span class="indexknowl">, <a knowl="./knowl/theorem-integers-division_algorithm.html">Theorem</a></span></div><div class="subindexitem">for polynomials<span class="indexknowl">, <a knowl="./knowl/theorem-90.html">Theorem</a></span></div><div class="indexitem">Division ring<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="indexitem">Domain</div><div class="subindexitem">Euclidean<span class="indexknowl">, <a knowl="./knowl/p-2801.html">Paragraph</a></span></div><div class="subindexitem">principal ideal<span class="indexknowl">, <a knowl="./knowl/p-2777.html">Paragraph</a></span></div><div class="subindexitem">unique factorization<span class="indexknowl">, <a knowl="./knowl/p-2771.html">Paragraph</a></span></div><div class="indexitem">Doubling the cube<span class="indexknowl">, <a knowl="./knowl/paragraphs-20.html">Paragraphs</a></span></div></div><div class="indexletter" id="indexletter-e"><div class="indexitem">Eisenstein's Criterion<span class="indexknowl">, <a knowl="./knowl/theorem-eisenstein.html">Theorem</a></span></div><div 
class="indexitem">Element</div><div class="subindexitem">associate<span class="indexknowl">, <a knowl="./knowl/p-2768.html">Paragraph</a></span></div><div class="subindexitem">identity<span class="indexknowl">, <a knowl="./knowl/li-90.html">Item</a></span></div><div class="subindexitem">inverse<span class="indexknowl">, <a knowl="./knowl/li-91.html">Item</a></span></div><div class="subindexitem">irreducible<span class="indexknowl">, <a knowl="./knowl/p-2769.html">Paragraph</a></span></div><div class="subindexitem">order of<span class="indexknowl">, <a knowl="./knowl/p-569.html">Paragraph</a></span></div><div class="subindexitem">prime<span class="indexknowl">, <a knowl="./knowl/p-2769.html">Paragraph</a></span></div><div class="subindexitem">primitive<span class="indexknowl">, <a knowl="./knowl/p-3606.html">Paragraph</a></span></div><div class="subindexitem">transcendental<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="indexitem">Equivalence class<span class="indexknowl">, <a knowl="./knowl/p-106.html">Paragraph</a></span></div><div class="indexitem">Equivalence relation<span class="indexknowl">, <a knowl="./knowl/p-98.html">Paragraph</a></span></div><div class="indexitem">Euclidean algorithm<span class="indexknowl">, <a knowl="./knowl/p-262.html">Paragraph</a></span></div><div class="indexitem">Euclidean domain<span class="indexknowl">, <a knowl="./knowl/p-2801.html">Paragraph</a></span></div><div class="indexitem">Euclidean group<span class="indexknowl">, <a knowl="./knowl/p-1849.html">Paragraph</a></span></div><div class="indexitem">Euclidean inner product<span class="indexknowl">, <a knowl="./knowl/p-1826.html">Paragraph</a></span></div><div class="indexitem">Euclidean valuation<span class="indexknowl">, <a knowl="./knowl/p-2801.html">Paragraph</a></span></div><div class="indexitem">Euler $\phi$-function<span class="indexknowl">, <a knowl="./knowl/p-1005.html">Paragraph</a></span></div><div 
class="indexitem">Euler, Leonhard<span class="indexknowl">, <a knowl="./knowl/p-1014.html">Paragraph</a> <a knowl="./knowl/p-3330.html">Paragraph</a></span></div><div class="indexitem">Extension</div><div class="subindexitem">algebraic<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="subindexitem">field<span class="indexknowl">, <a knowl="./knowl/p-3216.html">Paragraph</a></span></div><div class="subindexitem">finite<span class="indexknowl">, <a knowl="./knowl/p-3247.html">Paragraph</a></span></div><div class="subindexitem">normal<span class="indexknowl">, <a knowl="./knowl/p-3621.html">Paragraph</a></span></div><div class="subindexitem">radical<span class="indexknowl">, <a knowl="./knowl/p-3653.html">Paragraph</a></span></div><div class="subindexitem">separable<span class="indexknowl">, <a knowl="./knowl/p-3442.html">Paragraph</a> <a knowl="./knowl/p-3603.html">Paragraph</a></span></div><div class="subindexitem">simple<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="indexitem">External direct product<span class="indexknowl">, <a knowl="./knowl/p-1451.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-f"><div class="indexitem">Faltings, Gerd<span class="indexknowl">, <a knowl="./knowl/p-3332.html">Paragraph</a></span></div><div class="indexitem">Feit, W.<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a> <a knowl="./knowl/p-2091.html">Paragraph</a></span></div><div class="indexitem">Fermat's factorizationalgorithm<span class="indexknowl">, <a knowl="./knowl/exercise-270.html">Exercise</a></span></div><div class="indexitem">Fermat's Little Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-cosets-theorem-13.html">Theorem</a></span></div><div class="indexitem">Fermat, Pierre de<span class="indexknowl">, <a knowl="./knowl/p-1011.html">Paragraph</a> <a knowl="./knowl/p-3329.html">Paragraph</a></span></div><div 
class="indexitem">Ferrari, Ludovico<span class="indexknowl">, <a knowl="./knowl/p-2627.html">Paragraph</a></span></div><div class="indexitem">Ferro, Scipione del<span class="indexknowl">, <a knowl="./knowl/p-2625.html">Paragraph</a></span></div><div class="indexitem">Field<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="subindexitem">algebraically closed<span class="indexknowl">, <a knowl="./knowl/p-3272.html">Paragraph</a></span></div><div class="subindexitem">base<span class="indexknowl">, <a knowl="./knowl/p-3216.html">Paragraph</a></span></div><div class="subindexitem">extension<span class="indexknowl">, <a knowl="./knowl/p-3216.html">Paragraph</a></span></div><div class="subindexitem">fixed<span class="indexknowl">, <a knowl="./knowl/p-3613.html">Paragraph</a></span></div><div class="subindexitem">Galois<span class="indexknowl">, <a knowl="./knowl/p-3451.html">Paragraph</a></span></div><div class="subindexitem">of fractions<span class="indexknowl">, <a knowl="./knowl/p-2757.html">Paragraph</a></span></div><div class="subindexitem">of quotients<span class="indexknowl">, <a knowl="./knowl/p-2757.html">Paragraph</a></span></div><div class="subindexitem">splitting<span class="indexknowl">, <a knowl="./knowl/p-3283.html">Paragraph</a></span></div><div class="indexitem">Finitely generated group<span class="indexknowl">, <a knowl="./knowl/p-1920.html">Paragraph</a></span></div><div class="indexitem">Fior, Antonio<span class="indexknowl">, <a knowl="./knowl/p-2625.html">Paragraph</a></span></div><div class="indexitem">First Isomorphism Theorem</div><div class="subindexitem">for groups<span class="indexknowl">, <a knowl="./knowl/theorem-first-isomorphism.html">Theorem</a></span></div><div class="subindexitem">for rings<span class="indexknowl">, <a knowl="./knowl/theorem-82.html">Theorem</a></span></div><div class="indexitem">Fixed point set<span class="indexknowl">, <a 
knowl="./knowl/p-2043.html">Paragraph</a></span></div><div class="indexitem">Freshman's Dream<span class="indexknowl">, <a knowl="./knowl/lemma-freshmans-dream.html">Lemma</a></span></div><div class="indexitem">Function</div><div class="subindexitem">bijective<span class="indexknowl">, <a knowl="./knowl/p-74.html">Paragraph</a></span></div><div class="subindexitem">Boolean<span class="indexknowl">, <a knowl="./knowl/p-2084.html">Paragraph</a> <a knowl="./knowl/exercise-668.html">Exercise</a></span></div><div class="subindexitem">composition of<span class="indexknowl">, <a knowl="./knowl/p-76.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-70.html">Paragraph</a></span></div><div class="subindexitem">domain of<span class="indexknowl">, <a knowl="./knowl/p-70.html">Paragraph</a></span></div><div class="subindexitem">identity<span class="indexknowl">, <a knowl="./knowl/p-90.html">Paragraph</a></span></div><div class="subindexitem">injective<span class="indexknowl">, <a knowl="./knowl/p-74.html">Paragraph</a></span></div><div class="subindexitem">invertible<span class="indexknowl">, <a knowl="./knowl/p-90.html">Paragraph</a></span></div><div class="subindexitem">one-to-one<span class="indexknowl">, <a knowl="./knowl/p-74.html">Paragraph</a></span></div><div class="subindexitem">onto<span class="indexknowl">, <a knowl="./knowl/p-74.html">Paragraph</a></span></div><div class="subindexitem">range of<span class="indexknowl">, <a knowl="./knowl/p-70.html">Paragraph</a></span></div><div class="subindexitem">surjective<span class="indexknowl">, <a knowl="./knowl/p-74.html">Paragraph</a></span></div><div class="subindexitem">switching<span class="indexknowl">, <a knowl="./knowl/p-2084.html">Paragraph</a> <a knowl="./knowl/exercise-668.html">Exercise</a></span></div><div class="indexitem">Fundamental Theorem</div><div class="subindexitem">of Algebra<span class="indexknowl">, <a 
knowl="./knowl/theorem-120.html">Theorem</a> <a knowl="./knowl/theorem-136.html">Theorem</a></span></div><div class="subindexitem">of Arithmetic<span class="indexknowl">, <a knowl="./knowl/theorem-fund-theorem-arithmetic.html">Theorem</a></span></div><div class="subindexitem">of Finite Abelian Groups<span class="indexknowl">, <a knowl="./knowl/theorem-finite-abelian-groups.html">Theorem</a></span></div><div class="indexitem">Fundamental Theorem of Galois Theory<span class="indexknowl">, <a knowl="./knowl/theorem-fundamental-galois.html">Theorem</a></span></div></div><div class="indexletter" id="indexletter-g"><div class="indexitem">Galois field<span class="indexknowl">, <a knowl="./knowl/p-3451.html">Paragraph</a></span></div><div class="indexitem">Galois group<span class="indexknowl">, <a knowl="./knowl/p-3588.html">Paragraph</a></span></div><div class="indexitem">Galois, Évariste<span class="indexknowl">, <a knowl="./knowl/p-419.html">Paragraph</a> <a knowl="./knowl/p-3649.html">Paragraph</a></span></div><div class="indexitem">Gauss's Lemma<span class="indexknowl">, <a knowl="./knowl/theorem-domains-gauss-lemma.html">Theorem</a></span></div><div class="indexitem">Gauss, Karl Friedrich<span class="indexknowl">, <a knowl="./knowl/p-2833.html">Paragraph</a></span></div><div class="indexitem">Gaussian integers<span class="indexknowl">, <a knowl="./knowl/example-rings-gaussian-integers.html">Example</a></span></div><div class="indexitem">Generator of a cyclic subgroup<span class="indexknowl">, <a knowl="./knowl/p-569.html">Paragraph</a></span></div><div class="indexitem">Generators for a group<span class="indexknowl">, <a knowl="./knowl/p-1920.html">Paragraph</a></span></div><div class="indexitem">Glide reflection<span class="indexknowl">, <a knowl="./knowl/p-1851.html">Paragraph</a></span></div><div class="indexitem">Gorenstein, Daniel<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a></span></div><div class="indexitem">Greatest common 
divisor</div><div class="subindexitem">of two integers<span class="indexknowl">, <a knowl="./knowl/p-254.html">Paragraph</a></span></div><div class="subindexitem">of two polynomials<span class="indexknowl">, <a knowl="./knowl/p-2590.html">Paragraph</a></span></div><div class="indexitem">Greatest lower bound<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div><div class="indexitem">Greiss, R.<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a></span></div><div class="indexitem">Grothendieck, Alexander<span class="indexknowl">, <a knowl="./knowl/p-3332.html">Paragraph</a></span></div><div class="indexitem">Group</div><div class="subindexitem">abelian<span class="indexknowl">, <a knowl="./knowl/p-381.html">Paragraph</a></span></div><div class="subindexitem">action<span class="indexknowl">, <a knowl="./knowl/p-2029.html">Paragraph</a></span></div><div class="subindexitem">alternating<span class="indexknowl">, <a knowl="./knowl/p-816.html">Paragraph</a></span></div><div class="subindexitem">center of<span class="indexknowl">, <a knowl="./knowl/p-2052.html">Paragraph</a></span></div><div class="subindexitem">circle<span class="indexknowl">, <a knowl="./knowl/p-602.html">Paragraph</a></span></div><div class="subindexitem">commutative<span class="indexknowl">, <a knowl="./knowl/p-381.html">Paragraph</a></span></div><div class="subindexitem">cyclic<span class="indexknowl">, <a knowl="./knowl/p-569.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-377.html">Paragraph</a></span></div><div class="subindexitem">dihedral<span class="indexknowl">, <a knowl="./knowl/p-823.html">Paragraph</a></span></div><div class="subindexitem">Euclidean<span class="indexknowl">, <a knowl="./knowl/p-1849.html">Paragraph</a></span></div><div class="subindexitem">factor<span class="indexknowl">, <a knowl="./knowl/p-1610.html">Paragraph</a></span></div><div 
class="subindexitem">finite<span class="indexknowl">, <a knowl="./knowl/p-393.html">Paragraph</a></span></div><div class="subindexitem">finitely generated<span class="indexknowl">, <a knowl="./knowl/p-1920.html">Paragraph</a></span></div><div class="subindexitem">Galois<span class="indexknowl">, <a knowl="./knowl/p-3588.html">Paragraph</a></span></div><div class="subindexitem">general linear<span class="indexknowl">, <a knowl="./knowl/example-groups-gl2.html">Example</a> <a knowl="./knowl/p-1822.html">Paragraph</a></span></div><div class="subindexitem">generators of<span class="indexknowl">, <a knowl="./knowl/p-1920.html">Paragraph</a></span></div><div class="subindexitem">homomorphism of<span class="indexknowl">, <a knowl="./knowl/p-1706.html">Paragraph</a></span></div><div class="subindexitem">infinite<span class="indexknowl">, <a knowl="./knowl/p-393.html">Paragraph</a></span></div><div class="subindexitem">isomorphic<span class="indexknowl">, <a knowl="./knowl/p-1411.html">Paragraph</a></span></div><div class="subindexitem">isomorphism of<span class="indexknowl">, <a knowl="./knowl/p-1411.html">Paragraph</a></span></div><div class="subindexitem">nonabelian<span class="indexknowl">, <a knowl="./knowl/p-381.html">Paragraph</a></span></div><div class="subindexitem">noncommutative<span class="indexknowl">, <a knowl="./knowl/p-381.html">Paragraph</a></span></div><div class="subindexitem">of units<span class="indexknowl">, <a knowl="./knowl/example-groups-z6-mult.html">Example</a></span></div><div class="subindexitem">order of<span class="indexknowl">, <a knowl="./knowl/p-393.html">Paragraph</a></span></div><div class="subindexitem">orthogonal<span class="indexknowl">, <a knowl="./knowl/p-1824.html">Paragraph</a></span></div><div class="subindexitem">permutation<span class="indexknowl">, <a knowl="./knowl/p-787.html">Paragraph</a></span></div><div class="subindexitem">point<span class="indexknowl">, <a knowl="./knowl/p-1866.html">Paragraph</a></span></div><div 
class="subindexitem">quaternion<span class="indexknowl">, <a knowl="./knowl/example-groups-quaterions.html">Example</a></span></div><div class="subindexitem">quotient<span class="indexknowl">, <a knowl="./knowl/p-1610.html">Paragraph</a></span></div><div class="subindexitem">simple<span class="indexknowl">, <a knowl="./knowl/p-1621.html">Paragraph</a> <a knowl="./knowl/p-1639.html">Paragraph</a></span></div><div class="subindexitem">solvable<span class="indexknowl">, <a knowl="./knowl/p-1972.html">Paragraph</a></span></div><div class="subindexitem">space<span class="indexknowl">, <a knowl="./knowl/p-1865.html">Paragraph</a></span></div><div class="subindexitem">special linear<span class="indexknowl">, <a knowl="./knowl/example-groups-sl2.html">Example</a> <a knowl="./knowl/p-1822.html">Paragraph</a></span></div><div class="subindexitem">special orthogonal<span class="indexknowl">, <a knowl="./knowl/p-1849.html">Paragraph</a></span></div><div class="subindexitem">symmetric<span class="indexknowl">, <a knowl="./knowl/p-784.html">Paragraph</a></span></div><div class="subindexitem">symmetry<span class="indexknowl">, <a knowl="./knowl/p-1856.html">Paragraph</a></span></div><div class="indexitem">Group!$p$-group<span class="indexknowl">, <a knowl="./knowl/p-2174.html">Paragraph</a></span></div><div class="indexitem">Group$p$-group<span class="indexknowl">, <a knowl="./knowl/p-1927.html">Paragraph</a></span></div><div class="indexitem">Gödel, Kurt<span class="indexknowl">, <a knowl="./knowl/p-2993.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-h"><div class="indexitem">Hamming distance<span class="indexknowl">, <a knowl="./knowl/p-1215.html">Paragraph</a></span></div><div class="indexitem">Hamming, R.<span class="indexknowl">, <a knowl="./knowl/p-1231.html">Paragraph</a></span></div><div class="indexitem">Hellman, M.<span class="indexknowl">, <a knowl="./knowl/p-1108.html">Paragraph</a></span></div><div class="indexitem">Hilbert, David<span 
class="indexknowl">, <a knowl="./knowl/p-1874.html">Paragraph</a> <a knowl="./knowl/p-2399.html">Paragraph</a> <a knowl="./knowl/p-2993.html">Paragraph</a> <a knowl="./knowl/p-3331.html">Paragraph</a></span></div><div class="indexitem">Homomorphic image<span class="indexknowl">, <a knowl="./knowl/p-1706.html">Paragraph</a></span></div><div class="indexitem">Homomorphism</div><div class="subindexitem">canonical<span class="indexknowl">, <a knowl="./knowl/p-1730.html">Paragraph</a> <a knowl="./knowl/p-2378.html">Paragraph</a></span></div><div class="subindexitem">evaluation<span class="indexknowl">, <a knowl="./knowl/example-rings-continuous-function-homomorph.html">Example</a> <a knowl="./knowl/p-2576.html">Paragraph</a></span></div><div class="subindexitem">kernel of a group<span class="indexknowl">, <a knowl="./knowl/p-1723.html">Paragraph</a></span></div><div class="subindexitem">kernel of a ring<span class="indexknowl">, <a knowl="./knowl/p-2353.html">Paragraph</a></span></div><div class="subindexitem">natural<span class="indexknowl">, <a knowl="./knowl/p-1730.html">Paragraph</a> <a knowl="./knowl/p-2378.html">Paragraph</a></span></div><div class="subindexitem">of groups<span class="indexknowl">, <a knowl="./knowl/p-1706.html">Paragraph</a></span></div><div class="subindexitem">ring<span class="indexknowl">, <a knowl="./knowl/p-2352.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-i"><div class="indexitem">Ideal</div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2362.html">Paragraph</a></span></div><div class="subindexitem">maximal<span class="indexknowl">, <a knowl="./knowl/p-2385.html">Paragraph</a></span></div><div class="subindexitem">one-sided<span class="indexknowl">, <a knowl="./knowl/remark-7.html">Remark</a></span></div><div class="subindexitem">prime<span class="indexknowl">, <a knowl="./knowl/p-2390.html">Paragraph</a></span></div><div class="subindexitem">principal<span 
class="indexknowl">, <a knowl="./knowl/p-2366.html">Paragraph</a></span></div><div class="subindexitem">trivial<span class="indexknowl">, <a knowl="./knowl/example-rings-trivial-ideal.html">Example</a></span></div><div class="subindexitem">two-sided<span class="indexknowl">, <a knowl="./knowl/remark-7.html">Remark</a></span></div><div class="indexitem">Indeterminate<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="indexitem">Index of a subgroup<span class="indexknowl">, <a knowl="./knowl/p-983.html">Paragraph</a></span></div><div class="indexitem">Induction</div><div class="subindexitem">first principle of<span class="indexknowl">, <a knowl="./knowl/principle-integers-first-pmi.html">Principle</a></span></div><div class="subindexitem">second principle of<span class="indexknowl">, <a knowl="./knowl/principle-integers-second-pmi.html">Principle</a></span></div><div class="indexitem">Infimum<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div><div class="indexitem">Inner product<span class="indexknowl">, <a knowl="./knowl/p-1240.html">Paragraph</a></span></div><div class="indexitem">Integral domain<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="indexitem">Internal direct product<span class="indexknowl">, <a knowl="./knowl/p-1467.html">Paragraph</a></span></div><div class="indexitem">International standard book number<span class="indexknowl">, <a knowl="./knowl/exercise-128.html">Exercise</a></span></div><div class="indexitem">Irreducible element<span class="indexknowl">, <a knowl="./knowl/p-2769.html">Paragraph</a></span></div><div class="indexitem">Irreducible polynomial<span class="indexknowl">, <a knowl="./knowl/p-2596.html">Paragraph</a></span></div><div class="indexitem">Isometry<span class="indexknowl">, <a knowl="./knowl/p-1850.html">Paragraph</a></span></div><div class="indexitem">Isomorphism</div><div class="subindexitem">of Boolean 
algebras<span class="indexknowl">, <a knowl="./knowl/p-2958.html">Paragraph</a></span></div><div class="subindexitem">of groups<span class="indexknowl">, <a knowl="./knowl/p-1411.html">Paragraph</a></span></div><div class="subindexitem">ring<span class="indexknowl">, <a knowl="./knowl/p-2352.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-j"><div class="indexitem">Join<span class="indexknowl">, <a knowl="./knowl/p-2913.html">Paragraph</a></span></div><div class="indexitem">Jordan, C.<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a></span></div><div class="indexitem">Jordan-Hölder Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-66.html">Theorem</a></span></div></div><div class="indexletter" id="indexletter-k"><div class="indexitem">Kernel</div><div class="subindexitem">of a group homomorphism<span class="indexknowl">, <a knowl="./knowl/p-1723.html">Paragraph</a></span></div><div class="subindexitem">of a ring homomorphism<span class="indexknowl">, <a knowl="./knowl/p-2353.html">Paragraph</a></span></div><div class="indexitem">Key</div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-1096.html">Paragraph</a></span></div><div class="subindexitem">private<span class="indexknowl">, <a knowl="./knowl/p-1098.html">Paragraph</a></span></div><div class="subindexitem">public<span class="indexknowl">, <a knowl="./knowl/p-1097.html">Paragraph</a></span></div><div class="subindexitem">single<span class="indexknowl">, <a knowl="./knowl/p-1098.html">Paragraph</a></span></div><div class="indexitem">Klein, Felix<span class="indexknowl">, <a knowl="./knowl/p-420.html">Paragraph</a> <a knowl="./knowl/p-1810.html">Paragraph</a> <a knowl="./knowl/p-2399.html">Paragraph</a></span></div><div class="indexitem">Kronecker delta<span class="indexknowl">, <a knowl="./knowl/lemma-parity-check.html">Lemma</a> <a knowl="./knowl/p-1834.html">Paragraph</a></span></div><div 
class="indexitem">Kronecker, Leopold<span class="indexknowl">, <a knowl="./knowl/p-3330.html">Paragraph</a></span></div><div class="indexitem">Kummer, Ernst<span class="indexknowl">, <a knowl="./knowl/p-3330.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-l"><div class="indexitem">Lagrange's Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-lagrange.html">Theorem</a></span></div><div class="indexitem">Lagrange, Joseph-Louis<span class="indexknowl">, <a knowl="./knowl/p-419.html">Paragraph</a> <a knowl="./knowl/p-822.html">Paragraph</a> <a knowl="./knowl/p-1014.html">Paragraph</a></span></div><div class="indexitem">Laplace, Pierre-Simon<span class="indexknowl">, <a knowl="./knowl/p-822.html">Paragraph</a></span></div><div class="indexitem">Lattice</div><div class="subindexitem">completed<span class="indexknowl">, <a knowl="./knowl/p-2934.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2913.html">Paragraph</a></span></div><div class="subindexitem">distributive<span class="indexknowl">, <a knowl="./knowl/p-2935.html">Paragraph</a></span></div><div class="indexitem">Lattice of points<span class="indexknowl">, <a knowl="./knowl/p-1863.html">Paragraph</a></span></div><div class="indexitem">Lattices, Principle of Duality for<span class="indexknowl">, <a knowl="./knowl/axiom-1.html">Axiom</a></span></div><div class="indexitem">Least upper bound<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div><div class="indexitem">Left regular representation<span class="indexknowl">, <a knowl="./knowl/p-1443.html">Paragraph</a></span></div><div class="indexitem">Lie, Sophus<span class="indexknowl">, <a knowl="./knowl/p-420.html">Paragraph</a> <a knowl="./knowl/p-2197.html">Paragraph</a></span></div><div class="indexitem">Linear combination<span class="indexknowl">, <a knowl="./knowl/p-3106.html">Paragraph</a></span></div><div 
class="indexitem">Linear dependence<span class="indexknowl">, <a knowl="./knowl/p-3109.html">Paragraph</a></span></div><div class="indexitem">Linear independence<span class="indexknowl">, <a knowl="./knowl/p-3109.html">Paragraph</a></span></div><div class="indexitem">Linear map<span class="indexknowl">, <a knowl="./knowl/p-1811.html">Paragraph</a></span></div><div class="indexitem">Linear transformation</div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/example-sets-linear-map.html">Example</a> <a knowl="./knowl/p-1811.html">Paragraph</a></span></div><div class="indexitem">Lower bound<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-m"><div class="indexitem">Mapping<span class="indexknowl">, <em class="see">see</em>Function</span></div><div class="indexitem">Matrix</div><div class="subindexitem">distance-preserving<span class="indexknowl">, <a knowl="./knowl/p-1835.html">Paragraph</a></span></div><div class="subindexitem">generator<span class="indexknowl">, <a knowl="./knowl/p-1249.html">Paragraph</a></span></div><div class="subindexitem">inner product-preserving<span class="indexknowl">, <a knowl="./knowl/p-1835.html">Paragraph</a></span></div><div class="subindexitem">invertible<span class="indexknowl">, <a knowl="./knowl/p-1814.html">Paragraph</a></span></div><div class="subindexitem">length-preserving<span class="indexknowl">, <a knowl="./knowl/p-1835.html">Paragraph</a></span></div><div class="subindexitem">nonsingular<span class="indexknowl">, <a knowl="./knowl/p-1814.html">Paragraph</a></span></div><div class="subindexitem">null space of<span class="indexknowl">, <a knowl="./knowl/p-1242.html">Paragraph</a></span></div><div class="subindexitem">orthogonal<span class="indexknowl">, <a knowl="./knowl/p-1824.html">Paragraph</a></span></div><div class="subindexitem">parity-check<span class="indexknowl">, <a 
knowl="./knowl/p-1249.html">Paragraph</a></span></div><div class="subindexitem">similar<span class="indexknowl">, <a knowl="./knowl/example-sets-equivalent-matrices.html">Example</a></span></div><div class="subindexitem">unimodular<span class="indexknowl">, <a knowl="./knowl/p-1864.html">Paragraph</a></span></div><div class="indexitem">Matrix, Vandermonde<span class="indexknowl">, <a knowl="./knowl/p-3480.html">Paragraph</a></span></div><div class="indexitem">Maximal ideal<span class="indexknowl">, <a knowl="./knowl/p-2385.html">Paragraph</a></span></div><div class="indexitem">Maximum-likelihood decoding<span class="indexknowl">, <a knowl="./knowl/p-1208.html">Paragraph</a></span></div><div class="indexitem">Meet<span class="indexknowl">, <a knowl="./knowl/p-2913.html">Paragraph</a></span></div><div class="indexitem">Minimal generator polynomial<span class="indexknowl">, <a knowl="./knowl/p-3475.html">Paragraph</a></span></div><div class="indexitem">Minimal polynomial<span class="indexknowl">, <a knowl="./knowl/p-3238.html">Paragraph</a></span></div><div class="indexitem">Minkowski, Hermann<span class="indexknowl">, <a knowl="./knowl/p-3331.html">Paragraph</a></span></div><div class="indexitem">Monic polynomial<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="indexitem">Mordell-Weil conjecture<span class="indexknowl">, <a knowl="./knowl/p-3332.html">Paragraph</a></span></div><div class="indexitem">Multiplicity of a root<span class="indexknowl">, <a knowl="./knowl/p-3603.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-n"><div class="indexitem">Noether, A. 
Emmy<span class="indexknowl">, <a knowl="./knowl/p-2398.html">Paragraph</a></span></div><div class="indexitem">Noether, Max<span class="indexknowl">, <a knowl="./knowl/p-2398.html">Paragraph</a></span></div><div class="indexitem">Normal extension<span class="indexknowl">, <a knowl="./knowl/p-3621.html">Paragraph</a></span></div><div class="indexitem">Normal series of a group<span class="indexknowl">, <a knowl="./knowl/p-1957.html">Paragraph</a></span></div><div class="indexitem">Normal subgroup<span class="indexknowl">, <a knowl="./knowl/p-1599.html">Paragraph</a></span></div><div class="indexitem">Normalizer<span class="indexknowl">, <a knowl="./knowl/p-2186.html">Paragraph</a></span></div><div class="indexitem">Null space</div><div class="subindexitem">of a matrix<span class="indexknowl">, <a knowl="./knowl/p-1242.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-o"><div class="indexitem">Odd Order Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-76.html">Theorem</a></span></div><div class="indexitem">Orbit<span class="indexknowl">, <a knowl="./knowl/p-2041.html">Paragraph</a></span></div><div class="indexitem">Orthogonal group<span class="indexknowl">, <a knowl="./knowl/p-1824.html">Paragraph</a></span></div><div class="indexitem">Orthogonal matrix<span class="indexknowl">, <a knowl="./knowl/p-1824.html">Paragraph</a></span></div><div class="indexitem">Orthonormal set<span class="indexknowl">, <a knowl="./knowl/p-1834.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-p"><div class="indexitem">Partial order<span class="indexknowl">, <a knowl="./knowl/p-2899.html">Paragraph</a></span></div><div class="indexitem">Partially ordered set<span class="indexknowl">, <a knowl="./knowl/p-2899.html">Paragraph</a></span></div><div class="indexitem">Partitions<span class="indexknowl">, <a knowl="./knowl/p-106.html">Paragraph</a></span></div><div class="indexitem">Permutation</div><div 
class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/example-sets-permutation.html">Example</a> <a knowl="./knowl/p-782.html">Paragraph</a></span></div><div class="subindexitem">even<span class="indexknowl">, <a knowl="./knowl/p-815.html">Paragraph</a></span></div><div class="subindexitem">odd<span class="indexknowl">, <a knowl="./knowl/p-815.html">Paragraph</a></span></div><div class="indexitem">Permutation group<span class="indexknowl">, <a knowl="./knowl/p-787.html">Paragraph</a></span></div><div class="indexitem">Plaintext<span class="indexknowl">, <a knowl="./knowl/p-1095.html">Paragraph</a></span></div><div class="indexitem">Polynomial</div><div class="subindexitem">code<span class="indexknowl">, <a knowl="./knowl/p-3467.html">Paragraph</a></span></div><div class="subindexitem">content of<span class="indexknowl">, <a knowl="./knowl/p-2813.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="subindexitem">degree of<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="subindexitem">error<span class="indexknowl">, <a knowl="./knowl/p-3549.html">Paragraph</a></span></div><div class="subindexitem">error-locator<span class="indexknowl">, <a knowl="./knowl/exercise-760.html">Exercise</a></span></div><div class="subindexitem">greatest common divisor of<span class="indexknowl">, <a knowl="./knowl/p-2590.html">Paragraph</a></span></div><div class="subindexitem">irreducible<span class="indexknowl">, <a knowl="./knowl/p-2596.html">Paragraph</a></span></div><div class="subindexitem">leading coefficient of<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="subindexitem">minimal<span class="indexknowl">, <a knowl="./knowl/p-3238.html">Paragraph</a></span></div><div class="subindexitem">minimal generator<span class="indexknowl">, <a 
knowl="./knowl/p-3475.html">Paragraph</a></span></div><div class="subindexitem">monic<span class="indexknowl">, <a knowl="./knowl/p-2564.html">Paragraph</a></span></div><div class="subindexitem">primitive<span class="indexknowl">, <a knowl="./knowl/p-2813.html">Paragraph</a></span></div><div class="subindexitem">root of<span class="indexknowl">, <a knowl="./knowl/p-2583.html">Paragraph</a></span></div><div class="subindexitem">separable<span class="indexknowl">, <a knowl="./knowl/p-3603.html">Paragraph</a></span></div><div class="subindexitem">zero of<span class="indexknowl">, <a knowl="./knowl/p-2583.html">Paragraph</a></span></div><div class="indexitem">Polynomial separable<span class="indexknowl">, <a knowl="./knowl/p-3442.html">Paragraph</a></span></div><div class="indexitem">Polynomial!in $n$ indeterminates<span class="indexknowl">, <a knowl="./knowl/p-2573.html">Paragraph</a></span></div><div class="indexitem">Poset</div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2899.html">Paragraph</a></span></div><div class="subindexitem">largest element in<span class="indexknowl">, <a knowl="./knowl/p-2933.html">Paragraph</a></span></div><div class="subindexitem">smallest element in<span class="indexknowl">, <a knowl="./knowl/p-2933.html">Paragraph</a></span></div><div class="indexitem">Power set<span class="indexknowl">, <a knowl="./knowl/example-boolean-power-set-subset.html">Example</a></span></div><div class="indexitem">Prime element<span class="indexknowl">, <a knowl="./knowl/p-2769.html">Paragraph</a></span></div><div class="indexitem">Prime ideal<span class="indexknowl">, <a knowl="./knowl/p-2390.html">Paragraph</a></span></div><div class="indexitem">Prime integer<span class="indexknowl">, <a knowl="./knowl/p-263.html">Paragraph</a></span></div><div class="indexitem">Primitive $n$th root of unity<span class="indexknowl">, <a knowl="./knowl/p-607.html">Paragraph</a> <a 
knowl="./knowl/p-3654.html">Paragraph</a></span></div><div class="indexitem">Primitive element<span class="indexknowl">, <a knowl="./knowl/p-3606.html">Paragraph</a></span></div><div class="indexitem">Primitive Element Theorem<span class="indexknowl">, <a knowl="./knowl/theorem-primitive-element.html">Theorem</a></span></div><div class="indexitem">Primitive polynomial<span class="indexknowl">, <a knowl="./knowl/p-2813.html">Paragraph</a></span></div><div class="indexitem">Principal ideal<span class="indexknowl">, <a knowl="./knowl/p-2366.html">Paragraph</a></span></div><div class="indexitem">Principal ideal domain (<abbr class="acronym">PID</abbr>)<span class="indexknowl">, <a knowl="./knowl/p-2777.html">Paragraph</a></span></div><div class="indexitem">Principal series<span class="indexknowl">, <a knowl="./knowl/p-1964.html">Paragraph</a></span></div><div class="indexitem">Pseudoprime<span class="indexknowl">, <a knowl="./knowl/exercise-271.html">Exercise</a></span></div></div><div class="indexletter" id="indexletter-q"><div class="indexitem">Quaternions<span class="indexknowl">, <a knowl="./knowl/example-groups-quaterions.html">Example</a> <a knowl="./knowl/example-rings-noncommutative.html">Example</a></span></div></div><div class="indexletter" id="indexletter-r"><div class="indexitem">Resolvent cubic equation<span class="indexknowl">, <a knowl="./knowl/exercise-619.html">Exercise</a></span></div><div class="indexitem">Rigid motion<span class="indexknowl">, <a knowl="./knowl/p-372.html">Paragraph</a> <a knowl="./knowl/p-1850.html">Paragraph</a></span></div><div class="indexitem">Ring</div><div class="subindexitem">characteristic of<span class="indexknowl">, <a knowl="./knowl/p-2346.html">Paragraph</a></span></div><div class="subindexitem">commutative<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2306.html">Paragraph</a></span></div><div 
class="subindexitem">division<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="subindexitem">factor<span class="indexknowl">, <a knowl="./knowl/p-2375.html">Paragraph</a></span></div><div class="subindexitem">homomorphism<span class="indexknowl">, <a knowl="./knowl/p-2352.html">Paragraph</a></span></div><div class="subindexitem">isomorphism<span class="indexknowl">, <a knowl="./knowl/p-2352.html">Paragraph</a></span></div><div class="subindexitem">Noetherian<span class="indexknowl">, <a knowl="./knowl/p-2793.html">Paragraph</a></span></div><div class="subindexitem">quotient<span class="indexknowl">, <a knowl="./knowl/p-2375.html">Paragraph</a></span></div><div class="subindexitem">with identity<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="subindexitem">with unity<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a></span></div><div class="indexitem">Rivest, R.<span class="indexknowl">, <a knowl="./knowl/p-1109.html">Paragraph</a></span></div><div class="indexitem"><abbr class="acronym">RSA</abbr> cryptosystem<span class="indexknowl">, <a knowl="./knowl/p-1110.html">Paragraph</a></span></div><div class="indexitem">Ruffini, P.<span class="indexknowl">, <a knowl="./knowl/p-3648.html">Paragraph</a></span></div><div class="indexitem">Russell, Bertrand<span class="indexknowl">, <a knowl="./knowl/p-2993.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-s"><div class="indexitem">Scalar product<span class="indexknowl">, <a knowl="./knowl/p-3083.html">Paragraph</a></span></div><div class="indexitem">Second Isomorphism Theorem</div><div class="subindexitem">for groups<span class="indexknowl">, <a knowl="./knowl/theorem-second-isomorphism.html">Theorem</a></span></div><div class="subindexitem">for rings<span class="indexknowl">, <a knowl="./knowl/theorem-83.html">Theorem</a></span></div><div class="indexitem">Shamir, A.<span 
class="indexknowl">, <a knowl="./knowl/p-1109.html">Paragraph</a></span></div><div class="indexitem">Shannon, C..<span class="indexknowl">, <a knowl="./knowl/p-1231.html">Paragraph</a></span></div><div class="indexitem">Sieve of Eratosthenes<span class="indexknowl">, <a knowl="./knowl/exercise-62.html">Exercise</a></span></div><div class="indexitem">Simple extension<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="indexitem">Simple group<span class="indexknowl">, <a knowl="./knowl/p-1621.html">Paragraph</a></span></div><div class="indexitem">Simple root<span class="indexknowl">, <a knowl="./knowl/p-3603.html">Paragraph</a></span></div><div class="indexitem">Solvability by radicals<span class="indexknowl">, <a knowl="./knowl/p-3653.html">Paragraph</a></span></div><div class="indexitem">Spanning set<span class="indexknowl">, <a knowl="./knowl/p-3106.html">Paragraph</a></span></div><div class="indexitem">Splitting field<span class="indexknowl">, <a knowl="./knowl/p-3283.html">Paragraph</a></span></div><div class="indexitem">Squaring the circle is impossible<span class="indexknowl">, <a knowl="./knowl/paragraphs-21.html">Paragraphs</a></span></div><div class="indexitem">Standard decoding<span class="indexknowl">, <a knowl="./knowl/p-1284.html">Paragraph</a></span></div><div class="indexitem">Subgroup</div><div class="subindexitem">centralizer<span class="indexknowl">, <a knowl="./knowl/p-2052.html">Paragraph</a></span></div><div class="subindexitem">commutator<span class="indexknowl">, <a knowl="./knowl/p-2207.html">Paragraph</a></span></div><div class="subindexitem">cyclic<span class="indexknowl">, <a knowl="./knowl/p-569.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-421.html">Paragraph</a></span></div><div class="subindexitem">index of<span class="indexknowl">, <a knowl="./knowl/p-983.html">Paragraph</a></span></div><div 
class="subindexitem">isotropy<span class="indexknowl">, <a knowl="./knowl/p-2043.html">Paragraph</a></span></div><div class="subindexitem">normal<span class="indexknowl">, <a knowl="./knowl/p-1599.html">Paragraph</a></span></div><div class="subindexitem">normalizer of<span class="indexknowl">, <a knowl="./knowl/p-2186.html">Paragraph</a></span></div><div class="subindexitem">proper<span class="indexknowl">, <a knowl="./knowl/p-421.html">Paragraph</a></span></div><div class="subindexitem">stabilizer<span class="indexknowl">, <a knowl="./knowl/p-2043.html">Paragraph</a></span></div><div class="subindexitem">Sylow$p$-subgroup<span class="indexknowl">, <a knowl="./knowl/p-2185.html">Paragraph</a></span></div><div class="subindexitem">translation<span class="indexknowl">, <a knowl="./knowl/p-1866.html">Paragraph</a></span></div><div class="subindexitem">trivial<span class="indexknowl">, <a knowl="./knowl/p-421.html">Paragraph</a></span></div><div class="indexitem">Subgroup!$p$-subgroup<span class="indexknowl">, <a knowl="./knowl/p-2174.html">Paragraph</a></span></div><div class="indexitem">Subnormal series of a group<span class="indexknowl">, <a knowl="./knowl/p-1957.html">Paragraph</a></span></div><div class="indexitem">Subring<span class="indexknowl">, <a knowl="./knowl/p-2327.html">Paragraph</a></span></div><div class="indexitem">Supremum<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div><div class="indexitem">Switch</div><div class="subindexitem">closed<span class="indexknowl">, <a knowl="./knowl/p-2983.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-2983.html">Paragraph</a></span></div><div class="subindexitem">open<span class="indexknowl">, <a knowl="./knowl/p-2983.html">Paragraph</a></span></div><div class="indexitem">Switching function<span class="indexknowl">, <a knowl="./knowl/p-2084.html">Paragraph</a> <a 
knowl="./knowl/exercise-668.html">Exercise</a></span></div><div class="indexitem">Sylow $p$-subgroup<span class="indexknowl">, <a knowl="./knowl/p-2185.html">Paragraph</a></span></div><div class="indexitem">Sylow, Ludvig<span class="indexknowl">, <a knowl="./knowl/p-2197.html">Paragraph</a></span></div><div class="indexitem">Syndrome of a code<span class="indexknowl">, <a knowl="./knowl/p-1278.html">Paragraph</a> <a knowl="./knowl/p-3549.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-t"><div class="indexitem">Tartaglia<span class="indexknowl">, <a knowl="./knowl/p-2626.html">Paragraph</a></span></div><div class="indexitem">Third Isomorphism Theorem</div><div class="subindexitem">for groups<span class="indexknowl">, <a knowl="./knowl/theorem-third-isomorphism.html">Theorem</a></span></div><div class="subindexitem">for rings<span class="indexknowl">, <a knowl="./knowl/theorem-84.html">Theorem</a></span></div><div class="indexitem">Thompson, J.<span class="indexknowl">, <a knowl="./knowl/p-1639.html">Paragraph</a> <a knowl="./knowl/p-2091.html">Paragraph</a></span></div><div class="indexitem">Transcendental element<span class="indexknowl">, <a knowl="./knowl/p-3227.html">Paragraph</a></span></div><div class="indexitem">Transcendental number<span class="indexknowl">, <a knowl="./knowl/p-3229.html">Paragraph</a></span></div><div class="indexitem">Transposition<span class="indexknowl">, <a knowl="./knowl/p-805.html">Paragraph</a></span></div><div class="indexitem">Trisection of an angle<span class="indexknowl">, <a href="section-constructions.ipynb#fields-subsection-trisect-angle">Subsection</a></span></div></div><div class="indexletter" id="indexletter-u"><div class="indexitem">Unique factorization domain (<abbr class="acronym">UFD</abbr>)<span class="indexknowl">, <a knowl="./knowl/p-2771.html">Paragraph</a></span></div><div class="indexitem">Unit<span class="indexknowl">, <a knowl="./knowl/p-2313.html">Paragraph</a> <a 
knowl="./knowl/p-2768.html">Paragraph</a></span></div><div class="indexitem">Universal Product Code<span class="indexknowl">, <a knowl="./knowl/exercise-125.html">Exercise</a></span></div><div class="indexitem">Upper bound<span class="indexknowl">, <a knowl="./knowl/p-2908.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-v"><div class="indexitem">Vandermonde determinant<span class="indexknowl">, <a knowl="./knowl/p-3480.html">Paragraph</a></span></div><div class="indexitem">Vandermonde matrix<span class="indexknowl">, <a knowl="./knowl/p-3480.html">Paragraph</a></span></div><div class="indexitem">Vector space</div><div class="subindexitem">basis of<span class="indexknowl">, <a knowl="./knowl/p-3118.html">Paragraph</a></span></div><div class="subindexitem">definition of<span class="indexknowl">, <a knowl="./knowl/p-3083.html">Paragraph</a></span></div><div class="subindexitem">dimension of<span class="indexknowl">, <a knowl="./knowl/p-3124.html">Paragraph</a></span></div><div class="subindexitem">subspace of<span class="indexknowl">, <a knowl="./knowl/p-3103.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-w"><div class="indexitem">Weight of a codeword<span class="indexknowl">, <a knowl="./knowl/p-1215.html">Paragraph</a></span></div><div class="indexitem">Weil, André<span class="indexknowl">, <a knowl="./knowl/p-3332.html">Paragraph</a></span></div><div class="indexitem">Well-defined map<span class="indexknowl">, <a knowl="./knowl/p-73.html">Paragraph</a></span></div><div class="indexitem">Well-ordered set<span class="indexknowl">, <a knowl="./knowl/p-239.html">Paragraph</a></span></div><div class="indexitem">Whitehead, Alfred North<span class="indexknowl">, <a knowl="./knowl/p-2993.html">Paragraph</a></span></div></div><div class="indexletter" id="indexletter-z"><div class="indexitem">Zero</div><div class="subindexitem">multiplicity of<span class="indexknowl">, <a 
knowl="./knowl/p-3603.html">Paragraph</a></span></div><div class="subindexitem">of a polynomial<span class="indexknowl">, <a knowl="./knowl/p-2583.html">Paragraph</a></span></div><div class="indexitem">Zero divisor<span class="indexknowl">, <a knowl="./knowl/p-2317.html">Paragraph</a></span></div></div></div>
| github_jupyter |
```
# IPython magic: render matplotlib figures inline in the notebook output.
%matplotlib inline
# Emit figures as both standard and high-DPI ('retina') PNGs.
%config InlineBackend.figure_formats = {'png', 'retina'}
# Load the store/station key and drop station 5 (excluded from the analysis).
data_key = pd.read_csv('key.csv')
data_key = data_key[data_key['station_nbr'] != 5]

# Load the weather observations, likewise excluding station 5.
data_weather = pd.read_csv('weather.csv')
data_weather = data_weather[data_weather['station_nbr'] != 5]

# Load the training data.
data_train = pd.read_csv('train.csv')

# Merge weather with the key, then move 'station_nbr' to the last column
# position before merging in the training rows (column-order cosmetics only).
df = pd.merge(data_weather, data_key)
station_nbr = df['station_nbr']
df.drop('station_nbr', axis=1, inplace=True)
df['station_nbr'] = station_nbr
df = pd.merge(df, data_train)
# Merge complete for all stations except station 5.

# Before the 'M' and '-' markers are converted to np.nan, map the trace
# marker ' T' to small numeric values: snowfall=0.05, preciptotal=0.005.
# Use .loc for the masked write: the original chained form
# df['col'][mask] = value raises SettingWithCopyWarning and can silently
# fail to modify df (it writes to a temporary copy under copy-on-write).
# The duplicated replacement lines and the no-op inspection expression
# from the original were removed; they had no effect on df.
df.loc[df['snowfall'] == ' T', 'snowfall'] = 0.05
df.loc[df['preciptotal'] == ' T', 'preciptotal'] = 0.005
# Slice the merged frame into one sub-frame per weather station.
# (Station 5 rows were filtered out upstream, so df_s_5 is empty;
# the variable is still created so downstream numbering stays uniform.)
station = df['station_nbr']
df_s_1 = df[station == 1]
df_s_2 = df[station == 2]
df_s_3 = df[station == 3]
df_s_4 = df[station == 4]
df_s_5 = df[station == 5]
df_s_6 = df[station == 6]
df_s_7 = df[station == 7]
df_s_8 = df[station == 8]
df_s_9 = df[station == 9]
df_s_10 = df[station == 10]
df_s_11 = df[station == 11]
df_s_12 = df[station == 12]
df_s_13 = df[station == 13]
df_s_14 = df[station == 14]
df_s_15 = df[station == 15]
df_s_16 = df[station == 16]
df_s_17 = df[station == 17]
df_s_18 = df[station == 18]
df_s_19 = df[station == 19]
df_s_20 = df[station == 20]
# Convert each station's 'resultspeed' column to numeric, coercing the 'M'
# (missing) markers to np.nan.  pd.to_numeric already returns a brand-new
# Series, so the original's intermediate .copy() allocated twice for no
# benefit and has been dropped — the result is byte-for-byte the same.
df_s_1_resultspeed = pd.to_numeric(df_s_1['resultspeed'], errors='coerce')
df_s_2_resultspeed = pd.to_numeric(df_s_2['resultspeed'], errors='coerce')
df_s_3_resultspeed = pd.to_numeric(df_s_3['resultspeed'], errors='coerce')
df_s_4_resultspeed = pd.to_numeric(df_s_4['resultspeed'], errors='coerce')
df_s_5_resultspeed = pd.to_numeric(df_s_5['resultspeed'], errors='coerce')
df_s_6_resultspeed = pd.to_numeric(df_s_6['resultspeed'], errors='coerce')
df_s_7_resultspeed = pd.to_numeric(df_s_7['resultspeed'], errors='coerce')
df_s_8_resultspeed = pd.to_numeric(df_s_8['resultspeed'], errors='coerce')
df_s_9_resultspeed = pd.to_numeric(df_s_9['resultspeed'], errors='coerce')
df_s_10_resultspeed = pd.to_numeric(df_s_10['resultspeed'], errors='coerce')
df_s_11_resultspeed = pd.to_numeric(df_s_11['resultspeed'], errors='coerce')
df_s_12_resultspeed = pd.to_numeric(df_s_12['resultspeed'], errors='coerce')
df_s_13_resultspeed = pd.to_numeric(df_s_13['resultspeed'], errors='coerce')
df_s_14_resultspeed = pd.to_numeric(df_s_14['resultspeed'], errors='coerce')
df_s_15_resultspeed = pd.to_numeric(df_s_15['resultspeed'], errors='coerce')
df_s_16_resultspeed = pd.to_numeric(df_s_16['resultspeed'], errors='coerce')
df_s_17_resultspeed = pd.to_numeric(df_s_17['resultspeed'], errors='coerce')
df_s_18_resultspeed = pd.to_numeric(df_s_18['resultspeed'], errors='coerce')
df_s_19_resultspeed = pd.to_numeric(df_s_19['resultspeed'], errors='coerce')
df_s_20_resultspeed = pd.to_numeric(df_s_20['resultspeed'], errors='coerce')
# Fill each station's NaN resultspeed values with that station's own mean,
# so the imputed series can later be compared against simply dropping NaNs.
# Series.fillna replaces the original copy-then-boolean-mask idiom: it
# returns a new Series and leaves the source untouched, so behavior is
# identical but the intent is explicit.
df_s_1_resultspeed_with_mean = df_s_1_resultspeed.fillna(df_s_1_resultspeed.mean())
df_s_2_resultspeed_with_mean = df_s_2_resultspeed.fillna(df_s_2_resultspeed.mean())
df_s_3_resultspeed_with_mean = df_s_3_resultspeed.fillna(df_s_3_resultspeed.mean())
df_s_4_resultspeed_with_mean = df_s_4_resultspeed.fillna(df_s_4_resultspeed.mean())
df_s_5_resultspeed_with_mean = df_s_5_resultspeed.fillna(df_s_5_resultspeed.mean())
df_s_6_resultspeed_with_mean = df_s_6_resultspeed.fillna(df_s_6_resultspeed.mean())
df_s_7_resultspeed_with_mean = df_s_7_resultspeed.fillna(df_s_7_resultspeed.mean())
df_s_8_resultspeed_with_mean = df_s_8_resultspeed.fillna(df_s_8_resultspeed.mean())
df_s_9_resultspeed_with_mean = df_s_9_resultspeed.fillna(df_s_9_resultspeed.mean())
df_s_10_resultspeed_with_mean = df_s_10_resultspeed.fillna(df_s_10_resultspeed.mean())
df_s_11_resultspeed_with_mean = df_s_11_resultspeed.fillna(df_s_11_resultspeed.mean())
df_s_12_resultspeed_with_mean = df_s_12_resultspeed.fillna(df_s_12_resultspeed.mean())
df_s_13_resultspeed_with_mean = df_s_13_resultspeed.fillna(df_s_13_resultspeed.mean())
df_s_14_resultspeed_with_mean = df_s_14_resultspeed.fillna(df_s_14_resultspeed.mean())
df_s_15_resultspeed_with_mean = df_s_15_resultspeed.fillna(df_s_15_resultspeed.mean())
df_s_16_resultspeed_with_mean = df_s_16_resultspeed.fillna(df_s_16_resultspeed.mean())
df_s_17_resultspeed_with_mean = df_s_17_resultspeed.fillna(df_s_17_resultspeed.mean())
df_s_18_resultspeed_with_mean = df_s_18_resultspeed.fillna(df_s_18_resultspeed.mean())
df_s_19_resultspeed_with_mean = df_s_19_resultspeed.fillna(df_s_19_resultspeed.mean())
df_s_20_resultspeed_with_mean = df_s_20_resultspeed.fillna(df_s_20_resultspeed.mean())
# For each station, report the mean/std of resultspeed when missing values
# are dropped (pandas skips NaN in mean()/std() by default) versus when they
# are imputed with the station mean. The 40 repeated print statements are
# collapsed into two loops; print() inserts a single space between its
# arguments, so the console output is byte-identical to the original.
station_speeds = [
    df_s_1_resultspeed, df_s_2_resultspeed, df_s_3_resultspeed, df_s_4_resultspeed,
    df_s_5_resultspeed, df_s_6_resultspeed, df_s_7_resultspeed, df_s_8_resultspeed,
    df_s_9_resultspeed, df_s_10_resultspeed, df_s_11_resultspeed, df_s_12_resultspeed,
    df_s_13_resultspeed, df_s_14_resultspeed, df_s_15_resultspeed, df_s_16_resultspeed,
    df_s_17_resultspeed, df_s_18_resultspeed, df_s_19_resultspeed, df_s_20_resultspeed,
]
station_speeds_imputed = [
    df_s_1_resultspeed_with_mean, df_s_2_resultspeed_with_mean, df_s_3_resultspeed_with_mean, df_s_4_resultspeed_with_mean,
    df_s_5_resultspeed_with_mean, df_s_6_resultspeed_with_mean, df_s_7_resultspeed_with_mean, df_s_8_resultspeed_with_mean,
    df_s_9_resultspeed_with_mean, df_s_10_resultspeed_with_mean, df_s_11_resultspeed_with_mean, df_s_12_resultspeed_with_mean,
    df_s_13_resultspeed_with_mean, df_s_14_resultspeed_with_mean, df_s_15_resultspeed_with_mean, df_s_16_resultspeed_with_mean,
    df_s_17_resultspeed_with_mean, df_s_18_resultspeed_with_mean, df_s_19_resultspeed_with_mean, df_s_20_resultspeed_with_mean,
]
for i, s in enumerate(station_speeds, start=1):
    print(f'#station{i}_without_nan_mean:', round(s.mean(), 4), f'#station{i}_without_nan_std:', round(s.std(), 4))
for i, s in enumerate(station_speeds_imputed, start=1):
    print(f'stat{i}_nan_as_mean:', round(s.mean(), 4), f'#stat{i}_nan_as_std:', round(s.std(), 4))
# Per-station mean resultspeed: y1 drops NaNs (pandas' default mean), y2 uses
# the mean-imputed series. Built with comprehensions over explicit lists
# instead of 40 repeated .mean() calls; the order (station 1..20) is unchanged.
station_speeds = [
    df_s_1_resultspeed, df_s_2_resultspeed, df_s_3_resultspeed, df_s_4_resultspeed,
    df_s_5_resultspeed, df_s_6_resultspeed, df_s_7_resultspeed, df_s_8_resultspeed,
    df_s_9_resultspeed, df_s_10_resultspeed, df_s_11_resultspeed, df_s_12_resultspeed,
    df_s_13_resultspeed, df_s_14_resultspeed, df_s_15_resultspeed, df_s_16_resultspeed,
    df_s_17_resultspeed, df_s_18_resultspeed, df_s_19_resultspeed, df_s_20_resultspeed,
]
station_speeds_imputed = [
    df_s_1_resultspeed_with_mean, df_s_2_resultspeed_with_mean, df_s_3_resultspeed_with_mean, df_s_4_resultspeed_with_mean,
    df_s_5_resultspeed_with_mean, df_s_6_resultspeed_with_mean, df_s_7_resultspeed_with_mean, df_s_8_resultspeed_with_mean,
    df_s_9_resultspeed_with_mean, df_s_10_resultspeed_with_mean, df_s_11_resultspeed_with_mean, df_s_12_resultspeed_with_mean,
    df_s_13_resultspeed_with_mean, df_s_14_resultspeed_with_mean, df_s_15_resultspeed_with_mean, df_s_16_resultspeed_with_mean,
    df_s_17_resultspeed_with_mean, df_s_18_resultspeed_with_mean, df_s_19_resultspeed_with_mean, df_s_20_resultspeed_with_mean,
]
y1 = np.array([s.mean() for s in station_speeds])
y2 = np.array([s.mean() for s in station_speeds_imputed])
# Bar chart comparing per-station mean resultspeed: NaNs dropped (y1, yellow)
# vs. NaNs imputed with the station mean (y2, red). The two bar series are
# drawn on top of each other, so only visible differences show through.
# The argument-less plt.plot() call in the original was a no-op and is removed.
plt.figure(figsize=(10, 5))
x = range(1, 21)
plt.xlabel('Station Number')
plt.ylabel('resultspeed')
plt.bar(x, y1, color='y')
plt.bar(x, y2, color='r')
plt.show()
#### For resultspeed:
# What this graph shows is that y1 and y2 are almost identical. Both an
# 'r'-coloured and a 'y'-coloured series were drawn, and seeing only one of
# them means the difference is too small to distinguish by eye — i.e. the
# difference is negligible.
# Even after converting the missing values to np.nan and substituting the
# station mean back into those np.nan positions, the statistics barely change.
# We therefore judge that mean-imputing the missing values is acceptable for
# the subsequent regression analysis.
# Line plot of the same comparison, one marker per station.
plt.figure(figsize=(12,6))
xticks = range(1,21)
plt.plot(xticks, y1, 'ro-', label='stations_without_nan_mean')
plt.plot(xticks, y2, 'bd:', label='stations_nan_as_mean')
plt.legend(loc=0)
plt.show()
# Per-station resultspeed standard deviation: y3 drops NaNs, y4 uses the
# mean-imputed series. Comprehensions replace 40 repeated .std() calls;
# station order 1..20 is unchanged.
station_speeds = [
    df_s_1_resultspeed, df_s_2_resultspeed, df_s_3_resultspeed, df_s_4_resultspeed,
    df_s_5_resultspeed, df_s_6_resultspeed, df_s_7_resultspeed, df_s_8_resultspeed,
    df_s_9_resultspeed, df_s_10_resultspeed, df_s_11_resultspeed, df_s_12_resultspeed,
    df_s_13_resultspeed, df_s_14_resultspeed, df_s_15_resultspeed, df_s_16_resultspeed,
    df_s_17_resultspeed, df_s_18_resultspeed, df_s_19_resultspeed, df_s_20_resultspeed,
]
station_speeds_imputed = [
    df_s_1_resultspeed_with_mean, df_s_2_resultspeed_with_mean, df_s_3_resultspeed_with_mean, df_s_4_resultspeed_with_mean,
    df_s_5_resultspeed_with_mean, df_s_6_resultspeed_with_mean, df_s_7_resultspeed_with_mean, df_s_8_resultspeed_with_mean,
    df_s_9_resultspeed_with_mean, df_s_10_resultspeed_with_mean, df_s_11_resultspeed_with_mean, df_s_12_resultspeed_with_mean,
    df_s_13_resultspeed_with_mean, df_s_14_resultspeed_with_mean, df_s_15_resultspeed_with_mean, df_s_16_resultspeed_with_mean,
    df_s_17_resultspeed_with_mean, df_s_18_resultspeed_with_mean, df_s_19_resultspeed_with_mean, df_s_20_resultspeed_with_mean,
]
y3 = np.array([s.std() for s in station_speeds])
y4 = np.array([s.std() for s in station_speeds_imputed])
# Bar chart comparing per-station resultspeed standard deviation: NaNs
# dropped (y3, red) vs. mean-imputed (y4, green); the bars overlap.
# The argument-less plt.plot() call in the original was a no-op and is removed.
# (The y-label keeps the original text even though the quantity shown here
# is the standard deviation, to leave the rendered figure unchanged.)
plt.figure(figsize=(10, 5))
x = range(1, 21)
plt.xlabel('Station Number')
plt.ylabel('resultspeed')
plt.bar(x, y3, color='r')
plt.bar(x, y4, color='g')
plt.show()
# Line plot of the std comparison: NaNs dropped vs. mean-imputed.
# NOTE(review): mean imputation adds zero-deviation points and enlarges the
# denominator, so y4 is expected to sit slightly below y3.
plt.figure(figsize=(12,6))
xticks = range(1,21)
plt.plot(xticks, y3, 'ro-', label='stations_without_nan_std')
plt.plot(xticks, y4, 'bd:', label='stations_nan_as_std')
plt.legend(loc=0)
plt.show()
```
| github_jupyter |
### CTCF Perturbation using SimpleNet
In this tutorial, we will learn how to use Ledidi to design genomic edits that affect CTCF binding in K562. For computational efficiency, we will use a very small neural network, named SimpleNet, as the pre-trained oracle model that Ledidi relies on. SimpleNet can be run on a CPU in a reasonable amount of time, making it ideal for training purposes. However, this same simplicity means that SimpleNet does not exhibit state-of-the-art performance, and so the edits designed by Ledidi may not be as compact or targeted as those from more complex models.
```
# %pylab imports numpy and matplotlib.pyplot into the notebook namespace
# (this is where the bare `plt`, `numpy`, etc. used below come from).
# NOTE(review): %pylab is deprecated in modern IPython; %matplotlib inline
# plus explicit imports is the recommended replacement.
%pylab inline
import pyBigWig
import seaborn; seaborn.set_style('whitegrid')
```
### Visually validating SimpleNet-CTCF
Before we get to engineering sequences using Ledidi, lets get familiar with SimpleNet and make sure that the predictions that it is making are reasonable. We can start by loading the model up using the keras module in TensorFlow.
```
# Load the pre-trained SimpleNet-CTCF Keras model from its saved directory
# and render a diagram of its architecture.
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import plot_model
model = load_model("SimpleNet-CTCF")
plot_model(model)
```
To validate the predictions of SimpleNet, we need to load up some nucleotide sequence to use as input to the model, as well as the experimental signal to compare the predictions against. The nucleotide sequence comes as a one-hot encoding, where each row is a position in the sequence, each column is a nucleotide, and a value of 1 means that position contains that nucleotide. The experimental signal is the fold-change, which is the observed read counts divided by the read counts from a control experiment. As a technical note, pyBigWig will return a signal array that has NaNs where no reads map to a position. We replace these NaNs with 0s.
Note: you will need to download the ENCFF050CCI.bigwig file from the ENCODE portal if you do not already have it.
```
# One-hot encode chr1 of hg38: rows are positions, columns are nucleotides.
from ledidi import fasta_to_ohe
ohe_chrom = fasta_to_ohe("hg38.fa", include_chroms=['chr1'])['chr1']
# CTCF fold-change signal across chr1. pyBigWig returns NaN where no reads
# map, so replace those with 0 before any arithmetic.
signal = pyBigWig.open("ENCFF050CCI.bigWig").values("chr1", 0, -1, numpy=True)
signal = numpy.nan_to_num(signal)
```
Now, let's visualize some of the predictions of SimpleNet against the actual experimental signal at regions with high experimental signal.
```
# Positions sorted by signal (ascending); slicing from the end below picks
# high-signal locations.
mids = signal.argsort()
x = numpy.arange(0, 1000, 25)  # 40 bins of 25 bp across each 1 kb window
plt.figure(figsize=(12, 3))
# Every 100th position counting down from the strongest signal, top 5 shown.
for i, mid in enumerate(mids[::-100][:5]):
    start, end = mid - 500, mid + 500  # 1 kb window centred on the position
    plt.subplot(2, 5, i+1)
    # Average the per-bp signal into 25 bp bins to match the model's 40-bin output.
    plt.fill_between(x, 0, signal[start:end].reshape(40, 25).mean(axis=1), color='c')
    plt.xlim(0, 1000)
    plt.ylabel("Experimental Signal" if i == 0 else None, fontsize=10)
    # ndmin=3 adds the batch dimension; only the 4 nucleotide columns are used.
    y_pred = model.predict(numpy.array(ohe_chrom[start:end, :4], ndmin=3))[0, :]
    plt.subplot(2, 5, 5+i+1)
    plt.fill_between(x, 0, y_pred, color='g')
    plt.xlim(0, 1000)
    plt.ylabel("Predicted Signal" if i == 0 else None, fontsize=10)
plt.tight_layout()
plt.show()
```
It looks like SimpleNet is able to predict peaks at each of these locations. A weakness seems to be that the predicted signal is significantly lower than the experimental signal (fold change).
Now let's look at some randomly selected positions that do not exhibit high experimental signal, to ensure that our model isn't simply making predictions of peaks everywhere.
```
# Same layout as above, but sampling from low-signal positions (offset 10000
# into the ascending ranking) to check the model is not predicting peaks
# everywhere. y-axes are fixed to (0, 5) so the panels are comparable.
plt.figure(figsize=(12, 3))
for i, mid in enumerate(mids[10000::100][:5]):
    start, end = mid - 500, mid + 500  # 1 kb window centred on the position
    plt.subplot(2, 5, i+1)
    plt.fill_between(x, 0, signal[start:end].reshape(40, 25).mean(axis=1), color='c')
    plt.xlim(0, 1000)
    plt.ylabel("Experimental Signal" if i == 0 else None, fontsize=9)
    plt.xlabel("Relative Position" if i == 2 else None, fontsize=12)
    plt.ylim(0, 5)
    y_pred = model.predict(numpy.array(ohe_chrom[start:end, :4], ndmin=3))[0, :]
    plt.subplot(2, 5, 5+i+1)
    plt.fill_between(x, 0, y_pred, color='g')
    plt.xlim(0, 1000)
    plt.ylabel("Predicted Signal" if i == 0 else None, fontsize=9)
    plt.xlabel("Relative Position" if i == 2 else None, fontsize=12)
    plt.ylim(0, 5)
plt.tight_layout()
plt.show()
```
Looks good!
### Using Ledidi
Using Ledidi in Python is fairly straightforward. The steps are as follows:
1. Pass your TensorFlow model into the built-in wrapper
2. Pass this wrapper object into the Ledidi object
3. Call the `fit_transform` method on the Ledidi object using the original sequence and desired output
Let's take these steps in order and pretend that we're starting from scratch, even though we've already loaded the SimpleNet model.
Step 1. Load the model and wrap it.
```
# Step 1: load the pre-trained Keras model and wrap it so Ledidi can query it.
from tensorflow.keras.models import load_model
from ledidi import TensorFlowRegressor
model = load_model("SimpleNet-CTCF")
regressor = TensorFlowRegressor(model=model)
```
Step 2. Now, we need to pass the wrapped model into Ledidi, along with the parameters we want to use for our optimization.
```
# Step 2: build the Ledidi optimiser around the wrapped model.
# l is the lambda hyperparameter and max_iter caps the optimisation;
# random_state fixes the seed for reproducibility.
from ledidi import Ledidi
mutator = Ledidi(regressor, l=1, max_iter=250, random_state=111)
```
This object is the one that will edit sequences given the hyperparameters of the optimization, e.g. lambda and the maximum number of iterations, and the pre-trained model.
Step 3. Now, we need to get our sequence (in one-hot encoded format) and our desired output. Both need to be 3 dimensional to fit in with the style of neural network prediction. This means that the first dimension should always have a size of 1 (because you are only editing one sequence at a time).
Let's say that our desired sequence is from a FIMO-identified motif hit and our goal is to knock out binding, i.e., exhibit no signal.
```
# 1 kb window centred on the strongest CTCF signal on chr1 (used again later
# when plotting the experimental signal).
mid = signal.argmax()
start, end = mid - 500, mid + 500
# One-hot encoded input sequence containing a FIMO-identified CTCF motif hit.
# NOTE(review): the [65036:66036] slice hard-codes a 1 kb region of the first
# stored sequence — confirm it matches the intended motif window.
ohe_sequence = numpy.load("CTCF-seqs.npz")['arr_0'][0][65036:66036]
# Desired output: zero signal in all 40 output bins, i.e. binding knockout.
desired_epi = numpy.zeros((1, 40))
```
Finally, we can run the optimization process using the `fit_transform` method. If you find the output annoying, you can set `verbose=False` when you create the Ledidi object.
```
# Step 3: run the optimisation; returns the edited sequence (one-hot encoded)
# whose predicted signal approaches desired_epi.
edited_sequence = mutator.fit_transform(ohe_sequence, desired_epi)
```
Great. The return from the `fit_transform` is the edited sequence in one-hot encoded format. If you want to take a look at the sequence in canonical form, you can do the following:
```
# Decode the one-hot matrix back to a nucleotide string: take the argmax
# column at each position and map 0/1/2/3 to A/C/G/T.
''.join('ACGT'[base] for base in edited_sequence.argmax(axis=1))
```
Perhaps more informatively we can look at the difference in predicted signal when using the original sequence and when using the edited sequence.
```
# Three stacked panels: experimental signal, model prediction on the original
# sequence, and model prediction on the Ledidi-edited sequence. Both
# prediction panels share a y-limit so the knockdown is visually comparable.
mid = signal.argmax()
start, end = mid - 500, mid + 500
x = range(0, 1000, 25)  # 40 bins of 25 bp
y_pred = model.predict(ohe_sequence.reshape(1, 1000, 4))[0, :]
y_new_pred = model.predict(edited_sequence.reshape(1, 1000, 4))[0, :]
plt.figure(figsize=(8, 5))
plt.subplot(311)
plt.title("Experimental Signal", fontsize=14)
plt.fill_between(x, 0, signal[start:end].reshape(40, 25).mean(axis=1), color='c')
plt.ylabel("Fold Change", fontsize=12)
plt.xlim(0, 1000)
plt.subplot(312)
plt.title("Predicted Signal", fontsize=14)
plt.fill_between(x, 0, y_pred, color='g')
plt.ylabel("Fold Change", fontsize=12)
plt.ylim(0, 125)
plt.xlim(0, 1000)
plt.subplot(313)
plt.title("Ledidi-perturbed Signal", fontsize=14)
plt.fill_between(x, 0, y_new_pred, color='r')
plt.ylabel("Fold Change", fontsize=12)
plt.ylim(0, 125)
plt.xlim(0, 1000)
plt.tight_layout()
plt.show()
```
Looks like we are significantly reducing the predicted CTCF binding signal.
### Evaluating Ledidi
Now that we have the edited sequence, how can we evaluate it? Well, there are several performance measures that we can use to evaluate the edited sequence. The most intuitive initial question is: does this sequence achieve the desired result? The simplest performance measure for this question is the mean-squared-error between the desired output and the predicted signal from the edited sequence, perhaps compared to the mean-squared-error when using the original sequence.
```
# Mean-squared error against the desired (all-zero) output; the edited
# sequence should score much lower than the original.
y_pred = model.predict(ohe_sequence.reshape(1, 1000, 4))[0, :]
y_new_pred = model.predict(edited_sequence.reshape(1, 1000, 4))[0, :]
print(((y_new_pred - desired_epi) ** 2).mean())  # edited sequence
print(((y_pred - desired_epi) ** 2).mean())      # original sequence
```
An equally valid question is: how different is the edited sequence from my original sequence?
```
# Count how many positions changed nucleotide between the original and the
# edited sequence (compare argmax per position).
edits = ohe_sequence.argmax(axis=1) != edited_sequence.argmax(axis=1)
numpy.sum(edits)
```
That seems high for the CTCF motif, which is only length 19. This is due, in large part, to the SimpleNet-CTCF model being a very simple model. When using a more complicated model, Ledidi has been able to knock out CTCF activity using only a single nucleotide, and a median of ~3 nucleotides.
However, let's look more specifically at the edits being proposed.
```
# Print original vs. edited base for the 20 positions around the window
# centre — presumably where the CTCF motif lies, hence the 490-510 slice.
for i in range(490, 510):
    print(i, 'ACGT'[ohe_sequence[i].argmax()], 'ACGT'[edited_sequence[i].argmax()])
```
| github_jupyter |
# Introduction
This notebook demonstrates how to plot time series from the UKESM1 Geoengineering simulations
#### Firstly, import packages and define functions for calculations
```
'''Import packages for loading data, analysing, and plotting'''
import xarray
import matplotlib
import numpy
import cftime
%matplotlib inline
```
# Plotting time series computed by the CVDP package
The Climate Variability Diagnostics Package computes a whole bunch of time series. Some of them, such as the Nino3.4 index, need to be treated with caution. But some are really useful.
*Run only one of the following two cells depending whether your Notebook server is at UCL or on your own laptop*
```
# Specify the filenames for the 3 scenarios
# NOTE(review): the G6sulfur filename embeds reference period "1850-1900"
# while the SSP files use "1851-1900" — confirm this matches the shared files.
filenameG6='~/geog0121_shared_space/PMIP_GeoMIP_summary_files/UKESM1_G6sulfur_1850-1900.cvdp_data.1850-2100.nc'
filename245='~/geog0121_shared_space/UKESM1_summary_files/UKESM1_ssp245_1851-1900.cvdp_data.1850-2100.nc'
filename585='~/geog0121_shared_space/UKESM1_summary_files/UKESM1_ssp585_1851-1900.cvdp_data.1850-2100.nc'
```
First, let us remind ourselves about the difference between the two standard scenarios - SSP585 and SSP245.
Here we will plot the global average temperature over land.
```
#Start with SSP245
# open the file to get the data
ssp245=xarray.open_dataset(filename245)
# Smooth the monthly series into a 5-year running mean
glt_245=ssp245.ipcc_GLOBAL_lnd_tas
glt_245_smoothed=glt_245.rolling(time=60,center=True).mean() #take the 5-year running mean (i.e. rolling over 60 months)
# create a plot of the variable called ipcc_GLOBAL_lnd_tas
glt_245_smoothed.plot() #by default the first line is blue
#Then move onto SSP585
# open the file to get the data
ssp585=xarray.open_dataset(filename585)
glt_585=ssp585.ipcc_GLOBAL_lnd_tas
glt_585_smoothed=glt_585.rolling(time=60,center=True).mean() #take the 5-year running mean (i.e. rolling over 60 months)
# create a plot of the variable called ipcc_GLOBAL_lnd_tas
glt_585_smoothed.plot(color='firebrick') #explicit colour overrides the default colour cycle
```
#### Introducing the G6sulfur scenario
So the SSP585 scenario results in the land surface warming to nearly 10°C above preindustrial, which is not pleasant. Perhaps we might want to intervene and deploy some technology to mask out the effect of the increasing carbon dioxide levels.
```
#Now lets look at the G6sulfur experiment...
# open the file to get the data
G6=xarray.open_dataset(filenameG6)
# Let's smooth this to create a 5-year mean
glt_G6=G6.ipcc_GLOBAL_lnd_tas
glt_G6_smoothed=glt_G6.rolling(time=60,center=True).mean() #take the 5-year running mean (i.e. rolling over 60 months)
# Plot the global land temperature for all three scenarios on one set of axes.
glt_245_smoothed.plot() #by default the first line is blue
glt_585_smoothed.plot(color='firebrick')
glt_G6_smoothed.plot(color='green') #explicit colour for the G6sulfur scenario
```
So injecting sulphur into the atmosphere (a bit like artificial volcanoes) can successfully change the temperature trajectory.
#### What about precipitation though?
We can also look at other properties of the climate system, to see if it masks all the impacts of climate change though. Let us look at regional rainfall. Here will pick the South East Asian summer rainfall, which is roughly akin to the summer monsoon.
```
# SSP245: seasonal-mean South East Asian land rainfall
sea_245=ssp245.ipcc_SEA_lnd_pr #load the monthly time series
sea_245_seasonal=sea_245.resample(time="QS-DEC").mean() # quarterly means, quarters starting in December
sea_sm245=sea_245_seasonal[2::4] # every 4th quarter from index 2 - presumably the JJA (summer) quarter; confirm against the data
sea_sm245_smoothed=sea_sm245.rolling(time=10,center=True).mean() #10-point running mean over the yearly values
# SSP585: same processing
sea_585=ssp585.ipcc_SEA_lnd_pr #load the monthly time series
sea_585_seasonal=sea_585.resample(time="QS-DEC").mean()
sea_sm585=sea_585_seasonal[2::4]
sea_sm585_smoothed=sea_sm585.rolling(time=10,center=True).mean() #10-point running mean over the yearly values
# G6sulfur: same processing
sea_G6=G6.ipcc_SEA_lnd_pr #load the monthly time series
sea_G6_seasonal=sea_G6.resample(time="QS-DEC").mean()
sea_smG6=sea_G6_seasonal[2::4]
sea_smG6_smoothed=sea_smG6.rolling(time=10,center=True).mean() #10-point running mean over the yearly values
# Plot all three scenarios together.
sea_sm245_smoothed.plot() #by default the first line is blue
sea_sm585_smoothed.plot(color='firebrick')
sea_smG6_smoothed.plot(color='green') #explicit colour for the G6sulfur scenario
# Restrict the time axis to 2010-2100 using the model's 360-day calendar dates.
start_date=cftime.Datetime360Day(2010,1,1,0,0,0)
end_date=cftime.Datetime360Day(2100,12,30,0,0,0)
# alter the limits of the axes
matplotlib.pyplot.axis([start_date,end_date, -1.5, 0.5]) #[xmin, xmax, ymin, ymax]; limits are cftime dates
# add a grid in the background
matplotlib.pyplot.grid(True)
#Finally save the figure to your current directory...
#plt.savefig("nino34_timeseries_plot.pdf")
```
Here we can see that the geoengineering does not ameliorate the reduction in rainfall over Indonesia. In fact, it makes it even worse.
## Machine dependent locations
```
#FOR THE GEOGRAPHY CLUSTER
'''Select file and variable names'''
#first of all specify some names
reference_period='1851-1900'
ssp_names=["ssp126", "ssp245", "ssp370","ssp585"]
directory='/data/aod/cvdp_cmip6/geog0121/UKESM1-0-LL_ssps.wrt_%s' %reference_period
# determine the filenames for the 3 scenarios (ssp370 is not used here)
filename126='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[0],reference_period)
filename245='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[1],reference_period)
filename585='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[3],reference_period)
#FOR YOUR OWN LAPTOP
'''Select file and variable names'''
#first of all specify some names
reference_period='1851-1900'
ssp_names=["ssp126", "ssp245", "ssp370","ssp585"]
directory='Data'
# determine the filenames for the 3 scenarios (ssp370 is not used here)
filename126='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[0],reference_period)
filename245='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[1],reference_period)
filename585='%s/UKESM1_%s_%s.cvdp_data.1850-2100.nc'%(directory,ssp_names[3],reference_period)
#Download the files if they are not already here.
# Fixes: `import urllib` alone does not guarantee that urllib.request is
# loaded, so import the submodule explicitly; and the IPython-only
# "!mkdir Data" shell escape is replaced by a portable os.makedirs call
# that also does not fail if the directory already exists.
import os
import urllib.request
if not os.path.isfile(filename126):
    os.makedirs(directory, exist_ok=True)
    urllib.request.urlretrieve("http://www2.geog.ucl.ac.uk/~ucfaccb/geog0121/downloads/UKESM1_ssp126_1851-1900.cvdp_data.1850-2100.nc", filename126)
if not os.path.isfile(filename245):
    urllib.request.urlretrieve("http://www2.geog.ucl.ac.uk/~ucfaccb/geog0121/downloads/UKESM1_ssp245_1851-1900.cvdp_data.1850-2100.nc", filename245)
if not os.path.isfile(filename585):
    urllib.request.urlretrieve("http://www2.geog.ucl.ac.uk/~ucfaccb/geog0121/downloads/UKESM1_ssp585_1851-1900.cvdp_data.1850-2100.nc", filename585)
```
| github_jupyter |
```
import pymc4 as pm
import tensorflow as tf
from fundl.datasets import make_graph_counting_dataset
from fundl.utils import pad_graph
import numpy as onp
import networkx as nx
import jax.numpy as np
from chemgraph import atom_graph
import janitor.chemistry
import pandas as pd
# Load the BACE dataset and build one molecular graph per compound
# (pyjanitor/janitor.chemistry chained accessors).
df = (
    pd.read_csv("bace.csv")
    .rename_column("mol", "structure")                      # SMILES column
    .smiles2mol("structure", "mol")                         # parse SMILES into Mol objects
    .join_apply(lambda x: atom_graph(x["mol"]), "graph")    # networkx atom graph per molecule
    .join_apply(lambda x: len(x["graph"]), "graph_size")    # number of atoms
)
Gs = df["graph"].tolist()
print("Generating feature matrices and adjacency matrices...")
# F: per-node feature matrix; A: dense adjacency matrix, one pair per graph.
Fs = []
As = []
for G in Gs:
    Fs.append(onp.vstack([d["features"] for n, d in G.nodes(data=True)]))
    As.append(onp.asarray(nx.adjacency_matrix(G).todense()))
largest_graph_size = max([len(G) for G in Gs])
print("Preparing outputs...")
# Next line is a dummy task, count number of nodes in graph.
# y = np.array([len(G) for G in Gs]).reshape(-1, 1)
# Next line is a real task: predict pIC50 per molecule.
y = df["pIC50"].values.reshape(-1, 1)
print("Padding graphs to correct size...")
# Zero-pad every graph to the largest node count so they can be stacked
# into rectangular batch tensors.
for i, (F, A) in enumerate(zip(Fs, As)):
    F, A = pad_graph(F, A, largest_graph_size)
    Fs[i] = F
    As[i] = A
Fs = onp.stack(Fs).astype(float)
As = onp.stack(As).astype(float)
print(Fs.shape)
print(As.shape)
# TensorFlow copies used by the model below.
As_tensor = tf.convert_to_tensor(As, dtype=float)
Fs_tensor = tf.convert_to_tensor(Fs, dtype=float)
from fundl.activations import relu
from jax import lax
def mpnn(w, b, A, F, nonlin=relu):
    """Message-passing layer; follows the semantics of fundl.layers.graph.mpnn.

    A batched adjacency multiplication aggregates neighbour features for
    every node, then a dense transform and nonlinearity are applied.
    """
    aggregated = tf.keras.backend.batch_dot(A, F)
    transformed = tf.matmul(aggregated, w) + b
    return nonlin(transformed)
def gather(F):
    """Sum node features over the node axis, yielding one vector per graph.

    Follows the semantics of fundl.layers.graph.gather. The input here is a
    TensorFlow tensor (the output of `mpnn`), so the TensorFlow reduction is
    used. BUG FIX: the original called `np.reduce_sum`, but `np` is
    jax.numpy in this notebook, which has `sum` rather than `reduce_sum` —
    that call raised AttributeError.
    """
    return tf.reduce_sum(F, axis=1)
def dense(w, b, x, nonlin=relu):
    """Fully connected layer; follows the semantics of fundl.layers.dense."""
    preactivation = tf.matmul(x, w) + b
    return nonlin(preactivation)
@pm.model
def graph_neural_network():
    """Bayesian message-passing GNN over the padded molecular graphs.

    Normal priors over two graph-conv layers and two dense layers; the
    likelihood is Normal around the network output with learned noise `sd`.
    """
    g1w = yield pm.Normal("g1w", mu=0, sigma=0.1, shape=(9, 9))
    g1b = yield pm.Normal("g1b", mu=0, sigma=0.1, shape=(9,))
    g2w = yield pm.Normal("g2w", mu=0, sigma=0.1, shape=(9, 5))
    g2b = yield pm.Normal("g2b", mu=0, sigma=0.1, shape=(5,))
    d1w = yield pm.Normal("d1w", mu=0, sigma=0.1, shape=(5, 5))
    d1b = yield pm.Normal("d1b", mu=0, sigma=0.1, shape=(5,))
    # BUG FIX: these two priors were assigned to d1w/d1b in the original,
    # clobbering the first dense layer's parameters and leaving d2w/d2b
    # undefined — the `dense(d2w, d2b, out)` call below raised NameError.
    d2w = yield pm.Normal("d2w", mu=0, sigma=0.1, shape=(5, 1))
    d2b = yield pm.Normal("d2b", mu=0, sigma=0.1, shape=(1,))
    acts1 = mpnn(g1w, g1b, As_tensor, Fs_tensor)
    acts2 = mpnn(g2w, g2b, As_tensor, acts1)
    out = gather(acts2)
    out = dense(d1w, d1b, out)
    out = dense(d2w, d2b, out)
    # Prior on noise in measurement.
    # NOTE(review): `loc` as the Exponential parameter name — confirm it
    # matches the pymc4 API (a rate parameter is more usual).
    sd = yield pm.Exponential("sd", loc=1)
    # Likelihood
    like = yield pm.Normal("like", mu=out, sigma=sd, observed=y)
# Draw posterior samples from the model.
pm.inference.sampling.sample(graph_neural_network())
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
# @author: tongzi
# @description: Learning best practices for model evaluation and hyperparameter tuning
# @created date: 2019/08/30
# @last modification: 2019/08/30
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
**In this chapter, we will learn how to:
(1) Obtain unbiased estimation of a model's performance
(2) Diagnose the common problems of machine learning algorithms
(3) Fine-tune machine learning models
(4) Evaluate predictive models using different performance metrics**
### Loading the Breast Cancer Wisconsin dataset
```
# Load the Breast Cancer Wisconsin diagnostic dataset (no header row;
# column 1 is the diagnosis label, columns 2+ are the features).
df = pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data',
                 header=None)
from sklearn.preprocessing import LabelEncoder
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
# Encode the M/B string labels as integers.
y = le.fit_transform(y)
# malignant (M) is represented as class 1, benign (B) is represented as class 0
le.transform(['M', 'B'])
from sklearn.model_selection import train_test_split
# Hold out 20% of the samples as the test set.
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.2, random_state=1)
```
### Combining transformers and estimators in a pipeline
```
# Chain scaling, PCA dimensionality reduction, and logistic regression into
# a single estimator so the fit/score interface applies all steps in order.
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
pipe_lr = Pipeline([('scl', StandardScaler()),
                    ('pca', PCA(n_components=2)),
                    ('clf', LogisticRegression(random_state=1))])
pipe_lr.fit(X_train, y_train)
print(f'Test accuracy: {pipe_lr.score(X_test, y_test):.3f}')
```

### Using k-fold cross-validation to assess model performance
```
from sklearn.model_selection import StratifiedKFold

# NOTE: random_state only has an effect when shuffle=True; recent scikit-learn
# versions raise a ValueError if random_state is set without shuffling.
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
scores = []
# Manually iterate over the stratified folds: refit the pipeline on each
# training split and record the accuracy on the held-out split.
for k, (train_index, test_index) in enumerate(kfold.split(X_train, y_train)):
    pipe_lr.fit(X_train[train_index], y_train[train_index])
    score = pipe_lr.score(X_train[test_index], y_train[test_index])
    scores.append(score)
    print(f'Fold: {k+1}, Class dist: {np.bincount(y_train[train_index])}, Acc: {score:.3f}')
print(f'CV accuracy: {np.mean(scores):.3f} +/- {np.std(scores):.3f}')
```
Although the previous code example was useful to illustrate how k-fold
cross-validation works, scikit-learn also implements a k-fold cross-validation
scorer, which allows us to evaluate our model using stratified k-fold
cross-validation more efficiently:
```
from sklearn.model_selection import cross_val_score
scores = cross_val_score(estimator=pipe_lr, X=X_train, y=y_train, cv=10, n_jobs=1)
print(f"cross-validation accuracy scores: {scores}")
print(f'cross-validation accuracy: {np.mean(scores):.3f} +/- {np.std(scores):.3f}')
```
### Diagnosing bias and variance problems with learning curves
```
from sklearn.model_selection import learning_curve
pipe_lr = Pipeline([('scl', StandardScaler()),
('clf', LogisticRegression(penalty='l2', random_state=0))])
train_sizes, train_scores, test_scores = \
learning_curve(estimator=pipe_lr, X=X_train, y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10), cv=10, n_jobs=1)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean, color='red', marker='o', markersize=5,
label='train accuracy')
plt.fill_between(train_sizes, train_mean + train_std, train_mean - train_std,
alpha=0.2, color='blue')
plt.plot(train_sizes, test_mean, color='green', marker='s', markersize=5,
label='validation accuracy', linestyle='--')
plt.fill_between(train_sizes, test_mean + test_std, test_mean - test_std,
alpha=0.2, color='green')
plt.grid(True)
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.0])
```
### Addressing overfitting and underfitting with validation curves
```
from sklearn.model_selection import validation_curve
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
train_scores, test_scores = \
validation_curve(estimator=pipe_lr, X=X_train, y=y_train,
param_name='clf__C', param_range=param_range, cv=10)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='red', marker='o', markersize=5,
label='train accuracy')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std,
alpha=0.2, color='blue')
plt.plot(param_range, test_mean, color='green', marker='s', markersize=5,
label='validation accuracy', linestyle='--')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std,
alpha=0.2, color='green')
plt.grid(True)
plt.xscale('log')
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.0])
```
### Fine-tuning machine learning via grid search
In machine learning, we have two types of parameters: those that are learned from the training data, for example, the weights in logistic regression, and the parameters of a learning algorithm that are optimized separately. The latter are the tuning parameters, also called hyperparameters, of a model, for example, the **regularization** parameter in logistic regression or the **depth** parameter of a decision tree.
#### Tuning hyperparameter via grid search
```
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
pipe_svc = Pipeline([('scl', StandardScaler()),
('clf', SVC(random_state=1))])
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [{'clf__C': param_range,
'clf__kernel': ['linear']},
{'clf__C': param_range,
'clf__gamma': param_range,
'clf__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring='accuracy',
cv=10,
n_jobs=-1)
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
```
We will use the independent test dataset to estimate the best selected model, which is accessed via the ***best_estimator_*** attribute of the ***GridSearchCV*** object:
```
clf = gs.best_estimator_
clf.fit(X_train, y_train)
print(f'Test accuracy: {clf.score(X_test, y_test):.3f}')
```
>Although ***grid_search*** is a powerful approach for finding the optimal set of parameters, the evaluation of all possible parameters combination is also computationally very expensive. An alternative approach to sampling different parameter combinations using scikit-learn is randomized search. Using the ***RandomizedSearchCV*** class in scikit-learn, we can draw random parameter combinations from sampling distribution with specified budget. More details and examples of its usage can be found at http://scikit-learn.org/stable/modules/grid_search.html#randomized-parameter-optimization.
### Algorithm selection with nested cross-validation
```
gs = GridSearchCV(estimator=pipe_svc,
                  param_grid=param_grid,
                  scoring='accuracy',
                  cv=10,
                  n_jobs=-1)
# Outer loop of nested cross-validation: the inner GridSearchCV tunes the
# hyperparameters on each training fold, while cross_val_score yields an
# unbiased estimate of the tuned model's generalization performance.
# (Removed a stray IPython help invocation `GridSearchCV?` that was left in
# the cell — it is not valid outside an interactive session.)
scores = cross_val_score(gs, X, y, scoring='accuracy', cv=5)
print(f'Cross-Validation accuracy: {np.mean(scores):.3f} +/- {np.std(scores):.3f}')
```
Use the nested cross-validation approach to compare an SVM model to a simple decision tree classifier; for simplicity, we will only tune its ***depth*** parameter:
```
from sklearn.tree import DecisionTreeClassifier
gs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0),
param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}],
scoring='accuracy',
cv=5)
scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5)
print(f'Cross-Validation accuracy: {np.mean(scores):.3f} +/- {np.std(scores):.3f}')
```
### Looking at different performance evaluation metrics
#### Reading a confusion matrix

```
from sklearn.metrics import confusion_matrix
pipe_svc.fit(X_train, y_train)
y_pred = pipe_svc.predict(X_test)
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
```
The array that was returned after executing the preceding code provides us with information about the different types of errors the classifier made on the test dataset that we can map onto the confusion matrix illustration in the previous figure using ***matplotlib's matshow***() function:
```
fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)
for i in range(confmat.shape[0]):
for j in range(confmat.shape[1]):
ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')
plt.xlabel('predicted label')
plt.ylabel('actual label')
```
#### Optimizing the precision and recall of a classification model
$$ERR = \frac{FP+FN}{FP+FN+TP+TN}$$
$$ACC = \frac{TP+TN}{FP+FN+TP+TN} = 1 - ERR$$
```
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score, recall_score
print(f'Precision: {precision_score(y_true=y_test, y_pred=y_pred):.3f}')
print(f"Recall: {recall_score(y_true=y_test, y_pred=y_pred):.3f}")
print(f'F1-score: {f1_score(y_true=y_test, y_pred=y_pred):.3f}')
```
Furthermore, we can use a different scoring metric other than accuracy in
GridSearch via the scoring parameter. A complete list of the different values that are accepted by the scoring parameter can be found at http://scikit-learn.org/ stable/modules/model_evaluation.html .
Remember that the positive class in scikit-learn is the class that is labeled as class 1. If we want to specify a different positive label, we can construct our own scorer via the make_scorer function, which we can then directly provide as an argument to the scoring parameter in GridSearchCV:
```
from sklearn.metrics import make_scorer, f1_score
scorer = make_scorer(f1_score, pos_label=0)
gs = GridSearchCV(estimator=pipe_svc, param_grid=param_grid,
scoring=scorer, cv=10)
```
#### Plotting a receiver operating characteristic
**Receiver operator characteristic (ROC)** graphs are useful tools for selecting models for classification based on their performance with respect to the false positive and true positive rates, which are computed by shifting the decision threshold of the classifier. The diagonal of an ROC graph can be interpreted as random guessing, and classification models that fall below the diagonal are considered as worse than random guessing. A perfect classifier would fall into the top-left corner of the graph with a true positive rate of 1 and a false positive rate of 0. Based on the ROC curve, we can then compute the so-called **area under the curve (AUC)** to characterize the performance of a classification model.
| github_jupyter |
# Optimization Methods
Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you'll gain skills with some more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
By the end of this notebook, you'll be able to:
* Apply optimization methods such as (Stochastic) Gradient Descent, Momentum, RMSProp and Adam
* Use random minibatches to accelerate convergence and improve optimization
Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
<img src="images/cost.jpg" style="width:650px;height:300px;">
<caption><center> <u> <b>Figure 1</b> </u>: <b>Minimizing the cost is like finding the lowest point in a hilly landscape</b><br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
**Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
Let's get started!
## Table of Contents
- [1- Packages](#1)
- [2 - Gradient Descent](#2)
- [Exercise 1 - update_parameters_with_gd](#ex-1)
- [3 - Mini-Batch Gradient Descent](#3)
- [Exercise 2 - random_mini_batches](#ex-2)
- [4 - Momentum](#4)
- [Exercise 3 - initialize_velocity](#ex-3)
- [Exercise 4 - update_parameters_with_momentum](#ex-4)
- [5 - Adam](#5)
- [Exercise 5 - initialize_adam](#ex-5)
- [Exercise 6 - update_parameters_with_adam](#ex-6)
- [6 - Model with different Optimization algorithms](#6)
- [6.1 - Mini-Batch Gradient Descent](#6-1)
- [6.2 - Mini-Batch Gradient Descent with Momentum](#6-2)
- [6.3 - Mini-Batch with Adam](#6-3)
- [6.4 - Summary](#6-4)
- [7 - Learning Rate Decay and Scheduling](#7)
- [7.1 - Decay on every iteration](#7-1)
- [Exercise 7 - update_lr](#ex-7)
- [7.2 - Fixed Interval Scheduling](#7-2)
- [Exercise 8 - schedule_lr_decay](#ex-8)
- [7.3 - Using Learning Rate Decay for each Optimization Method](#7-3)
- [7.3.1 - Gradient Descent with Learning Rate Decay](#7-3-1)
- [7.3.2 - Gradient Descent with Momentum and Learning Rate Decay](#7-3-2)
- [7.3.3 - Adam with Learning Rate Decay](#7-3-3)
- [7.4 - Achieving similar performance with different methods](#7-4)
<a name='1'></a>
## 1- Packages
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from copy import deepcopy
from testCases import *
from public_tests import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
```
<a name='2'></a>
## 2 - Gradient Descent
A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
<a name='ex-1'></a>
### Exercise 1 - update_parameters_with_gd
Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$.
```
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Apply one batch gradient-descent step to every layer's weights and biases.

    Arguments:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to numpy arrays
    grads -- dict mapping "dW1", "db1", ..., "dWL", "dbL" to the gradients
             of the cost with respect to the matching parameters
    learning_rate -- step size alpha, scalar

    Returns:
    parameters -- the dict with every entry moved one step along the
                  negative gradient
    """
    # Each layer contributes one W and one b entry to the dictionary.
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            # W := W - alpha * dW   (and likewise for b)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]
    return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
learning_rate = 0.01
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
update_parameters_with_gd_test(update_parameters_with_gd)
```
A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent, where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
- **(Batch) Gradient Descent**:
``` python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
# Forward propagation
a, caches = forward_propagation(X, parameters)
# Compute cost.
cost += compute_cost(a, Y)
# Backward propagation.
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
- **Stochastic Gradient Descent**:
```python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
for j in range(0, m):
# Forward propagation
a, caches = forward_propagation(X[:,j], parameters)
# Compute cost
cost += compute_cost(a, Y[:,j])
# Backward propagation
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here's what that looks like:
<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> <b>Figure 1</b> </u><font color='purple'> : <b>SGD vs GD</b><br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence, but each step is a lot faster to compute for SGD than it is for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
**Note** also that implementing SGD requires 3 for-loops in total:
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
In practice, you'll often get faster results if you don't use the entire training set, or just one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> <b>Figure 2</b> </u>: <font color='purple'> <b>SGD vs Mini-Batch GD</b><br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
<a name='3'></a>
## 3 - Mini-Batch Gradient Descent
Now you'll build some mini-batches from the training set (X, Y).
There are two steps:
- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
<img src="images/kiank_partition.png" style="width:550px;height:300px;">
<a name='ex-2'></a>
### Exercise 2 - random_mini_batches
Implement `random_mini_batches`. The shuffling part has already been coded for you! To help with the partitioning step, you've been provided the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
```python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
```
Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\left\lfloor \frac{m}{mini\_batch\_size}\right\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $\left(m-mini_\_batch_\_size \times \left\lfloor \frac{m}{mini\_batch\_size}\right\rfloor\right)$.
**Hint:**
$$mini\_batch\_X = shuffled\_X[:, i : j]$$
Think of a way in which you can use the for loop variable `k` help you increment `i` and `j` in multiples of mini_batch_size.
As an example, if you want to increment in multiples of 3, you could do the following:
```python
n = 3
for k in range(0, 5):
print(k * n)
```
```
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Split (X, Y) into a list of randomly shuffled mini-batches.

    Arguments:
    X -- input data, shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot),
         shape (1, number of examples)
    mini_batch_size -- number of examples per mini-batch, integer
    seed -- seed for the permutation, so results are reproducible

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y) tuples;
                    the last batch may hold fewer than mini_batch_size examples
    """
    np.random.seed(seed)  # make the "random" shuffle reproducible
    m = X.shape[1]        # number of training examples

    # Shuffle columns of X and Y with the SAME permutation so that each
    # example stays paired with its label.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1, m))

    # Walk through the shuffled data in strides of mini_batch_size; Python
    # slicing automatically yields a short final batch when m is not a
    # multiple of mini_batch_size.
    mini_batches = []
    for start in range(0, m, mini_batch_size):
        stop = start + mini_batch_size
        mini_batches.append((shuffled_X[:, start:stop], shuffled_Y[:, start:stop]))
    return mini_batches
np.random.seed(1)
mini_batch_size = 64
nx = 12288
m = 148
X = np.array([x for x in range(nx * m)]).reshape((m, nx)).T
Y = np.random.randn(1, m) < 0.5
mini_batches = random_mini_batches(X, Y, mini_batch_size)
n_batches = len(mini_batches)
assert n_batches == math.ceil(m / mini_batch_size), f"Wrong number of mini batches. {n_batches} != {math.ceil(m / mini_batch_size)}"
for k in range(n_batches - 1):
assert mini_batches[k][0].shape == (nx, mini_batch_size), f"Wrong shape in {k} mini batch for X"
assert mini_batches[k][1].shape == (1, mini_batch_size), f"Wrong shape in {k} mini batch for Y"
assert np.sum(np.sum(mini_batches[k][0] - mini_batches[k][0][0], axis=0)) == ((nx * (nx - 1) / 2 ) * mini_batch_size), "Wrong values. It happens if the order of X rows(features) changes"
if ( m % mini_batch_size > 0):
assert mini_batches[n_batches - 1][0].shape == (nx, m % mini_batch_size), f"Wrong shape in the last minibatch. {mini_batches[n_batches - 1][0].shape} != {(nx, m % mini_batch_size)}"
assert np.allclose(mini_batches[0][0][0][0:3], [294912, 86016, 454656]), "Wrong values. Check the indexes used to form the mini batches"
assert np.allclose(mini_batches[-1][0][-1][0:3], [1425407, 1769471, 897023]), "Wrong values. Check the indexes used to form the mini batches"
print("\033[92mAll test passed!")
t_X, t_Y, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(t_X, t_Y, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
random_mini_batches_test(random_mini_batches)
```
<font color='blue'>
**What you should remember**:
- Shuffling and Partitioning are the two steps required to build mini-batches
- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
<a name='4'></a>
## 4 - Momentum
Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
Momentum takes into account the past gradients to smooth out the update. The 'direction' of the previous gradients is stored in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'><b>Figure 3</b> </u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, the gradient is allowed to influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
<a name='ex-3'></a>
### Exercise 3 - initialize_velocity
Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
for $l =1,...,L$:
```python
v["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
v["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
```
**Note** that the iterator l starts at 1 in the for loop as the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript).
```
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Build the zero-initialized velocity dictionary used by momentum.

    Arguments:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to numpy arrays

    Returns:
    v -- dict mapping "dW1", "db1", ..., "dWL", "dbL" to zero arrays of the
         same shape as the corresponding parameters
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer
    # One zero array per gradient key, matching each parameter's shape.
    return {
        "d" + name + str(layer): np.zeros_like(parameters[name + str(layer)])
        for layer in range(1, num_layers + 1)
        for name in ("W", "b")
    }
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
initialize_velocity_test(initialize_velocity)
```
<a name='ex-4'></a>
### Exercise 4 - update_parameters_with_momentum
Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
$$ \begin{cases}
v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
\end{cases}\tag{3}$$
$$\begin{cases}
v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
\end{cases}\tag{4}$$
where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript).
```
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Perform one momentum-smoothed gradient-descent update on all layers.

    Arguments:
    parameters -- dict mapping "W1", "b1", ..., "WL", "bL" to numpy arrays
    grads -- dict mapping "dW1", "db1", ..., "dWL", "dbL" to gradients
    v -- dict of current velocities, same keys as grads
    beta -- momentum hyperparameter, scalar in [0, 1)
    learning_rate -- step size alpha, scalar

    Returns:
    parameters -- dict of updated parameters
    v -- dict of updated velocities
    """
    num_layers = len(parameters) // 2  # each layer has a W and a b entry
    for layer in range(1, num_layers + 1):
        for p_name, g_name in (("W", "dW"), ("b", "db")):
            g_key = g_name + str(layer)
            p_key = p_name + str(layer)
            # Exponentially weighted moving average of past gradients.
            v[g_key] = beta * v[g_key] + (1 - beta) * grads[g_key]
            # Step along the smoothed gradient direction.
            parameters[p_key] = parameters[p_key] - learning_rate * v[g_key]
    return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = v" + str(v["db2"]))
update_parameters_with_momentum_test(update_parameters_with_momentum)
```
**Note that**:
- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum.
**How do you choose $\beta$?**
- The larger the momentum $\beta$ is, the smoother the update, because it takes the past gradients into account more. But if $\beta$ is too big, it could also smooth out the updates too much.
- Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
- Tuning the optimal $\beta$ for your model might require trying several values to see what works best in terms of reducing the value of the cost function $J$.
<font color='blue'>
**What you should remember**:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
<a name='5'></a>
## 5 - Adam
Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
**How does Adam work?**
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$
where:
- t counts the number of steps taken of Adam
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero
As usual, all parameters are stored in the `parameters` dictionary
<a name='ex-5'></a>
### Exercise 5 - initialize_adam
Initialize the Adam variables $v, s$ which keep track of the past information.
**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
for $l = 1, ..., L$:
```python
v["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
v["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
s["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
s["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
```
```
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters):
    """
    Creates the Adam state: two dictionaries of zero arrays.

    v tracks the exponentially weighted average of the gradients and s
    tracks the exponentially weighted average of the squared gradients.
    Both share the key layout of the gradients dictionary:
    "dW1", "db1", ..., "dWL", "dbL".

    Arguments:
    parameters -- python dictionary with entries "W1", "b1", ..., "WL", "bL"

    Returns:
    v -- python dictionary of zero arrays, one per gradient entry
    s -- python dictionary of zero arrays, one per gradient entry
    """
    num_layers = len(parameters) // 2  # each layer contributes one W and one b
    v, s = {}, {}

    for layer in range(1, num_layers + 1):
        for prefix in ("dW", "db"):
            # "dW3" mirrors parameter "W3", "db3" mirrors "b3".
            template = parameters[prefix[1] + str(layer)]
            v[prefix + str(layer)] = np.zeros_like(template)
            s[prefix + str(layer)] = np.zeros_like(template)

    return v, s
# Sanity-check initialize_adam on the course's fixture: every entry of the
# returned Adam state should be an all-zeros array shaped like its parameter.
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# Autograder check (helper defined in the course's test utilities).
initialize_adam_test(initialize_adam)
```
<a name='ex-6'></a>
### Exercise 6 - update_parameters_with_adam
Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$
**Note** that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$.
```
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam variable, counts the number of taken steps
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    L = len(parameters) // 2                 # number of layers in the neural networks
    v_corrected = {}                         # bias-corrected first moment estimate
    s_corrected = {}                         # bias-corrected second moment estimate

    # Perform the Adam update on every W and b of every layer.
    for l in range(1, L + 1):
        for param_prefix, grad_prefix in (("W", "dW"), ("b", "db")):
            grad_key = grad_prefix + str(l)
            param_key = param_prefix + str(l)
            grad = grads[grad_key]

            # First moment: moving average of the gradients.
            v[grad_key] = beta1 * v[grad_key] + (1 - beta1) * grad
            # Bias correction compensates for the zero-initialized average.
            v_corrected[grad_key] = v[grad_key] / (1 - beta1 ** t)

            # Second moment: moving average of the squared gradients.
            s[grad_key] = beta2 * s[grad_key] + (1 - beta2) * grad ** 2
            s_corrected[grad_key] = s[grad_key] / (1 - beta2 ** t)

            # Scale the step by the corrected first moment over the RMS of
            # the corrected second moment; epsilon guards the division.
            step = v_corrected[grad_key] / (np.sqrt(s_corrected[grad_key]) + epsilon)
            parameters[param_key] = parameters[param_key] - learning_rate * step

    return parameters, v, s, v_corrected, s_corrected
# Run one Adam step on the grader's fixture with deliberately non-default
# hyperparameters, then print the updated weights for comparison with the
# expected values shown below this cell.
parametersi, grads, vi, si = update_parameters_with_adam_test_case()
t = 2
learning_rate = 0.02
beta1 = 0.8
beta2 = 0.888
epsilon = 1e-2
parameters, v, s, vc, sc = update_parameters_with_adam(parametersi, grads, vi, si, t, learning_rate, beta1, beta2, epsilon)
print(f"W1 = \n{parameters['W1']}")
print(f"W2 = \n{parameters['W2']}")
print(f"b1 = \n{parameters['b1']}")
print(f"b2 = \n{parameters['b2']}")
# Autograder check (helper defined in the course's test utilities).
update_parameters_with_adam_test(update_parameters_with_adam)
```
**Expected values:**
```
W1 =
[[ 1.63942428 -0.6268425 -0.54320974]
[-1.08782943 0.85036983 -2.2865723 ]]
W2 =
[[ 0.33356139 -0.26425199 1.47707772]
[-2.04538458 -0.30744933 -0.36903141]
[ 1.14873036 -1.09256871 -0.15734651]]
b1 =
[[ 1.75854357]
[-0.74616067]]
b2 =
[[-0.89228024]
[ 0.02707193]
[ 0.56782561]]
```
You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
<a name='6'></a>
## 6 - Model with different Optimization algorithms
Below, you'll use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
```
# Load the two-class "moons" dataset used to compare the three optimizers.
train_X, train_Y = load_dataset()
```
A 3-layer neural network has already been implemented for you! You'll train it with:
- Mini-batch **Gradient Descent**: it will call your function:
- `update_parameters_with_gd()`
- Mini-batch **Momentum**: it will call your functions:
- `initialize_velocity()` and `update_parameters_with_momentum()`
- Mini-batch **Adam**: it will call your functions:
- `initialize_adam()` and `update_parameters_with_adam()`
```
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 5000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- optimization method to use: "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    L = len(layers_dims)             # number of layers in the neural networks
    costs = []                       # to keep track of the cost
    t = 0                            # initializing the counter required for Adam update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state (plain gradient descent is stateless).
    if optimizer == "gd":
        pass # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):
        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:
            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch
            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)
            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)
            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                # t counts minibatch steps (not epochs) for bias correction
                t = t + 1 # Adam counter
                parameters, v, s, _, _ = update_parameters_with_adam(parameters, grads, v, s,
                                                                     t, learning_rate, beta1, beta2, epsilon)
        # Average per-example cost over the whole epoch.
        cost_avg = cost_total / m

        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
```
Now, run this 3 layer neural network with each of the 3 optimization methods.
<a name='6-1'></a>
### 6.1 - Mini-Batch Gradient Descent
Run the following code to see how the model does with mini-batch gradient descent.
```
# train 3-layer model: input size from the data, hidden layers of 5 and 2
# units, one sigmoid output; plain mini-batch gradient descent.
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='6-2'></a>
### 6.2 - Mini-Batch Gradient Descent with Momentum
Next, run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small - but for more complex problems you might see bigger gains.
```
# train 3-layer model with mini-batch gradient descent plus Momentum
# (beta = 0.9 is the usual default for the velocity average).
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='6-3'></a>
### 6.3 - Mini-Batch with Adam
Finally, run the following code to see how the model does with Adam.
```
# train 3-layer model with mini-batch Adam (default beta1/beta2/epsilon).
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='6-4'></a>
### 6.4 - Summary
<table>
<tr>
<td>
<b>optimization method</b>
</td>
<td>
<b>accuracy</b>
</td>
<td>
<b>cost shape</b>
</td>
</tr>
<tr>
<td>
Gradient descent
</td>
<td>
>71%
</td>
<td>
smooth
</td>
</tr>
<tr>
<td>
Momentum
</td>
<td>
>71%
</td>
<td>
smooth
</td>
</tr>
<tr>
<td>
Adam
</td>
<td>
>94%
</td>
<td>
smoother
</td>
</tr>
</table>
Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible.
On the other hand, Adam clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
Some advantages of Adam include:
- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
- Usually works well even with little tuning of hyperparameters (except $\alpha$)
**References**:
- Adam paper: https://arxiv.org/pdf/1412.6980.pdf
<a name='7'></a>
## 7 - Learning Rate Decay and Scheduling
Lastly, the learning rate is another hyperparameter that can help you speed up learning.
During the first part of training, your model can get away with taking large steps, but over time, using a fixed value for the learning rate alpha can cause your model to get stuck in a wide oscillation that never quite converges. But if you were to slowly reduce your learning rate alpha over time, you could then take smaller, slower steps that bring you closer to the minimum. This is the idea behind learning rate decay.
Learning rate decay can be achieved by using either adaptive methods or pre-defined learning rate schedules.
Now, you'll apply scheduled learning rate decay to a 3-layer neural network in three different optimizer modes and see how each one differs, as well as the effect of scheduling at different epochs.
This model is essentially the same as the one you used before, except in this one you'll be able to include learning rate decay. It includes two new parameters, decay and decay_rate.
```
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 5000, print_cost = True, decay=None, decay_rate=1):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- optimization method to use: "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs
    decay -- optional schedule function called as decay(learning_rate0, epoch_num, decay_rate);
             None disables learning rate decay
    decay_rate -- decay rate forwarded to the schedule function

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    L = len(layers_dims)             # number of layers in the neural networks
    costs = []                       # to keep track of the cost
    t = 0                            # initializing the counter required for Adam update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples
    lr_rates = []                    # NOTE(review): appears unused in this cell -- possibly a leftover; confirm
    learning_rate0 = learning_rate   # the original learning rate (decay is always applied to this base)

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state (plain gradient descent is stateless).
    if optimizer == "gd":
        pass # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):
        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:
            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch
            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)
            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)
            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                # t counts minibatch steps (not epochs) for bias correction
                t = t + 1 # Adam counter
                parameters, v, s, _, _ = update_parameters_with_adam(parameters, grads, v, s,
                                                                     t, learning_rate, beta1, beta2, epsilon)
        # Average per-example cost over the whole epoch.
        cost_avg = cost_total / m
        # Recompute the learning rate for the next epoch from the base rate.
        if decay:
            learning_rate = decay(learning_rate0, i, decay_rate)
        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
            if decay:
                print("learning rate after epoch %i: %f"%(i, learning_rate))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
```
<a name='7-1'></a>
### 7.1 - Decay on every iteration
For this portion of the assignment, you'll try one of the pre-defined schedules for learning rate decay, called exponential learning rate decay. It takes this mathematical form:
$$\alpha = \frac{1}{1 + decayRate \times epochNumber} \alpha_{0}$$
<a name='ex-7'></a>
### Exercise 7 - update_lr
Calculate the new learning rate using exponential learning rate decay.
```
# GRADED FUNCTION: update_lr
def update_lr(learning_rate0, epoch_num, decay_rate):
    """
    Calculates the updated learning rate using exponential (inverse-time) decay:

        alpha = alpha0 / (1 + decay_rate * epoch_num)

    Arguments:
    learning_rate0 -- Original learning rate. Scalar
    epoch_num -- Epoch number. Integer
    decay_rate -- Decay rate. Scalar

    Returns:
    learning_rate -- Updated learning rate. Scalar
    """
    # At epoch 0 the denominator is 1, so the original rate is returned
    # unchanged; the rate then shrinks monotonically with the epoch number.
    learning_rate = learning_rate0 / (1 + decay_rate * epoch_num)
    return learning_rate
# Smoke-test update_lr: with decay_rate 1 the rate after epoch 2 should be
# 0.5 / (1 + 2) = 0.1666...
learning_rate = 0.5
print("Original learning rate: ", learning_rate)
epoch_num = 2
decay_rate = 1
learning_rate_2 = update_lr(learning_rate, epoch_num, decay_rate)
print("Updated learning rate: ", learning_rate_2)
# Autograder check (helper defined in the course's test utilities).
update_lr_test(update_lr)

# train 3-layer model with per-epoch decay applied to gradient descent
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd", learning_rate = 0.1, num_epochs=5000, decay=update_lr)

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
Notice that if you set the decay to occur at every iteration, the learning rate goes to zero too quickly - even if you start with a higher learning rate.
<table>
<tr>
<td>
<b>Epoch Number</b>
</td>
<td>
<b>Learning Rate</b>
</td>
<td>
<b>Cost</b>
</td>
</tr>
<tr>
<td>
0
</td>
<td>
0.100000
</td>
<td>
0.701091
</td>
</tr>
<tr>
<td>
1000
</td>
<td>
0.000100
</td>
<td>
0.661884
</td>
</tr>
<tr>
<td>
2000
</td>
<td>
0.000050
</td>
<td>
0.658620
</td>
</tr>
<tr>
<td>
3000
</td>
<td>
0.000033
</td>
<td>
0.656765
</td>
</tr>
<tr>
<td>
4000
</td>
<td>
0.000025
</td>
<td>
0.655486
</td>
</tr>
<tr>
<td>
5000
</td>
<td>
0.000020
</td>
<td>
0.654514
</td>
</tr>
</table>
When you're training for only a few epochs this doesn't cause much trouble, but when the number of epochs is large the optimization algorithm will effectively stop updating. One common fix to this issue is to decay the learning rate only every few steps. This is called fixed interval scheduling.
<a name='7-2'></a>
### 7.2 - Fixed Interval Scheduling
You can help prevent the learning rate from speeding to zero too quickly by scheduling the exponential learning rate decay at a fixed time interval, for example every 1000 epochs. You divide the epoch number by the time interval, which is the size of the window over which the learning rate stays constant.
<img src="images/lr.png" style="width:400px;height:250px;">
<a name='ex-8'></a>
### Exercise 8 - schedule_lr_decay
Calculate the new learning rate using exponential learning rate decay with fixed interval scheduling.
**Instructions**: Implement the learning rate scheduling such that it only changes when the epochNum is a multiple of the timeInterval.
**Note:** The fraction in the denominator uses the floor operation.
$$\alpha = \frac{1}{1 + decayRate \times \lfloor\frac{epochNum}{timeInterval}\rfloor} \alpha_{0}$$
**Hint:** [numpy.floor](https://numpy.org/doc/stable/reference/generated/numpy.floor.html)
```
# GRADED FUNCTION: schedule_lr_decay
def schedule_lr_decay(learning_rate0, epoch_num, decay_rate, time_interval=1000):
    """
    Calculates the updated learning rate using exponential decay scheduled
    at fixed intervals:

        alpha = alpha0 / (1 + decay_rate * floor(epoch_num / time_interval))

    Arguments:
    learning_rate0 -- Original learning rate. Scalar
    epoch_num -- Epoch number. Integer.
    decay_rate -- Decay rate. Scalar.
    time_interval -- Number of epochs where you update the learning rate.

    Returns:
    learning_rate -- Updated learning rate. Scalar
    """
    # The floor keeps the learning rate constant inside each window of
    # `time_interval` epochs and decays it only between windows.
    learning_rate = learning_rate0 / (1 + decay_rate * np.floor(epoch_num / time_interval))
    return learning_rate
# Smoke-test schedule_lr_decay: within the first 100-epoch window the rate
# is unchanged; after one full window it decays to 0.5 / (1 + 0.3).
learning_rate = 0.5
print("Original learning rate: ", learning_rate)
epoch_num_1 = 10
epoch_num_2 = 100
decay_rate = 0.3
time_interval = 100
learning_rate_1 = schedule_lr_decay(learning_rate, epoch_num_1, decay_rate, time_interval)
learning_rate_2 = schedule_lr_decay(learning_rate, epoch_num_2, decay_rate, time_interval)
print("Updated learning rate after {} epochs: ".format(epoch_num_1), learning_rate_1)
print("Updated learning rate after {} epochs: ".format(epoch_num_2), learning_rate_2)
# Autograder check (helper defined in the course's test utilities).
schedule_lr_decay_test(schedule_lr_decay)
```
**Expected output**
```
Original learning rate: 0.5
Updated learning rate after 10 epochs: 0.5
Updated learning rate after 100 epochs: 0.3846153846153846
```
<a name='7-3'></a>
### 7.3 - Using Learning Rate Decay for each Optimization Method
Below, you'll use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
<a name='7-3-1'></a>
#### 7.3.1 - Gradient Descent with Learning Rate Decay
Run the following code to see how the model does gradient descent and weight decay.
```
# train 3-layer model: gradient descent with fixed-interval learning rate decay
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd", learning_rate = 0.1, num_epochs=5000, decay=schedule_lr_decay)

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='7-3-2'></a>
#### 7.3.2 - Gradient Descent with Momentum and Learning Rate Decay
Run the following code to see how the model does gradient descent with momentum and weight decay.
```
# train 3-layer model: Momentum with fixed-interval learning rate decay
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "momentum", learning_rate = 0.1, num_epochs=5000, decay=schedule_lr_decay)

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent with momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='7-3-3'></a>
#### 7.3.3 - Adam with Learning Rate Decay
Run the following code to see how the model does Adam and weight decay.
```
# train 3-layer model: Adam with fixed-interval learning rate decay
# (note the higher base rate of 0.01 compared to the earlier Adam run).
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam", learning_rate = 0.01, num_epochs=5000, decay=schedule_lr_decay)

# Predict (prints training-set accuracy)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
<a name='7-4'></a>
### 7.4 - Achieving similar performance with different methods
With SGD or SGD with Momentum, the accuracy is significantly lower than Adam, but when learning rate decay is added on top, either can achieve performance at a speed and accuracy score that's similar to Adam.
In the case of Adam, notice that the learning curve achieves a similar accuracy but faster.
<table>
<tr>
<td>
<b>optimization method</b>
</td>
<td>
<b>accuracy</b>
</td>
</tr>
<tr>
<td>
Gradient descent
</td>
<td>
>94.6%
</td>
</tr>
<tr>
<td>
Momentum
</td>
<td>
>95.6%
</td>
</tr>
<tr>
<td>
Adam
</td>
<td>
94%
</td>
</tr>
</table>
**Congratulations**! You've made it to the end of the Optimization methods notebook. Here's a quick recap of everything you're now able to do:
* Apply three different optimization methods to your models
* Build mini-batches for your training set
* Use learning rate decay scheduling to speed up your training
Great work!
| github_jupyter |
```
# End-to-end SageMaker workflow: prepare the Boston housing data, train a
# built-in XGBoost regressor, deploy it to a real-time endpoint, invoke it,
# and tear the endpoint down.

# --- Data preparation -------------------------------------------------------
import pandas as pd
dataset = pd.read_csv('housing.csv')
print(dataset.shape)
dataset[:5]
# Move 'medv' column to front (SageMaker's built-in XGBoost expects the
# target in the first column of a header-less CSV)
dataset = pd.concat([dataset['medv'], dataset.drop(['medv'], axis=1)], axis=1)
from sklearn.model_selection import train_test_split
# 90/10 train/validation split (unseeded, so the split differs per run).
training_dataset, validation_dataset = train_test_split(dataset, test_size=0.1)
print(training_dataset.shape)
print(validation_dataset.shape)
training_dataset.to_csv('training_dataset.csv', index=False, header=False)
validation_dataset.to_csv('validation_dataset.csv', index=False, header=False)

# --- Upload to S3 -----------------------------------------------------------
import sagemaker
print(sagemaker.__version__)
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'boston-housing'
training_data_path = sess.upload_data(path='training_dataset.csv', key_prefix=prefix + '/input/training')
validation_data_path = sess.upload_data(path='validation_dataset.csv', key_prefix=prefix + '/input/validation')
print(training_data_path)
print(validation_data_path)

# --- Train the built-in XGBoost algorithm -----------------------------------
from sagemaker import get_execution_role
from sagemaker.image_uris import retrieve
from sagemaker.estimator import Estimator
region = sess.boto_session.region_name
container = retrieve('xgboost', region, version='latest')
xgb_estimator = Estimator(container,
                          role=get_execution_role() ,
                          instance_count=1,
                          instance_type='ml.m5.large',
                          output_path='s3://{}/{}/output'.format(bucket, prefix)
                          )
# NOTE(review): 'reg:linear' is deprecated in newer XGBoost releases in
# favor of 'reg:squarederror' -- verify against the container version.
xgb_estimator.set_hyperparameters(
    objective='reg:linear',
    num_round=200,
    early_stopping_rounds=10)
from sagemaker import TrainingInput
training_data_channel = TrainingInput(s3_data=training_data_path, content_type='text/csv')
validation_data_channel = TrainingInput(s3_data=validation_data_path, content_type='text/csv')
xgb_data = {'train': training_data_channel, 'validation': validation_data_channel}
xgb_estimator.fit(xgb_data)

# --- Deploy and invoke the endpoint -----------------------------------------
from time import strftime, gmtime
timestamp = strftime('%d-%H-%M-%S', gmtime())
endpoint_name = 'xgb-demo-'+timestamp
print(endpoint_name)
xgb_predictor = xgb_estimator.deploy(
    endpoint_name=endpoint_name,
    initial_instance_count=1,
    instance_type='ml.t2.medium')
# One CSV feature row (all columns except the 'medv' target).
test_sample = '0.00632,18.00,2.310,0,0.5380,6.5750,65.20,4.0900,1,296.0,15.30,4.98'
xgb_predictor.serializer = sagemaker.serializers.CSVSerializer()
xgb_predictor.deserializer = sagemaker.deserializers.CSVDeserializer()
response = xgb_predictor.predict(test_sample)
print(response)
# Batch of two samples in a single request.
test_samples = ['0.00632,18.00,2.310,0,0.5380,6.5750,65.20,4.0900,1,296.0,15.30,4.98',
                '0.02731,0.00,7.070,0,0.4690,6.4210,78.90,4.9671,2,242.0,17.80,9.14']
response = xgb_predictor.predict(test_samples)
print(response)
# Same invocation through the low-level boto3 runtime API.
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
                                   ContentType='text/csv',
                                   Body=test_sample)
print(response['Body'].read())
# Clean up the billed endpoint.
xgb_predictor.delete_endpoint()
```
| github_jupyter |
# spectre Benchmarks
```
# Benchmark spectre's GPU factor engine on the Quandl WIKI daily price set.
import pandas as pd
print('pandas', pd.__version__)
# Five-year evaluation window (tz-aware timestamps).
start, end = pd.Timestamp('2013-01-02', tz='UTC'), pd.Timestamp('2018-01-03', tz='UTC')
import sys
# NOTE(review): Windows-style relative path to a local spectre checkout --
# adjust separators when running on another platform.
sys.path = ['..\\..\\spectre'] + sys.path
from spectre import factors, parallel, data
import pandas as pd
loader = data.ArrowLoader('../../historical_data/us/prices/quandl/wiki_prices.feather')
engine = factors.FactorEngine(loader)
engine.to_cuda()
# Benchmark 1: simple 100-day moving average.
engine.remove_all_factors()
engine.add(factors.MA(100), 'ma')
%time dfsp=engine.run(start, end)
dfsp.head(5).append(dfsp.tail(5))
%timeit engine.run(start, end)
%timeit -n 3 -r 10 engine.run(start, end)
# Benchmark 2: 50-day EMA with adjust=True.
engine.remove_all_factors()
engine.add(factors.EMA(50, adjust=True), 'ema50')
%time dfsp=engine.run(start, end)
dfsp.head(5).append(dfsp.tail(5))
%timeit engine.run(start, end)
%timeit -n 3 -r 10 engine.run(start, end)
# Benchmark 3: composite factor (MACD + RSI + STOCHF), ranked and z-scored.
engine.remove_all_factors()
f = factors.MACD()+factors.RSI()+factors.STOCHF()
engine.add(f.rank().zscore(), 'f')
%time dfsp=engine.run(start, end)
dfsp.head(5).append(dfsp.tail(5))
%timeit engine.run(start, end)
%timeit -n 3 -r 10 engine.run(start, end)
# Peak GPU memory consumed across all runs, in gigabytes.
import torch
torch.cuda.max_memory_allocated()/1024**3, 'GB'
```
## Backtesting
--------------------
```
# Backtest setup: import spectre's trading layer and point the loader at
# the Quandl WIKI daily price archive.
import sys
sys.path = ['..\\..\\spectre'] + sys.path
from spectre import factors, trading
from spectre.data import ArrowLoader, DataLoaderFastGetter
import pandas as pd
loader = ArrowLoader('../../historical_data/us/prices/quandl/wiki_prices.feather')
# Render plotly charts as static SVG inside the notebook.
import plotly.io as pio
pio.renderers.default = "svg"
class MyAlg(trading.CustomAlgorithm):
    """Daily long/short RSI strategy backtested with spectre's blotter."""

    # Flag used to skip the very first scheduled rebalance (see rebalance()).
    first_break = True

    def initialize(self):
        # setup engine
        engine = self.get_factor_engine()
        engine.to_cuda()

        # Trade only the 500 most liquid names by 120-day average dollar volume.
        universe = factors.AverageDollarVolume(win=120).top(500)
        engine.set_filter(universe)

        # add your factors: long the 3 highest-RSI names, short the 3 lowest;
        # shift(1) uses the previous bar's signal (avoids lookahead bias).
        rsi = factors.RSI()
        engine.add( rsi.top(3).shift(1), 'longs' )
        engine.add( rsi.bottom(3).shift(1), 'shorts' )

        # schedule rebalance before market close (offset is in nanoseconds)
        self.schedule_rebalance(trading.event.MarketClose(self.rebalance, offset_ns=-10000))

        # simulation parameters
        self.blotter.capital_base = 10000
        self.blotter.max_shares = 1e+19
        self.blotter.set_commission(percentage=0, per_share=0.005, minimum=1)
        # self.blotter.set_slippage(percentage=0, per_share=0.4)

    def rebalance(self, data: 'pd.DataFrame', history: 'pd.DataFrame'):
        # Skip the first scheduled call so results line up with zipline.
        if self.first_break:
            self.first_break = False # Keep consistent with zipline
            return

        all_assets = data.index
        longs = set(all_assets[data.longs])
        shorts = set(all_assets[data.shorts])
        self.record(universe_size=len(all_assets))

        # Build a 2x-leveraged, equal-weight, long-short portfolio.
        one_third = 1.0 / 3.0
        self.blotter.batch_order_target_percent(longs, [one_third]*len(longs))
        self.blotter.batch_order_target_percent(shorts, [-one_third]*len(shorts))

        # Remove any assets that should no longer be in our portfolio.
        portfolio_assets = longs | shorts
        positions = self.blotter.portfolio.positions.keys()
        removes = positions - set(portfolio_assets)
        self.blotter.batch_order_target_percent(removes, [0] * len(removes))

    def terminate(self, records: 'pd.DataFrame'):
        # Called once at the end of the backtest; show the built-in charts.
        self.plot()
# Run the five-year backtest, then report peak GPU memory usage in GB.
%time results = trading.run_backtest(loader, MyAlg, '2013-1-3', '2018-1-3')
import torch
torch.cuda.max_memory_allocated()/1024**3, 'GB'
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Yazanmy/ML/blob/master/Exercises_(Important_Python_Packages).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
Ex1: Create a program that asks the user to enter their
name and their age. Print out a message addressed to them
that tells them the year that they will turn 100 years old.
```
```
import datetime

# Ask for the user's name and age, then report the year they turn 100.
name = input("Your Name : ")
age = int(input("Your age : "))
now = datetime.datetime.now()
# Year they turn 100 = current year + (100 - current age).
century_year = 100 - age + int(now.year)
print("Hello ", name, "in", century_year)
```
```
Ex3: Take a list, say for example this one: a = [1, 1, 2, 3, 5, 8,
13, 21, 34, 55, 89] and write a program that prints out all
the elements of the list that are less than 5.
```
```
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# Collect the elements smaller than 5, then print each on its own line.
small = [x for x in a if x < 5]
for value in small:
    print(value)
```
```
Ex5: Take two lists, say for example these two: a = [1, 1, 2, 3,
5, 8, 13, 21, 34, 55, 89] and b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13] and write a program that returns a list that
contains only the elements that are common between the
lists (without duplicates). Make sure your program works on
two lists of different sizes.
```
```
# Ex5: elements common to two lists, without duplicates.
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# Quick check via set intersection (unordered).
print(set(a) & set(b))
# Manual approach preserving iteration order.  The original used two
# `if len(a) < len(b)` / `if len(b) < len(a)` branches, so equal-length
# lists fell through both and produced an empty result; choosing the
# shorter/longer pair explicitly works for ANY pair of sizes.
both = []
shorter, longer = (a, b) if len(a) <= len(b) else (b, a)
for i in longer:
    if i in shorter and i not in both:
        both.append(i)
print(both)
```
```
Ex28: Implement a function that takes as input three
variables, and returns the largest of the three. Do this
without using the Python max() function!
```
```
def fun(a, b, c):
    """Return the largest of the three arguments without using max().

    Uses >= comparisons so ties are handled correctly -- with the
    original strict `>` tests, fun(5, 5, 3) fell through every branch
    and returned None.
    """
    if a >= b and a >= c:
        return a
    if b >= c:
        return b
    return c

print(fun(5, 9, 7))
```
~~~
Class 9: Write a Python class which has two
methods get_String and print_String. get_String
accept a string from the user and print_String print
the string in upper case.
~~~
```
class myclass():
    """Holds one string read from the user and prints it upper-cased."""

    def __init__(self):
        # Empty until get_String() is called.
        self.Name = ""

    def get_String(self):
        # Read one line from standard input.
        self.Name = input()

    def print_String(self):
        # Echo the stored string in upper case.
        upper_name = self.Name.upper()
        print(upper_name)
# Exercise the class: read a string from stdin, then print it upper-cased.
S= myclass()
S.get_String()
S.print_String()
```
```
Class 10: Write a Python class named Rectangle
constructed by a length and width and a method
which will compute the area of a rectangle
```
```
class Rectangle():
    """A rectangle defined by its length and width."""

    def __init__(self, length, width):
        self.length = length
        self.width = width

    def computeArea(self):
        """Return the area (length * width)."""
        return self.length * self.width

# Use a distinct variable name for the instance: the original wrote
# `Rectangle = Rectangle(5, 10)`, which shadowed the class and made it
# impossible to create further rectangles afterwards.
rect = Rectangle(5, 10)
print(rect.computeArea())
```
~~~
NumPy 3: Create a 3x3 matrix with values ranging from 2 to 10.
~~~
```
import numpy as np

# 3x3 matrix holding the nine integers 2..10, row-major.
array = np.arange(2, 11).reshape(3, 3)
print(array)
```
~~~
NumPy 73: Write a Python program to create an array of (3, 4) shape,
multiply every element value by 3 and display the new array.
~~~
```
import numpy as np

# (3, 4) array of ones stored as 16-bit ints; display every element tripled.
array = np.full((3, 4), 1, dtype=np.int16)
print(array * 3)
```
```
Pandas DataFrame 4 and 5: Write a Python program to get the first 3 rows
and the 'name' and 'score' columns from the following DataFrame.
```
```
import pandas as pd
import numpy as np  # needed for np.nan -- the original cell never imported numpy

exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
             'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
             'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
             'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# Use the labels as the row index (the original defined them but never used them).
df = pd.DataFrame(exam_data, index=labels)
# First 3 rows of 'name' and 'score'.  `.ix` was removed from pandas;
# positional `.iloc[:3]` also fixes the off-by-one (`.ix[0:3]` was
# label-inclusive and returned 4 rows).
subset = df.iloc[:3][['name', 'score']]
print(subset)
```
| github_jupyter |
```
# load packages
import pandas as pd
import numpy as np
import seaborn as sns
import itertools
import statsmodels.api as sm
import matplotlib.pyplot as plt
plt.style.use('bmh')
import sys
import warnings
warnings.filterwarnings('ignore')
from IPython.display import display

# specifying the path.  Note: os.chdir() returns None, so the original
# `path = os.chdir(...)` just bound None to an unused variable.
import os
os.chdir(r'C:\Users\Mohamed Ahmed Warsam\Desktop\PythonScripts')
datapath1 = 'ts_modelling1.xlsx'

# import the data -- `sheet_name` is the current keyword; the old
# `sheetname` alias was removed from pandas.read_excel.
data = pd.read_excel(datapath1, sheet_name=0, index_col='Created')

# view data
display(data.head(10))
display(data.tail(10))
display(data.info())

# convert the low-cardinality string columns to pandas categories
# (saves memory and documents that these are closed value sets)
for col in ['Category', 'Priority', 'Incident state', 'Urgency', 'Impact']:
    data[col] = data[col].astype('category')

# checking change
data.info()
data.Priority.value_counts()
```
# Ticket Volume Forecasting Project - Pearson by Mohamed Warsame
This mini-project aims to provide ticket volume forecast over December 2016 and February 2017, on the basis of historical ticket volume recorded between July 2014 and November 2016. It is important to inspect and evaluate the dataset before delving into the modelling process. The underlying dataset has 7 columns in total, the date range starting on 2014-07-01 19:16:00 and ending on 2016-11-21 19:45:00. There are 25479 tickets recorded during that time period.
## Exploratory Data Analysis
The figure below provides a visual impression of the historical ticket volume (total) over the entire date range covered by the dataset. As can be seen, the series initially exhibits a moderate linear growth trend and roughly gains 5000 tickets every six months. However, there is an unprecedented jump in total ticket count occurring at the end of August 2016. The count of tickets gains over 1000 additional tickets on a single day. This event would be referred to as a structural break, or exogenous shock to the system in the terminology of statistical literature. The shape of the line also changes, displaying characteristics of a concave mathematical function with a very steep slope, rather than the moderate linear growth trend prior to that. To further examine this, we can zoom into the figure by only selecting a subset of the overall date range.
```
# explore the time series as visualisation
# Sample goes from July 2014 to November 2016
sns.set(font_scale = 1.25)
y = data['Number']
y.plot(linestyle='-.', figsize=(15, 8))
plt.xlabel('Date Created', fontsize=14)
plt.ylabel('Total Ticket Count', fontsize=14)
plt.title('Evolution of Ticket Count', fontsize=16)
# Save the figure that was just drawn *before* calling plt.show():
# the original created a brand-new (empty) figure after show() and saved
# that instead, so ETC.png came out blank.
plt.savefig('ETC.png', dpi=100)
plt.show()
```
From the zoomed in perspective we can see that the extreme outlier day was the last day of August, 2016-08-31, after which the trend seems to indicate that roughly 1 thousand tickets come in every month. Contingent upon the hypothesis that this trend continues into the forecasting period this task aims to predict, we can already infer that total ticket count will reach close to 30,000 tickets by February 2017. However, further statistical analysis is required to establish the robustness of this rough estimate inferred from basic exploratory data analysis.
```
# Subset of ticket count of outburst
doubling = data.loc['2016-08-01': '2016-11-21',:]
y1 = doubling['Number']
y1.plot(linestyle='-.', figsize=(15, 8))
plt.xlabel('Date Created', fontsize=14)
plt.ylabel('Total Ticket Count', fontsize=14)
plt.title('Subset of Ticket Count', fontsize=16)
plt.show()
```
The following horizontal bar chart shows us the exact date of the outlier observed on the prior visualisations. Identified to be the last day of August, we can see that close to 1400 tickets have been recorded in that day alone. The prior day, 30th of August, 2016, also recorded an abnormally high number of tickets. To conclude on this point, those two days alone contributed close to 2000 tickets, a figure that is usually only reached over the course of several months. Perhaps, a system outage, or other technical problem with the overall technological infrastructure of Pearson failed around that time. In fact, after taking a closer look at that week, it appears that a ticket raised at around 11pm on the 29th August marked with the highest priority level 'critical' may have triggered the abnormally high ticket count of the following day. This supports my suspicion that a database, or systems related issue may have resulted in an outage of the entire technology infrastructure, severely limiting the actions of users.
```
# Showing the day
sns.set(font_scale = 1.25)
outlier = data.loc['2016-08-28':'2016-09-06','Number'].resample('D').count()
#data1 = data.loc['2016-08-28':'2016-09-06',:]
outlier.plot(x=outlier.index, y=outlier, kind='barh', figsize=(15, 8))
plt.xlabel('Total Ticket Count', fontsize=16)
plt.ylabel('Date', fontsize=16)
plt.title('Outlier Week Count', fontsize=18)
plt.show()
```
In order not to make erroneous decisions in the modelling process that follows, we need to exclude those two outliers from the sample. The line plot below shows the total ticket count grouped by day and not just the cumulative count of the 'Number' column in the dataset. It also perfectly illustrates the issue of outliers in forecasting, as they introduce a scaling problem of the axes and also misinform the statistical model, since they are not representative of the overall behaviour of the data generating process. With over 200 tickets, there is another day in March 2016 that has a value which is extremely different from neighbouring observations. For now, we can treat those three days that exhibit an abnormally high number of tickets as random bursts. The subsequent figure will show how the scales of the vertical axis will change after removing the extreme value in March and also the two more recent outliers.
```
# potential idea - I can use .resample() to resample by hour and count .mean to get average ticket volume
# Daily ticket volume: count the records falling on each calendar day.
yd = data.loc[:,'Number'].resample('D').count()
#display(yd.head(20))

# plot daily count
yd.plot(linestyle='-.', figsize=(15, 8))
plt.xlabel('Date Created', fontsize=14)
plt.ylabel('Total Ticket Count', fontsize=14)
plt.title('Daily View of Ticket Count', fontsize=16)
plt.show()

# import changed excel (outlier days removed).  `sheet_name` replaces the
# `sheetname` keyword that was removed from pandas.read_excel.
datapath2 = 'ts_modelling2.xlsx'
newdata = pd.read_excel(datapath2, sheet_name=0, index_col='Created')
```
After removing those two outlier days, the scaling of our vertical axis changed significantly. The number of tickets coming in on a daily basis now range between 0 and 175, reaching this boundary only once in September 2016, when not taking into account those three outlier days that have been removed. The overall average for the entire date range gives us a figure of 26 tickets per day, again, this mean value does not take the 3 outlier days into consideration. A striking insight that one can derive from analysing the figure below is that the series becomes more volatile from January 2016 onwards, with a much larger variance and also containing more extreme values. In conclusion, the trend of daily ticket volume was more stable in 2015 than in 2016 and these characteristics indicate that a seasonal ARIMA (Auto-regressive Integrated Moving-average) model seems to be most suitable for the forecasting task. This would enable us to account for seasonal variation, the trend factor and the random noise component which is inherent in the underlying dataset.
```
# y2
# Daily ticket counts from the cleaned spreadsheet (outlier days removed).
# `y_` is the series that every model below is fitted on.
y_ = newdata.loc[:,'Number'].resample('D').count()
y_.plot(linestyle='-', figsize=(15, 8))
#y_mean = [np.mean(y_)]*len(newdata.index)
plt.xlabel('Date Created', fontsize=14)
plt.ylabel('Total Ticket Count', fontsize=14)
plt.title('Daily View of Ticket Count', fontsize=16)
plt.show()
```
## Time Series Methodology and Analysis
The seasonal ARIMA model incorporates both non-seasonal and seasonal factors in a multiplicative model. One shorthand notation for the model is: $ARIMA(p, d, q) × (P, D, Q)S$ with $p$ = non-seasonal AR order, $d$ = non-seasonal differencing, $q$ = non-seasonal MA order, $P$ = seasonal AR order, $D$ = seasonal differencing, $Q$ = seasonal MA order, and $S$ = time span of repeating seasonal pattern.
The regression output below shows the implementation of a **seasonal ARIMA** model, which suggests that all the selected parameters are statistically significant and of large magnitude, i.e., explaining the underlying time series. The **coef** column illustrates the weighting (i.e. importance) of each parameter and shows how each one affects daily ticket volume. The **P>|z|** column shows us the magnitude of each feature weight. Here, each weight has a **p-value** lower than 0.05, enabling us to infer that we can keep all of the parameters in our model.
```
# Fitting the model - ARIMA(1, 1, 1)x(1, 1, 1, 12)12
# Non-seasonal and seasonal orders are both (1, 1, 1), seasonal period 12.
mod = sm.tsa.statespace.SARIMAX(y_,
                                order=(1, 1, 1),
                                seasonal_order=(1, 1, 1, 12),
                                enforce_stationarity=False,
                                enforce_invertibility=False)
results = mod.fit()
# Coefficient table: estimates, standard errors, z-scores and p-values.
print(results.summary().tables[1])
```
However, when estimating and fitting seasonal ARIMA models, it is crucial to also compute model diagnostics. This ensures that none of the assumptions made by the statistical algorithm have been violated. The plot_diagnostics object enables us to quickly visualise model diagnostics and investigate for any unusual patterns.
```
# Plotting model diagnostics
# (standardized residuals, histogram + KDE vs N(0,1), Q-Q plot, correlogram)
results.plot_diagnostics(figsize=(15, 12))
plt.show()
```
The main point of concern for this modelling exercise is to verify whether the residuals of the seasonal ARIMA model are uncorrelated and normally distributed with zero-mean. If the model does not satisfy these properties, it simply means that it can be enhanced by additional hyperparameter tuning, i.e., tweaking the estimated equation such that our model achieves the desired statistical properties. Unfortunately, engaging in such painstaking manual steps is beyond the scope of this analysis. In the above case, our model diagnostics plots suggest that the residuals are approximately normally distributed.
1. In the top right plot, we see that the green **KDE** line roughly follows the **N(0,1)** line (where **N(0,1)** is the standard notation for a normal distribution with mean 0 and standard deviation of 1).
2. The qq-plot on the bottom left shows that the ordered distribution of residuals (blue dots) follows the linear trend of the samples taken from a standard normal distribution with **N(0, 1)**. Again, this is a strong indication that the model residuals are approximately normally distributed.
3. The residuals over time (top left plot) do not exhibit any obvious seasonality and appear to be white noise. The only cause for concern is the high variance towards the end of 2016. Also, the autocorrelation (i.e. correlogram) plot on the bottom right shows that the time series residuals exhibit _some_ correlation with lagged versions of themselves. This implies that there is a presence of autocorrelation.
Taking these insights into consideration leads us to conclude that our model can indeed produce a satisfactory fit that could enable us to better understand our time series data and forecast future values. Another conclusion implicit in the observations elaborated on above is that 2016 is unlike 2015. Thus, it would be better to only feed the values of 2016 into our forecasting model.
### Validating the Model
As the discussion above has shown, we have obtained a rigorous methodology of a statistical model for our time series data of ticket volume that can now be used to produce forecasts. The first step of validating our seasonal ARIMA model is to compare the predicted values to the real values of the time series, which will aid us in understanding the accuracy of the forecast to come. The below plot shows the observed data of total ticket volume (blue colour) compared to our one-step ahead forecast (red colour) that was produced by the selected seasonal ARIMA methodology. The shaded area represents confidence intervals that provide us with a measure of certainty. Owing to the strong variability of the data for 2016, the confidence bounds are quite wide. Overall, the model forecasts align with the real observed values very well.
```
# comparing forecast values with actual values
# One-step-ahead, in-sample prediction from Jan 2016 onwards.
# dynamic=False: each step's prediction uses the full observed history.
pred = results.get_prediction(start=pd.to_datetime('01/01/2016'), dynamic=False)
pred_ci = pred.conf_int()
# Plotting original versus forecast
ax = y_['01/01/2016':].plot(label='observed', figsize=(15, 8))
pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', color='r', alpha=.7, figsize=(15, 8))
# Shade the band between the lower and upper confidence-interval columns.
ax.fill_between(pred_ci.index,
                pred_ci.iloc[:, 0],
                pred_ci.iloc[:, 1], color='k', alpha=.2)
ax.set_xlabel('Date', fontsize=14)
ax.set_ylabel('Total Count of Tickets', fontsize=14)
plt.legend()
plt.show()
```
It is also useful to compute a measure of the accuracy of our forecasts. We will utilise the MSE (Mean Squared Error), which summarises the average error of the forecasts. For each predicted value, we compute its distance to the true value and square the result. The results need to be squared so that positive/negative differences do not cancel each other out when we compute the overall mean. As can be seen below, the MSE of our one-step ahead forecasts yields a value of 422.45, which is quite high as it should be close to 0. An MSE of 0 would mean that the estimator is predicting observations of the parameter with perfect accuracy, which would be an ideal scenario but is not typically possible. One reason for this high value of MSE is that our dataset for 2016 exhibits a very high variability, as could be seen by the confidence intervals in the graph above. In conclusion, this forecasting task may require a more advanced modelling methodology to reduce the MSE further.
```
# computing the MSE
y_forecasted = pred.predicted_mean
y_truth = y_['01/01/2016':]
# Compute the mean square error: average of squared one-step-ahead
# prediction errors over the 2016 validation window.
mse = ((y_forecasted - y_truth) ** 2).mean()
print('The Mean Squared Error of our forecast is {}'.format(round(mse, 2)))
```
### Forecast for December-February 2017
As can be seen from the forecast for December, January and February, the seasonal ARIMA model provided us with an estimate of roughly 20-30 tickets per day, fluctuating around an average of 20 tickets. This is simply an extension of the lower daily average that started to decrease in September 2016. Prior visualisations have shown that there is a decreasing trend visible from the latter part of the second half of 2016. This feature is inherent in any seasonal ARIMA model, which picks up on the nearest seasonal patterns and adjusts its forecast accordingly. Another noticable characteristic of the forecast is that it exhibits less variability than the actual series, something that a more enhanced methodology could improve on. Furthermore, there are many other variables that impact ticket volume, such as: (1) the number of users that utilise a given software infrastructure, (2) the average number of bugs, and (3) the average number of technical staff that develop and maintain a given software infrastructure. Obtaining data on the number of users per day and hour of the day would remove a substantial amount of uncertainty from the errors and enhance the modelling results further.
```
# Forecast 99 days into the future, last date of sample is 21/11/2016
# (99 daily steps spans December 2016 through February 2017).
pred_uc = results.get_forecast(steps=99)
# Get confidence intervals of forecasts
pred_ci = pred_uc.conf_int()

# plot forecast
sns.set(font_scale = 1.25)
ax = y_['11/01/2016':].plot(label='Observed', figsize=(15, 8))
pred_uc.predicted_mean.plot(ax=ax, label='Forecast', color='r', alpha=.7, figsize=(15, 8))
ax.set_xlabel('Date', fontsize=14)
ax.set_ylabel('Total Count of Tickets', fontsize=14)
plt.legend()
plt.title('Daily Ticket Count Forecast', fontsize=16)
plt.show()

# Time Series Modelling, DEFINING THE ARIMA TERMS
# Define the p, d and q parameters to take any value between 0 and 2
p = d = q = range(0, 2)
# Generate all different combinations of p, d and q triplets
pdq = list(itertools.product(p, d, q))
# Generate all different combinations of seasonal p, d and q triplets
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
#print('Examples of parameter combinations for Seasonal ARIMA...')
#print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
#print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
#print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
#print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))

# Hyper Parameter tuning for our SARIMA model: fit every combination and
# print its AIC (lowest AIC wins); chosen model ARIMA(1, 1, 1)x(1, 1, 1, 12)12
# NOTE(review): this loop rebinds `mod` and `results`, clobbering the chosen
# model fitted earlier in the notebook -- rerun that cell after the search.
import statsmodels.api as sm
warnings.filterwarnings("ignore") # specify to ignore warning messages
for param in pdq:
    for param_seasonal in seasonal_pdq:
        try:
            mod = sm.tsa.statespace.SARIMAX(y_,
                                            order=param,
                                            seasonal_order=param_seasonal,
                                            enforce_stationarity=False,
                                            enforce_invertibility=False)
            results = mod.fit()
            print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
        except Exception:
            # Skip combinations that fail to fit.  The original bare
            # `except:` would also have swallowed KeyboardInterrupt and
            # real coding errors.
            continue

# calculating the mean of y_
y_mean = np.mean(y_)
print(y_mean)
# turning the index into a list (note: this immediately overwrites the
# scalar mean above with a constant list the length of the index)
y_mean = [np.mean(y_)]*len(newdata.index)

# investigating the difference in the number of rows
#print(len(data))
#print(len(newdata))
# maybe you can count the number of rows (category) using the .count() on resample
# there must be a method
# the number of users that utilise a given software infrastructure
# the average number of bugs
# the average number of technical staff that develop and maintain a given software infrastructure
# Section 1 Exploratory data analysis
# Section 2 Methodology of modelling
# Section 3 Results and insights
# Characterize the growth trend and try to annotate the jump
# slice the index to further investigate the jump in ticket volume
# dropping outliers
#start_remove = '2016-08-30'
#end_remove = '2016-08-31'
#dropThis = data.loc['2016-08-30':'2016-08-31']
# newdata = data.drop(data.index[['2016-08-30','2016-08-31']])
#newdata1 = data[~data.index.isin(dropThis)]
```
| github_jupyter |
```
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib import rc
# Render all text with LaTeX, using a sans-serif math font.
rc('text', usetex=True)
# NOTE(review): recent matplotlib expects `preamble` as a single string
# rather than a list -- confirm against the matplotlib version in use.
rc('text.latex', preamble=[r'\usepackage{sansmath}', r'\sansmath']) #r'\usepackage{DejaVuSans}'
rc('font',**{'family':'sans-serif','sans-serif':['DejaVu Sans']})
# Extra padding between tick labels and axes.
rc('xtick.major', pad=12)
rc('ytick.major', pad=12)
rc('grid', linewidth=1.3)
%matplotlib inline
import cmocean
# How the .npz archives loaded below were generated (kept for reference):
# onp.savez("augmentation_ce_fit_03",
#           x_train=train_set[0][0],
#           y_train=train_set[1][0],
#           x_test=test_set[0][0],
#           y_test=test_set[1][0],
#           grid_d1=grid_d1,
#           grid_d2=grid_d2,
#           all_grid_preds=all_grid_preds,
#           all_test_preds=all_test_preds
#           )

# Panel: decision surface of the CE model trained WITH data augmentation.
arr = np.load("augmentation_ce_fit_03.npz")
levels = np.linspace(0., 1., 10)
# Average the stored per-checkpoint grid predictions, skipping the first
# 10 entries (presumably burn-in samples -- TODO confirm), then reshape
# to the 50x50 evaluation grid.
grid_preds = arr["all_grid_preds"][10:].mean(0)[:, 0].reshape((50, 50))
plt.figure(figsize=(4, 4))
plt.contourf(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    vmin=0., vmax=1., levels=levels, cmap=cmocean.cm.deep,
    alpha=0.5#cmap=cmocean.cm.thermal
)
# plt.colorbar()
class_1 = (arr["y_train"] == 1)
class_0 = np.logical_not(class_1)
# Points in the lower-left quadrant (x<0, y<0) are drawn opaque;
# all other training points are faded (alpha=0.3).
mask = np.logical_and(arr["x_train"][:, 0] < 0, arr["x_train"][:, 1] < 0)
plt.plot(arr["x_train"][:, 0][mask & class_1], arr["x_train"][:, 1][mask & class_1], "ro", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][mask & class_0], arr["x_train"][:, 1][mask & class_0], "mo", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][~mask & class_1], arr["x_train"][:, 1][~mask & class_1], "ro", ms=8, mec="k", alpha=0.3)
plt.plot(arr["x_train"][:, 0][~mask & class_0], arr["x_train"][:, 1][~mask & class_0], "mo", ms=8, mec="k", alpha=0.3)
# Black contour at p=0.5 marks the decision boundary.
plt.contour(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    levels=[0.5], colors=["k"], zorder=10, linewidths=3
)
plt.xticks([])
plt.yticks([])
# test_acc = (np.argmax(arr["all_test_preds"][10:].mean(axis=0), -1) == arr["y_test"]).mean()
# plt.title("Test Acc: {:.1f}\%".format(test_acc * 100), fontsize=18)
# plt.hlines(0., -15., 0., color="b", ls="solid", zorder=10, lw=3)
# plt.vlines(0., -15., 0., color="b", ls="solid", zorder=10, lw=3)
# plt.hlines(0., -15., 0., color="k", ls="dashed", zorder=10, lw=3)
# plt.vlines(0., -15., 0., color="k", ls="dashed", zorder=10, lw=3)
# Blue cross-hairs through the origin separate the four quadrants.
plt.hlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.vlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.savefig("../../Papers/data_aug_bnns/arxiv/figures/fig_1/ce_aug_fit.pdf")
# class_1 = (test_set[1][0] == 1)
# class_0 = jnp.logical_not(class_1)
# plt.plot(test_set[0][0, :, 0][class_1], test_set[0][0, :, 1][class_1], "rs", ms=10, mec="k")
# plt.plot(test_set[0][0, :, 0][class_0], test_set[0][0, :, 1][class_0], "ms", ms=10, mec="k")
# plt.xlim(-15., 0.)
# plt.ylim(-15., 0.)
# Panel: CE model trained WITHOUT augmentation.  Same recipe as above,
# but no quadrant mask -- every training point is drawn opaque.
arr = np.load("ce_fit_noaug_03.npz")
levels = np.linspace(0., 1., 10)
grid_preds = arr["all_grid_preds"][10:].mean(0)[:, 0].reshape((50, 50))
plt.figure(figsize=(4, 4))
plt.contourf(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    vmin=0., vmax=1., levels=levels, cmap=cmocean.cm.deep,
    alpha=0.5#cmap=cmocean.cm.thermal
)
# plt.colorbar()
class_1 = (arr["y_train"] == 1)
class_0 = np.logical_not(class_1)
plt.plot(arr["x_train"][:, 0][class_1], arr["x_train"][:, 1][class_1], "ro", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][class_0], arr["x_train"][:, 1][class_0], "mo", ms=8, mec="k", alpha=0.9)
# Decision boundary at p=0.5.
plt.contour(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    levels=[0.5], colors=["k"], zorder=10, linewidths=3
)
plt.xticks([])
plt.yticks([])
plt.hlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.vlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
# plt.hlines(0., -15., 15., color="k", ls="dashed", zorder=10, lw=1)
# plt.vlines(0., -15., 15., color="k", ls="dashed", zorder=10, lw=1)
# test_acc = (np.argmax(arr["all_test_preds"][10:].mean(axis=0), -1) == arr["y_test"]).mean()
# plt.title("Test Acc: {:.1f}\%".format(test_acc * 100), fontsize=18)
plt.savefig("../../Papers/data_aug_bnns/arxiv/figures/fig_1/ce_noaug_fit.pdf")
# class_1 = (test_set[1][0] == 1)
# class_0 = jnp.logical_not(class_1)
# plt.plot(test_set[0][0, :, 0][class_1], test_set[0][0, :, 1][class_1], "rs", ms=10, mec="k")
# plt.plot(test_set[0][0, :, 0][class_0], test_set[0][0, :, 1][class_0], "ms", ms=10, mec="k")
# plt.xlim(-15., 0.)
# plt.ylim(-15., 0.)
# Panel: augmented CE fit from the "_cold" run (per the archive filename;
# presumably a cold-posterior variant -- TODO confirm).
arr = np.load("augmentation_ce_fit_03_cold.npz")
levels = np.linspace(0., 1., 10)
grid_preds = arr["all_grid_preds"][10:].mean(0)[:, 0].reshape((50, 50))
plt.figure(figsize=(4, 4))
plt.contourf(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    vmin=0., vmax=1., levels=levels, cmap=cmocean.cm.deep,
    alpha=0.5#cmap=cmocean.cm.thermal
)
# plt.colorbar()
class_1 = (arr["y_train"] == 1)
class_0 = np.logical_not(class_1)
# Lower-left quadrant opaque, remaining training points faded.
mask = np.logical_and(arr["x_train"][:, 0] < 0, arr["x_train"][:, 1] < 0)
plt.plot(arr["x_train"][:, 0][mask & class_1], arr["x_train"][:, 1][mask & class_1], "ro", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][mask & class_0], arr["x_train"][:, 1][mask & class_0], "mo", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][~mask & class_1], arr["x_train"][:, 1][~mask & class_1], "ro", ms=8, mec="k", alpha=0.3)
plt.plot(arr["x_train"][:, 0][~mask & class_0], arr["x_train"][:, 1][~mask & class_0], "mo", ms=8, mec="k", alpha=0.3)
# Decision boundary at p=0.5.
plt.contour(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    levels=[0.5], colors=["k"], zorder=10, linewidths=3
)
plt.xticks([])
plt.yticks([])
plt.hlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.vlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
# plt.hlines(0., -15., 15., color="k", ls="dashed", zorder=10, lw=1)
# plt.vlines(0., -15., 15., color="k", ls="dashed", zorder=10, lw=1)
# test_acc = (np.argmax(arr["all_test_preds"][10:].mean(axis=0), -1) == arr["y_test"]).mean()
# plt.title("Test Acc: {:.1f}\%".format(test_acc * 100), fontsize=18)
plt.savefig("../../Papers/data_aug_bnns/arxiv/figures/fig_1/ce_aug_cold_fit.pdf")
# class_1 = (test_set[1][0] == 1)
# class_0 = jnp.logical_not(class_1)
# plt.plot(test_set[0][0, :, 0][class_1], test_set[0][0, :, 1][class_1], "rs", ms=10, mec="k")
# plt.plot(test_set[0][0, :, 0][class_0], test_set[0][0, :, 1][class_0], "ms", ms=10, mec="k")
# plt.xlim(-15., 0.)
# plt.ylim(-15., 0.)
# Panel: augmented Dirichlet-likelihood fit.  This panel is wider and
# carries the shared colorbar for the figure.
arr = np.load("augmentation_dirichlet_fit.npz")
# 11 level edges so the colorbar ticks land exactly on 0, 0.1, ..., 1.
levels = np.linspace(0., 1., 11)
grid_preds = arr["all_grid_preds"][10:].mean(0)[:, 0].reshape((50, 50))
plt.figure(figsize=(4.8, 4))
plt.contourf(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    vmin=0., vmax=1., levels=levels, cmap=cmocean.cm.deep,
    alpha=0.5#cmap=cmocean.cm.thermal
)
cbar = plt.colorbar(pad=0.05, aspect=15, shrink=0.9)
# cbar.ax.tick_params(labelsize=16)
# cbar = plt.colorbar(orientation="horizontal", pad=0.05, aspect=10, shrink=0.9)
cbar.set_ticks(levels)
# Label only every other tick to keep the colorbar uncluttered.
cbar.ax.set_yticklabels(
    [r"$0$", "", "$0.2$", "", "$0.4$", "", "$0.6$", "", "$0.8$", "", "$1$"], fontsize=16)
cbar.ax.tick_params(axis='both', which='major', pad=4)
class_1 = (arr["y_train"] == 1)
class_0 = np.logical_not(class_1)
# Lower-left quadrant opaque, remaining training points faded.
mask = np.logical_and(arr["x_train"][:, 0] < 0, arr["x_train"][:, 1] < 0)
plt.plot(arr["x_train"][:, 0][mask & class_1], arr["x_train"][:, 1][mask & class_1], "ro", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][mask & class_0], arr["x_train"][:, 1][mask & class_0], "mo", ms=8, mec="k", alpha=0.9)
plt.plot(arr["x_train"][:, 0][~mask & class_1], arr["x_train"][:, 1][~mask & class_1], "ro", ms=8, mec="k", alpha=0.3)
plt.plot(arr["x_train"][:, 0][~mask & class_0], arr["x_train"][:, 1][~mask & class_0], "mo", ms=8, mec="k", alpha=0.3)
# Decision boundary at p=0.5.
plt.contour(
    arr["grid_d1"], arr["grid_d2"], grid_preds,
    levels=[0.5], colors=["k"], zorder=10, linewidths=3
)
plt.xticks([])
plt.yticks([])
# test_acc = (np.argmax(arr["all_test_preds"][10:].mean(axis=0), -1) == arr["y_test"]).mean()
# plt.title("Test Acc: {:.1f}\%".format(test_acc * 100), fontsize=18)
plt.hlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.vlines(0., -15., 15., color="b", ls="solid", zorder=10, lw=1.5)
plt.savefig("../../Papers/data_aug_bnns/arxiv/figures/fig_1/dirichlet_aug_fit.pdf")
# class_1 = (test_set[1][0] == 1)
# class_0 = jnp.logical_not(class_1)
# plt.plot(test_set[0][0, :, 0][class_1], test_set[0][0, :, 1][class_1], "rs", ms=10, mec="k")
# plt.plot(test_set[0][0, :, 0][class_0], test_set[0][0, :, 1][class_0], "ms", ms=10, mec="k")
# plt.xlim(-15., 0.)
# plt.ylim(-15., 0.)
```
| github_jupyter |
# Problem Set 2: Classification
To run and solve this assignment, one must have a working IPython Notebook installation. The easiest way to set it up for both Windows and Linux is to install [Anaconda](https://www.continuum.io/downloads). Then save this file to your computer (use "Raw" link on gist\github), run Anaconda and choose this file in Anaconda's file explorer. Use `Python 3` version. Below statements assume that you have already followed these instructions. If you are new to Python or its scientific library, Numpy, there are some nice tutorials [here](https://www.learnpython.org/) and [here](http://www.scipy-lectures.org/).
To run code in a cell or to render [Markdown](https://en.wikipedia.org/wiki/Markdown)+[LaTeX](https://en.wikipedia.org/wiki/LaTeX) press `Ctr+Enter` or `[>|]`(like "play") button above. To edit any code or text cell [double]click on its content. To change cell type, choose "Markdown" or "Code" in the drop-down menu above.
If a certain output is given for some cells, that means that you are expected to get similar results in order to receive full points (small deviations are fine). For some parts we have already written the code for you. You should read it closely and understand what it does.
Total: 100 points.
### 1. Logistic Regression
In this part of the exercise, you will build a logistic regression model to predict whether a student
gets admitted into a university.
Suppose that you are the administrator of a university department and you want to determine
each applicant’s chance of admission based on their results on two exams. You have historical
data from previous applicants in *ex2data1.txt* that you can use as a training set for logistic regression. For each
training example, you have the applicant’s scores on two exams and the admissions decision.
Your task is to build a classification model that estimates an applicant’s probability of admission based on the scores from those two exams. This outline and code framework will guide you through the exercise.
**1\.1 Implementation**
```
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
print('Tested with:')
print('Python', sys.version)
print({x.__name__: x.__version__ for x in [np, matplotlib]})
```
**1.1.1 Visualizing the data**
Before starting to implement any learning algorithm, it is always good to visualize the data if possible. This first part of the code will load the data and display it on a 2-dimensional plot by calling the function plotData. The axes are the two exam scores, and the positive and negative examples are shown with different markers.
```
################################################################################
# Try to fit your code and comments into 80 characters because
# - it is guaranteed to look as intended on any screen size
# - it encourages you to write "flatter" logic that is easier to reason about
# - it encourages you to decompose logic into comprehensible blocks.
#
# Try to avoid reassigning/mutating variables because when you encounter an
# unexplainable error (and you will) it is easier to have the whole history
# of values to reason about.
# It is good to isolate logical parts to avoid variables leaking into the
# global scope and messing up your logic later in weird ways.
def read_classification_csv_data(fn, add_ones=False):
    """Load a comma-separated classification dataset.

    The last column is split off as the target and returned as an
    [m, 1] int32 column vector; everything before it becomes the
    feature matrix.  With ``add_ones`` set, a constant column of ones
    (the bias feature) is appended as the last feature column.
    Basic statistics of both arrays are printed as a sanity check.
    """
    raw = np.loadtxt(fn, delimiter=',')
    features = raw[:, :-1]
    targets = raw[:, -1, None]  # the `None` index keeps the trailing dim
    # Eyeball ranges/dtypes before using the data -- catching a bad
    # parse here saves hours of debugging downstream.
    print(features.shape, features.min(), features.max(), features.dtype)
    print(targets.shape, targets.min(), targets.max(), targets.dtype)
    if add_ones:
        features = np.insert(features, features.shape[1], 1, axis=1)
    # loadtxt yields float labels; cast to int so later `y == 0` checks
    # are exact integer comparisons, not float equality.
    return features, targets.astype(np.int32)
X_data, y_data = read_classification_csv_data('ex2data1.txt', add_ones=True)
print(X_data.shape, X_data.min(), X_data.max(), X_data.dtype)
print(y_data.shape, y_data.min(), y_data.max(), y_data.dtype)
# how does the *X[y.ravel()==1, :2].T trick work?
# https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists
def plot_data(X, y, labels, markers, xlabel, ylabel, figsize=(10, 6), ax=None):
    """Scatter a labeled 2-feature dataset, one marker style per class.

    Class ``k`` is every row of ``X`` whose entry in ``y`` equals ``k``
    (the position of its name in ``labels``); only the first two feature
    columns are drawn.  Pass ``figsize=None`` to draw into the current
    figure/axes instead of opening a new figure.
    """
    if figsize is not None:
        plt.figure(figsize=figsize)
    axis = ax or plt.gca()
    flat_y = y.ravel()
    for class_id, (name, marker) in enumerate(zip(labels, markers)):
        points = X[flat_y == class_id, :2]
        axis.plot(points[:, 0], points[:, 1], marker, label=name)
    axis.set_xlabel(xlabel)
    axis.set_ylabel(ylabel)
    plt.legend()
    axis.grid(True)
student_plotting_spec = {
'X': X_data,
'y': y_data,
'xlabel': 'Exam 1 score',
'ylabel': 'Exam 2 score',
'labels': ['Not admitted', 'Admitted'],
'markers': ['yo', 'k+'],
'figsize': (10, 6)
}
plot_data(**student_plotting_spec)
plt.show()
```
**1.1.2 [5pts] Sigmoid function**
Before you start with the actual cost function, recall that the logistic regression hypothesis is defined as:
$h_\theta(x) = g(\theta^Tx)$
where function g is the sigmoid function. The sigmoid function is defined as:
$g(z) = \dfrac{1}{1+e^{-z}}$
Your first step is to implement/find a sigmoid function so it can be called by the rest of your program. Your code should also work with vectors and matrices. For a matrix, your function should perform the sigmoid function on every element.
When you are finished, (a) plot the sigmoid function, and (b) test the function with a scalar, a vector, and a matrix. For scalar large positive values of x, the sigmoid should be close to 1, while for scalar large negative values, the sigmoid should be close to 0. Evaluating sigmoid(0) should give you exactly 0.5.
```
# check out scipy.special for a great variety of vectorized functions
# remember that sigmoid is the inverse of the logit function
# maybe worth checking out scipy.special.logit first
from scipy.special import expit
sigmoid = expit
def check_that_sigmoid_f(f):
    """Visual sanity checks for a sigmoid implementation ``f``.

    First plots f over [-10, 10]; then compares the analytical derivative
    sigma * (1 - sigma) against a finite-difference estimate.  For a
    correct sigmoid the two derivative curves should nearly coincide.
    """
    # np.linspace instead of a float-step np.arange: the latter iterates
    # val_{i+1} = val_i + step and can over/undershoot the endpoint due
    # to accumulated float error.
    xs = np.linspace(-10, 10, 50)
    ys = f(xs)
    plt.plot(xs, ys)
    plt.title("Sigmoid function")
    plt.grid(True)
    plt.show()
    # For the logistic sigmoid, d(sigma)/dx = sigma(x) * (1 - sigma(x)),
    # so the finite-difference slope should track this curve closely.
    exact_slope = ys * (1 - ys)
    step = xs[1] - xs[0]
    approx_slope = np.diff(ys) / step
    print(xs.shape, approx_slope.shape)
    # Finite differences estimate the slope at segment midpoints,
    # hence the half-step shift on the x axis.
    plt.plot(xs[:-1] + step / 2, approx_slope)
    plt.plot(xs, exact_slope)
    plt.title("Numerical (finite difference) derivative of 1d sigmoid")
    plt.grid(True)
    plt.show()
check_that_sigmoid_f(sigmoid)
```
**1.1.3 [15pts] Cost function and gradient**
Now you will implement the cost function and gradient for logistic regression. Complete the code
in the functions *hyposesis_function* and *binary_logistic_loss* below to return the value of the hypothesis function and the cost, respectively. Recall that the cost function in logistic regression is
$j(\theta) \ = \ \frac{1}{m} \ \sum_{i=1}^{m} \ [ \ -y^{(i)} log(h_\theta(x^{(i)})) \ - \ (1 - y^{(i)})log(1-h_\theta(x^{(i)})) \ ]$
and the gradient of the cost is a vector of the same length as $\theta$ where the $j^{th}$ element (for $j = 0, 1,...,n$) is defined as follows:
$\frac{\partial J(\theta)}{\partial \theta_{j}} \ = \ \frac{1}{m} \ \sum_{i=1}^{m} \ (h_\theta(x^{(i)})-y^{(i)}) x_j^{(i)}$
where $m$ is the number of points and $n$ is the number of features. Note that while this gradient looks identical to the linear regression gradient, the formula is
actually different because linear and logistic regression have different definitions of $h_\theta(x)$.
What is the value of loss function for $\theta = \bar 0$ regardless of input? Make sure your code also outputs this value.
```
# we are trying to fit a function that would return the
# "probability of y = 1 given x"
# hyposesis_function describes the parametric family of functions that we are
# going to pick our "best fitting function" from. It is parameterized by a
# real-valued vector theta, i.e. we are going to pick
# h_best = argmin_{h \in H} logistic_loss_h(x, y, h)
# but because there exists a bijection between theta's and h's it is
# equivalent to choosing
# theta_best = argmin_{theta} logistic_loss_theta(x, y, theta)
def hyposesis_function(x, theta):
    """Logistic-regression hypothesis: h_theta(x) = sigmoid(x . theta)."""
    logits = np.dot(x, theta)
    return sigmoid(logits)
def binary_logistic_loss(y, y_pred):
    """Mean binary cross-entropy between labels and predicted probabilities.

    Arguments (np arrays of shape):
        y      : [m, 1] ground-truth labels in {0, 1}
        y_pred : [m, 1] predicted probabilities in (0, 1)

    Returns the scalar mean of -y*log(y_pred) - (1-y)*log(1-y_pred).
    """
    assert y.shape == y_pred.shape
    # Cast to float64 up front: with unsigned-integer labels expressions
    # like (1 - y) or -1 * y silently wrap around.
    labels = y.astype(np.float64)
    probs = y_pred.astype(np.float64)
    per_point = -(labels * np.log(probs) + (1 - labels) * np.log(1 - probs))
    return per_point.mean()
def logistic_loss_theta_grad(x, y, h, theta):
    """Gradient of the (unregularized) logistic loss w.r.t. theta.

    Implements (1/m) * X^T (h(X, theta) - y), returned with the same
    [n, 1] column shape as ``theta``.

    Arguments:
        x     : [m, n] design matrix
        y     : [m, 1] ground-truth labels
        h     : hypothesis, maps (x, theta) -> [m, 1] predictions
        theta : [n, 1] parameter vector
    """
    residuals = h(x, theta) - y                 # [m, 1]
    per_feature = (residuals * x).mean(axis=0)  # [n]
    grad = per_feature[:, None]                 # back to a column vector
    assert grad.shape == theta.shape
    return grad
def logistic_loss_theta(x, y, h, theta):
    """Logistic loss of hypothesis ``h`` with parameters ``theta`` on (x, y)."""
    predictions = h(x, theta)
    return binary_logistic_loss(y, predictions)
# Check that with theta as zeros, cost is about 0.693:
theta_init = np.zeros((X_data.shape[1], 1))
print(logistic_loss_theta(X_data, y_data, hyposesis_function, theta_init))
print(logistic_loss_theta_grad(X_data, y_data, hyposesis_function, theta_init))
```
**1.1.4 Learning parameters using *fmin***
In the previous assignment, you found the optimal parameters of a linear regression model by
implementing gradient descent. You wrote a cost function and calculated its gradient, then took
a gradient descent step accordingly. This time, instead of taking gradient descent steps, you will
use a scipy.optimize built-in function called *fmin*.
The final $\theta$ value will then be used to plot the
decision boundary on the training data, as seen in the figure below.
```
import climin
from functools import partial
def optimize(theta_init, loss, loss_grad, max_iter=10000, print_every=1000, optimizer_fn=None, show=False):
    """Minimize ``loss`` starting from ``theta_init`` with a climin optimizer.

    Arguments:
        theta_init   : [n, 1] starting parameters; a copy is made, so the
                       caller's array is not mutated.
        loss         : callable theta -> scalar, used only for monitoring.
        loss_grad    : callable theta -> gradient, consumed by the optimizer.
        max_iter     : stop after this many optimizer iterations.
        print_every  : print (n_iter, loss) every this many iterations;
                       0 disables printing.
        optimizer_fn : factory (theta, grad_fn) -> climin optimizer; defaults
                       to gradient descent with heavy momentum.
        show         : if True, plot the recorded loss curve at the end.

    Returns (theta, f_value): the optimized parameters and the last
    monitored loss value.
    """
    # climin optimizers update `theta` in place, hence the copy.
    theta = theta_init.copy()
    opt_args = (theta, loss_grad)
    if optimizer_fn is None:
        optimizer_fn = partial(climin.GradientDescent, step_rate=1e-3, momentum=0.999)
    optimizer = optimizer_fn(*opt_args)
    loss_curve = []
    # climin optimizers are infinite iterators; each step yields an info
    # dict, and iteration stops only via the explicit break below.
    for opt_info in optimizer:
        n_iter = opt_info['n_iter']
        f_value = loss(theta)
        loss_curve.append(f_value)
        if print_every != 0 and n_iter % print_every == 0:
            print(n_iter, f_value)
        if n_iter == max_iter:
            break
    if show:
        plt.plot(loss_curve)
        plt.show()
    # NOTE(review): `f_value` is only bound if the loop ran at least once;
    # max_iter >= 1 is assumed.
    return theta, f_value
theta_init = np.zeros((3, 1))
loss = partial(logistic_loss_theta, X_data, y_data, hyposesis_function)
loss_grad = partial(logistic_loss_theta_grad, X_data, y_data, hyposesis_function)
theta, best_cost = optimize(theta_init, loss, loss_grad, show=True)
print(best_cost)
# Plotting the decision boundary: two points, draw a line between
# Decision boundary occurs when h = 0, or when
# theta_0*x1 + theta_1*x2 + theta_2 = 0
# y=mx+b is replaced by x2 = (-1/theta1)(theta2 + theta0*x1)
line_xs = np.array([np.min(X_data[:,0]), np.max(X_data[:,0])])
line_ys = (-1./theta[1])*(theta[2] + theta[0]*line_xs)
plot_data(**student_plotting_spec)
plt.plot(line_xs, line_ys, 'b-', lw=10, alpha=0.2, label='Decision Boundary')
plt.legend()
plt.show()
```
**1.1.5 [15pts] Evaluating logistic regression**
After learning the parameters, you can use the model to predict whether a particular student will
be admitted.
(a) [5 pts] Show that for a student with an Exam 1 score of 45 and an Exam 2 score of 85, you should
expect to see an admission probability of 0.776.
Another way to evaluate the quality of the parameters we have found is to see how well the
learned model predicts on our training set.
(b) [10 pts] In this part, your task is to complete the code in
*makePrediction*. The predict function will produce “1” or “0” predictions given a dataset and a learned
parameter vector $\theta$. After you have completed the code, the script below will proceed to report the
training accuracy of your classifier by computing the percentage of examples it got correct. You
should also see a Training Accuracy of 89.0.
```
# For a student with an Exam 1 score of 45 and an Exam 2 score of 85,
# you should expect to see an admission probability of 0.776.
check_data = np.array([[45., 85., 1]])
print(check_data.shape)
print(hyposesis_function(check_data, theta))
def predict(x, theta):
    """Hard 0/1 predictions: True where the predicted probability >= 0.5."""
    probabilities = hyposesis_function(x, theta)
    return probabilities >= 0.5
def accuracy(x, y, theta):
    """Fraction of examples whose hard prediction matches the label in ``y``."""
    hits = predict(x, theta) == y
    return np.mean(hits)
print(accuracy(X_data, y_data, theta))
```
### 2. Regularized logistic regression
In this part of the exercise, you will implement regularized logistic regression to predict whether microchips from a fabrication plant pass quality assurance (QA). During QA, each microchip goes through various tests to ensure it is functioning correctly. Suppose you are the product manager of the factory and you have the test results for some microchips on two different tests. From these two tests, you would like to determine whether the microchips should be accepted or rejected. To help you make the decision, you have a dataset of test results on past microchips in *ex2data2.txt*, from which you can build a logistic regression model.
**2.1 Visualizing the data**
Similar to the previous parts of this exercise, plotData is used to generate the figure below,
where the axes are the two test scores, and the positive (y = 1, accepted) and negative (y = 0,
rejected) examples are shown with different markers.
The figure below shows that our dataset cannot be separated into positive and negative examples by a
straight line. Therefore, a straightforward application of logistic regression will not perform well on this dataset since logistic regression will only be able to find a linear decision boundary.
```
X_data_, y_data = read_classification_csv_data('ex2data2.txt')
X_data = X_data_ - X_data_.mean(axis=0)[None, :]
print(X_data.shape, X_data.min(), X_data.max(), X_data.dtype)
print(y_data.shape, y_data.min(), y_data.max(), y_data.dtype)
chip_plotting_spec = {
'X': X_data,
'y': y_data,
'xlabel': 'Microchip Test 1 Result',
'ylabel': 'Microchip Test 2 Result',
'labels': ['rejected', 'accepted'],
'markers': ['yo', 'k+'],
'figsize': (6, 6)
}
plot_data(**chip_plotting_spec)
plt.show()
```
**2.2 Nonlinear feature mapping**
One way to fit the data better is to create more features from each data point. In *mapFeature* below, we will map the features into all polynomial terms of $x_1$ and $x_2$ up to the
sixth power as follows:
\begin{equation}
mapFeature(x) \ = \
\begin{bmatrix}
1 \\
x_1 \\
x_2 \\
x_1^2 \\
x_1x_2 \\
x_2^2 \\
x_1^3 \\
\vdots \\
x_1x_2^5 \\
x_2^6 \\
\end{bmatrix}
\end{equation}
As a result of this mapping, our vector of two features (the scores
on two QA tests) has been transformed into a 28-dimensional
vector. A logistic regression classifier trained on this
higher-dimension feature vector will have a more complex
decision boundary and will appear nonlinear when drawn in our
2-dimensional plot.
While the feature mapping allows us to build a more expressive
classifier, it is also more susceptible to overfitting. In the next parts
of the exercise, you will implement regularized logistic regression
to fit the data and also see for yourself how regularization can help combat the overfitting problem.
Either finite-dimensional (or even infinite-dimensional, as you will see in the SVM lecture and the corresponding home assignment) feature mappings are usually denoted by $\Phi$, and therefore our hypothesis is now that the Bernoulli probability of a chip malfunctioning might be described as
$$ p_i = \sigma(\Phi(x_i)^T \theta)$$
```
from itertools import combinations_with_replacement
def polynomial_feature_map(X_data, degree=20, show_me_ur_powers=False):
    """Map two features to all monomials x1^p * x2^q with p + q <= degree.

    Arguments:
        X_data            : [m, 2] feature matrix.
        degree            : maximum total power of a monomial.
        show_me_ur_powers : if truthy, print the generated exponent table.

    Returns:
        [m, (degree+1)(degree+2)/2] matrix of monomial features, ordered
        by non-decreasing total power (the constant term 1 comes first).
    """
    assert len(X_data.shape) == 2
    group_size = X_data.shape[1]
    # The exponent construction below is only valid for exactly two
    # features: it turns sorted pairs (a, b) with a <= b <= degree into
    # exponent pairs (a, b - a), which enumerates every (p, q) with
    # p, q >= 0 and p + q <= degree.  For any other width it would
    # silently produce wrong features (or crash), so fail loudly.
    if group_size != 2:
        raise ValueError(
            'polynomial_feature_map supports exactly 2 features, got %d'
            % group_size)
    comb_iterator = combinations_with_replacement(range(degree + 1), group_size)
    sorted_pairs = np.array(list(comb_iterator))
    powers_unordered = sorted_pairs.copy()
    powers_unordered[:, 1] -= sorted_pairs[:, 0]
    # Reorder monomials by rising total power (constant, linear, ...).
    rising_power_idx = np.argsort(powers_unordered.sum(axis=1))
    powers = powers_unordered[rising_power_idx]
    if show_me_ur_powers:
        print(powers.T)
        print('total power per monomial', powers.sum(axis=1))
    # Broadcast: [m, 2, 1] ** [1, 2, n_monomials] -> [m, 2, n_monomials],
    # then multiply the two per-feature powers of each monomial together.
    X_with_powers = np.power(X_data[:, :, None], powers.T[None])
    return np.prod(X_with_powers, axis=1)
X_pf = polynomial_feature_map(X_data, show_me_ur_powers=True)
print(X_pf.shape)
```
**2.3 Cost function and gradient**
Now you will implement code to compute the cost function and gradient for regularized logistic
regression. Recall that the regularized cost function in logistic regression is:
$j(\theta) \ = \ [ \ \frac{1}{m} \ \sum_{i=1}^{m} \ [ \ -y^{(i)} log(h_\theta(x^{(i)})) \ - \ (1 - y^{(i)})log(1-h_\theta(x^{(i)})) \ ] \ ] \ + \frac{\lambda}{2m} \sum_{j=1}^{n} \theta_j^2 $
Note that you should not regularize the parameter $\theta_0$ (Why not? Think about why that would be a bad idea).
The gradient of the cost function is a vector where the j element is defined as follows (you should understand how to obtain this expression):
$\frac{\partial J(\theta)}{\partial \theta_{0}} \ = \ \frac{1}{m} \ \sum_{i=1}^{m} \ (h_\theta(x^{(i)})-y^{(i)}) x_j^{(i)} \quad \quad \quad \quad \quad \quad$ for $\quad j=0$
$\frac{\partial J(\theta)}{\partial \theta_{j}} \ = \ (\frac{1}{m} \ \sum_{i=1}^{m} \ (h_\theta(x^{(i)})-y^{(i)}) x_j^{(i)}) + \frac{\lambda}{m}\theta_j \quad \quad \quad$ for $\quad j \ge 1$
**2.3.1 [10pts] Implementing regularized logistic regression**
Re-implement computeCost with regularization.
```
# Cost function, default lambda (regularization) 0
def logistic_loss_theta_w_reg(x, y, h, theta, lambda_=0.0):
    """L2-regularized logistic loss.

    Adds lambda_ / (2m) * sum(theta[1:] ** 2) to the plain logistic loss.
    theta[0] is deliberately excluded from the penalty (the bias weight
    is never regularized).
    """
    m = x.shape[0]
    data_loss = logistic_loss_theta(x, y, h, theta)
    penalty = np.sum(theta[1:] ** 2) / (2 * m)
    return data_loss + lambda_ * penalty
def logistic_loss_theta_w_reg_grad(x, y, h, theta, lambda_=0.0):
    """Gradient of the L2-regularized logistic loss.

    The data term comes from logistic_loss_theta_grad; the penalty adds
    lambda_ * theta / m to every component except index 0, matching the
    loss, which leaves the bias weight unregularized.
    """
    m = x.shape[0]
    penalty_grad = lambda_ * theta / m
    penalty_grad[0] = 0  # never regularize the bias weight
    return logistic_loss_theta_grad(x, y, h, theta) + penalty_grad
```
Once you are done, you will call your cost function using the initial value of
θ (initialized to all zeros). You should see that the cost is about 0.693.
```
theta_init = np.zeros((X_pf.shape[1], 1))
print(logistic_loss_theta_w_reg(X_pf, y_data, hyposesis_function, theta_init))
print(logistic_loss_theta_w_reg_grad(X_pf, y_data, hyposesis_function, theta_init))
loss = partial(logistic_loss_theta_w_reg, X_pf, y_data, hyposesis_function)
loss_grad = partial(logistic_loss_theta_w_reg_grad, X_pf, y_data, hyposesis_function)
theta, best_cost = optimize(theta_init, loss, loss_grad, max_iter=10000, print_every=0, show=True)
print('best loss', best_cost)
print('best acc', accuracy(X_pf, y_data, theta))
```
**2.3.2 [15pts] Learning parameters using *minimize***
You will use *optimize.minimize* to learn the optimal parameters $\theta$. If you
have completed the cost and gradient for regularized logistic regression correctly, you should be able to learn the parameters
$\theta$ using *minimize*. Implement the function *optimizeRegularizedTheta* below.
**2.4 Plotting the decision boundary**
To help you visualize the model learned by this classifier, we have provided the function
*plotBoundary* which plots the (non-linear) decision boundary that separates the
positive and negative examples.
```
def plot_boundary(theta, ax=None):
    """
    Draw the decision boundary implied by ``theta`` on a fixed grid.

    Builds a 50x50 grid over [-1, 1.5]^2, pushes every grid point through
    the polynomial feature map and the hypothesis, and draws the 0.5
    probability contour (the decision boundary).

    Returns the matplotlib ContourSet so callers can label it via clabel.
    """
    ax = ax or plt.gca()
    x_range = np.linspace(-1,1.5,50)
    y_range = np.linspace(-1,1.5,50)
    xx, yy = np.meshgrid(x_range, y_range)
    # Flatten the grid into a [2500, 2] matrix of (x1, x2) points.
    X_fake = np.stack([xx, yy]).reshape(2, -1).T
    X_fake_fm = polynomial_feature_map(X_fake)
    y_pred_fake = hyposesis_function(X_fake_fm, theta)
    # NOTE(review): contour expects Z[i, j] at (x_range[j], y_range[i]);
    # the reshape already yields that orientation, so the extra .T swaps
    # the axes.  Harmless only because x_range == y_range here -- verify
    # if the grid ranges ever diverge.
    return ax.contour( x_range, y_range, y_pred_fake.reshape(50, 50).T, [0.5])
```
**2.4.1 [10pts] Plot Decision Boundaries**
(a) [4 pts] Use *plotBoundary* to obtain four subplots of the decision boundary for the following values of the regularization parameter: $\lambda \ = \ 0, 1, 10, 100$
(b) [2 pts] Comment on which plots are overfitting and which plots are underfitting.
(c) [2 pts] Which is the model with the highest bias? The highest variance?
(d) [2 pts] What is another way to detect overfitting?
```
# (a) Build a figure showing contours for various values of regularization parameter, lambda
np.random.seed(2)
train_idx_mask = np.random.rand(X_pf.shape[0]) < 0.3
X_pf_train, y_train = X_pf[train_idx_mask], y_data[train_idx_mask]
X_pf_test, y_test = X_pf[~train_idx_mask], y_data[~train_idx_mask]
print([x.shape for x in (X_pf_train, y_train, X_pf_test, y_test)])
def silent_optimize_w_lambda(lambda_):
    """Fit regularized logistic regression on the train split for one lambda.

    Uses the module-level X_pf / X_pf_train / y_train / hyposesis_function
    globals; runs 1000 gradient-descent iterations with no printing or
    plotting.  Returns (theta, final_loss).
    """
    theta_init = np.zeros((X_pf.shape[1], 1))
    data = (X_pf_train, y_train, hyposesis_function)
    loss = partial(logistic_loss_theta_w_reg, *data, lambda_=lambda_)
    loss_grad = partial(logistic_loss_theta_w_reg_grad, *data, lambda_=lambda_)
    # NOTE(review): step rate is 10x smaller than optimize()'s default --
    # presumably for stability with the high-degree polynomial features;
    # confirm before changing.
    optimizer_fn = partial(climin.GradientDescent, step_rate=1e-4, momentum=0.999)
    theta, final_loss = optimize(theta_init, loss, loss_grad, optimizer_fn=optimizer_fn, max_iter=1000, print_every=0, show=False)
    return theta, final_loss
thetas = []
plt.figure(figsize=(12,10))
# wow, I mutated an object used in the scope of another function (plot_data)
# don't do that! it is really hard to debug later
chip_plotting_spec['figsize'] = None
for id_, lambda_ in enumerate([0, 1, 5, 10]):
ax = plt.subplot(2, 2, id_+1)
theta, final_loss = silent_optimize_w_lambda(lambda_)
thetas.append(theta)
cnt = plot_boundary(theta, ax)
plot_data(**chip_plotting_spec)
cnt_fmt = {0.5: 'Lambda = %d' % lambda_}
ax.clabel(cnt, inline=1, fontsize=15, fmt=cnt_fmt)
acc = accuracy(X_pf_test, y_test, theta)
ax.set_title("Decision Boundary, Accuracy = %.2f, Loss=%.2f" % (acc, final_loss))
ax.plot(*X_data[train_idx_mask].T, 'r.', alpha=0.3)
plt.show()
ax = None
for th_id, theta in enumerate(thetas):
ax = plt.subplot(2, 2, th_id+1, sharey=ax)
ax.plot(theta)
plt.show()
```
### 3. Written part
These problems are extremely important preparation for the exam. Submit solutions to each problem by filling the markdown cells below.
**3.1 [10pts]** Maximum likelihood for Logistic Regression
Showing all steps, derive the LR cost function using maximum likelihood. Assume that
the probability of y given x is described by:
$P(\ y=1 \; \vert \; x \ ; \ \theta \ ) = h_{\theta}(x)$
$P(\ y=0 \; \vert \; x \ ; \ \theta \ ) = 1 - h_{\theta}(x)$
**First we write down the likelihood of the data given the parameters, which is:**
**$L(\theta) = \prod_{i=1}^{m}{h(x^i)}^{y^i}(1-h(x^i))^{1-y^i}$**
**Then we take log of both sides to get:**
**$ln L(\theta) = \sum_{i=1}^{m}{y^i \ln h(x^i)+(1-y^i)\ln(1-h(x^i))}$**
**which is the same (up to a constant multiplier) as the logistic regression cost.**
**3.2 [10pts]** Logistic Regression Classification with Label Noise
Suppose you are building a logistic regression classifier for images of dogs, represented by a feature vector x, into one of two categories $y \in \{0,1\}$, where 0 is “terrier” and 1 is “husky.” You decide to use the logistic regression model $p(y = 1 \ \vert \ x) = h_{\theta}(x)=\sigma(\theta^Tx).$ You collected an image dataset **D**$\ = \{x^{(i)},t^{(i)}\}$, however, you were very tired and made
some mistakes in assigning labels $t^{(i)}.$ You estimate that you were correct in about $\tau$ fraction of all cases.
(a) Write down the equation for the posterior probability $p(t = 1 \ \vert \ x)$ of the label being 1 for some point x, in terms of the probability of the true class, $p(y = 1 \ \vert \ x).$
(b) Derive the modified cost function in terms of $\ \theta, x^{(i)},t^{(i)}$ and $\tau$.
**(a) Using the sum rule, **
**$p(t=1 \ \vert \ x) \ = \ p(t=1,y=1 \ \vert \ x) \ + \ p(t=1,y=0 \ \vert \ x)$**
**$ \quad \quad \quad \quad \ = \ p(t=1 \ \vert \ y=1,x)p(y=1 \ \vert \ x) \ + \ p(t=1 \ \vert \ y=0,x)p(y=0 \ \vert \ x)$**
**$ \quad \quad \quad \quad \ = \ \tau \ p(y=1 \ \vert \ x) \ + \ (1-\tau)(1-p(y=1 \ \vert \ x))$**
**Here we used the fact that $p(t=1 \ \vert \ y=1,x)$ is the probability of the label being correct and $p(t=1 \ \vert \ y=0,x)$ is the probability of the label being incorrect.**
**(b) Substituting the expression for $p(t=1 \ \vert \ x)$ from (a) gives the final cost**
**$-ln \ p(D \ \vert \ \theta) \ = \ -\sum_{i=1}^{m} t^{(i)} \ ln \ [\ \tau \ p(y=1 \ \vert \ x) \ + \ (1-\tau)(1-p(y=1 \ \vert \ x)) \ ]$**
**$\quad \quad \quad \quad \quad \quad + \ (1-t^{(i)}) \ ln \ (1 - [ \ \tau \ p(y=1 \ \vert \ x) + (1- \tau)(1-p(y=1 \ \vert \ x)) \ ])$**
**$\quad \quad \quad \quad \ = \ -\sum_{i=1}^{m} t^{(i)} \ ln \ [\ \tau \ \sigma(\theta^Tx^{(i)}) \ + \ (1-\tau)(1-\sigma(\theta^Tx^{(i)})) \ ]$**
**$\quad \quad \quad \quad \quad \quad + \ (1-t^{(i)}) \ ln \ (1 - [ \ \tau \ \sigma(\theta^Tx^{(i)}) + (1- \tau)(1-\sigma(\theta^Tx^{(i)})) \ ])$**
**This is okay, but we can also simplify further:**
**$-ln \ p(D \ \vert \ \theta) \ = \ -\sum_{i=1}^{m} t^{(i)} \ ln \ [\ \sigma(\theta^Tx^{(i)})(2 \tau - 1) - \tau + 1 \ ]$**
**$\quad \quad \quad \quad \quad \quad + \ (1-t^{(i)}) \ ln \ (-\sigma(\theta^Tx^{(i)})(2 \tau -1) + \tau) $**
**3.3 [10pts] Cross-entropy loss for multiclass classification**
This problem asks you to derive the cross-entropy loss for a multiclass classification problem using maximum likelihood.
Consider the multiclass classification problem in which each input is assigned to one of $K$ mutually exclusive classes. The binary target variables $y_k$ ∈ {0, 1} have a "one-hot" coding scheme, where the value is 1 for the indicated class and 0 for all others. Assume that we can interpret the network outputs as $h_k(x,\theta) = p(y_k = 1|x)$, or the probability of the kth class.
Show that the maximum likelihood estimate of the parameters $\theta$ can be obtained by minimizing the multiclass *cross-entropy* loss function
<p>
$L(\theta)= - \frac{1}{N}\sum_{i=1}^{N} \sum_{k=1}^{K} y_{ik} \log(h_k(x_i,\theta))$
</p>
<p>
where $N$ is the number of examples $\{x_i,y_i\}$. </p>
**For a single example $x_i,y_i$, the log-likelihood function can be written as:**
**$\log P(y_{i}\ \vert \ x_i, \theta) = \log \prod_{k=1}^Kh_k(x_i, \theta)^{y_{ik}} =\sum_{k=1}^Ky_{ik} \log h_k(x_i,\theta)$**
**Due to the fact that $y_i$ is one-hot. Then the maximum likelihood solution maximizes**
**$\sum_{i=1}^N \log(P(y_{i}\ \vert \ x_i, \theta)) = \sum_{i=1}^N \sum_{k=1}^K y_{ik} \log(h_k(x_i, \theta))$**
**which is equivalent to minimizing $L(\theta)$. The constant factor $\frac{1}{N}$ does not change the solution.**
### Fix for Windows users
So here's an alternative implementation of `optimize()` that uses scipy instead of climin, in case you find it difficult to install that package for some reason. It requires a little more work because of the way this function passes values (gradients).
In addition to replacing the optimize function with the one given below, you should also
1. add `theta = theta.reshape((-1, 1))` at the top of the functions hyposesis_function, logistic_loss_theta_grad, and logistic_loss_theta
2. Remove the `optimizer_fn` argument from all calls to optimize(), such as in 2.4.1
3. Add `ax.set_ylim((-1, 1))` in 2.4.1(e)
4. Apply `.ravel()` before returning from your gradient function
```
import scipy.optimize
def optimize(theta_init, loss, loss_grad, max_iter=10000, print_every=1000, optimizer_fn=None, show=False):
    """Minimize ``loss`` with scipy.optimize (drop-in for the climin version).

    Arguments:
        theta_init   : starting parameter vector (scipy flattens it to 1-D).
        loss         : callable theta -> scalar loss.
        loss_grad    : callable theta -> gradient; must return a flat array
                       for scipy's `jac`.
        max_iter     : iteration cap passed to the scipy optimizer.
        print_every  : unused here; kept so call sites written for the
                       climin-based optimize() keep working.
        optimizer_fn : factory accepting scipy.optimize.minimize kwargs;
                       defaults to BFGS with a loss-recording callback.
        show         : if True, plot the recorded loss curve at the end.

    Returns (theta, best_loss) with theta reshaped to an [n, 1] column.
    """
    # Fix: the original made an unused copy `theta = theta_init.copy()`
    # while handing `theta_init` itself to the optimizer; scipy never
    # mutates x0, so no copy is needed at all.
    opt_args = {'x0': theta_init, 'fun': loss, 'jac': loss_grad, 'options': {'maxiter': max_iter}}
    loss_curve = []
    def scipy_callback(theta):
        # Invoked by scipy once per iteration with the current iterate.
        loss_curve.append(loss(theta))
    if optimizer_fn is None:
        optimizer_fn = partial(scipy.optimize.minimize, method='BFGS', callback=scipy_callback)
    opt_result = optimizer_fn(**opt_args)
    if show:
        plt.plot(loss_curve)
        plt.show()
    return opt_result['x'].reshape((-1, 1)), opt_result['fun']
```
| github_jupyter |
```
import os
import sys
import re
from pathlib import Path
from IPython.display import display, HTML, Markdown
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns
# Project level imports
from larval_gonad.notebook import Nb
from larval_gonad.io import cellranger_umi
# Setup notebook
nbconfig = Nb.setup_notebook(seurat_dir='../output/scrnaseq-wf/scrnaseq_combine_force')
cluster_annot = {
0: 'LS',
1: 'MC',
2: 'MS',
3: 'ES',
4: 'LC',
5: 'EC',
6: 'SP',
7: 'TE',
8: 'PC',
9: 'U9',
10: 'U10',
11: 'U11'
}
cluster_order = [
'SP',
'ES',
'MS',
'LS',
'EC',
'MC',
'LC',
'TE',
'PC',
'U9',
'U10',
'U11'
]
clusters = nbconfig.seurat.get_clusters('res.0.6')
clusters = clusters.map(cluster_annot)
clusters = pd.Series(pd.Categorical(clusters, categories=cluster_order, ordered=True), index=clusters.index)
clusters.name = 'cluster'
def munge(fname, rep):
    """Build a per-cell UMI table for one replicate.

    Counts UMIs per cell from a 10x molecule_info file, prefixes cell ids
    with the replicate name, joins the module-level ``clusters`` series,
    ranks cells by UMI count, and flags cells called as doublets by the
    DoubletDetection and DoubletFinder result files.

    fname : path to the cellranger molecule_info.h5 for this replicate.
    rep   : replicate label, e.g. 'rep1', used as a cell-id prefix.
    """
    # Group size per cell_id is used as the UMI count -- assumes
    # cellranger_umi returns one row per molecule (TODO confirm).
    dat = cellranger_umi(fname).groupby('cell_id').size()
    dat.name = 'umi_count'
    # Prefix ids so cells from different replicates don't collide when
    # aligned against the combined `clusters` index.
    dat.index = f'{rep}_' + dat.index.values
    dat = pd.concat([dat, clusters], axis=1, sort=True)
    dat.sort_values('umi_count', ascending=False, inplace=True)
    dat['cell_rank'] = range(1, dat.shape[0] + 1)
    dat['dups'] = False
    dat['dupFinder'] = False
    # Doublet-call files hold one (prefixed) cell id per line.
    with open(f'../output/notebook/2018-10-29_testing_doubletdetection_{rep}.txt') as fh:
        dups = fh.read().strip().split('\n')
    dat.loc[dups, 'dups'] = True
    with open(f'../output/notebook/2018-10-30_testing_doubletFinder_{rep}.txt') as fh:
        dups = fh.read().strip().split('\n')
    dat.loc[dups, 'dupFinder'] = True
    return dat
def plot_knee(dat, ax, title='All Cells', **kwargs):
    """Log-log "knee plot" of UMI count versus cell rank on ``ax``.

    Scatters dat's cell_rank against umi_count (both axes log-scaled),
    draws a dashed red reference line at 10**5 UMIs, and sets the panel
    title.  Keyword arguments override the default scatter styling.
    """
    defaults = dict(x='cell_rank', y='umi_count', s=10, alpha=.8, color='grey', ax=ax)
    scatter_kwargs = {**defaults, **kwargs}
    dat.plot.scatter(**scatter_kwargs)
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.axhline(10 ** 5, ls='--', color='red', lw=.8)
    ax.set_title(title, fontsize=8)
def plotall(dat, title):
    """Knee plots for a whole replicate: one large overview panel plus one
    small panel per cluster, with doublet calls overlaid.

    Color code: red = DoubletDetection call, blue = DoubletFinder call,
    yellow = called by both tools; grey = all other cells.
    """
    # figure layout: row 0 (triple height) holds the all-cells overview;
    # rows 1-3 hold the 12 per-cluster panels, all sharing its log axes.
    fig = plt.figure(figsize=plt.figaspect(1/2))
    fig.suptitle(title, fontsize=12)
    gs = GridSpec(4, 4, height_ratios=[3, 1, 1, 1], wspace=.1)
    ax0 = fig.add_subplot(gs[0, :])
    ax1 = fig.add_subplot(gs[1, 0], sharex=ax0, sharey=ax0)
    ax2 = fig.add_subplot(gs[1, 1], sharex=ax0, sharey=ax0)
    ax3 = fig.add_subplot(gs[1, 2], sharex=ax0, sharey=ax0)
    ax4 = fig.add_subplot(gs[1, 3], sharex=ax0, sharey=ax0)
    ax5 = fig.add_subplot(gs[2, 0], sharex=ax0, sharey=ax0)
    ax6 = fig.add_subplot(gs[2, 1], sharex=ax0, sharey=ax0)
    ax7 = fig.add_subplot(gs[2, 2], sharex=ax0, sharey=ax0)
    ax8 = fig.add_subplot(gs[2, 3], sharex=ax0, sharey=ax0)
    ax9 = fig.add_subplot(gs[3, 0], sharex=ax0, sharey=ax0)
    ax10 = fig.add_subplot(gs[3, 1], sharex=ax0, sharey=ax0)
    ax11 = fig.add_subplot(gs[3, 2], sharex=ax0, sharey=ax0)
    ax12 = fig.add_subplot(gs[3, 3], sharex=ax0, sharey=ax0)
    axes = [
        ax0, ax1, ax2, ax3, ax4,
        ax5, ax6, ax7, ax8, ax9,
        ax10, ax11, ax12,
    ]
    # Plot all cells: grey base layer first, then doublet calls on top
    # (higher zorder) so the flagged cells stay visible.
    plot_knee(dat, axes[0], zorder=1)
    _dups = dat.query('dups == True')
    if _dups.shape[0] > 0:
        plot_knee(_dups, axes[0], color='red', zorder=5)
    _dups = dat.query('dupFinder == True')
    if _dups.shape[0] > 0:
        plot_knee(_dups, axes[0], color='blue', zorder=5)
    _dups = dat.query('dups == True & dupFinder == True')
    if _dups.shape[0] > 0:
        plot_knee(_dups, axes[0], color='yellow', zorder=10, alpha=1)
    # Plot subsets of cells: same layering, one panel per cluster.
    _axes = np.array(axes[1:])
    _axes = _axes.reshape(3, 4)
    for (g, dd), ax in zip(dat.groupby('cluster'), _axes.flatten()):
        plot_knee(dd, ax, title=g, zorder=1)
        _dups = dd.query('dups == True')
        if _dups.shape[0] > 0:
            plot_knee(_dups, ax, title=g, color='r', zorder=5)
        _dups = dd.query('dupFinder == True')
        if _dups.shape[0] > 0:
            plot_knee(_dups, ax, title=g, color='blue', zorder=5)
        _dups = dd.query('dups == True & dupFinder == True')
        if _dups.shape[0] > 0:
            plot_knee(_dups, ax, title=g, color='yellow', zorder=10, alpha=1)
    # Clean up
    # Remove y labels (replaced by one shared figure-level label below)
    for ax in axes:
        ax.set_ylabel('')
    fig.text(0.08, 0.5, 'UMI Count', ha='left', va='center', rotation=90, fontsize=12)
    # Remove x labels (same: one shared label at the bottom)
    for ax in axes:
        ax.set_xlabel('')
    fig.text(0.5, 0, 'Cell Rank', ha='center', va='bottom', fontsize=12)
    # Remove extra ticks: only the outer-edge panels keep tick labels
    axes[0].xaxis.set_visible(False)
    for ax in _axes[:-1, :].flatten():
        ax.xaxis.set_visible(False)
    for ax in _axes[:, 1:].flatten():
        ax.yaxis.set_visible(False)
    # Fix limits so all panels cover the same rank/count range
    for ax in axes:
        ax.set_xlim(1, 10**6)
        ax.set_ylim(1, 10**6)
        ax.margins(0.4)
dat = munge('../output/scrnaseq-wf/scrnaseq_samples/testis1_force/outs/molecule_info.h5', 'rep1')
plotall(dat, 'Replicate 1')
df = pd.crosstab(dat['dups'], dat['dupFinder'])
df.index.name = 'DoubletDetection'
df.columns.name = 'DoubletFinder'
df.applymap(lambda x: f'{x:,}')
dat = munge('../output/scrnaseq-wf/scrnaseq_samples/testis2_force/outs/molecule_info.h5', 'rep2')
plotall(dat, 'Replicate 2')
df = pd.crosstab(dat['dups'], dat['dupFinder'])
df.index.name = 'DoubletDetection'
df.columns.name = 'DoubletFinder'
df.applymap(lambda x: f'{x:,}')
dat = munge('../output/scrnaseq-wf/scrnaseq_samples/testis3_force/outs/molecule_info.h5', 'rep3')
plotall(dat, 'Replicate 3')
df = pd.crosstab(dat['dups'], dat['dupFinder'])
df.index.name = 'DoubletDetection'
df.columns.name = 'DoubletFinder'
df.applymap(lambda x: f'{x:,}')
```
| github_jupyter |
```
!pip install transformers==4.2.0
import sys
sys.path.append('/content/drive/MyDrive/MAIS')
import os
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import GPT2LMHeadModel, GPT2Config, AdamW, get_linear_schedule_with_warmup
from utils import get_tokenizer, set_seed
from adataset import GPT2Dataset
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import json
import argparse
import time
from tqdm import tqdm_notebook, tnrange
# Hyperparameters and paths, declared argparse-style so the notebook mirrors the
# original training script; a fixed argv is parsed below since no CLI is available.
parser = argparse.ArgumentParser()
parser.add_argument("--lr",default=5e-5, type=float, help="learning rate")
parser.add_argument("--seed",default=42, type=int, help="seed to replicate results")
parser.add_argument("--n_gpu",default=1, type=int, help="no of gpu available")
parser.add_argument("--gradient_accumulation_steps",default=2, type=int, help="gradient_accumulation_steps")
parser.add_argument("--batch_size",default=1, type=int, help="batch_size")
parser.add_argument("--num_workers",default=4, type=int, help="num of cpus available")
parser.add_argument("--device",default=torch.device('cpu'), type=torch.device, help="torch.device object")
parser.add_argument("--num_train_epochs",default=1, type=int, help="no of epochs of training")
parser.add_argument("--output_dir",default='./output', type=str, help="path to save evaluation results")
parser.add_argument("--model_dir",default='./weights', type=str, help="path to save trained model")
parser.add_argument("--max_grad_norm",default=1.0, type=float, help="max gradient norm.")
parser.add_argument("--data_dir",default='./data', type=str, help="location of json dataset.")
# parser.add_argument("--ids_file",default='./data', type=str, help="location of train, valid and test file indexes")
# Parse a hard-coded argv: train on the sarcasm CSV, save under the Drive model dir
args = parser.parse_args(["--device", "cpu", "--data_dir", "/content/drive/MyDrive/MAIS/train-balanced-sarcasm.csv", "--model_dir", "/content/drive/MyDrive/MAIS/model"])
print(args)
def train(args, model, tokenizer, train_dataset, ignore_index):
    """Fine-tune `model` on `train_dataset` with gradient accumulation.

    args: namespace with lr, batch_size, num_workers, device, num_train_epochs,
        gradient_accumulation_steps, max_grad_norm.
    model: language model trained in place (returns logits as output[0]).
    tokenizer: unused here; kept for interface compatibility with the caller.
    train_dataset: Dataset yielding dicts with 'context' and 'loc_sep'.
    ignore_index: token id excluded from the loss (padding).
    """
    writer = SummaryWriter('./logs')
    train_sampler = RandomSampler(train_dataset)
    train_dl = DataLoader(train_dataset,
                          sampler=train_sampler,
                          batch_size=args.batch_size,
                          num_workers=args.num_workers)
    loss_fct = CrossEntropyLoss(ignore_index=ignore_index)  # ignores padding token for loss calculation
    optimizer = AdamW(model.parameters(), lr=args.lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, 100, 80000)
    global_step = 0  # counts optimizer steps (i.e. accumulated updates)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = tnrange(int(args.num_train_epochs), desc='epochs')
    # NOTE(review): the driver cell calls set_seed(args.seed) while this call
    # passes the whole namespace — confirm which signature utils.set_seed expects.
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm_notebook(train_dl, desc='training')  # DataLoader wrapped in a progress bar
        for step, batch in enumerate(epoch_iterator):
            # Inputs and labels are the same sequence; the loss is computed only
            # over the tokens after the <|sep|> marker.
            inputs, labels = batch['context'], batch['context']
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            logits = model(inputs)[0]  # (batch_size, sequence_length, vocab_size)
            # Shift logits/labels so position t predicts token t+1, starting at <|sep|>.
            loc_sep = batch['loc_sep']  # location of the <|sep|> token
            # NOTE(review): a tensor-valued loc_sep only works as a slice bound for
            # batch_size == 1 (the default) — confirm before raising batch_size.
            shifted_logits = logits[:, loc_sep:-1, :].contiguous()
            shifted_labels = labels[:, loc_sep+1:].contiguous()
            # Accumulate gradients over gradient_accumulation_steps micro-batches.
            loss = loss_fct(shifted_logits.view(-1, shifted_logits.size(-1)), shifted_labels.view(-1))
            loss /= args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # Clip once per optimizer step, after the gradients are fully
                # accumulated. Clipping every micro-batch (as before) bounds only
                # partial gradients, not the accumulated norm that is applied.
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # update learning rate schedule
                model.zero_grad()
                global_step += 1
                # get_last_lr() replaces the deprecated scheduler.get_lr()
                writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
                writer.add_scalar('loss', (tr_loss - logging_loss)/args.gradient_accumulation_steps, global_step)
                logging_loss = tr_loss
                print("loss:", loss.item(), end='\n\n')
# Build dataset and tokenizer; the padding token id is excluded from the loss.
train_data = GPT2Dataset(args.data_dir)
len(train_data)
tokenizer = get_tokenizer()
ignore_idx = tokenizer.pad_token_id
# model = GPT2LMHeadModel.from_pretrained('gpt2')
# model.resize_token_embeddings(len(tokenizer))
# Resume from a previously saved full-model checkpoint on Drive.
model = torch.load("/content/drive/MyDrive/MAIS/model_1.pt", map_location=args.device)
model.to(args.device)
# training time (!!)
# NOTE(review): train() itself calls set_seed(args), not set_seed(args.seed) —
# confirm which signature utils.set_seed expects; one of the two calls is wrong.
set_seed(args.seed)
start = time.time()
print("start time: ", start)
train(args, model, tokenizer, train_data, ignore_idx)
print('total time: ', (time.time()-start)/60, " minutes", end='\n\n')
# print('Saving trained model...')
# model_file = os.path.join(args.model_dir, 'model_data{}_trained_after_{}_epochs_only_sum_loss_ignr_pad.bin'.format(len(train_data),args.num_train_epochs))
# config_file = os.path.join(args.model_dir, 'config_data{}_trained_after_{}_epochs_only_sum_loss_ignr_pad.json'.format(len(train_data),args.num_train_epochs))
# torch.save(model.state_dict(), model_file)
# model.config.to_json_file(config_file)
# Save the whole model object (not just the state_dict) plus the vocabulary.
torch.save(model, '/content/drive/MyDrive/MAIS/model_2.pt')
tokenizer.save_vocabulary('/content/drive/MyDrive/MAIS/')
```
| github_jupyter |
递归式特征消除Recursive feature elimination(RFE)
给定一个为特征(如线性模型的系数)分配权重的外部估计量,递归特征消除([RFE](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html#sklearn.feature_selection.RFE))就是通过递归地考虑越来越小的特征集来选择特征。首先,对初始特征集训练估计器,通过coef_属性或feature_importances_属性获得每个特征的重要性。然后,从当前的特征集中删除最不重要的特征。在经过修剪的集合上递归地重复这个过程,直到最终达到所需的特征数量。
说简单点,递归式特征消除的主要思路是反复建立多种模型,每一次根据系数挑出最差的特征,并去除挑出来的特征,然后在剩余的特征上重复该过程,直到遍历了所有的特征。所以递归式特征消除的效果如何,很大程度上取决于选用的模型。
```
# Show every expression result in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
```
# 1 基本使用介绍
对于RFE函数,主要参数如下:
+ estimator:一种监督学习估计器,其fit方法通过coef_ 属性或feature_importances_属性提供有关要素重要性的信息
+ n_features_to_select:要保留的特征数量,默认保留一半
+ step:为整数时表示每次要删除的特征数量;小于1时表示每次去除权重最小的特征,默认为1
以下示例说明了,如何通过RFE函数挑选5个最佳特征
```
from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
# Friedman #1 regression problem: 50 samples, 10 features
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = SVR(kernel="linear")
selector = RFE(estimator, n_features_to_select=5, step=1)
selector = selector.fit(X, y)
# Which features made the final cut (True means selected)
print(selector.support_)
# Ranking of each feature; lower is better (1 = selected)
print(selector.ranking_)
# How many features were selected
print(selector.n_features_)
```
sklearn中[RFECV](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html#sklearn.feature_selection.RFECV)函数在交叉验证循环中执行RFE,以找到最佳数量的特征。RFE的稳定性很大程度上取决于迭代时用的哪种模型。RFECV 通过交叉验证的方式来执行RFE。
RFE需要一个指定数量的特性来保留,但是通常事先不知道有多少特性是有效的。为了寻找最佳特征数,采用RFE对不同特征子集进行交叉验证,并选择出最优的特征评分集合,但是如果删除特征会导致性能损失就不删除特征。这就是RFECV的原理。
对于RFECV函数,主要参数如下:
+ estimator:一种监督学习估计器,其fit方法通过coef_ 属性或feature_importances_属性提供有关要素重要性的信息
+ step:为整数时表示每次要删除的特征数量;小于1时表示每次去除权重最小的特征,默认为1
+ min_features_to_select:保留的最少的特征数(但是如果模型有特征数量限制,如随机森林设置了最大特征数,该变量需要大于等于随机森林设定的最大特征数),默认为1。
+ cv:指定交叉验证的折数,默认为5
以下示例说明了,如何通过RFECV挑选特征。如果减少特征会造成性能损失,那么将不会去除任何特征
```
from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFECV
from sklearn.svm import SVR
# Generate samples
# X has shape (50, 10), y has shape (50,)
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = SVR(kernel="linear")
# 5-fold cross-validation
selector = RFECV(estimator, step=1, cv=5)
selector = selector.fit(X, y)
# Which features made the final cut (True means selected)
print(selector.support_)
# Ranking of each feature; lower is better (1 = selected)
print(selector.ranking_)
# How many features were selected
print(selector.n_features_)
# Cross-validated score for each number of features
print(selector.grid_scores_)
```
详细来说K折交叉验证,就是将数据集等比例划分成K份,以其中的一份作为测试数据,其他的K-1份数据作为训练数据。交叉验证实际是把实验重复做了K次,每次实验都是从K个部分选取一份不同的数据部分作为测试数据(保证K个部分的数据都分别做过测试数据),剩下的K-1个当作训练数据,最后把得到的K个实验结果进行平均。但是RFECV不是这样的,RFECV由RFE和CV(Cross-validation)组成,
RFECV源代码如下,在每次实验针对部分特征进行RFE计算。我们在所有CV上保留每个功能得分的平均值。然后,我们使用平均得分计算要删除的要素数量,然后使用整个数据集删除该数量的要素,这就是源代码所表达的意思。
举个例子如果有a,b,c三个特征,交叉认证每次提取部分特征,比如第一次提取特征(a,b)与y建模,计算在测试集的得分。第二次提取特征(a,c)进行建模,第三次对(a,b,c)进行建模。如5折交叉验证会得到5个分数数组,会对5个分数数组进行对应元素求和,得到各个特征数量下的总分,求出最高总分,以及多少特征才能达到最高分,那么就可以求得应该删多少特征,然后针对整个数据进行RFE。
```
# Excerpt from sklearn's RFECV.fit shown for illustration
# (`self` refers to the RFECV instance; not runnable standalone).
# Validate and extract X, y
X, y = check_X_y(X, y, "csr", ensure_min_features=2)
# Initialization
# k-fold cross-validation splitter
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
if 0.0 < self.step < 1.0:
    step = int(max(1, self.step * n_features))
else:
    step = int(self.step)
if step <= 0:
    raise ValueError("Step must be >0")
# Build an RFE object, which will evaluate and score each possible
# feature count, down to self.min_features_to_select
rfe = RFE(estimator=self.estimator,
          n_features_to_select=self.min_features_to_select,
          step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done so that to
# make sure that user code that sets n_jobs to 1
# and provides bound methods as scorers is not broken with the
# addition of n_jobs parameter in version 0.18.
if effective_n_jobs(self.n_jobs) == 1:
    parallel, func = list, _rfe_single_fit
else:
    parallel = Parallel(n_jobs=self.n_jobs)
    func = delayed(_rfe_single_fit)
# Run RFE on every cross-validation fold
scores = parallel(
    func(rfe, self.estimator, X, y, train, test, scorer)
    for train, test in cv.split(X, y, groups))
# Sum the per-feature-count scores across the folds
scores = np.sum(scores, axis=0)
scores_rev = scores[::-1]
# Index of the feature count with the best summed score
# (scanned in reverse so ties favor fewer features)
argmax_idx = len(scores) - np.argmax(scores_rev) - 1
# Starting from n_features, `step` features are removed per iteration up to
# argmax_idx, which yields the number of features to keep
n_features_to_select = max(
    n_features - (argmax_idx * step),
    self.min_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
          n_features_to_select=n_features_to_select, step=self.step,
          verbose=self.verbose)
rfe.fit(X, y)
```
# 2 应用实例
下面的实例为一个递归特征消除RFE函数示例,显示了数字分类任务中像素的重要性。
```
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt
# Load the digits dataset
# 读取数据集
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
print("训练图像集的维度",X.shape)
# Create the RFE object and rank each pixel
svc = SVC(kernel="linear", C=1)
# n_features_to_select=1表示每次都删除一个特征。比如X的图像为8*8的尺寸。共64个像素,对64个像素都进行排名
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe.fit(X, y)
print("图像各个像素点的重要性排名:",rfe.ranking_)
# 大小重置
ranking = rfe.ranking_.reshape(digits.images[0].shape)
# Plot pixel ranking
# 颜色越浅表明该像素点对于手写数字图像分类越重要
plt.matshow(ranking, cmap=plt.cm.Blues)
plt.colorbar()
plt.title("Ranking of pixels with RFE");
```
下面为一个递归特征消除示例,该示例通过交叉验证自动调整所选特征的数量。最好画出选用各个特征数量下,分类集的交叉认证分数。可以看到RFECV能够自动选择适合分类的有效特征数
```
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
# X has 25 features, of which only 3 are informative
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)
print("X维度", X.shape)
print("y维度",y.shape)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
              scoring='accuracy')
rfecv.fit(X, y)
print("RFEC挑选了几个特征 : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
# x-axis: number of features selected
plt.xlabel("Number of features selected")
# y-axis: cross-validation score
plt.ylabel("Cross validation score (nb of correct classifications)")
# Score at each feature count
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show();
```
# 3 参考
> [https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html#sklearn.feature_selection.RFE](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html#sklearn.feature_selection.RFE)
> [https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html#sklearn.feature_selection.RFECV](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html#sklearn.feature_selection.RFECV)
> [http://www.minxueyu.com/2020/03/29/RFE%E4%B8%8ERFECV%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/](http://www.minxueyu.com/2020/03/29/RFE%E4%B8%8ERFECV%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/)
> [https://blog.csdn.net/sunshunli/article/details/82355395](https://blog.csdn.net/sunshunli/article/details/82355395)
> [https://stackoverflow.com/questions/34703051/score-of-rfecv-in-python-scikit-learn](https://stackoverflow.com/questions/34703051/score-of-rfecv-in-python-scikit-learn)
> [https://blog.csdn.net/teng_zz/article/details/98027712](https://blog.csdn.net/teng_zz/article/details/98027712)
| github_jupyter |
# Computing gradients in parallel with PennyLane-Braket
A central feature of the Amazon Braket SV1 simulator is that it can execute multiple circuits sent from PennyLane in parallel. This is crucial for scalable optimization, where each training step creates lots of variations of a circuit which need to be executed.
This tutorial will explain the importance of this feature and allow you to benchmark it yourself.
## Why is the training of circuits so expensive?
Quantum-classical hybrid optimization of quantum circuits is the workhorse algorithm of near-term quantum computing. It is not only fundamental for training variational quantum circuits but also more broadly for applications like quantum chemistry, quantum machine learning and, of course, for applications in "vanilla" quantum optimization. Today’s most powerful optimization algorithms rely on the efficient computation of gradients—which tell us how to adapt parameters a little bit at a time to improve the algorithm.
Training quantum circuits is tough! Each step during optimization requires evaluation of the circuit gradient. Calculating the gradient involves multiple device executions: for each trainable parameter we must execute our circuit on the device typically [more than once](https://pennylane.ai/qml/glossary/parameter_shift.html). Reasonable applications involve many trainable parameters (just think of a classical neural net with millions of tunable weights). The result is a huge number of device executions for each optimization step.

In the ``braket.local.qubit`` device, gradients are calculated in PennyLane through sequential device executions—in other words, all these circuits have to wait in the same queue until they can be evaluated. This approach is simpler, but quickly becomes slow as we scale the number of parameters. Moreover, as the number of qubits, or "width", of the circuit is scaled, each device execution will slow down and eventually become a noticeable bottleneck. In short—**the future of training quantum circuits relies on high-performance remote simulators and hardware devices that are highly parallelized**.
Fortunately, Amazon Braket provides a solution for scalable quantum circuit training with the SV1 simulator. The SV1 simulator is a high-performance state vector simulator that is designed with parallel execution in mind. Together with PennyLane, we can use SV1 to run in parallel all the circuits needed to compute a gradient!

## Loading the SV1 device
Let's load Braket's SV1 simulator in PennyLane with 25 qubits. Further details on loading Braket devices are provided in the [first tutorial](./0_Getting_started.ipynb). We begin with the standard imports and specification of the bucket and ARN:
```
import pennylane as qml
from pennylane import numpy as np
wires = 25  # number of qubits used throughout this notebook
# Please enter the S3 bucket you created during onboarding
# (or any other S3 bucket starting with 'amazon-braket-' in your account) in the code below
my_bucket = f"amazon-braket-Your-Bucket-Name"  # the name of the bucket (placeholder; no f-string fields)
my_prefix = "Your-Folder-Name"  # the name of the folder in the bucket
s3_folder = (my_bucket, my_prefix)
# ARN of the Amazon Braket SV1 state-vector simulator
device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
```
Recall that all remote simulators and hardware on Braket are accessed through PennyLane using the ``braket.aws.qubit`` device name. The specific remote device is set using the ``device_arn`` argument.
```
# Remote SV1 device; parallel=True lets the circuits of a gradient
# computation be submitted for concurrent execution.
dev_remote = qml.device(
    "braket.aws.qubit",
    device_arn=device_arn,
    wires=wires,
    s3_destination_folder=s3_folder,
    parallel=True,
)
# Local Braket simulator, loaded for timing comparisons below.
dev_local = qml.device("braket.local.qubit", wires=wires)
```
Note the ``parallel=True`` argument in ``dev_remote``. This setting allows us to unlock the power of parallel execution on SV1 for gradient calculations. The local Braket simulator has also been loaded for comparison.
## Benchmarking a circuit evaluation
We will now compare the execution time for the remote and local Braket devices. Our first step is to create a simple circuit:
```
def circuit(params):
    """Benchmark circuit: one RX rotation per wire, a ring of CNOTs,
    and a single Pauli-Z expectation value on the last wire."""
    # Single-qubit layer: parametrized X rotation on every wire.
    for w in range(wires):
        qml.RX(params[w], wires=w)
    # Entangling layer: each wire controls a CNOT on its neighbour,
    # wrapping around so the last wire targets the first.
    for ctrl in range(wires):
        tgt = (ctrl + 1) % wires
        qml.CNOT(wires=[ctrl, tgt])
    return qml.expval(qml.PauliZ(wires - 1))
```

In this circuit, each of the 25 qubits has a controllable rotation. A final block of two-qubit CNOT gates is added to entangle the qubits. Overall, this circuit has 25 trainable parameters. Although not particularly relevant for practical problems, we can use this circuit as a testbed in our comparison of the remote and local devices.
The next step is to convert the above circuit into a PennyLane QNode, which binds the circuit to a device for execution.
```
# Bind the same circuit to both devices for a like-for-like comparison
qnode_remote = qml.QNode(circuit, dev_remote)
qnode_local = qml.QNode(circuit, dev_local)
```
<div class="alert alert-block alert-info">
<b>Note</b> The above uses <code>qml.QNode</code> to convert the circuit. In previous tutorials, you may have seen the <code>@qml.qnode()</code> decorator being used. These approaches are interchangeable, but we use <code>qml.QNode</code> here because it allows us to pair the same circuit to different devices.
</div>
Let's now compare the execution time between the two devices:
```
import time
# One random rotation angle per wire
params = np.random.random(wires)
```
The following cell will result in 1 circuit being executed on SV1.
```
# Time a single forward evaluation on each device
t_0_remote = time.time()
qnode_remote(params)
t_1_remote = time.time()
t_0_local = time.time()
qnode_local(params)
t_1_local = time.time()
print("Execution time on remote device (seconds):", t_1_remote - t_0_remote)
print("Execution time on local device (seconds):", t_1_local - t_0_local)
```
Nice! These timings highlight the advantage of using SV1 for simulations with large qubit numbers. In general, simulation times scale exponentially with the number of qubits, but SV1 is highly optimized and running on AWS remote servers. This allows SV1 to outperform the local simulator in this 25-qubit example. The time you see for the remote device will also depend on factors such as your distance to AWS servers.
<div class="alert alert-block alert-info">
<b>Note</b> Given these timings, why would anyone want to use the local simulator? You should consider using the local simulator when your circuit has few qubits. In this regime, the latency times of communicating the circuit to AWS dominate over simulation times, allowing the local simulator to be faster.
</div>
## Benchmarking gradient calculations
Now let us compare the gradient-calculation times between the two devices. Remember that when loading the remote device, we set ``parallel=True``. This allows the multiple device executions required during gradient calculations to be performed in parallel on SV1, so we expect the remote device to be much faster.
```
# Gradient functions; each evaluation executes the circuit twice per
# parameter plus one forward pass
d_qnode_remote = qml.grad(qnode_remote)
d_qnode_local = qml.grad(qnode_local)
```
The following cell will result in 51 circuits being executed (in parallel) on SV1. We must execute the circuit twice to evaluate the partial derivative with respect to each parameter. Hence, for 25 parameters there are 50 circuit executions. The final circuit execution is due to a "forward pass" evaluation of the QNode before the gradient is calculated.
```
# 51 circuit executions submitted (in parallel) to SV1: 2 per parameter + 1 forward pass
t_0_remote_grad = time.time()
d_qnode_remote(params)
t_1_remote_grad = time.time()
```
<div class="alert alert-block alert-warning">
<b>Caution:</b> Depending on your hardware, running the following cell can take 15 minutes or longer. Only uncomment it if you are happy to wait.
</div>
```
# Uncomment to benchmark the local device (can take 15 minutes or longer)
# t_0_local_grad = time.time()
# d_qnode_local(params)
# t_1_local_grad = time.time()
print("Gradient calculation time on remote device (seconds):", t_1_remote_grad - t_0_remote_grad)
# print("Gradient calculation time on local device (seconds):", t_1_local_grad - t_0_local_grad)
```
If you had the patience to run the local device, you will see times of around 15 minutes or more! Compare this to less than a minute spent calculating the gradient on SV1. This provides a powerful lesson in parallelization.
What if we had run on SV1 with ``parallel=False``? It would have taken around 3 minutes—still faster than a local device, but much slower than running SV1 in parallel.
<div class="alert alert-block alert-info">
<b>What's next?</b> Look into some applications, for example how to solve
<a href="./2_Graph_optimization_with_QAOA.ipynb">graph</a> or <a href="./3_Quantum_chemistry_with_VQE.ipynb">chemistry</a> problems with PennyLane and Braket.
</div>
| github_jupyter |
# Empirical Bayes for the Gaussian-Gaussian Hierarchical Model
A demonstration of how to estimate posterior group means in a Gaussian-Gaussian hierarchical model using Empirical Bayes. Based on Ch 5 of Murphy, Machine Learning.
Author: Juvid Aryaman
```
import numpy as np
import pandas as pd
import utls  # local plotting utilities
import matplotlib.pyplot as plt
# Notebook conveniences: auto-reload edited modules, inline figures
%reload_ext autoreload
%autoreload 2
%matplotlib inline
utls.reset_plots()
```
Assume that the score ($x_{ij}$) of every student $i = 1,\dots,N_j$ from school $j=1,\dots,D$ is Gaussian distributed according to
$$p(x_{ij}|\theta_j,\sigma^2) = \mathcal{N}(x_{ij}|\theta_j,\sigma^2).$$
We assume that $\sigma$ is the same for every school (this assumption allows us to write down an Empirical Bayes solution in closed form, otherwise we need to use expectation maximization), and is known for simplicity. Our prior for $\theta_j$ is also Gaussian:
$$p(\theta_j|\mu,\tau^2) = \mathcal{N}(\theta_j| \mu, \tau^2).$$
Hence we have a graphical model which looks like this:

where $\boldsymbol{\eta} = (\mu,\tau)$.
Aim: To estimate $\theta_j$ for every school.
Let's generate synthetic data from the process
```
np.random.seed(1)  # reproducible synthetic data
mu_true = 7.0 # inter-school mean score
tau_sq_true = 0.6 # inter-school score variance
n_schools = 18
#n_schools = 2000
theta_j = np.random.normal(mu_true, np.sqrt(tau_sq_true), n_schools) # mean score per school
ndata_per_school = np.random.randint(12,20,n_schools)  # 12-19 students per school
sigma_true = 0.7 # intra-school score variability (standard deviation)
data = []
for j in range(n_schools):
    # Draw each school's student scores around that school's own mean
    data_j = np.random.normal(theta_j[j], sigma_true, ndata_per_school[j])
    data.append(np.array(data_j))
```
Visualise data
```
# Scatter each school's student scores, jittered horizontally for visibility
fig, ax = plt.subplots(1,1)
for j in range(n_schools):
    data_j = data[j]
    n = ndata_per_school[j]
    x_coords = np.ones(n)*(j+1) + np.random.uniform(-0.1,0.1,n)  # jitter around school index
    ax.plot(x_coords, data_j,'.')
ax.set_xticks(np.arange(1,n_schools+1))
ax.set_xlabel('School')
ax.set_ylabel('Student scores');
```
The obvious choice is to take
$$\hat{\theta}_j = \frac{1}{N_j}\sum_i x_{ij}$$
for every school, which is the maximum likelihood estimate. Can we do better, given our knowledge of the model structure?
For this model, we have
$$p(\boldsymbol{\theta},\mathcal{D}|\boldsymbol{\eta},\sigma^2) = \prod_{j=1}^D \mathcal{N}(\theta_j | \mu,\tau^2) \prod_{i=1}^{N_j}\mathcal{N}(x_{ij}|\theta_j,\sigma^2).$$
One can show the following marginal likelihood (Eq 5.91 of Murphy)
$$p(\mathcal{D}|\mu,\tau^2,\sigma^2) = \prod_{j=1}^D \mathcal{N}(\bar{x}_j | \mu, \tau^2 + \sigma^2)$$
where $\bar{x}_j = \frac{1}{N_j}\sum_{i=1}^{N_j} x_{ij}$ is the sample mean of school $j$.
The crucial step in Empirical Bayes is for our priors to be informed by the data in the following way. We take maximum likelihood estimates of the marginal likelihood for the hyperparameters $\mu$ and $\tau$, arriving at
$$\hat{\mu} = \frac{1}{D} \sum_{j=1}^{D} \bar{x}_j = \bar{x}$$
$$\hat{\tau}^2 + \sigma^2 = \frac{1}{D-1}\sum_{j=1}^D(\bar{x}_j - \bar{x})^2.$$
Empirical Bayes violates the principle that the prior should be chosen independently of the data. We just view this as a computationally cheap approximation to inference in a hierarchical Bayesian model.
Given our estimates $\hat{\mu}$ and $\hat{\tau}$, we may write
$$p(\boldsymbol{\theta},\mathcal{D}|\hat{\boldsymbol{\eta}},\sigma^2) = \prod_{j=1}^D \mathcal{N}(\theta_j | \hat{\mu},\hat{\tau}^2) \mathcal{N}(\bar{x}|\theta_j,\sigma^2/N_j)$$
where we've used the trick that $N_j$ independent measurements of a Gaussian with variance $\sigma^2$ is the same as one measurement at the sample mean with variance $\sigma_j^2 := \sigma^2/N_j$.
Using Bayes rule for linear Gaussian systems (Sec 4.4.1 of Murphy), we may write
$$p(\theta_j|\mathcal{D},\hat{\mu},\hat{\tau}^2, \sigma) = \mathcal{N}(\theta_j|\hat{B}_j \hat{\mu} + (1-\hat{B}_j) \bar{x}_j,(1-\hat{B}_j) \sigma_j^2)$$
$$\hat{B}_j = \frac{\sigma^2_j}{\sigma^2_j + \hat{\tau}^2}$$
```
# Per-school sample means: the maximum-likelihood estimates \bar{x}_j
xbar_j = np.array([np.mean(data[j]) for j in range(n_schools)])
# Empirical Bayes hyperparameter estimates from the marginal likelihood
mu_hat = np.mean(xbar_j)
tau_sq_hat = max(0,np.var(xbar_j,ddof=1) - sigma_true**2)
# Standard error of each school mean: sigma_j^2 = sigma^2 / N_j.
# Bug fix: the original computed np.sqrt(sigma_true/ndata_per_school),
# i.e. sigma/N_j under the root, omitting the square on sigma
# (the tau_sq_hat line above correctly uses sigma_true**2).
sigma_j = np.sqrt(sigma_true**2/ndata_per_school)
# Shrinkage factor per school; B -> 1 pulls the estimate to the grand mean
B_hat_j = sigma_j**2/(sigma_j**2 + tau_sq_hat)
# Posterior mean: shrink each school's ML estimate toward mu_hat
theta_j_EB = B_hat_j*mu_hat + (1.-B_hat_j)*xbar_j
```
Visualise errors
```
# Squared error of each estimator against the true school means
res_EB = (theta_j - theta_j_EB)**2
res_ML = (theta_j - xbar_j)**2
fig, ax = plt.subplots(1,1)
x = np.arange(1,n_schools+1)
ax.plot(x,res_EB,'kx',label = 'Empirical Bayes')
ax.plot(x,res_ML,'r.', label = 'Maximum Likelihood')
ax.set_xticks(x)
ax.set_xlabel('School')
ax.set_ylabel(r'$\theta_j$ - estimate')
ax.legend(prop={'size':10},bbox_to_anchor=(1.45, 1.0));
# Per-school view: raw scores plus the true mean and both estimates
cm = plt.get_cmap("jet")
cols = cm((0.5+np.arange(0,n_schools))/float(n_schools))
fig, ax = plt.subplots(1,1)
x = np.arange(1,n_schools+1)
for j in range(n_schools):
    data_j = data[j]
    n = ndata_per_school[j]
    x_coords = np.ones(n)*(j+1) + np.random.uniform(-0.1,0.1,n)
    ax.plot(x_coords, data_j,'.',color=cols[j])
    eb=ax.plot(j+1, theta_j_EB[j], 'x',color='k')
    ml=ax.plot(j+1, xbar_j[j], '.',color='k')
    tr=ax.plot(j+1, theta_j[j], '_',color='k')
ax.legend((tr[0],eb[0],ml[0]),('True','EB','ML'),bbox_to_anchor=(1.45, 1.0))
ax.set_xticks(x)
ax.set_xlabel('School')
ax.set_ylabel(r'Score');
# Mean squared error of EB vs ML (displays as the cell's last expression)
np.sum(res_EB)/n_schools,np.sum(res_ML)/n_schools
```
Hmmm, in this case it doesn't seem to be doing better than MLE.
To do:
- Debug?
- If correct, under what circumstances do we expect this to perform better than MLE? The key advantage seems to be being able to learn what $\mu$ and $\tau$ are. In the limit of large $D$ does this perform well? Seems not after a quick play....
| github_jupyter |
```
# default_exp data.preparation
```
# Data preparation
> Functions required to prepare X (and y) from a pandas dataframe.
```
# export
from tsai.imports import *
from tsai.utils import *
from tsai.data.validation import *
from io import StringIO
#export
def df2Xy(df, sample_col=None, feat_col=None, data_cols=None, target_col=None, steps_in_rows=False, to3d=True, splits=None,
          sort_by=None, ascending=True, y_func=None, return_names=False):
    r"""
    This function allows you to transform a pandas dataframe into X and y numpy arrays that can be used to create a TSDataset.
    sample_col: column that uniquely identifies each sample.
    feat_col: used for multivariate datasets. It indicates which is the column that indicates the feature by row.
    data_cols: indicates the column/s where the data is located. If None, it means all columns (except the sample_col, feat_col, and target_col)
    target_col: indicates the column/s where the target is.
    steps_in_rows: flag to indicate if each step is in a different row or in a different column (default).
    to3d: turns X to 3d (including univariate time series)
    sort_by: used to indicate how to sort the dataframe.
    y_func: function used to calculate y for each sample (and target_col)
    return_names: flag to return the names of the columns from where X was generated
    """
    # A per-row feature column only makes sense if rows can be grouped by sample
    if feat_col is not None:
        assert sample_col is not None, 'You must pass a sample_col when you pass a feat_col'
    # Normalize every column argument to a plain list, accumulating:
    # - sort_cols: order used when sorting the dataframe
    # - passed_cols: columns excluded from the default data columns
    passed_cols = []
    sort_cols = []
    if sort_by is not None:
        if isinstance(sort_by, pd.core.indexes.base.Index): sort_by = sort_by.tolist()
        sort_cols += listify(sort_by)
    if sample_col is not None:
        if isinstance(sample_col, pd.core.indexes.base.Index): sample_col = sample_col.tolist()
        sample_col = listify(sample_col)
        if sample_col[0] not in sort_cols: sort_cols += listify(sample_col)
        passed_cols += sample_col
    if feat_col is not None:
        if isinstance(feat_col, pd.core.indexes.base.Index): feat_col = feat_col.tolist()
        feat_col = listify(feat_col)
        if feat_col[0] not in sort_cols: sort_cols += listify(feat_col)
        passed_cols += feat_col
    if data_cols is not None:
        if isinstance(data_cols, pd.core.indexes.base.Index): data_cols = data_cols.tolist()
        data_cols = listify(data_cols)
    if target_col is not None:
        if isinstance(target_col, pd.core.indexes.base.Index): target_col = target_col.tolist()
        target_col = listify(target_col)
        passed_cols += target_col
    # Default data columns: everything not named as sample/feat/target
    if data_cols is None:
        data_cols = [col for col in df.columns if col not in passed_cols]
    if target_col is not None:
        if any([t for t in target_col if t in data_cols]): print(f"Are you sure you want to include {target_col} in X?")
    # NOTE(review): inplace sort mutates the caller's dataframe — confirm callers expect this
    if sort_by and sort_cols:
        df.sort_values(sort_cols, ascending=ascending, inplace=True)
    # X
    X = df.loc[:, data_cols].values
    if X.dtype == 'O':
        # object dtype (e.g. mixed columns) is coerced to float32
        X = X.astype(np.float32)
    if sample_col is not None:
        unique_ids = df[sample_col[0]].unique().tolist()
        n_samples = len(unique_ids)
    else:
        # without a sample column, every row is its own sample
        unique_ids = np.arange(len(df)).tolist()
        n_samples = len(df)
    if to3d:
        # Reshape flat values to (samples, variables, steps)
        if feat_col is not None:
            n_feats = df[feat_col[0]].nunique()
            X = X.reshape(n_samples, n_feats, -1)
        elif steps_in_rows:
            # rows are time steps: gather steps then swap to (samples, vars, steps)
            X = X.reshape(n_samples, -1, len(data_cols)).swapaxes(1,2)
        else:
            X = X.reshape(n_samples, 1, -1)
    # y
    if target_col is not None:
        if sample_col is not None:
            # Collect each target column per sample, optionally reduce with y_func
            y = []
            for tc in target_col:
                _y = np.concatenate(df.groupby(sample_col)[tc].apply(np.array).reset_index()[tc]).reshape(n_samples, -1)
                if y_func is not None: _y = y_func(_y)
                y.append(_y)
            y = np.concatenate(y, -1)
        else:
            y = df[target_col].values
        # drop singleton dimensions (e.g. (n, 1) -> (n,))
        y = np.squeeze(y)
    else:
        y = None
    # Output
    if splits is None:
        if return_names: return X, y, data_cols
        else: return X, y
    else:
        if return_names: return split_xy(X, y, splits), data_cols
        return split_xy(X, y, splits)
#export
def split_Xy(X, y=None, splits=None):
    """Slice X (and y, if provided) according to one set of splits.

    X: indexable array. y: optional target array aligned with X.
    splits: a single list of indices, or a list of such index lists
        (one per split, e.g. train/valid[/test]).
    Returns the per-split arrays interleaved: X_0, y_0, X_1, y_1, ...
    (Xs only when y is None; a single array when there is only one output.)
    """
    if splits is None:
        if y is not None: return X, y
        else: return X
    if not is_listy(splits[0]): splits = [splits]
    else: assert not is_listy(splits[0][0]), 'You must pass a single set of splits.'
    out = []
    for split in splits:
        out.append(X[split])
        if y is not None: out.append(y[split])
    # Fixes two defects in the original: it silently returned None for more
    # than 3 splits, and raised IndexError when splits were passed with y=None.
    if len(out) == 1: return out[0]
    return tuple(out)
# Lowercase aliases kept for backward compatibility
df2xy = df2Xy
split_xy = split_Xy
# Minimal example: 3 samples, 2 variables, steps stored in rows
df = pd.DataFrame()
df['sample_id'] = np.array([1,1,1,2,2,2,3,3,3])
df['var1'] = df['sample_id'] * 10 + df.index.values
df['var2'] = df['sample_id'] * 100 + df.index.values
df
X_df, y_df = df2Xy(df, sample_col='sample_id', steps_in_rows=True)
# First sample should contain its 3 steps for each of the 2 variables
test_eq(X_df[0], np.array([[10, 11, 12], [100, 101, 102]]))
# Synthetic multivariate dataset: 1,000 samples x 10 features x 6 steps,
# two 3-class string targets, then row-shuffled to exercise sorting.
n_samples = 1_000
n_rows = 10_000
sample_ids = np.arange(n_samples).repeat(n_rows//n_samples).reshape(-1,1)
feat_ids = np.tile(np.arange(n_rows // n_samples), n_samples).reshape(-1,1)
cont = np.random.randn(n_rows, 6)
ind_cat = np.random.randint(0, 3, (n_rows, 1))
target = np.array(['a', 'b', 'c'])[ind_cat]
ind_cat2 = np.random.randint(0, 3, (n_rows, 1))
target2 = np.array(['a', 'b', 'c'])[ind_cat2]
# Bug fix: the 'target2' column previously received `target` again, so the
# independently drawn ind_cat2/target2 were never used.
data = np.concatenate([sample_ids, feat_ids, cont, target, target2], -1)
columns = ['sample_id', 'feat_id'] + (np.arange(6) + 1).astype(str).tolist() + ['target'] + ['target2']
df = pd.DataFrame(data, columns=columns)
# Shuffle the rows so the sort_by logic in df2Xy is actually exercised
idx = np.random.choice(np.arange(len(df)), len(df), False)
new_dtypes = {'sample_id':np.int32, 'feat_id':np.int32, '1':np.float32, '2':np.float32, '3':np.float32, '4':np.float32, '5':np.float32, '6':np.float32}
df = df.astype(dtype=new_dtypes)
df = df.loc[idx].reset_index(drop=True)
df
# Long-format df (one row per sample/feature pair): 1_000 samples x 10 features x 6 steps
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', feat_col='feat_id', data_cols=['1', '2', '3', '4', '5', '6'], target_col=['target'],
             sort_by=['sample_id', 'feat_id'], y_func=y_func)
test_eq(X.shape, (1000, 10, 6))
test_eq(y.shape, (1000,))
# Spot-check a random sample against a manually sorted view of the df
rand_idx = np.random.randint(0, np.max(df.sample_id))
sorted_df = df.sort_values(by=['sample_id', 'feat_id']).reset_index(drop=True)
test_eq(X[rand_idx], sorted_df[sorted_df.sample_id == rand_idx][['1', '2', '3', '4', '5', '6']].values)
test_eq(np.squeeze(scipy.stats.mode(sorted_df[sorted_df.sample_id == rand_idx][['target']].values).mode), y[rand_idx])
# Same conversion with two target columns (multi-label)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', feat_col='feat_id', target_col=['target', 'target2'], sort_by=['sample_id', 'feat_id'], y_func=y_func)
test_eq(X.shape, (1000, 10, 6))
test_eq(y.shape, (1000, 2))
rand_idx = np.random.randint(0, np.max(df.sample_id))
sorted_df = df.sort_values(by=['sample_id', 'feat_id']).reset_index(drop=True)
test_eq(X[rand_idx], sorted_df[sorted_df.sample_id == rand_idx][['1', '2', '3', '4', '5', '6']].values)
test_eq(np.squeeze(scipy.stats.mode(sorted_df[sorted_df.sample_id == rand_idx][['target', 'target2']].values).mode), y[rand_idx])
# Univariate
TESTDATA = StringIO("""sample_id;value_0;value_1;target
rob;2;3;hot
alice;6;7;lukewarm
eve;11;12;cold
""")
df = pd.read_csv(TESTDATA, sep=";")
display(df)
X, y = df2Xy(df, sample_col='sample_id', target_col='target', data_cols=['value_0', 'value_1'], sort_by='sample_id')
test_eq(X.shape, (3, 1, 2))
test_eq(y.shape, (3,))
X, y
# Univariate
TESTDATA = StringIO("""sample_id;timestep;values;target
rob;1;2;hot
alice;1;6;lukewarm
eve;1;11;cold
rob;2;3;hot
alice;2;7;lukewarm
eve;2;12;cold
""")
df = pd.read_csv(TESTDATA, sep=";")
display(df)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', target_col='target', data_cols=['values'], sort_by='timestep', to3d=True, y_func=y_func)
test_eq(X.shape, (3, 1, 2))
test_eq(y.shape, (3, ))
print(X, y)
# Multivariate
TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target
rob;green;2;3;hot
rob;yellow;3;4;hot
rob;blue;4;5;hot
rob;red;5;6;hot
alice;green;6;7;lukewarm
alice;yellow;7;8;lukewarm
alice;blue;8;9;lukewarm
alice;red;9;10;lukewarm
eve;yellow;11;12;cold
eve;green;10;11;cold
eve;blue;12;12;cold
eve;red;13;14;cold
""")
df = pd.read_csv(TESTDATA, sep=";")
# shuffle the rows to check that grouping is order-independent
idx = np.random.choice(len(df), len(df), False)
df = df.iloc[idx]
display(df)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col='target', data_cols=['value_0', 'value_1'], y_func=y_func)
print(X, y)
test_eq(X.shape, (3, 4, 2))
test_eq(y.shape, (3,))
# Multivariate, multi-label
TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target1;target2
rob;green;2;3;hot;good
rob;yellow;3;4;hot;good
rob;blue;4;5;hot;good
rob;red;5;6;hot;good
alice;green;6;7;lukewarm;good
alice;yellow;7;8;lukewarm;good
alice;blue;8;9;lukewarm;good
alice;red;9;10;lukewarm;good
eve;yellow;11;12;cold;bad
eve;green;10;11;cold;bad
eve;blue;12;12;cold;bad
eve;red;13;14;cold;bad
""")
df = pd.read_csv(TESTDATA, sep=";")
display(df)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', feat_col='trait', target_col=['target1', 'target2'], data_cols=['value_0', 'value_1'], y_func=y_func)
test_eq(X.shape, (3, 4, 2))
test_eq(y.shape, (3, 2))
print(X, y)
# Multivariate, unlabeled
TESTDATA = StringIO("""sample_id;trait;value_0;value_1;target
rob;green;2;3;hot
rob;yellow;3;4;hot
rob;blue;4;5;hot
rob;red;5;6;hot
alice;green;6;7;lukewarm
alice;yellow;7;8;lukewarm
alice;blue;8;9;lukewarm
alice;red;9;10;lukewarm
eve;yellow;11;12;cold
eve;green;10;11;cold
eve;blue;12;12;cold
eve;red;13;14;cold
""")
df = pd.read_csv(TESTDATA, sep=";")
idx = np.random.choice(len(df), len(df), False)
df = df.iloc[idx]
display(df)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
# no target_col passed -> y is None (unlabeled data)
X, y = df2xy(df, sample_col='sample_id', feat_col='trait', data_cols=['value_0', 'value_1'], y_func=y_func)
print(X, y)
test_eq(X.shape, (3, 4, 2))
test_eq(y, None)
# Multivariate with timesteps stored in rows (sorted via sort_by='timestep')
TESTDATA = StringIO("""sample_id;trait;timestep;values;target
rob;green;1;2;hot
rob;yellow;1;3;hot
rob;blue;1;4;hot
rob;red;1;5;hot
alice;green;1;6;lukewarm
alice;yellow;1;7;lukewarm
alice;blue;1;8;lukewarm
alice;red;1;9;lukewarm
eve;yellow;1;11;cold
eve;green;1;10;cold
eve;blue;1;12;cold
eve;red;1;13;cold
rob;green;2;3;hot
rob;yellow;2;4;hot
rob;blue;2;5;hot
rob;red;2;6;hot
alice;green;2;7;lukewarm
alice;yellow;2;8;lukewarm
alice;blue;2;9;lukewarm
alice;red;2;10;lukewarm
eve;yellow;2;12;cold
eve;green;2;11;cold
eve;blue;2;13;cold
eve;red;2;14;cold
""")
df = pd.read_csv(TESTDATA, sep=";")
display(df)
def y_func(o): return scipy.stats.mode(o, axis=1).mode
X, y = df2xy(df, sample_col='sample_id', feat_col='trait', sort_by='timestep', target_col='target', data_cols=['values'], y_func=y_func)
print(X, y)
test_eq(X.shape, (3, 4, 2))
test_eq(y.shape, (3, ))
# export
def df2np3d(df, groupby, data_cols=None):
    """Transforms a df (with the same number of rows per group in groupby) to a 3d ndarray"""
    if data_cols is None:
        data_cols = df.columns
    # one (n_rows, n_cols) array per group, in groupby order
    group_arrays = [group[data_cols].values for _, group in df.groupby(groupby)]
    # stack -> (n_groups, n_rows, n_cols); transpose -> (n_groups, n_cols, n_rows)
    return np.transpose(np.stack(group_arrays), (0, 2, 1))
# 2 users x 4 rows x 3 variables -> expect a (2, 3, 4) array
user = np.array([1,2]).repeat(4).reshape(-1,1)
val = np.random.rand(8, 3)
data = np.concatenate([user, val], axis=-1)
df = pd.DataFrame(data, columns=['user', 'x1', 'x2', 'x3'])
test_eq(df2np3d(df, ['user'], ['x1', 'x2', 'x3']).shape, (2, 3, 4))
# export
def add_missing_value_cols(df, cols=None, dtype=float):
    """For each requested column, appends a `missing_<col>` indicator column
    (1 where the value is NaN, 0 otherwise, cast to `dtype`). Mutates and returns `df`."""
    if cols is None:
        cols = df.columns
    elif not is_listy(cols):
        cols = [cols]
    for column in cols:
        indicator = df[column].isnull().astype(dtype)
        df[f'missing_{column}'] = indicator
    return df
# Random data with NaNs above .8: indicator column sums must match NaN counts
data = np.random.randn(10, 2)
mask = data > .8
data[mask] = np.nan
df = pd.DataFrame(data, columns=['A', 'B'])
df = add_missing_value_cols(df, cols=None, dtype=float)
test_eq(df['A'].isnull().sum(), df['missing_A'].sum())
test_eq(df['B'].isnull().sum(), df['missing_B'].sum())
df
# export
def add_missing_timestamps(df, datetime_col, groupby=None, fill_value=np.nan, range_by_group=True, freq=None):
    """Fills missing timestamps in a dataframe to a desired frequency
    Args:
        df: pandas DataFrame
        datetime_col: column that contains the datetime data (without duplicates within groups)
        groupby: column used to identify unique_ids
        fill_value: value that will be inserted where missing dates exist. Default: np.nan
        range_by_group: if True, dates will be filled between min and max dates for each group. Otherwise, between the min and max dates in the df.
        freq: frequency used to fill in the missing datetime
    """
    if is_listy(datetime_col):
        assert len(datetime_col) == 1, 'you can only pass a single datetime_col'
        datetime_col = datetime_col[0]
    dates = pd.date_range(df[datetime_col].min(), df[datetime_col].max(), freq=freq)
    if groupby is not None:
        if is_listy(groupby):
            assert len(groupby) == 1, 'you can only pass a single groupby'
            groupby = groupby[0]
        min_dates = df.groupby(groupby)[datetime_col].min()
        max_dates = df.groupby(groupby)[datetime_col].max()
        # Bug fix: keys are taken from the groupby result's index so they stay aligned
        # with min_dates/max_dates. Previously `df[groupby].unique()` (order of appearance)
        # was zipped with the groupby aggregates (sorted by key), silently pairing the
        # wrong date range with a key whenever ids were not already sorted.
        keys = min_dates.index
        if range_by_group:
            # Fills missing dates between min and max for each unique id
            idx_tuples = [(d, key)
                          for min_date, max_date, key in zip(min_dates, max_dates, keys)
                          for d in pd.date_range(min_date, max_date, freq=freq)]
            multi_idx = pd.MultiIndex.from_tuples(idx_tuples, names=[datetime_col, groupby])
            # Bug fix: honor the fill_value argument (was hard-coded to np.nan)
            df = df.set_index([datetime_col, groupby]).reindex(multi_idx, fill_value=fill_value).reset_index()
        else:
            # Fills missing dates between min and max - same for all unique ids
            multi_idx = pd.MultiIndex.from_product((dates, keys), names=[datetime_col, groupby])
            df = df.set_index([datetime_col, groupby]).reindex(multi_idx, fill_value=fill_value)
            df = df.reset_index().sort_values(by=[groupby, datetime_col]).reset_index(drop=True)
    else:
        index = pd.Index(dates, name=datetime_col)
        df = df.set_index([datetime_col]).reindex(index, fill_value=fill_value)
        df = df.reset_index().reset_index(drop=True)
    return df
# Filling dates between min and max dates
dates = pd.date_range('2021-05-01', '2021-05-07').values
data = np.zeros((len(dates), 3))
data[:, 0] = dates
data[:, 1] = np.random.rand(len(dates))
data[:, 2] = np.random.rand(len(dates))
cols = ['date', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
date_df_with_missing_dates
# No groups
expected_output_df = date_df.copy()
expected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates,
                                   'date',
                                   groupby=None,
                                   fill_value=np.nan,
                                   range_by_group=False)
test_eq(output_df, expected_output_df)
# Filling dates between min and max dates for each value in groupby column
dates = pd.date_range('2021-05-01', '2021-05-07').values
dates = np.concatenate((dates, dates))
data = np.zeros((len(dates), 4))
data[:, 0] = dates
data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
data[:, 2] = np.random.rand(len(dates))
data[:, 3] = np.random.rand(len(dates))
cols = ['date', 'id', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([0,1,3,8,11,13]).reset_index(drop=True)
date_df_with_missing_dates
# groupby='id', range_by_group=True
expected_output_df = date_df.drop([0,1,13]).reset_index(drop=True)
expected_output_df.loc[[1,6,9], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates,
                                   'date',
                                   groupby='id',
                                   fill_value=np.nan,
                                   range_by_group=True)
test_eq(expected_output_df, output_df)
# groupby='id', range_by_group=False
expected_output_df = date_df.copy()
expected_output_df.loc[[0,1,3,8,11,13], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates,
                                   'date',
                                   groupby='id',
                                   fill_value=np.nan,
                                   range_by_group=False)
test_eq(expected_output_df, output_df)
# Filling dates between min and max timestamps (4-hourly frequency)
dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
data = np.zeros((len(dates), 3))
data[:, 0] = dates
data[:, 1] = np.random.rand(len(dates))
data[:, 2] = np.random.rand(len(dates))
cols = ['date', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
date_df_with_missing_dates
# No groups
expected_output_df = date_df.copy()
expected_output_df.loc[[1,3], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates, 'date', groupby=None, fill_value=np.nan, range_by_group=False, freq='4H')
test_eq(output_df, expected_output_df)
# Filling missing values between min and max timestamps for each value in groupby column
dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
dates = np.concatenate((dates, dates))
data = np.zeros((len(dates), 4))
data[:, 0] = dates
data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
data[:, 2] = np.random.rand(len(dates))
data[:, 3] = np.random.rand(len(dates))
cols = ['date', 'id', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([0,1,3,8,9,11]).reset_index(drop=True)
date_df_with_missing_dates
# groupby='id', range_by_group=True
expected_output_df = date_df.drop([0,1,11]).reset_index(drop=True)
expected_output_df.loc[[1,6,7], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates,
                                   'date',
                                   groupby='id',
                                   fill_value=np.nan,
                                   range_by_group=True,
                                   freq='4H')
test_eq(expected_output_df, output_df)
# groupby='id', range_by_group=False
expected_output_df = date_df.copy()
expected_output_df.loc[[0,1,3,8,9,11], ['feature1', 'feature2']] = np.nan
display(expected_output_df)
output_df = add_missing_timestamps(date_df_with_missing_dates,
                                   'date',
                                   groupby='id',
                                   fill_value=np.nan,
                                   range_by_group=False,
                                   freq='4H')
test_eq(expected_output_df, output_df)
# No groups, with duplicate dates ==> FAILS
# NOTE(review): the expected error messages below come from pandas internals and
# may change across pandas versions.
dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
data = np.zeros((len(dates), 3))
data[:, 0] = dates
data[:, 1] = np.random.rand(len(dates))
data[:, 2] = np.random.rand(len(dates))
cols = ['date', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([1,3]).reset_index(drop=True)
date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
display(date_df_with_missing_dates)
test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby=None, fill_value=np.nan, range_by_group=False, freq='4H'),
          contains='cannot reindex from a duplicate axis')
# groupby='id', range_by_group=True, with duplicate dates ==> FAILS
dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
dates = np.concatenate((dates, dates))
data = np.zeros((len(dates), 4))
data[:, 0] = dates
data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
data[:, 2] = np.random.rand(len(dates))
data[:, 3] = np.random.rand(len(dates))
cols = ['date', 'id', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)
date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
display(date_df_with_missing_dates)
test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=True, freq='4H'),
          contains='cannot handle a non-unique multi-index!')
# groupby='id', range_by_group=FALSE, with duplicate dates ==> FAILS
dates = pd.date_range('2021-05-01 000:00', '2021-05-01 20:00', freq='4H').values
dates = np.concatenate((dates, dates))
data = np.zeros((len(dates), 4))
data[:, 0] = dates
data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
data[:, 2] = np.random.rand(len(dates))
data[:, 3] = np.random.rand(len(dates))
cols = ['date', 'id', 'feature1', 'feature2']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float, 'feature2': float})
date_df_with_missing_dates = date_df.drop([0,1,8,9,11]).reset_index(drop=True)
date_df_with_missing_dates.loc[3, 'date'] = date_df_with_missing_dates.loc[2, 'date']
display(date_df_with_missing_dates)
test_fail(add_missing_timestamps, args=[date_df_with_missing_dates, 'date'], kwargs=dict(groupby='id', fill_value=np.nan, range_by_group=False, freq='4H'),
          contains='cannot handle a non-unique multi-index!')
# export
def time_encoding(series, freq, max_val=None):
    """Transforms a pandas series of dtype datetime64 (of any freq) or DatetimeIndex into 2 float arrays (sin, cos)
    Available options: microsecond, millisecond, second, minute, hour, day = day_of_month = dayofmonth,
    day_of_week = weekday = dayofweek, day_of_year = dayofyear, week = week_of_year = weekofyear, month and year
    """
    # Normalize the aliases promised by the docstring
    if freq in ('day_of_week', 'weekday'): freq = 'dayofweek'
    elif freq in ('day_of_month', 'dayofmonth'): freq = 'day'
    elif freq == 'day_of_year': freq = 'dayofyear'
    # Bug fix: 'week_of_year'/'weekofyear' are documented aliases but were never
    # remapped, so they failed the assert below.
    elif freq in ('week_of_year', 'weekofyear'): freq = 'week'
    available_freqs = ['microsecond', 'millisecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'month', 'year']
    assert freq in available_freqs
    if max_val is None:
        # default period length for each freq (parallel to available_freqs)
        idx = available_freqs.index(freq)
        max_val = [1_000_000, 1_000, 60, 60, 24, 31, 7, 366, 53, 12, 10][idx]
    try:
        series = series.to_series()  # DatetimeIndex -> Series
    except AttributeError:  # already a Series (narrowed from a bare except)
        pass
    if freq == 'microsecond': series = series.dt.microsecond
    elif freq == 'millisecond': series = series.dt.microsecond // 1_000
    elif freq == 'second': series = series.dt.second
    elif freq == 'minute': series = series.dt.minute
    elif freq == 'hour': series = series.dt.hour
    elif freq == 'day': series = series.dt.day
    elif freq == 'dayofweek': series = series.dt.dayofweek
    elif freq == 'dayofyear': series = series.dt.dayofyear
    elif freq == 'week': series = series.dt.isocalendar().week
    elif freq == 'month': series = series.dt.month
    elif freq == 'year': series = series.dt.year - series.dt.year // 10 * 10  # last digit of the year
    # Map the cyclical value onto the unit circle
    sin = np.sin(series.values / max_val * 2 * np.pi)
    cos = np.cos(series.values / max_val * 2 * np.pi)
    return sin, cos
# Visual check of the sin/cos encodings for a range of frequencies (Series input)
for freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:
    tdf = pd.DataFrame(pd.date_range('2021-03-01', datetime.today()), columns=['date'])
    a,b = time_encoding(tdf.date, freq=freq)
    plt.plot(a)
    plt.plot(b)
    plt.title(freq)
    plt.show()
# Same check with a DatetimeIndex input
for freq in ['microsecond', 'second', 'minute', 'hour', 'day', 'dayofweek', 'dayofyear', 'month']:
    dateindex = pd.date_range('2021-03-01', datetime.today())
    a,b = time_encoding(dateindex, freq=freq)
    plt.plot(a)
    plt.plot(b)
    plt.title(freq)
    plt.show()
# Encode the day of week of the date_df built above and store it as 2 new columns
dow_sin, dow_cos = time_encoding(date_df['date'], 'dayofweek')
plt.plot(dow_sin)
plt.plot(dow_cos)
plt.title('DayOfWeek')
plt.show()
date_df['dow_sin'] = dow_sin
date_df['dow_cos'] = dow_cos
date_df
# export
def forward_gaps(o, nan_to_num=0, normalize=True):
    """Number of sequence steps since previous real value along the last dimension of 3D arrays or tensors

    Args:
        o: 3D array/tensor of shape (batch, channels, seq_len) where missing values are NaN.
        nan_to_num: value used for positions with no previous real value (leading NaNs). Default: 0
        normalize: if True, gaps are divided by the sequence length (so they fall in (0, 1]).
    """
    b,c,s=o.shape
    if isinstance(o, torch.Tensor):
        # index of the most recent non-NaN step at or before each position
        # (o==o is False only where o is NaN)
        idx = torch.where(o==o, torch.arange(s, device=o.device), 0)
        idx = torch.cummax(idx, axis=-1).values
        # distance in steps from that last real value (1 when the current step is real)
        gaps = 1 + (torch.arange(s, device=o.device).reshape(1,1,-1).repeat(b, c, 1) - idx).float()
        # mask positions whose "last real value" is itself NaN (no real value seen yet)
        mask = torch.isnan(o[torch.arange(b, device=o.device)[:,None, None], torch.arange(c, device=o.device)[None, :, None], idx])
    elif isinstance(o, np.ndarray):
        # same computation with the numpy equivalents of cummax / gather
        idx = np.where(o==o, np.arange(s), 0)
        idx = np.maximum.accumulate(idx,axis=-1)
        gaps = 1 + (np.arange(s).reshape(1,1,-1).repeat(b, 0).repeat(c, 1) - idx).astype(float)
        mask = np.isnan(o[np.arange(b)[:,None, None], np.arange(c)[None, :, None], idx])
    gaps[mask] = nan_to_num
    if normalize:
        gaps[gaps > 0] = gaps[gaps > 0] / s
    return gaps
def backward_gaps(o, nan_to_num=0, normalize=True):
    """Number of sequence steps to next real value along the last dimension of 3D arrays or tensors"""
    # Reverse the sequence axis, measure forward gaps, then reverse the result back.
    if isinstance(o, torch.Tensor):
        flip = lambda x: torch_flip(x, -1)
    elif isinstance(o, np.ndarray):
        flip = lambda x: x[..., ::-1]
    else:
        flip = lambda x: x
    return flip(forward_gaps(flip(o), nan_to_num=nan_to_num, normalize=normalize))
def nearest_gaps(o, nan_to_num=0, normalize=True):
    """Number of sequence steps to nearest real value along the last dimension of 3D arrays or tensors"""
    # NaN (not nan_to_num) marks "no real value on this side" so fmin can ignore it
    fwd = forward_gaps(o, nan_to_num=np.nan, normalize=normalize)
    bwd = backward_gaps(o, nan_to_num=np.nan, normalize=normalize)
    if isinstance(o, torch.Tensor):
        nearest = torch.fmin(fwd, bwd)
        nearest[torch.isnan(nearest)] = nan_to_num
        return nearest
    elif isinstance(o, np.ndarray):
        nearest = np.fmin(fwd, bwd)
        nearest[np.isnan(nearest)] = nan_to_num
        return nearest
def get_gaps(o : Tensor, nan_to_num : int = 0, forward : bool = True, backward : bool = True,
             nearest : bool = True, normalize : bool = True):
    """Number of sequence steps from previous, to next and/or to nearest real value along the
    last dimension of 3D arrays or tensors"""
    is_tensor = isinstance(o, torch.Tensor)
    # forward/backward gaps are also needed to derive the nearest gaps
    fwd = forward_gaps(o, nan_to_num=np.nan, normalize=normalize) if (forward or nearest) else None
    bwd = backward_gaps(o, nan_to_num=np.nan, normalize=normalize) if (backward or nearest) else None
    parts = []
    if forward:
        parts.append(fwd)
    if backward:
        parts.append(bwd)
    if nearest:
        parts.append(torch.fmin(fwd, bwd) if is_tensor else np.fmin(fwd, bwd))
    # concatenate along the channel axis and replace remaining NaNs
    if is_tensor:
        gaps = torch.cat(parts, 1)
        gaps[torch.isnan(gaps)] = nan_to_num
    elif isinstance(o, np.ndarray):
        gaps = np.concatenate(parts, 1)
        gaps[np.isnan(gaps)] = nan_to_num
    return gaps
# Sanity checks: normalized gaps lie in [0, 1] and contain no NaNs
t = torch.rand(1, 2, 8)
arr = t.numpy()
t[t <.6] = np.nan
test_ge(nearest_gaps(t).min().item(), 0)
test_ge(nearest_gaps(arr).min(), 0)
test_le(nearest_gaps(t).min().item(), 1)
test_le(nearest_gaps(arr).min(), 1)
test_eq(torch.isnan(forward_gaps(t)).sum(), 0)
test_eq(np.isnan(forward_gaps(arr)).sum(), 0)
# get_gaps stacks fwd + bwd + nearest along the channel axis: 2 channels -> 6
ag = get_gaps(t)
test_eq(ag.shape, (1,6,8))
test_eq(torch.isnan(ag).sum(), 0)
# export
def add_delta_timestamp_cols(df, cols=None, groupby=None, forward=True, backward=True, nearest=True, nan_to_num=0, normalize=True):
    """Adds delta-timestamp (gap) columns to `df` for each column in `cols`.

    For every column `col` it appends:
      * `{col}_dt_fwd`: steps since the previous real (non-NaN) value (when `forward`)
      * `{col}_dt_bwd`: steps until the next real value (when `backward`)
      * `{col}_dt_nearest`: element-wise minimum of the two (when `nearest`)
    Gaps are computed per group when `groupby` is given. With `normalize=True` they are
    divided by the (group) sequence length. Remaining NaNs are replaced by `nan_to_num`.
    NOTE(review): mutates `df` in place and also returns it.
    """
    if cols is None: cols = df.columns
    elif not is_listy(cols): cols = [cols]
    if forward or nearest:
        if groupby:
            # forward_gaps expects shape (1, n_vars, seq_len), so each group is transposed;
            # group results are then concatenated back along the sequence axis
            forward_time_gaps = df[cols].groupby(df[groupby]).apply(lambda x: forward_gaps(x.values.transpose(1,0)[None], nan_to_num=np.nan, normalize=normalize))
            forward_time_gaps = np.concatenate(forward_time_gaps, -1)[0].transpose(1,0)
        else:
            forward_time_gaps = forward_gaps(df[cols].values.transpose(1,0)[None], nan_to_num=np.nan, normalize=normalize)[0].transpose(1,0)
        if forward:
            df[[f'{col}_dt_fwd' for col in cols]] = forward_time_gaps
            df[[f'{col}_dt_fwd' for col in cols]] = df[[f'{col}_dt_fwd' for col in cols]].fillna(nan_to_num)
    if backward or nearest:
        if groupby:
            backward_time_gaps = df[cols].groupby(df[groupby]).apply(lambda x: backward_gaps(x.values.transpose(1,0)[None], nan_to_num=np.nan, normalize=normalize))
            backward_time_gaps = np.concatenate(backward_time_gaps, -1)[0].transpose(1,0)
        else:
            backward_time_gaps = backward_gaps(df[cols].values.transpose(1,0)[None], nan_to_num=np.nan, normalize=normalize)[0].transpose(1,0)
        if backward:
            df[[f'{col}_dt_bwd' for col in cols]] = backward_time_gaps
            df[[f'{col}_dt_bwd' for col in cols]] = df[[f'{col}_dt_bwd' for col in cols]].fillna(nan_to_num)
    if nearest:
        # nearest gap = element-wise min of forward and backward gaps (NaNs ignored by fmin)
        df[[f'{col}_dt_nearest' for col in cols]] = np.fmin(forward_time_gaps, backward_time_gaps)
        df[[f'{col}_dt_nearest' for col in cols]] = df[[f'{col}_dt_nearest' for col in cols]].fillna(nan_to_num)
    return df
# Add delta timestamp features for the no groups setting
dates = pd.date_range('2021-05-01', '2021-05-07').values
data = np.zeros((len(dates), 2))
data[:, 0] = dates
data[:, 1] = np.random.rand(len(dates))
cols = ['date', 'feature1']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'feature1': float})
date_df.loc[[1,3,4],'feature1'] = np.nan
# No groups: gaps are normalized by the full sequence length
normalize = 1/len(dates)
expected_output_df = date_df.copy()
expected_output_df['feature1_dt_fwd'] = normalize
expected_output_df.loc[[1,3,4], 'feature1_dt_fwd'] = np.array([2,2,3]) * normalize
expected_output_df['feature1_dt_bwd'] = normalize
expected_output_df.loc[[1,3,4], 'feature1_dt_bwd'] = np.array([2,3,2]) * normalize
expected_output_df['feature1_dt_nearest'] = normalize
expected_output_df.loc[[1,3,4], 'feature1_dt_nearest'] =np.array([2,2,2]) * normalize
display(expected_output_df)
output_df = add_delta_timestamp_cols(date_df, cols='feature1')
test_eq(expected_output_df, output_df)
# Add delta timestamp features within a group
dates = pd.date_range('2021-05-01', '2021-05-07').values
dates = np.concatenate((dates, dates))
data = np.zeros((len(dates), 3))
data[:, 0] = dates
data[:, 1] = np.array([0]*(len(dates)//2)+[1]*(len(dates)//2))
data[:, 2] = np.random.rand(len(dates))
cols = ['date', 'id', 'feature1']
date_df = pd.DataFrame(data, columns=cols).astype({'date': 'datetime64[ns]', 'id': int, 'feature1': float})
date_df.loc[[1,3,4,8,9,11],'feature1'] = np.nan
# groupby='id': gaps are normalized by each group's sequence length
normalize = 2/len(dates)
expected_output_df = date_df.copy()
expected_output_df['feature1_dt_fwd'] = normalize
expected_output_df.loc[[1,3,4,8,9,11], 'feature1_dt_fwd'] = np.array([2,2,3,2,3,2]) * normalize
expected_output_df['feature1_dt_bwd'] = normalize
expected_output_df.loc[[1,3,4,8,9,11], 'feature1_dt_bwd'] = np.array([2,3,2,3,2,2]) * normalize
expected_output_df['feature1_dt_nearest'] = normalize
expected_output_df.loc[[1,3,4,8,9,11], 'feature1_dt_nearest'] =np.array([2,2,2,2,2,2]) * normalize
display(expected_output_df)
output_df = add_delta_timestamp_cols(date_df, cols='feature1', groupby='id')
test_eq(expected_output_df, output_df)
# export
# # SlidingWindow vectorization is based on "Fast and Robust Sliding Window Vectorization with NumPy" by Syafiq Kamarul Azman
# # https://towardsdatascience.com/fast-and-robust-sliding-window-vectorization-with-numpy-3ad950ed62f5
def SlidingWindow(window_len:int, stride:Union[None, int]=1, start:int=0, pad_remainder:bool=False, padding_value:float=np.nan, add_padding_feature:bool=True,
                  get_x:Union[None, int, list]=None, get_y:Union[None, int, list]=None, y_func:Optional[callable]=None, copy:bool=False,
                  horizon:Union[int, list]=1, seq_first:bool=True, sort_by:Optional[list]=None, ascending:bool=True, check_leakage:bool=True):
    """
    Applies a sliding window to a 1d or 2d input (np.ndarray, torch.Tensor or pd.DataFrame)
    Args:
        window_len = length of lookback window
        stride = n datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap)
        start = determines the step where the first window is applied: 0 (default), a given step (int), or random within the 1st stride (None).
        pad_remainder = allows to pad remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data).
        padding_value = value (float) that will be used for padding. Default: np.nan
        add_padding_feature = add an additional feature indicating whether each timestep is padded (1) or not (0).
        horizon = number of future datapoints to predict:
            * 0 for last step in each sub-window.
            * n > 0 for a range of n future steps (1 to n).
            * n < 0 for a range of n past steps (-n + 1 to 0).
            * list : for those exact timesteps.
        get_x = indices of columns that contain the independent variable (xs). If None, all data will be used as x.
        get_y = indices of columns that contain the target (ys). If None, all data will be used as y.
                [] means no y data is created (unlabeled data).
        y_func = function to calculate the ys based on the get_y col/s and each y sub-window. y_func must be a function applied to axis=1!
        copy = copy the original object to avoid changes in it.
        seq_first = True if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len)
        sort_by = column/s used for sorting the array in ascending order
        ascending = used in sorting
        check_leakage = checks if there's leakage in the output between X and y
    Input:
        You can use np.ndarray, pd.DataFrame or torch.Tensor as input
        shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)
    """
    # Build the relative timestep offsets predicted for each window
    if horizon == 0: horizon_rng = np.array([0])
    elif is_listy(horizon): horizon_rng = np.array(horizon)
    elif isinstance(horizon, Integral): horizon_rng = np.arange(1, horizon + 1) if horizon > 0 else np.arange(horizon + 1, 1)
    min_horizon = min(horizon_rng)
    max_horizon = max(horizon_rng)
    # Normalize get_x / get_y into either slice(None) or a list of column selectors
    _get_x = slice(None) if get_x is None else get_x.tolist() if isinstance(get_x, pd.core.indexes.base.Index) else [get_x] if not is_listy(get_x) else get_x
    _get_y = slice(None) if get_y is None else get_y.tolist() if isinstance(get_y, pd.core.indexes.base.Index) else [get_y] if not is_listy(get_y) else get_y
    # A horizon <= 0 means y overlaps the lookback window: x and y columns must then be disjoint
    if min_horizon <= 0 and y_func is None and get_y != [] and check_leakage:
        assert get_x is not None and get_y is not None and len([y for y in _get_y if y in _get_x]) == 0, \
            'you need to change either horizon, get_x, get_y or use a y_func to avoid leakage'
    if stride == 0 or stride is None:
        stride = window_len
    def _inner(o):
        if copy:
            if isinstance(o, torch.Tensor): o = o.clone()
            else: o = o.copy()
        if not seq_first: o = o.T
        if isinstance(o, pd.DataFrame):
            if sort_by is not None: o.sort_values(by=sort_by, axis=0, ascending=ascending, inplace=True, ignore_index=True)
            # string column labels are selected with .loc, positional indices with .iloc
            if get_x is None: X = o.values
            elif isinstance(o.columns[0], str): X = o.loc[:, _get_x].values
            else: X = o.iloc[:, _get_x].values
            if get_y == []: y = None
            elif get_y is None: y = o.values
            elif isinstance(o.columns[0], str): y = o.loc[:, _get_y].values
            else: y = o.iloc[:, _get_y].values
        else:
            if isinstance(o, torch.Tensor): o = o.numpy()
            if o.ndim < 2: o = o[:, None]
            if get_x is None: X = o
            else: X = o[:, _get_x]
            if get_y == []: y = None
            elif get_y is None: y = o
            else: y = o[:, _get_y]
        seq_len = len(X)
        # Last valid window start offset (bool pad_remainder is used as 0/1 here)
        if get_y != []:
            X_max_time = seq_len - start - window_len - max_horizon - pad_remainder
        else:
            X_max_time = seq_len - start - window_len - pad_remainder
        if pad_remainder:
            if add_padding_feature:
                # extra feature column: 0 = real timestep, 1 = padded timestep
                X = np.concatenate([X, np.zeros((X.shape[0], 1))], axis=1)
            # round the last offset up to the next multiple of stride
            X_max_time = X_max_time - X_max_time % stride + stride
            if window_len + start + X_max_time - len(X) >= 0:
                # append padding rows so the final window is complete
                _X = np.empty((window_len + start + X_max_time - len(X), *X.shape[1:]))
                _X[:] = padding_value
                if add_padding_feature:
                    _X[:, -1] = 1
                X = np.concatenate((X, _X))
        elif X_max_time < 0: return None, None
        # Vectorized window extraction: rows = window starts, cols = offsets within a window
        X_sub_windows = (start +
                         np.expand_dims(np.arange(window_len), 0) +  # window len
                         np.expand_dims(np.arange(X_max_time + 1, step=stride), 0).T
                         )  # # subwindows
        X = np.transpose(X[X_sub_windows], (0, 2, 1))
        if get_y != [] and y is not None:
            # y windows are anchored at the last step of each x window
            y_start = start + window_len - 1
            y_max_time = seq_len - y_start - max_horizon - pad_remainder
            div = 0
            if pad_remainder:
                div = y_max_time % stride
                y_max_time = y_max_time - y_max_time % stride + stride
                if window_len + start + y_max_time - len(y) >= 0:
                    _y = np.empty((window_len + start + y_max_time - len(y), *y.shape[1:]))
                    _y[:] = padding_value
                    y = np.concatenate((y, _y))
            y_sub_windows = (y_start +
                             np.expand_dims(horizon_rng, 0) +  # horizon_rng
                             np.expand_dims(np.arange(y_max_time + div, step=stride), 0).T
                             )  # # subwindows
            y = y[y_sub_windows]
            if y_func is not None and len(y) > 0:
                y = y_func(y)
            # squeeze singleton trailing dims (e.g. single target column / single horizon)
            if y.ndim >= 2:
                for d in np.arange(1, y.ndim)[::-1]:
                    if y.shape[d] == 1: y = np.squeeze(y, axis=d)
            if y.ndim == 3:
                y = y.transpose(0, 2, 1)
            return X, y
        else: return X, None
    return _inner
# Alias kept for backward compatibility
SlidingWindowSplitter = SlidingWindow
# Unlabeled data (get_y=[]) with padding of the remainder subsequence
wl = 5
stride = 5
t = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)
X
# 1d input, default stride
wl = 5
t = np.arange(10)
print('input shape:', t.shape)
X, y = SlidingWindow(wl)(t)
test_eq(X.shape[1:], (1, wl))
itemify(X,)
# 1d input with a 1-step horizon: y is a scalar per window
wl = 5
h = 1
t = np.arange(10)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=1, horizon=h)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# Multi-step horizon: y has h values per window
wl = 5
h = 2 # 2 or more
t = np.arange(10)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, horizon=h)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, (2, ))
# Same but with seq_first=False (input shape (n_vars, seq_len))
wl = 5
h = 2 # 2 or more
t = np.arange(10).reshape(1, -1)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=1, horizon=h, get_y=None, seq_first=False)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, (2, ))
wl = 5
h = 2 # 2 or more
t = np.arange(10).reshape(1, -1)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=1, horizon=h, seq_first=False)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
# Larger stride
wl = 5
t = np.arange(10).reshape(1, -1)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=3, horizon=1, get_y=None, seq_first=False)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# Non-zero start step and non-overlapping windows (stride=None)
wl = 5
start = 3
t = np.arange(20)
print('input shape:', t.shape)
X, y = SlidingWindow(wl, stride=None, horizon=1, start=start)(t)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# DataFrame input, non-overlapping windows
wl = 5
t = np.arange(20)
print('input shape:', t.shape)
df = pd.DataFrame(t, columns=['var'])
display(df)
X, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None)(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# DataFrame input, overlapping windows
wl = 5
t = np.arange(20)
print('input shape:', t.shape)
df = pd.DataFrame(t, columns=['var'])
display(df)
X, y = SlidingWindow(wl, stride=1, horizon=1, get_y=None)(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# Transposed DataFrame input with seq_first=False
wl = 5
t = np.arange(20)
print('input shape:', t.shape)
df = pd.DataFrame(t, columns=['var']).T
display(df)
X, y = SlidingWindow(wl, stride=None, horizon=1, get_y=None, seq_first=False)(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (1, wl))
test_eq(items[0][1].shape, ())
# Multivariate DataFrame input
wl = 5
n_vars = 3
t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
print('input shape:', t.shape)
df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
display(df)
X, y = SlidingWindow(wl, horizon=1)(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (n_vars, wl))
# Multivariate DataFrame with a named y column
wl = 5
n_vars = 3
t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
print('input shape:', t.shape)
df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
display(df)
X, y = SlidingWindow(wl, horizon=1, get_y="var_0")(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (n_vars, wl))
# Separate x columns and target column
wl = 5
n_vars = 3
t = (torch.stack(n_vars * [torch.arange(10)]).T * tensor([1, 10, 100]))
print('input shape:', t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
df = pd.DataFrame(t, columns=columns)
display(df)
X, y = SlidingWindow(wl, horizon=1, get_x=columns[:-1], get_y='target')(df)
items = itemify(X, y)
print(items)
test_eq(items[0][0].shape, (n_vars-1, wl))
test_eq(items[0][1].shape, ())
# Random walk array, horizon=0 (y = last step of each window)
n_vars = 3
t = (np.random.rand(1000, n_vars) - .5).cumsum(0)
print(t.shape)
plt.plot(t)
plt.show()
X, y = SlidingWindow(5, stride=None, horizon=0, get_x=[0,1], get_y=2)(t)
test_eq(X[0].shape, (n_vars-1, wl))
test_eq(y[0].shape, ())
print(X.shape, y.shape)
# Random walk DataFrame with a named target column
wl = 5
n_vars = 3
t = (np.random.rand(100, n_vars) - .5).cumsum(0)
print(t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
df = pd.DataFrame(t, columns=columns)
display(df)
X, y = SlidingWindow(5, horizon=0, get_x=columns[:-1], get_y='target')(df)
test_eq(X[0].shape, (n_vars-1, wl))
test_eq(y[0].shape, ())
print(X.shape, y.shape)
seq_len = 100
n_vars = 5
t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
print(t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
df = pd.DataFrame(t, columns=columns)
display(df)
X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)
test_eq(X[0].shape, (n_vars-1, wl))
test_eq(y[0].shape, ())
print(X.shape, y.shape)
seq_len = 100
n_vars = 5
t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
print(t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)] + ['target']
df = pd.DataFrame(t, columns=columns).T
display(df)
X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)
test_eq(X[0].shape, (n_vars-1, wl))
test_eq(y[0].shape, ())
print(X.shape, y.shape)
seq_len = 100
n_vars = 5
t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
print(t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)] + ['target']
df = pd.DataFrame(t, columns=columns).T
display(df)
X, y = SlidingWindow(5, stride=None, horizon=0, get_x=columns[:-1], get_y='target', seq_first=False)(df)
test_eq(X[0].shape, (n_vars-1, wl))
test_eq(y[0].shape, ())
print(X.shape, y.shape)
seq_len = 100
n_vars = 5
t = (np.random.rand(seq_len, n_vars) - .5).cumsum(0)
print(t.shape)
columns=[f'var_{i}' for i in range(n_vars-1)]+['target']
df = pd.DataFrame(t, columns=columns)
display(df)
X, y = SlidingWindow(5, stride=1, horizon=0, get_x=columns[:-1], get_y='target', seq_first=True)(df)
splits = TrainValidTestSplitter(valid_size=.2, shuffle=False)(y)
X.shape, y.shape, splits
data = np.concatenate([np.linspace(0, 1, 11).reshape(-1,1).repeat(2, 1), np.arange(11).reshape(-1,1)], -1)
df_test = pd.DataFrame(data, columns=['col1', 'col2', 'target'])
df_test['target'] = df_test['target'].astype(int)
df_test
def _y_func(o):
    # Select the first element along axis 1 of each y sub-window
    # (y_func is applied to the stacked y windows, axis=1).
    return o[:, 0]
for wl in np.arange(1, 20):
x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=_y_func)(df_test)
test_eq(x.shape[0], math.ceil((len(df_test))/wl))
test_eq(x.shape[0], y.shape[0])
test_eq(x.shape[1], 3)
test_eq(x.shape[2], wl)
test_close(x[:, 0, 0]*10, y)
for wl in np.arange(1, 20):
x, y = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)
test_eq(x.shape[0], math.ceil((len(df_test))/ wl))
test_eq(x.shape[0], y.shape[0])
test_eq(x.shape[1], 3)
test_eq(x.shape[2], wl)
for wl in np.arange(1, len(df_test)+1):
x, y = SlidingWindow(wl, None, pad_remainder=False, get_x=['col1', 'col2'], get_y=['target'], horizon=-wl, y_func=None)(df_test)
test_eq(x.shape[0], len(df_test) // wl)
test_eq(x.shape[0], y.shape[0])
test_eq(x.shape[1], 2)
test_eq(x.shape[2], wl)
for wl in np.arange(1, 20):
x, _ = SlidingWindow(wl, None, pad_remainder=True, get_x=['col1', 'col2'], get_y=[])(df_test)
test_eq(x.shape[0], math.ceil((len(df_test))/wl))
test_eq(x.shape[1], 3)
test_eq(x.shape[2], wl)
for wl in np.arange(2, len(df_test)):
x, _ = SlidingWindow(wl, wl, pad_remainder=False, get_x=['col1', 'col2'], get_y=[])(df_test)
test_eq(x.shape[0], len(df_test) // wl)
test_eq(x.shape[1], 2)
test_eq(x.shape[2], wl)
#export
def SlidingWindowPanel(window_len:int, unique_id_cols:list, stride:Union[None, int]=1, start:int=0,
                       pad_remainder:bool=False, padding_value:float=np.nan, add_padding_feature:bool=True,
                       get_x:Union[None, int, list]=None, get_y:Union[None, int, list]=None, y_func:Optional[callable]=None, copy:bool=False,
                       horizon:Union[int, list]=1, seq_first:bool=True, sort_by:Optional[list]=None, ascending:bool=True,
                       check_leakage:bool=True, return_key:bool=False, verbose:bool=True):
    """
    Applies a sliding window to a pd.DataFrame containing multiple time series
    (one per entity identified by unique_id_cols), concatenating the results.
    Args:
        window_len          = length of lookback window
        unique_id_cols      = pd.DataFrame columns that will be used to identify a time series for each entity.
        stride              = n datapoints the window is moved ahead along the sequence. Default: 1. If None, stride=window_len (no overlap)
        start               = determines the step where the first window is applied: 0 (default), a given step (int), or random within the 1st stride (None).
        pad_remainder       = allows to pad remainder subsequences when the sliding window is applied and get_y == [] (unlabeled data).
        padding_value       = value (float) that will be used for padding. Default: np.nan
        add_padding_feature = add an additional feature indicating whether each timestep is padded (1) or not (0).
        horizon             = number of future datapoints to predict:
                              * 0 for last step in each sub-window.
                              * n > 0 for a range of n future steps (1 to n).
                              * n < 0 for a range of n past steps (-n + 1 to 0).
                              * list : for those exact timesteps.
        get_x               = indices of columns that contain the independent variable (xs). If None, all data will be used as x.
        get_y               = indices of columns that contain the target (ys). If None, all data will be used as y.
                              [] means no y data is created (unlabeled data).
        y_func              = function to calculate the ys based on the get_y col/s and each y sub-window. y_func must be a function applied to axis=1!
        copy                = copy the original object to avoid changes in it.
        seq_first           = True if input shape (seq_len, n_vars), False if input shape (n_vars, seq_len)
        sort_by             = column/s used for sorting the array in ascending order
        ascending           = used in sorting
        check_leakage       = checks if there's leakage in the output between X and y
        return_key          = when True, the key corresponding to unique_id_cols for each sample is returned
        verbose             = controls verbosity. True or 1 displays progress bar. 2 or more show records that cannot be created due to its length.
    Input:
        You can use np.ndarray, pd.DataFrame or torch.Tensor as input
        shape: (seq_len, ) or (seq_len, n_vars) if seq_first=True else (n_vars, seq_len)
    """
    if not is_listy(unique_id_cols): unique_id_cols = [unique_id_cols]
    if sort_by is not None and not is_listy(sort_by): sort_by = [sort_by]
    # Always sort by the entity ids first so each entity's rows are contiguous,
    # then by any user-supplied sort columns (e.g. a time column).
    sort_by = unique_id_cols + (sort_by if sort_by is not None else [])

    def _SlidingWindowPanel(o):
        # o: pd.DataFrame containing all entities' time series stacked vertically.
        if copy:
            o = o.copy()
        o.sort_values(by=sort_by, axis=0, ascending=ascending, inplace=True, ignore_index=True)
        unique_id_values = o[unique_id_cols].drop_duplicates().values
        _x = []
        _y = []
        _key = []
        # Apply a plain SlidingWindow independently to each entity's sub-frame.
        for v in progress_bar(unique_id_values, display=verbose, leave=False):
            # Boolean row mask: rows whose unique_id_cols values all match v.
            x_v, y_v = SlidingWindow(window_len, stride=stride, start=start, pad_remainder=pad_remainder, padding_value=padding_value,
                                     add_padding_feature=add_padding_feature, get_x=get_x, get_y=get_y, y_func=y_func,
                                     horizon=horizon, seq_first=seq_first,
                                     check_leakage=check_leakage)(o[(o[unique_id_cols].values == v).sum(axis=1) == len(v)])
            if x_v is not None and len(x_v) > 0:
                _x.append(x_v)
                # One key entry per generated window, so keys align with X rows.
                if return_key: _key.append([v.tolist()] * len(x_v))
                if y_v is not None and len(y_v) > 0: _y.append(y_v)
            elif verbose>=2:
                print(f'cannot use {unique_id_cols} = {v} due to not having enough records')
        X = np.concatenate(_x)
        if _y != []:
            y = np.concatenate(_y)
            # Squeeze singleton trailing dims (iterating from the last axis down)
            # so e.g. (n, 1) targets become (n,).
            for d in np.arange(1, y.ndim)[::-1]:
                if y.shape[d] == 1: y = np.squeeze(y, axis=d)
        else: y = None
        if return_key:
            key = np.concatenate(_key)
            # Collapse a single-id key matrix (n, 1) to a flat (n,) array.
            if key.ndim == 2 and key.shape[-1] == 1: key = np.squeeze(key, -1)
        if return_key: return X, y, key
        else: return X, y

    return _SlidingWindowPanel
SlidingWindowPanelSplitter = SlidingWindowPanel
samples = 100_000
wl = 5
n_vars = 10
t = (torch.stack(n_vars * [torch.arange(samples)]).T * tensor([10**i for i in range(n_vars)]))
df = pd.DataFrame(t, columns=[f'var_{i}' for i in range(n_vars)])
df['time'] = np.arange(len(t))
df['device'] = 0
df['target'] = np.random.randint(0, 2, len(df))
df2 = df.copy()
df3 = df.copy()
cols = ['var_0', 'var_1', 'var_2', 'device', 'target']
df2[cols] = df2[cols] + 1
df3[cols] = df3[cols] + 2
df2 = df2.loc[:3]
df['region'] = 'A'
df2['region'] = 'A'
df3['region'] = 'B'
df = df.append(df2).append(df3).reset_index(drop=True)
df['index'] = np.arange(len(df))
df = df.sample(frac=1).reset_index(drop=True)
display(df.head())
df.shape
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'],
horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=False)(df)
X.shape, y.shape
X, y, key = SlidingWindowPanel(window_len=5, unique_id_cols=['device'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'],
horizon=0, seq_first=True, sort_by=['time'], ascending=True, return_key=True)(df)
X.shape, y.shape, key.shape
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'],
horizon=0, seq_first=True, sort_by=['time'], ascending=True)(df)
X.shape, y.shape
# y_func must be a function applied to axis=1!
def y_max(o):
    """Reduce each y sub-window to its maximum along axis 1."""
    return np.amax(o, axis=1)
X, y = SlidingWindowPanel(window_len=5, unique_id_cols=['device', 'region'], stride=1, start=0, get_x=df.columns[:n_vars], get_y=['target'],
y_func=y_max, horizon=5, seq_first=True, sort_by=['time'], ascending=True)(df)
X.shape, y.shape
# export
def identify_padding(float_mask, value=-1):
    """Mark fully-padded trailing subsequences in a float mask.

    A timestep counts as padded when its mean across all channels equals 1
    (e.g. every channel was NaN in the original isnan mask). Trailing runs of
    such timesteps (at the end of the last dimension) are overwritten with
    `value` across all channels.

    Args:
        float_mask: float tensor of shape (samples, channels, timesteps)
        value: scalar written into the padded positions (default -1)

    Returns:
        The same tensor, modified in place.
    """
    # Per-(sample, timestep) mean across channels; 1.0 marks an all-padded step.
    per_step = float_mask.mean(1)
    # Count trailing all-padded steps: flip time and find the first step
    # whose mean differs from 1.
    trailing = torch.argmax((torch.flip(per_step - 1, (-1,)) != 0).float(), -1)
    # NOTE(review): a sample padded at *every* timestep yields trailing == 0
    # here and is left untouched — confirm this is the intended behavior.
    sample_idxs = torch.arange(len(float_mask))[trailing != 0]
    if len(sample_idxs) > 0:
        for i, n_pad in zip(sample_idxs, trailing[trailing != 0]):
            float_mask[i, :, -n_pad:] = value
    return float_mask
wl = 5
stride = 5
t = np.repeat(np.arange(13).reshape(-1,1), 3, axis=-1)
print('input shape:', t.shape)
X, _ = SlidingWindow(wl, stride=stride, pad_remainder=True, get_y=[])(t)
X = tensor(X)
X[0, 1, -2:] = np.nan
X[1,..., :3] = np.nan
print(X)
identify_padding(torch.isnan(X).float())
#hide
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
```
| github_jupyter |
# Create trip statistics
# Purpose
Before looking at the dynamics of the ferries in the time series, it is a good idea to first examine some longer-term trends. As a first data reduction, statistics for each trip will be generated and saved, making it possible to spot trends over the day, week, month and year.
# Methodology
* Trip statistics will be generated for each trip containing:
* for all columns: min/mean/max/median/std
* energy consumption for all thrusters
* The statistics will be stored into a [xarray](http://xarray.pydata.org/en/stable/)
# Setup
```
# %load imports.py
#%load imports.py
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,3)
#import seaborn as sns
import os
from collections import OrderedDict
from IPython.display import display
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.set_option("display.max_columns", None)
import folium
import plotly.express as px
import plotly.graph_objects as go
import sys
import os
sys.path.append('../')
from src.visualization import visualize
from src.data import get_dataset
from src.data import trips
import scipy.integrate
import seaborn as sns
import xarray as xr
%%time
df = get_dataset.get(n_rows=None)
deltas = ['delta_%i' % i for i in range(1,5)]
df.drop(columns=deltas, inplace=True)
df['trip_time'] = pd.TimedeltaIndex(df['trip_time']).total_seconds()
mask = df['reversing'].copy()
df['reversing'].loc[mask] = 1
df['reversing'].loc[~mask] = 0
df.head()
groups = df.groupby(by='trip_no')
assert (groups.last()['sog'] < 0.3).all()
trip = df.groupby(by='trip_no').get_group(11)
visualize.plot_map(trip)
def integrate_time(trip):
    """Integrate every signal of a trip over trip time using Simpson's rule.

    Args:
        trip: DataFrame with a 'trip_time' column holding elapsed seconds;
            all remaining columns are numeric signals to integrate over time.

    Returns:
        pd.Series named 'integral' with one time integral per signal column.
    """
    trip_ = trip.copy()
    # pd.TimedeltaIndex(data, unit=...) is deprecated; pd.to_timedelta is the
    # supported spelling and round-trips the seconds values unchanged.
    t = pd.to_timedelta(trip_['trip_time'], unit='s').dt.total_seconds().to_numpy()
    trip_.drop(columns=['trip_time'], inplace=True)
    # SciPy renamed simps -> simpson (1.6) and removed simps in 1.14;
    # fall back to the old name on older SciPy versions.
    _simpson = getattr(scipy.integrate, 'simpson', None) or scipy.integrate.simps
    integral_trip = _simpson(y=trip_.T, x=t)
    s = pd.Series(data=integral_trip, name='integral', index=trip_.columns)
    return s
integrate_time(trip)
t = pd.TimedeltaIndex(trip['trip_time'], unit='s').total_seconds()
scipy.integrate.simps(y=trip['power_em_thruster_total'],x=t)
def trip_statistic(trip):
    """Compute per-trip statistics: describe() rows plus a time-integral row.

    Args:
        trip: DataFrame of trip signals including a 'trip_time' column.

    Returns:
        DataFrame indexed by statistic name ('count', 'mean', ..., 'integral').
    """
    stats = trip.describe()  # count/mean/std/min/quartiles/max per column
    integral_trip = integrate_time(trip)  # Series named 'integral'
    # DataFrame.append was removed in pandas 2.0; concatenate the integral
    # Series as a single row labeled 'integral' instead.
    stats = pd.concat([stats, integral_trip.to_frame().T])
    return stats
ds_stats = None
for trip_no, trip in df.groupby(by='trip_no'):
trip_ = trip.copy()
trip_direction = trip_.iloc[0]['trip_direction']
#trip_.drop(columns=['trip_no','trip_direction'], inplace=True)
trip_.drop(columns=['trip_no'], inplace=True)
stats = trip_statistic(trip_)
stats.index.name = 'statistic'
ds = xr.Dataset.from_dataframe(stats)
ds = ds.expand_dims('trip_no')
ds = ds.assign_coords(trip_no=np.array([trip_no],dtype=np.int64))
#ds.attrs['trip_direction'] = trip_direction
if ds_stats is None:
ds_stats = ds
else:
ds_stats = xr.concat([ds_stats,ds], dim="trip_no")
ds_stats
ds_stats.coords['statistic']
ds
ds_stats.sel(trip_no=2, statistic='mean')
ds_stats.sel(statistic='mean').plot.scatter(x="sog",y="trip_time")
ds_stats.sel(statistic='max').plot.scatter(x="sog",y="power_em_thruster_total")
ds_stats.sel(statistic=['min','mean','max']).plot.scatter(x="sog",y="power_em_thruster_total", hue='statistic');
xr.plot.hist(ds_stats.sel(statistic='mean')['sog'], bins=20);
xr.plot.hist(ds_stats.sel(statistic='integral')["power_em_thruster_total"], bins=20);
ds_stats.sel(statistic='integral').plot.scatter(x="sog",y="power_em_thruster_total")
df_mean = ds_stats.sel(statistic='mean').to_dataframe()
df_means = df_mean.groupby(by='trip_direction').mean()
df_stds = df_mean.groupby(by='trip_direction').std()
directions = pd.Series({
0 : 'Helsingör-Helsinborg',
1 : 'Helsinborg-Helsingör',
})
x = directions[df_means.index]
fig,ax=plt.subplots()
ax.bar(x=x, height=df_means['power_em_thruster_total'], yerr=df_stds['power_em_thruster_total'])
fig,ax=plt.subplots()
ax.bar(x=x, height=df_means['trip_time'], yerr=df_stds['trip_time'])
fig,ax=plt.subplots()
ax.bar(x=x, height=df_means['sog'], yerr=df_stds['sog'])
```
## Save statistics
```
df_mean = ds_stats.sel(statistic='mean').to_dataframe()
df_integral = ds_stats.sel(statistic='integral').to_dataframe()
df_std = ds_stats.sel(statistic='std').to_dataframe()
df_max = ds_stats.sel(statistic='max').to_dataframe()
df_stats = df_mean.drop(columns=['statistic'])
df_ = df.reset_index()
start_times = df_.groupby('trip_no').first()['time']
end_time = df_.groupby('trip_no').last()['time']
integral_columns = ['power_em_thruster_%i' %i for i in range(1,5)]
integral_columns+=['power_em_thruster_total','power_heeling']
df_stats[integral_columns] = df_integral[integral_columns]
max_columns = ['trip_time']
df_stats[max_columns] = df_max[max_columns]
df_stats['start_time'] = start_times
df_stats['end_time'] = end_time
df_stats.head()
from azureml.core import Workspace, Dataset
subscription_id = '3e9a363e-f191-4398-bd11-d32ccef9529c'
resource_group = 'demops'
workspace_name = 'D2E2F'
workspace = Workspace(subscription_id, resource_group, workspace_name)
def save():
    """Register the trip-statistics DataFrame as an Azure ML tabular dataset.

    Reads the module-level ``df_stats``, ``workspace`` and ``new_name``.
    """
    snapshot = df_stats.copy().reset_index()
    default_store = workspace.get_default_datastore()
    Dataset.Tabular.register_pandas_dataframe(
        dataframe=snapshot, target=default_store, name=new_name)
new_name = 'tycho_short_statistics'
if not new_name in workspace.datasets:
save()
columns = ['cos_pm%i' % i for i in range(1,5)]
df_std.mean()[columns]
columns = ['sin_pm%i' % i for i in range(1,5)]
df_std.mean()[columns]
```
| github_jupyter |
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import pyro
import pyro.distributions as dist
import scipy as sp
import scipy.stats
import time
import torch
# Utilities
import scene_generation.data.planar_scene_arrangement_utils as psa_utils
# These scenes contain non-penetrating object arrangements whose poses are drawn
# from a normal distribution with mu = (0.5, 0.5, pi) and sigma = (0.1, 0.1, pi/2)
DATA_BASE = "../data/single_planar_box_arrangements/normal_random/fixed_2_objects"
environments = psa_utils.load_environments(DATA_BASE)
def draw_rbt(ax, rbt, q):
    """Draw the board state for rigid-body tree `rbt` at configuration `q`,
    overlaying a translucent green unit square marking the board region."""
    psa_utils.draw_board_state(ax, rbt, q)
    board_region = patches.Rectangle(
        [0., 0.], 1., 1.,
        fill=True, color=[0., 1., 0.],
        linestyle='solid', linewidth=2, alpha=0.3)
    ax.add_patch(board_region)
def draw_environment(environment, ax):
    """Reconstruct an environment summary's rigid-body tree and render it on `ax`."""
    tree, config = psa_utils.build_rbt_from_summary(environment)
    draw_rbt(ax, tree, config)
# Try out gradient descent through the projection operator
import scene_generation.differentiable_nlp as diff_nlp
from copy import deepcopy
device = torch.device('cpu')
env_i = environments["train"][13]
rbt, qf = psa_utils.build_rbt_from_summary(env_i)
nq = rbt.get_num_positions()
#q = torch.tensor(qf.reshape(nq, 1), requires_grad=True)
np.random.seed(1)
q_start = np.random.random([nq, 1])
q = torch.tensor(q_start, requires_grad=True, dtype=torch.double)
q_target = torch.tensor(qf).view(nq, 1)
learning_rate = 0.05
constraints = [diff_nlp.object_origins_within_bounds_constraint_constructor_factory(
np.array([0., 0., 0.5]), np.array([1., 1., 0.5]))]
losses = []
q_snapshots = []
q_pred_snapshots = []
for t in range(41):
# Scheduling on the regularization in the derivative, which helps break off of
# constraint surfaces during the first few iterations.
q_pred = diff_nlp.projectToFeasibilityWithIKTorch.apply(q, rbt, constraints, 1/np.log(t+2))
# Compute and print loss. Loss is a Tensor of shape (), and loss.item()
# is a Python number giving its value.
loss = (q_pred - q_target).pow(2).sum()
losses.append(loss.cpu().detach().item())
q_snapshots.append(q.cpu().detach().numpy().copy())
q_pred_snapshots.append(q_pred.cpu().detach().numpy().copy())
loss.backward()
with torch.no_grad():
q -= learning_rate * q.grad
# Manually zero the gradients after running the backward pass
q.grad.zero_()
if t % 10 == 0:
print ".",
if t % 100 == 0:
print "Loss: ", loss.cpu().detach().item()
print torch.t(q)
plt.figure()
plt.plot(losses)
from underactuated import PlanarRigidBodyVisualizer
from matplotlib import animation, rc
from IPython.display import HTML
Tview = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.]])
fig, ax = plt.subplots(figsize=(10, 5))
ax.axis('equal')
ax.axis('off')
viz = PlanarRigidBodyVisualizer(
rbt, Tview, xlim=[-0.25, 1.25], ylim=[-0.25, 1.25], ax=ax)
def animate_update(i):
    # Render animation frame i: the pre-projection pose iterate q at step i.
    viz.draw(q_snapshots[i])
ani = animation.FuncAnimation(fig,
animate_update,
len(q_snapshots),
interval=2000/len(q_snapshots),
repeat=True)
plt.close(fig)
HTML(ani.to_html5_video()) # This needs to be the last line for the video to display
from underactuated import PlanarRigidBodyVisualizer
from matplotlib import animation, rc
from IPython.display import HTML
Tview = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.]])
fig, ax = plt.subplots(figsize=(10, 5))
ax.axis('equal')
ax.axis('off')
viz = PlanarRigidBodyVisualizer(
rbt, Tview, xlim=[-0.25, 1.25], ylim=[-0.25, 1.25], ax=ax)
def animate_update(i):
    # Render animation frame i: the projected (feasible) pose iterate at step i.
    viz.draw(q_pred_snapshots[i])
ani = animation.FuncAnimation(fig,
animate_update,
len(q_pred_snapshots),
interval=2000/len(q_pred_snapshots),
repeat=True)
plt.close(fig)
HTML(ani.to_html5_video()) # This needs to be the last line for the video to display
plt.figure().set_size_inches(12, 12)
print "Selection of environments from prior / generative model"
N = 5
for i in range(N):
for j in range(N):
plt.subplot(N, N, i*N+j+1)
draw_environment(environments["train"][i*N+j+1], plt.gca())
plt.grid(True)
plt.tight_layout()
# Test some functionality of the custom distribution type
dummy_env = deepcopy(environments["train"][0])
dummy_env["n_objects"] = 1
one_box_rbt, _ = psa_utils.build_rbt_from_summary(dummy_env)
two_box_rbt, _ = psa_utils.build_rbt_from_summary(environments["train"][0])
ik_constraints = [diff_nlp.object_origins_within_bounds_constraint_constructor_factory(
np.array([0., 0., 0.5]), np.array([1., 1., 0.5]))]
q0 = torch.tensor([[-0.1 , 0., 0., 0., 0., 0]], requires_grad=True)
distrib = diff_nlp.ProjectToFeasibilityWithIKAsDistribution(
one_box_rbt, q0, ik_constraints, 0.02, 0.01)
qf = distrib.sample()
qother = torch.Tensor([[0.1 , 0., 0., 0., 0., 0]])
print "q0: ", q0, distrib.log_prob(q0)
print "qf: ", qf, distrib.log_prob(qf)
print "qother: ", qother, distrib.log_prob(qother)
import matplotlib as mpl
# Evaluate at many points over a 2D grid + draw likelihood out
n_points = 50
x_min = -0.1
x_max = 0.1
y_min = -0.1
y_max = 0.1
X, Y = np.meshgrid(np.linspace(x_min, x_max, n_points), np.linspace(y_min, y_max, n_points))
q_probes = np.zeros([n_points*n_points, 6])
q_probes[:, 0] = X.flatten()
q_probes[:, 1] = Y.flatten()
q_probes = torch.tensor(q_probes, dtype=q0.dtype)
print q_probes
z = np.array([distrib.log_prob(q_probes[k, :]).detach().item() for k in range(q_probes.shape[0])])
z = z.reshape(n_points, n_points)
plt.figure()
h = plt.gca().imshow(np.rot90(z[:, :]),
cmap=plt.cm.gist_earth_r,
extent=[x_min, x_max, y_min, y_max],
alpha=0.6,
norm = mpl.colors.Normalize(vmin=10.,vmax=30))
plt.colorbar(h,fraction=0.046, pad=0.04)
plt.xlabel("x")
plt.ylabel("y")
plt.xlim([x_min, x_max])
plt.ylim([y_min, y_max])
# Eventually qother should be < qf, but > q0, even though it's the same
# distance from the constraint plane at x>0
if distrib.log_prob(qother).item() <= distrib.log_prob(q0).item():
raise ValueError("I must not have finished this yet")
q0_2 = torch.tensor([[1.1 , 0., 0., 0., 0., 0]], requires_grad=True)
distrib = diff_nlp.ProjectToFeasibilityWithIKAsDistribution(
two_box_rbt, q0_2, ik_constraints, 0.1, 0.01, q0_fixed=qf)
qf_2 = distrib.sample()
qother_2 = torch.Tensor([[0.9 , 0., 0., 0., 0., 0]])
print "q0_2: ", q0_2, distrib.log_prob(q0_2)
print "qf_2: ", qf_2, distrib.log_prob(qf_2)
print "qother: ", qother_2, distrib.log_prob(qother_2)
if distrib.log_prob(qother_2).item() <= distrib.log_prob(q0_2).item():
raise ValueError("I must not have finished this yet")
# Ok, now deploy it in an inference context
from collections import namedtuple
import torch
import pyro
import pyro.infer
import pyro.optim
import pyro.distributions as dist
from pyro import poutine
from pyro.infer import config_enumerate
import torch.distributions.constraints as constraints
pyro.enable_validation(True)
class DataWrapperForObs:
    """Wrap observation data so that indexing an empty batch yields None.

    When the wrapped array's batch (first) dimension has length 0 the model
    must be running in generative mode, so every index returns None (i.e. no
    observation). Otherwise indexing is forwarded to the underlying data.
    """

    def __init__(self, data):
        self.data = data

    def __getitem__(self, key):
        has_batch = self.data.shape[0] > 0
        return self.data[key] if has_batch else None
# Cobbling together some RBTs ahead of time for convenience
# 2-obj env
print environments["train"][0]
two_box_rbt, _ = psa_utils.build_rbt_from_summary(environments["train"][0])
# 1-obj env
dummy_env = deepcopy(environments["train"][0])
dummy_env["n_objects"] = 1
one_box_rbt, _ = psa_utils.build_rbt_from_summary(dummy_env)
assert(two_box_rbt.get_num_positions() == 12)
assert(one_box_rbt.get_num_positions() == 6)
rbts = [one_box_rbt, two_box_rbt]
ik_constraint = [diff_nlp.object_at_specified_pose_constraint_constructor_factory(
0, np.array([0., 0., 0.5, 0., 0., 0.]), np.array([1., 1., 0.5, 0., 0., 2*np.pi]))]
ik_constraint_2 = ik_constraint + [
diff_nlp.object_at_specified_pose_constraint_constructor_factory(
1, np.array([0., 0., 0.5, 0., 0., 0.]), np.array([1., 1., 0.5, 0., 0., 2*np.pi]))]
ik_constraints = [ik_constraint, ik_constraint_2]
def expand_partial_pose_to_full_pose(this_pose):
    """Expand (N, 3) planar poses [x, y, theta] into (N, 6) full poses laid
    out as [x, y, 0, 0, 0, theta]; None is passed through unchanged."""
    if this_pose is None:
        return None
    n_poses = this_pose.shape[0]
    full = torch.zeros(n_poses, 6, requires_grad=False, dtype=this_pose.dtype)
    # Scatter the planar triple into the x, y and yaw slots of the full pose.
    full[:, [0, 1, 5]] = this_pose
    return full
def collapse_full_pose_to_partial_pose(pose):
    """Collapse (N, 6) full poses [x, y, z, r, p, yaw] into (N, 3) planar
    poses [x, y, yaw]; None is passed through unchanged."""
    if pose is None:
        return None
    reduced = torch.zeros(pose.shape[0], 3, requires_grad=False, dtype=pose.dtype)
    reduced[:, :2] = pose[:, :2]
    reduced[:, 2] = pose[:, 5]
    return reduced
# Sample an exactly 2-object environment
# "data" is either None (run in generative mode),
# or is n_batches x 6 (environment pose vectors)
N_OBJECTS = 2
def model(data = torch.Tensor(0, 12), use_projection=True, subsample_size=None,
          noisy_projection=False, use_amortization=True):
    """Pyro generative model over exactly-2-object scene arrangements.

    Samples a shared box-pose prior (mean and variance), then for each of the
    N_OBJECTS boxes samples a pre-projection pose and — when use_projection —
    pushes it through the IK-based projection-to-feasibility distribution.

    Args:
        data: (n_batches, 12) tensor of observed environment pose vectors;
            a zero-row tensor runs the model unconditioned (generative mode).
        use_projection: sample box poses via the NLP projection operator
            rather than directly from the pose prior.
        subsample_size: unused here; accepted for signature parity with guide.
        noisy_projection: forwarded to the projection distribution.
        use_amortization: unused here; accepted for signature parity with guide.

    Returns:
        (batch, 3*N_OBJECTS) tensor of generated planar poses [x, y, theta].
    """
    if data.shape[1] != 12:
        raise ValueError("Expected data to have n_batches x 12 shape.")
    # Object placement distribution is shared across objects
    # (maybe switch this to Normal-inverse-Wishart distribution, if supported?)
    # Normal distributions are specified by standard deviation (scale),
    # but multivariate normals by covariance matrix. :P
    box_mean = pyro.sample('box_mean', dist.Normal(
        torch.Tensor([0.5, 0.5, np.pi]), torch.Tensor([0.5, 0.5, np.pi/2.])).to_event(1))
    box_var = pyro.sample('box_var', dist.LogNormal(
        torch.Tensor([0.0, 0.0, 0.0]), torch.Tensor([2.0, 2.0, 2.0])).to_event(1))
    box_dist = dist.MultivariateNormal(box_mean, torch.diag(box_var))
    # At least one (dummy) batch element is needed in generative mode.
    data_batch_size = max(1, data.shape[0])
    with pyro.plate('data', size=data_batch_size) as ind:
        generated_data = torch.zeros(ind.shape[0], 3*N_OBJECTS)
        if data.shape[0] > 0:
            data_obs = DataWrapperForObs(data[ind, :])
        else:
            # Empty batch: the wrapper returns None for every index,
            # so sample sites below run unobserved.
            data_obs = DataWrapperForObs(data[:, :])
        last_scene_configs = []
        for k in range(N_OBJECTS):
            if use_projection:
                new_pose = pyro.sample("box_%d_pre_pose" % k, box_dist)
                # Previously placed objects are held fixed during projection.
                q0_fixed = None
                if len(last_scene_configs) > 0:
                    q0_fixed = torch.cat(last_scene_configs)
                #projection_dist = dist.Normal(new_pose, 0.01*torch.ones(new_pose.shape)).to_event(1)
                #projected_pose = pyro.sample("box_%d_pose" % k, projection_dist,
                #                             obs=collapse_full_pose_to_partial_pose(data_obs[:, (k*6):(k*6+6)]))
                #generated_data[:, (k*3):(k*3+3)] = projected_pose[:, :]
                #last_scene_configs.append(projected_pose)
                #continue
                #TODO: OK, this problem is that this is sampling in 6D, while the guide
                # is sampling in 3D, so the densities are too different.
                # Yuck
                projection_dist = diff_nlp.ProjectToFeasibilityWithIKAsDistribution(
                    rbts[k], expand_partial_pose_to_full_pose(new_pose),
                    ik_constraints[k], 0.05, 0.01, noisy_projection=noisy_projection,
                    q0_fixed=q0_fixed, event_select_inds=torch.tensor([0, 1, 5]))
                projected_pose = pyro.sample("box_%d_pose" % k, projection_dist,
                                             obs=collapse_full_pose_to_partial_pose(data_obs[:, (k*6):(k*6+6)]))
                generated_data[:, (k*3):(k*3+3)] = projected_pose[:, :]
                last_scene_configs.append(expand_partial_pose_to_full_pose(projected_pose))
            else:
                # No projection: observe/sample the planar pose directly from the prior.
                projected_pose = pyro.sample("box_%d_pose" % k, box_dist, obs=data_obs[:, [k*6, k*6+1, k*6+5]])
                generated_data[:, (k*3):(k*3+3)] = projected_pose[:, :]
    return generated_data
H = 10
inference_module = torch.nn.Sequential(
torch.nn.Linear(6, H),
torch.nn.ReLU(),
torch.nn.Linear(H, 3),
)
def guide(data = torch.Tensor(1, 12), use_projection=True, subsample_size=None,
          noisy_projection=False, use_amortization=True):
    """Pyro variational guide matching `model`.

    Learns point/Normal estimates of the box pose prior parameters, and —
    when use_projection — an (optionally amortized) posterior over each
    box's pre-projection pose.

    Args:
        data: (n_batches, 12) observed environment pose vectors; must be
            non-empty (the guide is only meaningful when conditioning).
        use_projection: mirror of the model flag; enables pre-pose sites.
        subsample_size: minibatch size for the data plate (None = full batch).
        noisy_projection: unused here; accepted for signature parity with model.
        use_amortization: predict pre-poses with the shared inference_module
            network instead of per-datapoint free parameters.
    """
    if data.shape[1] != 12:
        raise ValueError("Expected data to have n_batches x 12 shape.")
    if data.shape[0] == 0:
        raise ValueError("Data must have at least one entry.")
    # Intentionally wrong initialization (so SVI has something to learn).
    est_box_mean = pyro.param('auto_box_mean', torch.tensor([0.25, 0.25, np.pi/2.]))
    est_box_mean_var = pyro.param('auto_box_mean_var', torch.tensor([0.1, 0.1, 0.1]),
                                  constraint=constraints.positive)
    est_box_var = pyro.param('auto_box_var', torch.tensor([0.1, 0.1, 0.5]),
                             constraint=constraints.positive)
    box_mean = pyro.sample("box_mean", dist.Normal(est_box_mean, est_box_mean_var).to_event(1))
    # Delta: box_var is treated as a point estimate rather than a distribution.
    box_var = pyro.sample("box_var", dist.Delta(est_box_var).to_event(1))
    if use_projection:
        box_projection_var = pyro.param("auto_box_proj_var", torch.tensor([0.1, 0.1, 0.1]),
                                        constraint=constraints.positive)
        if use_amortization:
            # Register the amortization network's parameters with pyro.
            pyro.module("inference_module", inference_module)
        else:
            # One free 3-dof pre-pose parameter per datapoint and object.
            box_pre_poses = []
            for k in range(N_OBJECTS):
                box_pre_poses.append(pyro.param('auto_box_%d_pre_poses' % k,
                                                torch.randn(data.shape[0], 3)*0.5 + 0.5))
    with pyro.plate('data', size=data.shape[0], subsample_size=subsample_size) as ind:
        for k in range(N_OBJECTS):
            if use_projection:
                if use_amortization:
                    #print "Input: ", data[ind, (k*6):(k*6+6)]
                    #print "Output base ", collapse_full_pose_to_partial_pose(
                    #    data[ind, (k*6):(k*6+6)])
                    #print "Output residual: ", inference_module(data[ind, (k*6):(k*6+6)])
                    # Predicted pre-pose = observed pose + learned residual.
                    predicted_pre_pose_base = collapse_full_pose_to_partial_pose(
                        data[ind, (k*6):(k*6+6)])
                    predicted_pre_pose_residual = inference_module(data[ind, (k*6):(k*6+6)])
                    # Normal vs delta choice here makes a big impact on the resulting
                    # ELBO scaling. Having all deltas means the guide LL is always 0 (since
                    # when we sample forward through the guide, we'll deterministically
                    # pick the same thing with prob 1). Having normal adds noise to the
                    # guide distrib + gives the guide positive LL, which decreases the ELBO
                    # and increases the loss. Here I'm currently picking a normal distrib
                    # that is the same rough scale as the projection distribution,
                    # to make the ELBO sit right above zero. But this is super arbitrary?
                    new_pose = pyro.sample(
                        "box_%d_pre_pose" % k,
                        dist.Normal(predicted_pre_pose_base + predicted_pre_pose_residual,
                                    box_projection_var).to_event(1))
                else:
                    new_pose = pyro.sample(
                        "box_%d_pre_pose" % k,
                        dist.Normal(box_pre_poses[k][ind, :],
                                    box_projection_var).to_event(1))
# Smoke tests: trace the model/guide and make sure log-probs evaluate
# without shape errors. (Python 2 print statements; notebook targets py2.)
pyro.clear_param_store()
trace = poutine.trace(model).get_trace()
trace.compute_log_prob()
print "MODEL WITH NO ARGS"
#print(trace.format_shapes())
# Same check with concrete data (10 envs x 12 dims), subsampling and
# amortization enabled.
pyro.clear_param_store()
trace = poutine.trace(model).get_trace(torch.zeros(10, 12), use_projection=True, subsample_size=5, use_amortization=True)
trace.compute_log_prob()
print "MODEL WITH ARGS"
#print(trace.format_shapes())
# And the guide under the same arguments.
pyro.clear_param_store()
trace = poutine.trace(guide).get_trace(torch.zeros(10, 12), use_projection=True, subsample_size=5)
trace.compute_log_prob()
print "GUIDE WITH ARGS"
#print(trace.format_shapes())
def vectorize_environments(envs):
    """Vectorize a list of environment dicts into an (n_envs, 12) tensor.

    Each environment contributes 2 objects ("obj_0000", "obj_0001"); each
    object occupies a 6-wide slot laid out as (x, y, 0, 0, 0, theta): the
    object's 3-element pose is scattered into slot positions 0, 1 and 5.
    """
    n_objects = 2
    # torch.zeros gives an already-initialized buffer (the original used
    # uninitialized torch.Tensor storage followed by a manual zero fill).
    poses = torch.zeros(len(envs), n_objects * 6)
    for i, env in enumerate(envs):
        for k in range(n_objects):
            obj = env["obj_%04d" % k]
            # Advanced-index assignment on the slice view writes through to
            # the underlying storage.
            poses[i, (k*6):(k*6+6)][[0, 1, 5]] = torch.Tensor(obj["pose"])
    return poses
def devectorize_environments(data):
    """Convert a 2-D tensor back into a list of environment dicts.

    Reads 3 consecutive values per object at stride 3 from each row and
    emits {"obj_0000": ..., "obj_0001": ..., "n_objects": 2} per row.

    NOTE(review): vectorize_environments packs objects at stride 6 with the
    pose scattered into slots [0, 1, 5]; this reads stride-3 slices instead,
    which only round-trips if `data` uses a 3-per-object layout — confirm
    against the model's sample format.
    """
    envs = []
    for i in range(data.shape[0]):
        # n_objects is constant, so set it once instead of per-iteration
        # (the original also carried an unused local `n_objects = 0`).
        env = {"n_objects": 2}
        for k in range(2):
            env["obj_%04d" % k] = {
                "pose": data[i, (k*3):(k*3+3)].cpu().detach().numpy(),
                "class": "small_box"
            }
        envs.append(env)
    return envs
# Make sure we can run model + condition on this dataset
model(vectorize_environments(environments["train"]), subsample_size=20, use_projection=True)
# Some statistics over what we expect of environment mean + var
data = vectorize_environments(
    environments["train"] + environments["valid"] + environments["test"]
).detach().numpy()
print "Mean postures: ", np.mean(data, axis=0)
print "std postures: ", np.std(data, axis=0)
print "log-std postures: ", np.log(np.std(data, axis=0))
print "var postures: ", np.square(np.std(data, axis=0))
plt.figure().set_size_inches(12, 12)
print "Selection of environments from prior / generative model"
N = 5
# Draw an N x N grid of prior samples, conditioning the model on broad
# hand-picked mean/variance so the samples spread over the workspace.
for i in range(N):
    for j in range(N):
        plt.subplot(N, N, i*N+j+1)
        draw_environment(devectorize_environments(
            poutine.condition(model, data={"box_var": torch.Tensor([0.25, 0.25, 3.1415]),
                                           "box_mean": torch.Tensor([0.5, 0.5, 3.1415])})())[0], plt.gca())
        plt.grid(True)
plt.tight_layout()
```
Notes on SVI performance:
Using noisy projection didn't seem to have an important impact on SVI.
```
# Rig for SVI.
from collections import defaultdict
from torch.distributions import constraints
from pyro.infer import Trace_ELBO, SVI
from pyro.contrib.autoguide import AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal
import pyro.optim
import torch.optim
# Validate distribution args/supports at runtime (catches shape bugs early).
pyro.enable_validation(True)
# Global params whose values are snapshotted every SVI iteration.
interesting_params = ["auto_box_mean", "auto_box_var", "auto_box_mean_var", "auto_box_proj_var"]
def select_interesting(guide):
    # Snapshot the tracked global parameters as detached numpy arrays.
    return {name: pyro.param(name).cpu().detach().numpy()
            for name in interesting_params}
pyro.clear_param_store()
# Autoguides were tried and abandoned in favor of the hand-written guide.
#global_guide = AutoDelta(
# poutine.block(model, hide=["obs"]))
# #poutine.block(model, expose=interesting_params))
# Pass the optimizer *class* (not an instance); pyro instantiates per-param.
optimizer = torch.optim.Adam
def per_param_args(module_name, param_name):
    """Per-parameter optimizer settings: a lower learning rate for the
    amortized inference network, a higher one for everything else."""
    lr = 0.01 if module_name == 'inference_module' else 0.1
    return {'lr': lr, 'betas': [0.9, 0.99]}
# StepLR: cut all learning rates by 4x every 100 SVI steps.
scheduler = pyro.optim.StepLR(
    {"optimizer": optimizer,
     'optim_args': per_param_args,
     'gamma': 0.25, 'step_size': 100})
# Multi-particle ELBO reduces gradient variance; plate nesting depth is 1.
elbo = Trace_ELBO(max_plate_nesting=1, num_particles=4)
svi = SVI(model, guide, scheduler, loss=elbo)
losses = []
losses_valid = []
data = vectorize_environments(environments["train"])
data_valid = vectorize_environments(environments["valid"])
use_amortization=True
snapshots = {}  # per-parameter history of values across SVI iterations
start_time = time.time()
avg_duration = None
num_iters = 301
for i in range(num_iters):
    # One SVI gradient step on a random minibatch of 25 training envs.
    loss = svi.step(data, subsample_size=25, use_projection=True, use_amortization=use_amortization)
    losses.append(loss)
    # Track validation ELBO (no gradient step) on a 50-env subsample.
    loss_valid = svi.evaluate_loss(data_valid, subsample_size=50, use_projection=True, use_amortization=use_amortization)
    losses_valid.append(loss_valid)
    for p in interesting_params:
        if p not in snapshots.keys():
            snapshots[p] = []
        snapshots[p].append(pyro.param(p).cpu().detach().numpy().copy())
    elapsed = time.time() - start_time
    # Exponential moving average of per-iteration wall time.
    if avg_duration is None:
        avg_duration = elapsed
    else:
        avg_duration = avg_duration*0.9 + elapsed*0.1
    start_time = time.time()
    if (i % 10 == 0):
        print "Loss %f (%f), Per iter: %f, To go: %f" % (loss, loss_valid, elapsed, (num_iters - i)*elapsed)
    if (i % 50 == 0):
        print select_interesting(guide)
print "Done"
num_additional_iters = 501
# Continue training with a larger subsample; no validation tracking here.
for i in range(num_additional_iters):
    loss = svi.step(data, subsample_size=50, use_projection=True, use_amortization=use_amortization)
    losses.append(loss)
    for p in interesting_params:
        if p not in snapshots.keys():
            snapshots[p] = []
        snapshots[p].append(pyro.param(p).cpu().detach().numpy().copy())
    elapsed = time.time() - start_time
    # Exponential moving average of per-iteration wall time.
    if avg_duration is None:
        avg_duration = elapsed
    else:
        avg_duration = avg_duration*0.9 + elapsed*0.1
    start_time = time.time()
    if (i % 10 == 0):
        print "Loss %f, Per iter: %f, To go: %f" % (loss, elapsed, (num_additional_iters - i)*elapsed)
    if (i % 50 == 0):
        print select_interesting(guide)
print "Done"
import scipy as sp
import scipy.ndimage
start_epoch = 0
plt.figure(figsize=(10,3), dpi=100).set_facecolor('white')
# NOTE(review): the "- np.min(...)*0" terms are no-ops left over from a
# range-shifting experiment; their only effect is coercing the Python
# lists into numpy arrays.
range_fixed_losses = losses[start_epoch:] - np.min(losses[start_epoch:])*0
range_fixed_losses_valid = losses_valid[start_epoch:] - np.min(losses_valid[start_epoch:])*0
plt.plot(range_fixed_losses, label="Loss")
plt.plot(range_fixed_losses_valid, label="Loss Valid")
plt.xlabel('iters')
plt.ylabel('loss')
#plt.ylim(-12500, 20000)
plt.yscale('log')
def smooth(y, box_pts):
    """Smooth `y` with a crude, normalized Gaussian-ish kernel of width box_pts."""
    offsets = np.arange((-box_pts) // 2 + 1., box_pts // 2 + 1.)
    weights = np.exp(-offsets**2 / (box_pts**2))
    weights = weights / np.sum(weights)
    # 'nearest' replicates edge values so the curve ends are not pulled to 0.
    return sp.ndimage.convolve(y, weights, mode='nearest')
# Overlay smoothed loss curves and report the best smoothed validation loss.
plt.plot(smooth(range_fixed_losses, 20)[start_epoch:], label="Smoothed Loss")
smoothed_range_fixed_losses_valid = smooth(range_fixed_losses_valid, 20)[start_epoch:]
plt.plot(smoothed_range_fixed_losses_valid, label="Smoothed Loss Valid")
plt.title('Convergence of SVI. Min smoothed valid loss: %f' % np.min(smoothed_range_fixed_losses_valid));
plt.legend()
plt.figure(figsize=(10,6), dpi=100).set_facecolor('white')
keys = ["auto_box_mean", "auto_box_var", "auto_box_mean_var", "auto_box_proj_var"]
fit_vals = {}
n_plots = len(keys)
# One row per tracked parameter, one column per coordinate; the converged
# estimate is the mean over the last 100 iterations.
for i, key in enumerate(keys):
    data_across_epochs = np.vstack(snapshots[key])
    for k in range(3):
        plt.subplot(n_plots, 3, i*3+k+1)
        plt.plot(np.arange(start_epoch, data_across_epochs.shape[0]), data_across_epochs[start_epoch:, k])
        plt.xlabel('epoch')
        plt.ylabel(key + " %d" % k)
        plt.title("Last 100 avg: %f" % data_across_epochs[-100:, k].mean())
    fit_vals[key] = data_across_epochs[-100:].mean(axis=0)
plt.tight_layout()
from matplotlib.patches import Ellipse
plt.figure().set_size_inches(12, 12)
# Compare fitted (black) vs empirical validation-set (red) mean/variance
# ellipses, drawn over a single object placed at its fitted mean.
data = vectorize_environments(environments["valid"]).detach().numpy()
real_vals = {}
real_vals["box_mean"] = np.mean(data, axis=0)
real_vals["box_var"] = np.square(np.std(data, axis=0))
env = {"n_objects": 1}
for k in range(1):
    object_name = "box"
    env["obj_%04d" % k] = {"class": "small_box", "pose": fit_vals["auto_" + object_name + "_mean"]}
# One object of each class, right at its mean
draw_environment(env, plt.gca())
# And draw ellipses
object_name = "box"
var = fit_vals["auto_" + object_name + "_var"]
mean = fit_vals["auto_" + object_name + "_mean"]
lambda_ = np.sqrt(var)  # per-axis standard deviation
ell = Ellipse(xy=mean[0:2],
              width=lambda_[0], height=lambda_[1],
              color='black')
ell.set_facecolor('none')
plt.gca().add_artist(ell)
var = real_vals[object_name + "_var"]
mean = real_vals[object_name + "_mean"]
lambda_ = np.sqrt(var)
ell = Ellipse(xy=mean[0:2],
              width=lambda_[0], height=lambda_[1],
              color='red')
ell.set_facecolor('none')
plt.gca().add_artist(ell)
plt.show()
plt.figure().set_size_inches(12, 12)
print "Selection of example environments, with our understanding of the pre-projection locations labeled"
N = 5
data = vectorize_environments(environments["valid"])
# Without amortization, pre-projection poses live in per-datapoint params.
if not use_amortization:
    pre_poses = [pyro.param("auto_box_%d_pre_poses" % k).cpu().detach().numpy() for k in range(2)]
for i in range(N):
    for j in range(N):
        env_i = i*N+j+1
        plt.subplot(N, N, env_i)
        draw_environment(environments["valid"][env_i], plt.gca())
        for k in range(2):
            if use_amortization:
                # Amortized: inference net predicts the pre-pose from data.
                mean = inference_module(data[env_i, (6*k):(6*k+6)])
            else:
                mean = pre_poses[k][env_i, 0:2]
            ell = Ellipse(xy=mean[0:2],
                          width=0.05, height=0.05,
                          color='yellow')
            ell.set_facecolor('none')
            plt.gca().add_artist(ell)
plt.tight_layout()
plt.figure().set_size_inches(12, 12)
print "Selection of environments from prior / generative model"
N = 5
# Sample from the generative model conditioned on the *fitted* parameters.
for i in range(N):
    for j in range(N):
        plt.subplot(N, N, i*N+j+1)
        sample = poutine.condition(model, data={
            "box_mean": pyro.param("auto_box_mean"),
            "box_var": pyro.param("auto_box_var")})()
        draw_environment(devectorize_environments(sample)[0], plt.gca())
        plt.grid(True)
plt.tight_layout()
# MCMC as an alternative inference mechanism
from pyro.infer.mcmc import HMC, MCMC, NUTS
from pyro.infer import EmpiricalMarginal
hmc_kernel = NUTS(model)
# Struggles a *lot* with larger sample sizes. 20-50 seems like reasonable range.
# Noisy projection doesn't seem critical...
# From a few runs, it looks like the empirical mean estimates are better
# when I'm using more samples and smaller subsample size. But I suspect
# that's not really very true, as it generally varies significantly run-to-run
# for ~500 sample territory.
# But when the mean estimate is bad, the variance estimate is really high --
# so the empirical distribution over sample locations still looks great. Beyond
# the power of having projection in the loop...
# I'll need to try this on a "harder" distribution...
# Update: looks like I broke this with recent changes, which include
# switching to a multivariate normal prior for the box center prior, and switching
# on the increased logprob penalty for samples out in infeasible space... oops...
# I'll stick with SVI...
mcmc_run = MCMC(hmc_kernel, num_samples=900, warmup_steps=100).run(data, subsample_size=10, use_projection=True, noisy_projection=False
)
# Summarize posterior marginals over the global box mean/variance.
posterior_box_mean = EmpiricalMarginal(mcmc_run, 'box_mean')
print "Box mean, empirical: ", posterior_box_mean.mean, " +/- ", posterior_box_mean.variance
posterior_box_var = EmpiricalMarginal(mcmc_run, 'box_var')
print "Box var, empirical: ", posterior_box_var.mean, " +/- ", posterior_box_var.variance
print "I don't trust those variance estimates..."
def plot_empirical_marginal(ax, empirical_marginal, subind_x, subind_y):
    """Scatter two coordinates of an EmpiricalMarginal's samples on `ax`.

    Fix: draw on the axis that was passed in instead of the implicit
    current axes. Callers in this notebook pass plt.gca(), so behavior is
    unchanged there, but the function now honors any axis it is given.
    """
    datapoints = empirical_marginal._samples.detach().numpy()
    ax.scatter(datapoints[:, :, subind_x].flatten(), datapoints[:, :, subind_y].flatten())
plt.show()
# Scatter the sampled (x, y) marginals for both scene poses.
plt.figure()
plt.subplot(2, 1, 1)
plot_empirical_marginal(plt.gca(), EmpiricalMarginal(mcmc_run, 'scene_0_pose'), 0, 1)
plt.title("Scene 0 pose samples")
plt.subplot(2, 1, 2)
plt.title("Scene 1 pose samples")
plot_empirical_marginal(plt.gca(), EmpiricalMarginal(mcmc_run, 'scene_1_pose'), 0, 1)
plt.tight_layout();
```
| github_jupyter |
# Undertale & Deltarune Soundtrack Generator
---
## Table of Contents
0. [**Table of Contents**](#Table-of-Contents)
1. [**Imports**](#Imports)
2. [**Data Processing**](#Data-Processing)
2.1 [Data Loading](#Data-Loading)
2.2 [Data Preprocessing](#Data-Preprocessing)
2.3 [Dataset & Dataloader Definition](#Dataset-&-Dataloader-Definition)
3. [**Model Definition**](#Model-Definition)
4. [**Hyperparameters & Instantiation**](#Hyperparameters-&-Instantiation)
5. [**Training**](#Training)
---
## Imports
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
Import required packages
```
import os # File handling
import itertools # chain() for merging lists
import random # Shuffling
import collections # Useful tools like Counter, OrderedDict
import math # For... math
from decimal import Decimal # Scientific notations in string formatting
from time import time # For use in progress bar
import tqdm.auto as tqdm # Progress bar
from IPython.display import clear_output
import torch # Deep Learning Framework
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt # Plotting training progress
from matplotlib.ticker import AutoLocator
%matplotlib inline
fig_bg_color = "lightsteelblue"
plot_bg_color = "slategray"
fontsize = 20
```
---
## Data Processing
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
### Data Loading
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
Read the text files in the target directory.
Do some processing to make sure the texts are clean.
```
def get_texts(texts_dir):
    """Read every .txt file under `texts_dir`.

    Returns {file_path: text} where each text has been stripped, validated
    to contain only digits and spaces, and had runs of whitespace collapsed
    to single spaces.

    Raises:
        FileNotFoundError: `texts_dir` is not a directory.
        RuntimeError: a file contains characters other than digits/spaces.
    """
    if not os.path.isdir(texts_dir):
        raise FileNotFoundError("given text directory not found: {}".format(texts_dir))
    texts = {}
    for entry in os.scandir(texts_dir):
        if not (entry.is_file() and entry.name.endswith(".txt")):
            continue
        with open(file=entry.path, mode='r', encoding="utf-8") as text_file:
            text = text_file.read().strip()
        if not text.replace(' ', '').isdigit():
            raise RuntimeError("one or more characters other than digits and white spaces are detected: {}".format(entry.path))
        # FIX: collapse whitespace runs in one pass. The original
        # `while " " in text: text = text.replace(" ", ' ')` (as written,
        # with single-space patterns) replaces a space with a space and
        # never terminates once any space is present.
        text = ' '.join(text.split())
        texts[entry.path] = text
    return texts
[(os.path.split(text_path)[1], text[:20]) for text_path, text in get_texts("./source/converted_texts").items()]
```
### Data Preprocessing
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
Get integers out of the text and make lists of ints.
These lists can be used for the input of the models, or be further processed to compress or simplify the sequences.
In this notebook, I'll leave the data as it is and do note-by-note. (Similar to Character-By-Character approach)
```
def texts_to_intlists(text_list):
    """Parse each space-separated digit string in `text_list` into a list of ints.

    Raises RuntimeError naming the offending text index when any token is
    not purely digits.
    """
    parsed = []
    for idx, text in enumerate(text_list):
        tokens = text.split(' ')
        for token in tokens:
            if not token.isdigit():
                raise RuntimeError("non-digit string detected in text {}".format(idx))
        parsed.append([int(token) for token in tokens])
    return parsed
print([ints[:10] for ints in texts_to_intlists(get_texts("./source/converted_texts").values())])
```
### Dataset & Dataloader Definition
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
Create a Dataset class from which training data can be sampled.
This Dataset should convert the encoded sequence above into tensors
and have a method for shuffling the order of multiple sequences while
leaving the patterns inside of each sequence untouched.
```
class UndertaleDeltaruneDataset(Dataset):
    """Dataset over the concatenated note sequences of every track.

    Each __getitem__ call shuffles the *order of tracks* (leaving the notes
    inside each track untouched), flattens them into one long sequence, and
    returns (inputs, labels) where labels are inputs shifted by one step.
    """

    def __init__(self, texts_dir, batch_size=1):
        # {file_path: text_content} for every .txt track in the directory.
        self.texts = get_texts(texts_dir)
        self.sequences = texts_to_intlists(self.texts.values())
        self.texts_dir = texts_dir
        self.batch_size = batch_size

    def __len__(self):
        # One "item" per batch slot; every item is an independent reshuffle.
        return self.batch_size

    def data_len(self):
        # Total number of notes across all tracks.
        return sum(len(seq) for seq in self.sequences)

    def __getitem__(self, index):
        # random.sample of the full list = a permutation of the tracks.
        shuffled = list(itertools.chain(*random.sample(self.sequences, len(self.sequences))))
        return torch.LongTensor(shuffled[:-1]), torch.LongTensor(shuffled[1:])
```
Create a custom class that loads the data from the dataset above and
allows iteration over the dataset, yielding a small sequence batch at a time.
```
class UDBatchLoader:
    """Iterates a UndertaleDeltaruneDataset as consecutive fixed-length
    sequence chunks, so one pass walks once through the full concatenated
    (track-shuffled) note sequence.

    The whole epoch's data is drawn once per generator() call via a single
    DataLoader batch; chunks are then slices (views) into that batch.
    """
    def __init__(self, ud_dataset, batch_size, sequence_len, drop_last=False, batch_first=True):
        self.ud_dataset = ud_dataset
        self.batch_size = batch_size
        self.sequence_len = sequence_len
        self.drop_last = drop_last
        self.batch_first = batch_first
    def __len__(self):
        # Number of sequence chunks per epoch; the "-1" accounts for the
        # one-step input/label offset done inside the dataset.
        if self.drop_last:
            return math.floor((self.ud_dataset.data_len() - 1) / self.sequence_len)
        return math.ceil((self.ud_dataset.data_len() - 1) / self.sequence_len)
    def generator(self):
        seq_len = self.sequence_len
        n_seq_batches = self.__len__()
        batch_first = self.batch_first
        # One full epoch of inputs/labels in a single (batch, total_len) pull.
        input_batch, target_batch = next(iter(DataLoader(self.ud_dataset, self.batch_size)))
        if not batch_first:
            # Time-major layout: (seq, batch).
            input_batch = input_batch.transpose(0, 1).contiguous()
            target_batch = target_batch.transpose(0, 1).contiguous()
        # Paired (start, end) indices for each seq_len-wide window over the
        # time dimension; zip stops after the shorter range is exhausted.
        for start, end in zip(range(0, seq_len * n_seq_batches, seq_len), range(seq_len, (seq_len + 1) * n_seq_batches, seq_len)):
            if batch_first:
                yield (input_batch[:, start:end].contiguous(), target_batch[:, start:end].contiguous())
            else:
                yield (input_batch[start:end], target_batch[start:end])
    def __iter__(self):
        return self.generator()
```
---
## Model Definition
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
Define the model architectures.
```
class UDNet(nn.Module):
    """Embedding -> multi-layer LSTM -> 3-stage MLP head over 129 note tokens.

    The initial hidden and cell states are *learned* parameters, tiled
    across the batch dimension at run time.
    """

    def __init__(self, hidden_size, num_layers, dropout):
        super(UDNet, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        # Learned initial recurrent state, shape (num_layers, 1, hidden).
        self.init_hiddens = nn.Parameter(torch.randn(num_layers, 1, hidden_size))
        self.init_cells = nn.Parameter(torch.randn(num_layers, 1, hidden_size))
        self.embed = nn.Embedding(num_embeddings=129, embedding_dim=hidden_size)
        self.lstm = nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
                            num_layers=num_layers, dropout=dropout, batch_first=True)
        # Classifier head: hidden -> 256 -> 512 -> 129 logits.
        self.fc0 = nn.Sequential(
            nn.LayerNorm(hidden_size),
            nn.Dropout(p=dropout),
            nn.Linear(in_features=hidden_size, out_features=256),
        )
        self.fc1 = nn.Sequential(
            nn.ReLU(),
            nn.LayerNorm(256),
            nn.Dropout(p=dropout),
            nn.Linear(in_features=256, out_features=512),
        )
        self.fc2 = nn.Sequential(
            nn.ReLU(),
            nn.LayerNorm(512),
            nn.Dropout(p=dropout),
            nn.Linear(in_features=512, out_features=129),
        )

    def forward(self, x, hiddens=None):
        """Return (logits, new_hiddens) for a (batch, seq) LongTensor of tokens."""
        if hiddens is None:
            hiddens = self.get_init_hiddens(x.size(0))
        embedded = self.embed(x)
        lstm_out, new_hiddens = self.lstm(embedded, hiddens)
        logits = self.fc2(self.fc1(self.fc0(lstm_out)))
        return logits, new_hiddens

    def get_init_hiddens(self, n_batches):
        """Tile the learned initial (h, c) states across the batch dimension."""
        return [state.repeat(1, n_batches, 1)
                for state in (self.init_hiddens, self.init_cells)]
```
---
## Training
[(go to top)](#Undertale-&-Deltarune-Soundtrack-Generator)
```
# ----- Hyperparameters and model/optimizer instantiation -----
seed = 0
batch_size = 4
sequence_length = 12800  # notes per chunk fed to each gradient step
lr = 1e-3
factor = 0.5   # ReduceLROnPlateau decay factor
patience = 5   # epochs without improvement before LR decay
n_logs = 30    # how many recent epochs to show in the console log
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
random.seed(seed)
torch.manual_seed(seed)
ud_dataset = UndertaleDeltaruneDataset("./source/converted_texts", batch_size)
ud_loader = UDBatchLoader(ud_dataset, batch_size, sequence_length, drop_last=True, batch_first=True)
model = UDNet(hidden_size=256, num_layers=10, dropout=0.2).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=factor, patience=patience, verbose=True)
print()
print('Data Sequence Total Length:', ud_dataset.data_len())
print()
print(model)
model.train()
logs = {'epoch': [], 'lr':[], 'loss_avg': [], 'acc_1': [], 'acc_5': []}
i_epoch = 0
# Train indefinitely; checkpoints are written every 10 epochs.
while True:
    hiddens = model.get_init_hiddens(batch_size)
    running_loss = 0
    n_top1_corrects = 0
    n_top5_corrects = 0
    n_instances = 0
    for i, (inputs, labels) in enumerate(ud_loader):
        print("{:d}/{:d}".format(i, len(ud_loader)-1), end='\r')
        inputs = inputs.to(device)
        labels = labels.view(-1).to(device)
        outputs, hidden_states = model(inputs, hiddens)
        outputs = outputs.view(-1, outputs.size(-1))
        # FIX: carry the NEW recurrent state (detached) into the next chunk.
        # The original detached the stale `hiddens`, so every chunk restarted
        # from the learned initial state and the LSTM never saw context
        # across chunk boundaries (truncated BPTT was effectively disabled).
        hiddens = [hidden_states[0].detach(), hidden_states[1].detach()]
        loss = F.cross_entropy(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n_instances += labels.size(0)
        running_loss += loss.item() * labels.size(0)
        # Top-5 membership per position; column 0 is the top-1 hit.
        top5_match = outputs.data.topk(k=5, dim=1)[1].eq(labels.unsqueeze(1))
        n_top1_corrects += top5_match[:, 0].sum().item()
        n_top5_corrects += top5_match.sum().item()
        del top5_match
    loss_avg = running_loss / n_instances
    acc_1 = n_top1_corrects / n_instances
    acc_5 = n_top5_corrects / n_instances
    logs['epoch'].append(i_epoch)
    logs['lr'].append(optimizer.param_groups[0]['lr'])
    logs['loss_avg'].append(loss_avg)
    logs['acc_1'].append(acc_1)
    logs['acc_5'].append(acc_5)
    clear_output(wait=True)
    print('\n\n'.join(["Epoch {:d} - LR={:e}\n===============================================\n".format(i_e, lr)
                       + "Average Loss: {:f}\nAverage Top-1 Accuracy: {:f}\nAverage Top-5 Accuracy: {:f}\n".format(l_a, a_1, a_5)
                       + "==============================================="
                       for i_e, lr, l_a, a_1, a_5 in list(zip(*list(logs.values())))[-n_logs:]]), end='\n\n')
    if (i_epoch + 1) % 10 == 0:
        torch.save({'logs': logs, 'state_dict': model.state_dict(), 'optim_dict': optimizer.state_dict(), 'lr_dict': lr_scheduler.state_dict()},
                   "deep/{:d}.pth".format(i_epoch))
    lr_scheduler.step(loss_avg)
    i_epoch += 1
```
---
| github_jupyter |
For each of the following distributions:
1. --Bernoulli--
2. -Binomial-
3. -Poisson-
4. Gaussian
5. Uniform
6. Beta
A) Read up on what the formula for the probability distribution is and what sorts of problems it is used for
B) use Python, matplotlib and the scipy.stats to plot at least 2 unique parameters(or sets of parameters) for each distribution
C) Write a blog post summarizing A and B above for at least 3 of the distributions, post it online, and provide a link in the comments by 6pm today.
You may use the entire internet to figure out what these distributions do.
**Note**: Here are examples of a sample short blog post here, just for reference:
- (http://muthu.co/poisson-distribution-with-python/)
- (http://hamelg.blogspot.com/2015/11/python-for-data-analysis-part-22.html).
You do not have to follow these models if you don't want to.
Also, you don't have to write the formula, you can just provide your simple graphing code and a reference link to the formula (for example, from wikipedia)
#### from team
Sean: other resources for probability distributions:
- (http://stattrek.com/probability-distributions/probability-distribution.aspx)
- (https://www.khanacademy.org/math/ap-statistics/random-variables-ap/binomial-random-variable/v/binomial-variables)
- (https://www.intmath.com/counting-probability/13-poisson-probability-distribution.php)
- Lingbin Jin Feb 24th at 12:37 PM
http://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/
- Sean Reed Feb 24th at 1:25 PM
- http://stattrek.com/probability-distributions/probability-distribution.aspx
- Sean Reed Feb 24th at 2:01 PM Using scipy.stats module in Python: https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html
- Carolyn Chomik Feb 24th at 4:04 PM
https://medium.com/@carolynjjankowski/3-statistical-distributions-and-examples-of-how-they-are-used-e1c7cbf2134b
- Will Hall Yesterday at 4:14 PM
https://medium.com/@wcgopher/probability-distributions-7ac506dc2115
```
import numpy as np
from scipy.stats import bernoulli, binom, poisson, norm, uniform, beta
import matplotlib.pyplot as plt
def print_mvsk(stats, *_ignored):
    """Pretty-print a scipy.stats `...stats(..., moments='mvsk')` tuple.

    Args:
        stats: 4-tuple of (mean, variance, skew, kurtosis) array scalars,
            as returned by e.g. `bernoulli.stats(p, moments='mvsk')`.

    The standard deviation (sqrt of the variance) is derived and printed too.
    Extra positional arguments are ignored, preserving the old `*args` API.
    """
    mean, var, skew, kurt = (float(v) for v in stats[:4])
    sd = np.sqrt(var)
    print(f'mean:{mean:.4f}\tvar:{var:.4f}\tskew:{skew:.4f}\nsd:{sd:.4f}\tkurt:{kurt:.4f}')
```
# Bernoulli
A probability distribution for a single trial which takes the value 1 (success) or 0 (failure).
```
fig, ax = plt.subplots(1, 1)
# Bernoulli(p = 1/6): winning a fair-die roll.
p = 1/6
x = [0,1]
print_mvsk(bernoulli.stats(p, moments='mvsk'))
data = bernoulli.pmf(x, p)
print(x, data)
ax.vlines(x, 0, data, colors='y', lw=20)
###
plt.ylabel('Probability of winning in dice tos')
plt.xlabel('0 mean prob to lose \n 1 - chances to win')
plt.title('Bernulli Probability Distribution')
plt.grid(True)
plt.show()
# Second parameterization: a fair coin toss (p = 1/2).
p = 1/2
x = [0,1]
fig, ax = plt.subplots(1, 1)
data = bernoulli.pmf(x, p)
ax.vlines(x, 0, data, colors='y', lw=20)
plt.ylabel('Probability of winning in coin tos')
plt.xlabel('0 mean prob to lose \n 1 - chances to win')
plt.title('Bernulli Probability Distribution')
plt.grid(False)
plt.show()
```
### Binomial
A binomial experiment is one that possesses the following properties:
- The events that happens with “Success” or “Failure” results during the Bernoulli trial (испытание).
- The experiment consists of $n$ repeated trials.
- The probability of a success, denoted by $p$, remains constant from trial to trial, and the repeated trials are independent.
Binomial random variable – X in n trials of binomial experiment
The probability distribution of the random variable X is called a binomial distribution, and is given by the formula:
```
fig, ax = plt.subplots(1, 1)
# Binomial(n=100, p=1/3): number of successes in 100 independent trials.
n = 100
p = 1/3
print_mvsk(binom.stats(n, p, moments='mvsk'))
start = binom.ppf(0.000001, n, p)
end = binom.ppf(1, n, p)
x = np.arange(start, end, step=1)
pmf_a = binom.pmf(x, n, p)
print(f'start:{start}\tend:{end}')
ax.plot(x, pmf_a, 'b-', ms=3, label='binom pmf')
plt.ylabel('Prob of Win unfair coin in coin tos')
plt.xlabel('0 mean prob to lose \n 1 - chances to win')
plt.title('Binomial Probability Distribution')
# Highlight P(X = 20) (red dot) and mark the distribution mean (yellow line).
prob20 = binom.pmf([20], n, p)
ax.plot(20, prob20, 'ro', ms=7, label='binom pmf')
mean =int(binom.stats(n, p, moments='m'))
mean_y = binom.pmf([mean], n, p)
ax.vlines(mean, 0, mean_y, colors='y', lw=2)
plt.show()
# NOTE(review): the remainder of this cell is a Poisson(25) traffic example
# (probability of exactly 28 cars), not part of the binomial demo.
plt.ylabel('Probability of car passing')
plt.xlabel('Number of cars')
plt.title('Probability Distribution Curve')
arr = []
rv = poisson(25)
for num in range(0,40):
    arr.append(rv.pmf(num))
#print(rv.pmf(28))
prob = rv.pmf(28)
plt.grid(True)
plt.plot(arr, linewidth=2.0)
plt.plot([28], [prob], marker='o', markersize=6, color="red")
plt.show()
```
### Poisson
Suppose we are counting the number of occurrences of an event in a given unit of time, distance, area or volume.
For example:
- The number of car accidents in a day.
- The number of dandelions in a square meter plot pf land.
Suppose:
- Events are occurring independently
- The probability that an event occurs in a given length of time does not change through time. Events are occurring randomly and independently.
Then X, the number of events in a fixed unit of time, has a Poisson Distribution.
```
fig, ax = plt.subplots(1, 1)
# Poisson(mu = 4.6): event counts in a fixed observation window.
mu = 4.6
print_mvsk(poisson.stats(mu, moments='mvsk'))
# NOTE(review): the next line is a no-op expression (trailing comma makes a
# discarded tuple); it can be removed.
poisson.ppf(0.01, mu),
x = np.arange(poisson.ppf(0.00001, mu),
              poisson.ppf(0.99999, mu))
data = poisson.pmf(x, mu)
# Highlight the pmf(3) bar in blue over the full pmf in red.
data2 = [0]*len(data)
data2[3]= poisson.pmf(3, mu)
ax.vlines(x, 0, data, colors='r', lw=18, alpha=1)
ax.vlines(x, 0, data2, colors='b', lw=18, alpha=1)
# NOTE(review): duplicated call — draws the same blue bar a second time.
ax.vlines(x, 0, data2, colors='b', lw=18, alpha=1)
plt.ylabel('Probability')
plt.xlabel('Number of Decays')
plt.title('Plutonium-239 prob of having 3 decays ')
plt.show()
```
## Normal / Gaussian
A continuous distribution.
(Note: the quote "The beta distribution can be understood as representing a probability distribution of probabilities" describes the *beta* distribution covered below, not the normal distribution.)
Very popular distribution that is used to analyze random variables.
The random independent variables has Normal distribution
```
fig, ax = plt.subplots(1, 1)
# Standard normal N(0, 1).
print_mvsk(norm.stats(moments='mvsk'))
x = np.linspace(norm.ppf(0.00001),
                norm.ppf(0.99999), 1000)
data = norm.pdf(x)
ax.plot(x, data, 'b-', ms=1)
ax.vlines(x, 0, data, colors='r', lw=1, alpha=1)
# 95 % of Normal Dist
x_sigma2 = np.linspace(norm.ppf(0.025),
                       norm.ppf(0.975), 1000)
sigma2 = norm.pdf(x_sigma2)
ax.vlines(x_sigma2,0, sigma2, color='b', lw=1, alpha=.5, label='asd')
# NOTE(review): norm.pdf(1) (~0.242) is used as a *quantile* below, so the
# green band spans roughly +/-0.70 sigma — not the +/-1 sigma (68.3%) the
# "Green=68.7" axis label claims. Verify intent.
p_sigma1 = norm.pdf(1)
x_sigma1 = np.linspace(norm.ppf(p_sigma1),
                       norm.ppf(1-p_sigma1), 1000)
sigma1 = norm.pdf(x_sigma1)
ax.vlines(x_sigma1,0, sigma1, color='g', lw=1, alpha=.5)
plt.ylabel('Prob')
plt.xlabel('Red 100%\nBlue 95%\nGreen=68.7')
plt.show()
```
## Uniform
# Beta distribution
Beta distribution describes the probability of probabilities
A continuous function.
https://stats.stackexchange.com/questions/47916/bayesian-batting-average-prior/47921#47921
http://varianceexplained.org/statistics/beta_distribution_and_baseball/
```
fig, ax = plt.subplots(1, 1)
# Beta(81, 219): batting-average prior centered near 81/(81+219) = 0.27.
a, b = 81, 219
print_mvsk(beta.stats(a, b, moments='mvsk'))
x = np.linspace(beta.ppf(0, a, b),
                beta.ppf(1, a, b), 100)
#print(x)
data = beta.pdf(x, a, b)
ax.plot(x, data,'r-', lw=2, alpha=.8, label='player @ begin')
# Posterior after observing 100 more successes and 100 more failures.
rv = beta(a+100, b+100)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='Player @ end')
#Check accuracy of cdf and ppf:
vals = beta.ppf([0.1, 0.5, 0.999], a, b)
np.allclose([0.1, 0.5, 0.999], beta.cdf(vals, a, b))
#Generate random numbers:
#r = beta.rvs(a, b, size=1000)
#And compare the histogram:
#ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
```
| github_jupyter |
# 텐서플로 기초
텐서플로 패키지를 임포트하여 아무런 에러가 나타나지 않으면 올바르게 설치된 것으로 보아도 됩니다.
```
import tensorflow as tf
```
## 상수
텐서플로는 계산 그래프라고 부르는 자료 구조를 먼저 만들고, 그다음 이를 실행하여 실제 계산을 수행합니다. 따라서 그래프를 만드는 구성(construction) 단계에서는 아무런 값을 얻을 수 없습니다.
다음은 텐서플로의 기본 자료형인 상수(constant)를 하나 만듭니다. c를 출력하면 값 1 대신 텐서(Tensor) 타입의 객체를 출력합니다.
```
# Build a constant node in the default graph; evaluating `c` here displays a
# Tensor object, not the value 1, because no session has run it yet.
c = tf.constant(1)
c
```
보통 딥러닝에서 __'텐서'__ 라고 하면 다차원 배열을 의미합니다. 이는 NumPy의 ndarray()와 거의 동급으로 생각할 수 있습니다. 하지만 사실 tf.Tensor는 다차원 배열이 아니고 그런 배열을 가리키는 포인터에 가깝습니다. 좀 더 기술적으로 설명하면 상수 c의 연산 노드('Const')의 첫 번째 출력(':0')을 가리킨다고 말할 수 있습니다.
두 개의 상수를 더하는 식을 만들어 보겠습니다.
```
# A second constant and an add node; `e` points at the add op's first output.
d = tf.constant(2)
e = c + d
e
```
텐서 e는 덧셈 연산의 첫 번째 출력 'add:0'을 가리키고 있습니다. 그런데 아직 값을 가지고 있지 않으므로 e를 단순한 다차원 배열이라고 말하기가 어렵습니다. 반면 c + d의 계산 결과를 얻기 위해서는 e를 사용하는 것이 맞으므로 흔히 텐서를 다차원 배열로 부릅니다. 보통 텐서, 연산, 연산 노드 등으로 다양하게 혼용하여 부릅니다.
그럼 이제 실제로 만들어진 그래프를 실행해 보겠습니다. 텐서플로 연산을 실행하기 위해서는 세션(Session) 객체를 만들어 run() 메소드를 호출해야 합니다.
```
# A Session executes graph nodes; run(e) also evaluates c and d it depends on.
sess = tf.Session()
sess.run(e)
```
기대했던 3이 출력되었습니다. 연산 e를 계산하기 위해 실행(혹은 평가)하면 이에 의존하는 c와 d도 계산되어 (그래프 구조의 잇점) 덧셈이 일어납니다. 하지만 c와 d는 여전히 그대로 텐서일 뿐 실제 값을 출력하지 않습니다.
```
c, d
```
e와 마찬가지로 c, d의 값을 얻으려면 sess.run() 메소드를 사용해야 합니다. 앞에서는 sess.run() 메소드에 하나의 연산을 넣었지만 여러개의 연산을 한꺼번에 넣을 수도 있습니다. 이 때에는 파이썬 리스트로 만들어 전달합니다. 반환되는 값도 역시 리스트입니다.
```
sess.run([c, d])
```
이렇게 텐서플로는 계산 그래프의 구성(construction)와 실행(execution)이 나뉘어져 있는 것이 특징입니다. 앞서 만든 c, d, e 노드는 기본 계산 그래프에 추가됩니다. 기본 계산 그래프는 텐서플로 패키지를 임포트하면서 바로 사용할 수 있는 전역 그래프입니다.
```
# The global default graph that the ops above were added to.
g = tf.get_default_graph()
g
```
get_operations() 메서드는 그래프에 있는 모든 연산을 반환합니다. 앞서 추가했던 세 개의 연산 노드가 있는 것을 확인할 수 있습니다.
```
g.get_operations()
```
하지만 기본 그래프에 연습용 노드를 계속 추가하는 것은 복잡하고 버그를 발생시키기 쉽습니다. 따라서 기본 그래프 대신에 명시적으로 그래프를 만들어 예제를 진행하도록 하겠습니다. 다음과 같이 tf.Graph 객체를 만들고 파이썬의 with 컨텍스트로 원하는 그래프를 디폴트로 지정합니다.
```
# Create a fresh graph and make it the default inside the with block,
# so the new constant is added there instead of the global graph.
g = tf.Graph()
with g.as_default() as g:
    c = tf.constant(1)
c
```
조금 더 간단히 하려면 그래프를 생성하는 명령을 with 컨텍스트 안으로 집어 넣을 수 있습니다. 이전과 달리 새로운 그래프에 상수 연산이 추가되었으므로 텐서의 이름이 'Const_1'이 되지 않고 계속 'Const'로 유지되는 것을 볼 수 있습니다.
```
# Same, but the graph is created inline; op names restart at 'Const'
# because this is a brand-new graph.
with tf.Graph().as_default() as g:
    c = tf.constant(3)
c
```
세션은 기본 그래프의 연산을 실행하므로 앞서 만든 그래프를 명시적으로 지정해 줄 필요가 있습니다. 앞서와 마찬가지로 매번 세션 객체를 만들어 사용하기 보다는 with 컨텍스트 안에서 사용하는 것이 좋습니다.
```
# Point the session at our explicit graph; the with-context closes it for us.
with tf.Session(graph=g) as sess:
    print(sess.run(c))
```
일반 프로그래밍 언어에서의 의미와 같이 상수는 텐서플로가 계산 그래프를 실행하는 도중에 값이 변하지 않습니다.
상수는 하나의 스칼라 값 뿐만 아니라 다차원 배열도 가능하며 여러가지 데이터 타입(tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64)을 지정할 수 있습니다. 아무 값도 지정하지 않으면 넘겨진 값에 따라 tf.int32, tf.float32가 자동으로 할당됩니다. 만약 데이터 형을 명시적으로 지정하고 싶을 땐 dtype 매개변수를 사용합니다.
```
# Default float dtype is tf.float32; dtype= overrides it explicitly.
with tf.Graph().as_default() as g:
    c = tf.constant(1.0)
    print(c)
with tf.Graph().as_default() as g:
    c = tf.constant(1.0, dtype=tf.float64)
    print(c)
```
상수 배열에 정수와 실수가 섞여 있을 경우 자동으로 실수로 통일됩니다.
```
# Mixing ints and floats in a constant array promotes everything to float.
with tf.Graph().as_default() as g:
    c = tf.constant([[1., 2., 3.,], [4, 5, 6]])
with tf.Session(graph=g) as sess:
    print(sess.run(c))
c
```
상수의 크기를 보면 2x3 행렬임을 알 수 있습니다. Scikit-Learn을 비롯하여 많은 라이브러리들이 행(row)에 샘플을 열(column)에 특성을 놓습니다. 하지만 그 반대의 경우도 간혹 있습니다. 여기에서는 일반적인 관례를 따라 행(row)을 따라 샘플을 나열하겠습니다.
텐서플로에서는 이를 2차원 텐서, 2x3 크기의 텐서 등으로 부릅니다.
사실 sess.run() 메서드에서 전달되는 반환값은 파이썬의 NumPy 패키지의 배열 객체입니다. NumPy는 파이썬의 대표적인 다차원 배열을 위한 패키지로 파이썬 과학 패키지들의 데이터 타입의 표준이 되어가고 있습니다. 그래서 Scikit-Learn과 TensorFlow에서도 기본 데이터 타입으로 NumPy를 사용합니다.
```
# sess.run() returns NumPy arrays -- numpy.ndarray is TF's interchange type.
with tf.Session(graph=g) as sess:
    out = sess.run(c)
type(out)
```
## 변수
상수는 계산 그래프를 실행하는 동안 값이 변하지 않으므로 뭔가 쓸모있는 것을 저장하는 용도로는 사용할 수 없습니다. 다른 범용 프로그래밍 언어와 마찬가지로 텐서플로에도 값을 저장하고 변경시킬 수 있는 변수가 있습니다.
tf.Variable에 전달한 첫 번째 값은 변수의 초기값입니다. 상수와 마찬가지로 dtype 매개변수를 사용하여 초기값의 자료형을 명시적으로 지정할 수 있습니다.
```
# tf.Variable takes an initial value; the dtype can be fixed explicitly,
# otherwise it is inferred from the initial value (float32 here).
with tf.Graph().as_default() as g:
    v = tf.Variable([1., 2., 3.])
    print(v)
with tf.Graph().as_default() as g:
    v = tf.Variable([1., 2., 3.], dtype=tf.float64)
    print(v)
```
상수와는 달리 변수를 세션 객체로 바로 실행하면 에러가 발생합니다.
```
# Deliberate error demo: running a variable before initializing it raises --
# unlike a constant, a variable's value lives apart from its op.
with tf.Session(graph=g) as sess:
    sess.run(v)
```
상수는 값이 고정되어 있어서 상수 연산('Const')에 직접 값이 포함되어 있습니다. 하지만 변수는 값이 변경되어야 하므로 연산과 실제 값이 나뉘어져 있습니다. 이 그래프에서 만들어진 연산 노드를 모두 출력해 보겠습니다.
```
g.get_operations()
```
변수 하나를 만들었는데 4개의 연산이 생겼습니다. 'Variable/initial_value'가 초기값을 가지고 있으며 'Varaible/Assign' 노드에 의해 'Variable'의 값이 할당 되고 'Variable/read' 연산으로 바뀐 값을 읽을 수 있습니다. 그럼 초기 값을 할당해 보겠습니다.
```
# Running the 'Variable/Assign' op copies the initial value into the variable.
with tf.Session(graph=g) as sess:
    sess.run('Variable/Assign') # or sess.run(v.initializer)
    print(sess.run(v))
```
기대한 대로 값이 출력되었습니다. 하지만 변수가 많으면 이런 방식은 불편합니다. 대신 모든 변수의 초기와 연산을 한번에 모아서 실행시켜 주는 함수를 사용합니다.
```
# global_variables_initializer() bundles every variable's init op into one.
with tf.Session(graph=g) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print(sess.run(v))
```
global_variables_initializer()는 변수의 초기와 연산을 모아 주는 것외엔 특별한 역할이 없으므로 보통 다음과 같이 줄여서 많이 사용합니다.
```
# The common shorthand: build and run the initializer in one line.
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))
```
고수준의 텐서플로 함수에서는 변수를 자동으로 만들어 주지만 저수준의 함수를 사용할 경우에는 변수를 직접 만들어 줍니다. 어떤 경우에도 전체 변수를 초기화하는 부분은 동일합니다.
변수와 관련된 흔한 실수 중에 하나가 다음과 같이 같은 변수에 덧셈 연산을 적용하는 것입니다.
```
# Common pitfall: rebinding v to `v + 1` makes v an add-op output tensor,
# so the underlying variable is never updated.
with tf.Graph().as_default() as g:
    v = tf.Variable([1, 2, 3])
    print(v)
    v = v + 1
    print(v)
```
v는 tf.Variable의 객체였지만 덧셈 연산에 할당하는 순간 연산의 출력을 가리키는 텐서가 되었습니다. 그래서 v를 여러번 실행해도 덧셈 연산은 그대로 1을 출력합니다. 즉 덧셈의 결과가 원래 변수 v에 할당되지 않습니다.
```
# Both runs print the same result: the add is recomputed, never stored back.
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))
    print(sess.run(v))
```
이런 문제를 피하려면 텐서플로에서 제공하는 할당 연산자를 사용해야 합니다.
```
# tf.assign writes the add result back into the variable; v stays a Variable.
with tf.Graph().as_default() as g:
    v = tf.Variable([1, 2, 3])
    add = v + 1
    asn = tf.assign(v, add)
v, add, asn
```
v가 변경되지 않고 여전히 변수를 가리키고 있습니다. 또한 v + 1은 덧셈 연산의 출력을 가리키는 텐서가 되었고 tf.assign()은 할당 연산의 출력을 가리키는 텐서가 되었습니다. 만약 asn을 실행시키면 의존성을 따라 자동으로 add, v+1이 계산될 것입니다.
```
# Each run of asn re-evaluates v + 1 and stores it, so v actually increments.
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))
    print(sess.run(asn))
    print(sess.run(asn))
```
결과에서 눈치챌 수 있듯이 1을 더했는데 배열 전체에 더해졌습니다. 이를 브로드캐스팅이라고 하며 파이썬의 과학 패키지에서는 일반적으로 기대되는 기능입니다.
보통은 덧셈 연산 add가 간단하므로 따로 만들지 않고 assign 함수에 직접 덧셈 식을 주입합니다. 하지만 덧셈 연산이 만들어지고 asn 계산에 사용되는 방식은 동일합니다.
```
# Same pattern with the add expression inlined into tf.assign.
with tf.Graph().as_default() as g:
    v = tf.Variable([1, 2, 3])
    asn = tf.assign(v, v + 1)
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(asn))
    print(sess.run(asn))
```
## 행렬
신경망의 기본 연산은 행렬 연산입니다. 다음으로 넘어가기 전에 잠시 행렬 연산에 대해 알아 보도록 하겠습니다.
먼저 행렬에 표현할 때 다음과 같이 두 개의 행과 3개의 열이 있을 때 이를 [2x3] 행렬이라고 표현합니다.
$$\left\lgroup\matrix{ 1 & 2 & 3 \\ 4 & 5 & 6 }\right\rgroup$$
이전과 같이 1을 더하면 텐서플로의 브로드캐스팅 덕에 모든 원소에 1이 더해집니다.
$$\left\lgroup\matrix{ 1 & 2 & 3 \\ 4 & 5 & 6 }\right\rgroup + 1 = \left\lgroup\matrix{ 2 & 3 & 4 \\ 5 & 6 & 7 }\right\rgroup$$
이는 마치 모든 원소가 1인 [2x3] 행렬을 더하는 것과 같지만 훨씬 빠르게 작동됩니다.
```
# Adding a scalar to a 2x3 matrix broadcasts the 1 over every element.
with tf.Graph().as_default() as g:
    v = tf.Variable([[1, 2, 3], [4, 5, 6]])
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v + 1))
v
```
신경망에서 가장 핵심이 되는 행렬 연산은 점곱(dot product)입니다. 아래와 같이 첫 번째 행렬의 행이 두 번째 행렬의 열과 원소별로 곱해져서 덧셈이 되어 결과 행렬의 하나의 원소가 됩니다. 점곱 연산은 첫 번째 행렬의 위에서 아래로, 두 번째 행렬의 왼쪽에서 오른쪽으로 진행됩니다.
$$\left\lgroup\matrix{ 1 & 2 & 3 \\ 4 & 5 & 6 }\right\rgroup \cdot \left\lgroup\matrix{ 1 & 2 \\ 3 & 4 \\ 5 & 6 }\right\rgroup = \left\lgroup\matrix{ 22 & 28 \\ 49 & 64 }\right\rgroup$$
첫 번째 행렬의 행과 두 번째 행렬의 열이 곱해지다 보니 첫 번째 행렬의 열과 두 번째 행렬의 행의 크기가 같아야 합니다. 그리고 첫 번째 행렬의 행과 두 번째 행렬의 열이 결과 행렬의 행과 열의크기가 됩니다.
$$ [2\times3]\cdot[3\times2] = [2\times2]$$
```
# Matrix product: [2x3] . [3x2] -> [2x2]; tf.matmul adds the op to the graph,
# then sess.run evaluates it.
with tf.Graph().as_default() as g:
    v1 = tf.Variable([[1, 2, 3], [4, 5, 6]])
    v2 = tf.Variable([[1, 2], [3, 4], [5, 6]])
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    m = tf.matmul(v1, v2)
    print(sess.run(m))
```
행렬의 순서를 바꾸어서 점곱하면 $[3\times2]\cdot[2\times3]=[3\times3]$의 결과를 얻게 됩니다.
```
# Reversed operand order: [3x2] . [2x3] -> [3x3].
with tf.Session(graph=g) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.matmul(v2, v1)))
```
## 플레이스홀더
신경망은 데이터로부터 의미있는 패턴을 학습하는 모델 기반 머신러닝 알고리즘입니다. 즉 모델에 있는 어떤 파라미터의 값을 데이터로부터 얻어내어 저장해야 합니다. 이런 모델의 파라미터를 모델 파라미터라고 부르며 모델 파라미터를 얻어 내는 과정을 학습이라고 합니다. 상수와 달리 변수는 값을 바꿀 수 있기 때문에 모델 파라미터를 저장하는데 유용합니다.
훈련 데이터는 근본적으로 변하는 것이 아니기 때문에 상수나 변수로 저장하여 계산 그래프를 만들 수도 있습니다. 그러나 이렇게 되면 그래프의 크기(용량)가 매우 커져서 CPU, GPU에서 계산을 수행할 때 비효율적이며 어떤 경우에는 그래프로 만들기에는 훈련 데이터가 너무 큽니다. 신경망은 대체적으로 대용량의 데이터를 다루기 때문에 이런 경우가 대부분입니다.
그래서 계산 그래프를 만들고 난 후에 외부에서 데이터를 따로 전달해 주어야 하는 통로가 필요하게 됩니다. 특히 훈련 데이터를 조금씩 나누어 신경망 모델을 훈련시킬 때는 더욱 그렇습니다. 이렇게 데이터의 구조를 만들고 실행시에 데이터를 받기 위한 것이 플레이스홀더(placeholder)입니다. 간단한 플레이스홀더를 하나 만들어 보겠습니다.
```
# A placeholder reserves a typed [2x3] slot whose data is fed at run time.
with tf.Graph().as_default() as g:
    h = tf.placeholder(tf.int32, shape=(2, 3))
h
```
플레이스홀더의 첫 번째 매개변수는 데이터형이고 두 번째 매개변수는 자료의 크기입니다. 여기에서는 [2, 3]로 2차원 행렬(또는 텐서)를 지정했습니다. 즉 [2x3] 크기의 다차원 배열이 저장될 곳이라는 의미입니다. 두 번째 매개변수의 키워드 shape은 생략해도 되지만 가능하면 명시적으로 써 주는 것이 코드를 읽을 때 좋습니다.
h도 텐서이고 연산은 Placeholder입니다.
```
h.op, h.op.name
```
플레이스홀더 h를 세션을 만들어 실행해 보겠습니다. 변수가 아니므로 별도로 초기화를 해 주어야할 필요는 없습니다.
```
# Deliberate error demo: running a placeholder without feeding it fails --
# it holds no value of its own.
with tf.Session(graph=g) as sess:
    print(sess.run(h))
```
플레이스홀더는 값을 담는 그릇으로 볼 수 있기 때문에 실행해서 얻을 수 있는 텐서가 없습니다. 플레이스홀더를 실행하려면 반드시 담을 값을 함께 주입해 주어야 합니다. 이 때 사용되는 매개변수가 sess.run() 메서드의 feed_dict 입니다. feed_dict 는 파이썬의 딕셔너리 형태로 플레이스홀더의 변수가 키가 되고 주입할 데이터를 값으로 지정합니다. 값으로 넣을 수 있는 것은 파이썬 데이터형이나 텐서, 넘파이 배열 등입니다.
```
# feed_dict maps the placeholder to the concrete data for this run.
with tf.Session(graph=g) as sess:
    print(sess.run(h, feed_dict={h: [ [1, 2, 3], [4, 5, 6] ]}))
```
플레이스홀더는 미리 정해진 크기와 다른 크기의 값이 전해지면 에러를 냅니다. $[1\times3]$ 크기의 배열을 넣어 보겠습니다.
```
# Deliberate error demo: feeding a shape other than the declared (2, 3) raises.
with tf.Session(graph=g) as sess:
    print(sess.run(h, feed_dict={h: [ [1, 2, 3] ]}))
```
대신 플레이스홀더는 실행할 때마다 매번 다른 값을 넣을 수 있습니다. 그렇기 때문에 신경망 알고리즘을 훈련시킬 때 훈련 데이터를 주입할 수 있는 주요 통로가 됩니다.
```
# The same placeholder can be fed different values on every run.
with tf.Session(graph=g) as sess:
    h_out = sess.run(h, feed_dict={h: [ [1, 2, 3], [4, 5, 6] ]})
    print(h_out)
    h_out = sess.run(h, feed_dict={h: [ [7, 8, 9], [10, 11, 12] ]})
    print(h_out)
```
한가지 재미있는 기능은 플레이스홀더에 크기를 None으로 지정하여 임의의 크기의 플레이스홀더를 만들 수 있다는 점입니다. 이런 기능이 유용한 때는 훈련 데이터의 개수가 정확히 결정되지 않았을 때 유리합니다. 앞에서 만든 플레이스홀더 h에 첫 번째 차원을 None으로 만들어 보겠습니다.
```
# shape=(None, 3) leaves the first dimension free, so both a 2x3 and a
# 1x3 feed are accepted.
with tf.Graph().as_default() as g:
    h = tf.placeholder(tf.int32, shape=(None, 3))
with tf.Session(graph=g) as sess:
    h_out = sess.run(h, feed_dict={h: [ [1, 2, 3], [4, 5, 6] ]})
    print(h_out)
    h_out = sess.run(h, feed_dict={h: [ [1, 2, 3] ]})
    print(h_out)
```
앞의 예와는 달리 입력값을 $[2\times3]$와 $[1\times3]$로 첫 번째 차원을 달리하여 데이터를 주입했는 데에도 에러가 발생하지 않았습니다.
플레이스홀더가 사용되는 또 하나의 요긴한 사용처가 하이퍼파라미터입니다. 하이퍼파라미터는 모델 파라미터와는 달리 훈련 데이터로부터 학습되지 않고 우리가 지정해 주어야 하는 값입니다. 그런데 이 값이 훈련할 때와 테스트할 때 서로 값이 달라야 할 경우가 있습니다. 만약 변수나 상수로 이런 하이퍼파라미터를 지정했다면 그래프를 다시 만들어야 하기 때문에 낭패입니다. 플레이스홀더를 사용하면 훈련할 때와 테스트할 때 각기 다른 하이퍼파라미터를 주입할 수 있습니다.
플레이스홀더에는 기본값을 지정할 수 있는 tf.placeholder_with_default() 함수가 있습니다. 이 함수는 그래프에 하이퍼파라미터와 같이 상황에 따라 달라질 수 있는 값의 기본값을 지정하는 데 유용합니다. 이 함수의 첫 번째 매개변수는 데이터 타입이 아니고 기본값입니다.
```
# placeholder_with_default takes a default VALUE (not a dtype) as its first
# argument; the default is used whenever no feed_dict entry is supplied.
with tf.Graph().as_default() as g:
    h = tf.placeholder_with_default([[1, 2, 3]], shape=(None, 3))
with tf.Session(graph=g) as sess:
    h_out = sess.run(h, feed_dict={h: [ [1, 2, 3], [4, 5, 6] ]})
    print(h_out)
    h_out = sess.run(h) # no value is fed, so the default is used
    print(h_out)
```
| github_jupyter |
```
import logging
import threading
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import seaborn as seabornInstance
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import base
from iotfunctions import bif
from iotfunctions import entity
from iotfunctions import metadata
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.enginelog import EngineLogging
from iotfunctions import estimator
from iotfunctions.ui import (UISingle, UIMultiItem, UIFunctionOutSingle,
UISingleItem, UIFunctionOutMulti, UIMulti, UIExpression,
UIText, UIStatusFlag, UIParameters)
from mmfunctions.anomaly import (SaliencybasedGeneralizedAnomalyScore, SpectralAnomalyScore,
FFTbasedGeneralizedAnomalyScore, KMeansAnomalyScore)
import datetime as dt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import scipy as sp
import scipy.fftpack
import skimage as ski
from skimage import util as skiutil # for nifty windowing
import pyod as pyod
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
%matplotlib inline
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
EngineLogging.configure_console_logging(logging.INFO)
# setting to make life easier
# Column-name shorthands: the raw signal and each detector's score column.
Temperature='Temperature'
kmeans='TemperatureKmeansScore'
fft='TemperatureFFTScore'
spectral='TemperatureSpectralScore'
sal='SaliencyAnomalyScore'
gen='TemperatureGeneralizedScore'
# Derived columns used for the red anomaly overlays in the plots below.
kmeansA='kmeansAnomaly'
kmeansB='kmeansAnomalyB'
spectralA='spectralAnomaly'
fftA='fftAnomaly'
salA='salAnomaly'
genA='genAnomaly'
# Per-detector score thresholds above which a point is flagged as anomalous
# (hand-tuned; detector scales differ wildly -- see "Next steps" below).
kmeans_break=1.3
spectral_break = 20
fft_break = 180
sal_break = 100
gen_break = 30000
```
#### What will be shown
General approach is straightforward
* read raw data in
* transform it so that it is compatible to the Monitoring pipeline
* add yet another anomaly detector based on computer vision technology. The point here is to show how to run pipeline anomaly functions 'locally', an important concept for automated testing.
* simplify the dataframe - we have only one entity, no need for an entity index
* render input data and anomaly scores properly scaled
<br>
We start with Microsoft's anomaly test data found here
https://github.com/microsoft/anomalydetector/blob/master/samples/sample.csv
and then proceed to applying anomaly detection to real life pump data
<br>
#### Current inventory of anomaly detectors by type
This is the list of functions to apply
| Detector | ML Type | Type | How does it work |
| ------- | ------------ | ------- | ---------------- |
| KMeans | Unsupervised | Proximity | Clusters data points in centroid buckets, small buckets are outliers, score is distance to closest other bucket |
| Generalized | Unsupervised | Linear Model | Covariance matrix over data point vectors serves to measure multi-dimensional deviation |
| FFT | Unsupervised | Linear Model | Run FFT before applying Generalized |
| Spectral | Unsupervised | Linear Model | Compute signal energy to reduce dimensions |
| Saliency | Unsupervised | Linear Model | Apply saliency transform (from computer vision) |
| SimpleAnomaly | **Supervised** | Ensemble | Run Gradient boosting on training data, anomaly if prediction deviates from actual data |
```
# Run on microsoft's anomaly data
# Get stuff in
# Load Microsoft's sample anomaly series with parsed timestamps.
df_i = pd.read_csv('./samples/AzureAnomalysample.csv', index_col=False, parse_dates=['timestamp'])
# Fake a single entity and shift values by +20 so they read like temperatures.
df_i['entity']='MyRoom'
df_i['Temperature']=df_i['value'] + 20
df_i = df_i.drop(columns=['value'])
# and sort it by timestamp
df_i = df_i.sort_values(by='timestamp')
# The (entity, timestamp) MultiIndex is the layout the pipeline functions expect.
df_i = df_i.set_index(['entity','timestamp']).dropna()
df_i.head(2)
# Now run the anomaly functions as if they were executed in a pipeline
# Each scorer takes (input column, window size 12, output column). Building a
# throwaway entity type and attaching it mimics the pipeline environment so
# execute() can run 'locally' -- the key concept for automated testing.
spsi = SpectralAnomalyScore(Temperature, 12, spectral)
et = spsi._build_entity_type(columns = [Column(Temperature,Float())])
spsi._entity_type = et
df_i = spsi.execute(df=df_i)
sali = SaliencybasedGeneralizedAnomalyScore(Temperature, 12, sal)
et = sali._build_entity_type(columns = [Column(Temperature,Float())])
sali._entity_type = et
df_i = sali.execute(df=df_i)
ffti = FFTbasedGeneralizedAnomalyScore(Temperature, 12, fft)
et = ffti._build_entity_type(columns = [Column(Temperature,Float())])
ffti._entity_type = et
df_i = ffti.execute(df=df_i)
kmi = KMeansAnomalyScore(Temperature, 12, kmeans)
et = kmi._build_entity_type(columns = [Column(Temperature,Float())])
kmi._entity_type = et
df_i = kmi.execute(df=df_i)
df_i.describe()
# Simplify our pandas dataframe to prepare input for plotting
EngineLogging.configure_console_logging(logging.INFO)
# Only one entity -- drop the entity index level back into a column.
df_input2 = df_i.loc[['MyRoom']]
df_input2.reset_index(level=[0], inplace=True)
# NOTE: assigning through .values mutates the underlying arrays in place;
# the order of the threshold passes below matters.
df_input2[spectral].values[df_input2[spectral] > 0.001] = 0.001
df_input2[fft].values[df_input2[fft] < -1] = -1
# kmeansA overlay: NaN below the threshold (not drawn), clipped to the
# threshold above it, so only anomalous stretches show as a flat red line.
df_input2[kmeansA] = df_input2[kmeans]
df_input2[kmeansA].values[df_input2[kmeansA] < kmeans_break] = np.nan
df_input2[kmeansA].values[df_input2[kmeansA] > kmeans_break] = kmeans_break
df_input2[kmeansB] = df_input2[kmeans]
df_input2[kmeansB].values[df_input2[kmeansB] >= kmeans_break] = 4
df_input2[kmeansB].values[df_input2[kmeansB] < kmeans_break] = 3
# Scale spectral and saliency
df_input2[spectral].values[df_input2[spectral] < -40] = -40
df_input2[sal].values[df_input2[sal] > 200] = 200
# Same NaN/clip overlay treatment for FFT, spectral and saliency.
df_input2[fftA] = df_input2[fft]
df_input2[fftA].values[df_input2[fftA] < fft_break] = np.nan
df_input2[fftA].values[df_input2[fftA] > fft_break] = fft_break
df_input2[spectralA] = -df_input2[spectral]
df_input2[spectralA].values[df_input2[spectralA] < 20] = np.nan
df_input2[spectralA].values[df_input2[spectralA] > 20] = 20
df_input2[salA] = df_input2[sal]
df_input2[salA].values[df_input2[salA] < 100] = np.nan
df_input2[salA].values[df_input2[salA] > 100] = 100
# Generalized score is not computed for this dataset, so its overlay is off.
#df_input2[genA] = df_input2[gen]
#df_input2[genA].values[df_input2[genA] < gen_break] = np.nan
#df_input2[genA].values[df_input2[genA] > gen_break] = gen_break
# Five panels: the raw input on top, then one panel per detector with its
# score curve plus a fat red overlay marking the flagged points.
fig, ax = plt.subplots(5, 1, figsize=(16,24))
cnt = 0
ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('Input Temperature - 20',fontsize=14,weight="bold")
cnt = 1
# (combined 'ALL detectors' panel kept for reference, currently disabled)
#ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Input)
#ax[cnt].plot(df_input2.index, df_input2[kmeans], linewidth=2, color='magenta',label=kmeans)
#ax[cnt].plot(df_input2.index, df_input2[fft]/fft_break, linewidth=2,color='darkgreen',label=fft)
#ax[cnt].plot(df_input2.index, -df_input2[spectral]/40, linewidth=2,color='dodgerblue', label=spectral)
#ax[cnt].plot(df_input2.index, df_input2[sal]/100, linewidth=2,color='chartreuse',label=sal)
#ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
#ax[cnt].set_ylabel('ALL',fontsize=14,weight="bold")
cnt = 1
ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].plot(df_input2.index, df_input2[kmeans], linewidth=2, color='magenta',label=kmeans)
ax[cnt].plot(df_input2.index, df_input2[kmeansA], linewidth=10, color='red') #,label=kmeansA)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('KMeans \n detects chanages in "steepness"',fontsize=14)
cnt = 2
# FFT and the following scores are divided by their break value so they
# share the input's scale.
ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].plot(df_input2.index, df_input2[fft]/fft_break, linewidth=2,color='darkgreen',label=fft)
ax[cnt].plot(df_input2.index, df_input2[fftA]/fft_break, linewidth=10, color='red') #,label=kmeansA)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('FFT \n detects frequency changes', fontsize=14)
cnt = 3
ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].plot(df_input2.index, -df_input2[spectral]/20, linewidth=2,color='dodgerblue', label=spectral)
ax[cnt].plot(df_input2.index, df_input2[spectralA]/20, linewidth=10, color='red') #,label=kmeansA)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('Spectral \n like FFT for less "CPU"\n less sensitive', fontsize=14)
cnt = 4
ax[cnt].plot(df_input2.index, df_input2[Temperature]-20,linewidth=1,color='black',label=Temperature)
ax[cnt].plot(df_input2.index, df_input2[sal]/100, linewidth=2,color='chartreuse', label=sal)
ax[cnt].plot(df_input2.index, df_input2[salA]/100, linewidth=10, color='red') #,label=kmeansA)
ax[cnt].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt].set_ylabel('Saliency \n like FFT, part of Azure\'s approach', fontsize=14)
```
#### Results
Clear **winners** are
* **KMeans** and
* **FFT**.
Spectral is way too sensitive while Saliency
doesn't detect the negative peak at 10/10 midnight
```
# Now we proceed to customer data
# Get stuff in
df_input = pd.read_csv('./TemperatureAnomalyScore.csv', index_col=False, parse_dates=['timestamp'])
# Use the device id as the entity id expected by the pipeline index layout.
df_input['entity']=df_input['deviceid']
# and sort it by timestamp
df_input = df_input.sort_values(by='timestamp')
df_input = df_input.set_index(['entity','timestamp']).dropna()
df_input.head(5)
# Recompute the saliency and spectral scores locally; the kmeans/fft/
# generalized columns appear to come in with the CSV -- TODO confirm.
salii = SaliencybasedGeneralizedAnomalyScore('Temperature', 12, 'SaliencyAnomalyScore')
et = salii._build_entity_type(columns = [Column('Temperature',Float())])
salii._entity_type = et
df_input = salii.execute(df=df_input)
spsii = SpectralAnomalyScore('Temperature', 12, 'TemperatureSpectralScore')
et = spsii._build_entity_type(columns = [Column('Temperature',Float())])
spsii._entity_type = et
df_input = spsii.execute(df=df_input)
df_input.describe()
EngineLogging.configure_console_logging(logging.INFO)
# Single device of interest; drop the entity index level back into a column.
df_input2 = df_input.loc[['04714B60011A']]
df_input2.reset_index(level=[0], inplace=True)
# In-place clipping through .values, same scheme as for the Microsoft data:
# below a detector's threshold -> NaN (not drawn); above -> clipped to the
# threshold so the red overlay is a flat marker line.
df_input2[spectral].values[df_input2[spectral] > 0.001] = 0.001
df_input2[fft].values[df_input2[fft] < -1] = -1
df_input2[kmeansA] = df_input2[kmeans]
df_input2[kmeansA].values[df_input2[kmeansA] < kmeans_break] = np.nan
df_input2[kmeansA].values[df_input2[kmeansA] > kmeans_break] = kmeans_break
df_input2[kmeansB] = df_input2[kmeans]
df_input2[kmeansB].values[df_input2[kmeansB] >= kmeans_break] = 4
df_input2[kmeansB].values[df_input2[kmeansB] < kmeans_break] = 3
df_input2[fftA] = df_input2[fft]
df_input2[fftA].values[df_input2[fftA] < fft_break] = np.nan
df_input2[fftA].values[df_input2[fftA] > fft_break] = fft_break
df_input2[spectralA] = -df_input2[spectral]
df_input2[spectralA].values[df_input2[spectralA] < 20] = np.nan
df_input2[spectralA].values[df_input2[spectralA] > 20] = 20
df_input2[salA] = df_input2[sal]
df_input2[salA].values[df_input2[salA] < 100] = np.nan
df_input2[salA].values[df_input2[salA] > 100] = 100
# assumes the generalized score column already exists in df_input -- TODO confirm
df_input2[genA] = df_input2[gen]
df_input2[genA].values[df_input2[genA] < gen_break] = np.nan
df_input2[genA].values[df_input2[genA] > gen_break] = gen_break
# Six panels: all detectors overlaid, then one panel per detector with the
# fat red overlay marking its flagged points; scores divided by their break
# values to share the input's scale.
fig, ax = plt.subplots(6, 1, figsize=(12,20))
ax[0].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[0].plot(df_input2.index, df_input2[kmeans], linewidth=2, color='magenta',label=kmeans)
ax[0].plot(df_input2.index, df_input2[fft]/fft_break, linewidth=2,color='darkgreen',label=fft)
ax[0].plot(df_input2.index, -df_input2[spectral]/40, linewidth=2,color='dodgerblue', label=spectral)
ax[0].plot(df_input2.index, df_input2[sal]/100, linewidth=2,color='chartreuse',label=sal)
ax[0].plot(df_input2.index, df_input2[gen]/gen_break, linewidth=2,color='darkviolet',label=gen)
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
ax[0].set_ylabel('ALL',fontsize=14,weight="bold")
ax[1].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[1].plot(df_input2.index, df_input2[kmeans], linewidth=2, color='magenta',label=kmeans)
#ax[1].plot(df_input2.index, df_input2[kmeansB], linewidth=2, color='yellow') #label=kmeans)
ax[1].plot(df_input2.index, df_input2[kmeansA], linewidth=10, color='red') #,label=kmeansA)
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
ax[1].set_ylabel('KMeans \n detects chanages in "steepness"',fontsize=14)
ax[2].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[2].plot(df_input2.index, df_input2[fft]/fft_break, linewidth=2,color='darkgreen',label=fft)
ax[2].plot(df_input2.index, df_input2[fftA]/fft_break, linewidth=10, color='red') #,label=kmeansA)
ax[2].legend(bbox_to_anchor=(1.1, 1.05))
ax[2].set_ylabel('FFT \n detects frequency changes', fontsize=14)
ax[3].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[3].plot(df_input2.index, -df_input2[spectral]/20, linewidth=2,color='dodgerblue', label=spectral)
ax[3].plot(df_input2.index, df_input2[spectralA]/20, linewidth=10, color='red') #,label=kmeansA)
ax[3].legend(bbox_to_anchor=(1.1, 1.05))
ax[3].set_ylabel('Spectral \n like FFT for less "CPU"\n less sensitive', fontsize=14)
ax[4].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[4].plot(df_input2.index, df_input2[sal]/100, linewidth=2,color='chartreuse', label=sal)
ax[4].plot(df_input2.index, df_input2[salA]/100, linewidth=10, color='red') #,label=kmeansA)
ax[4].legend(bbox_to_anchor=(1.1, 1.05))
ax[4].set_ylabel('Saliency \n like FFT, part of Azure\'s approach', fontsize=14)
ax[5].plot(df_input2.index, df_input2[Temperature]-50,linewidth=1,color='black',label=Temperature)
ax[5].plot(df_input2.index, df_input2[gen]/gen_break, linewidth=2,color='darkviolet', label=gen)
ax[5].plot(df_input2.index, df_input2[genA]/gen_break, linewidth=10, color='red')
ax[5].legend(bbox_to_anchor=(1.1, 1.05))
ax[5].set_ylabel('Generalized \n more (too ?) sensitive', fontsize=14)
#ax.set_ylabel('Temperature-50')
```
### Results
FFT, Spectral, Saliency and Generalized detect an anomaly after midnight while KMeans spots one at ~8:00 am.
| Detector | How does it work |
| ------- | ---------------- |
| KMeans | Spots "steep line" anomaly at roughly 8am, nothing else |
| FFT | Spots "high signal energy" anomaly after midnight, "steep line" at 4 and "peak" shortly before 6am |
| Spectral | Spots "high signal energy" anomaly after midnight |
| Saliency | Spots "high signal energy" anomaly after midnight, less "pronounced" anomaly scores -> less useful |
| Generalized | Spots "high signal energy" anomaly after midnight and "peak" shortly before 6am, couple of false alerts |
| SimpleAnomaly| We don't have SimpleAnomaly running yet: we can only train on entity input data right now, not on pipeline generated data. |
### Conclusion
**There is no one size fits all.**
Without prior knowledge I'd apply KMeans and FFT as basic unsupervised anomaly detectors
### Next steps
#### Improving unsupervised methods
* Get the scales right
current anomaly functions vary wildly in scale (KMeans between 0 and 2, Spectral from minus infinity to 0, ...)
* Data Cleansing stage is missing
* Longer term: Follow microsoft's approach
- Get the anomaly generator working as part of the pipeline
- Generate randomized timeseries data with known anomalies
- Slice it and label the slices by anomaly type (None, Near vertical line, flat line, ...)
- Train a plain CNN on generated anomalies and apply it to real data
#### Make supervised methods work
* Lift the restriction that SimpleAnomaly can only train on raw input data
| github_jupyter |
```
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import pandas as pd
from pyscf import lib, gto, scf
import pyqmc.recipes
import h5py
import matplotlib.pyplot as plt
```
This function computes the mean-field solution and saves the results to the file specified. We recommend using the ccecp pseudopotentials for high accuracy and efficiency.
```
def mean_field(chkfile):
    """Run restricted Hartree-Fock for a single water molecule.

    Uses the ccECP pseudopotential with the matching cc-pVDZ basis and
    writes the converged result to *chkfile* so later QMC steps can load it.

    Parameters
    ----------
    chkfile : str
        Path of the HDF5 checkpoint file to write.

    Returns
    -------
    The converged mean-field (RHF) object; previously the result was only
    persisted to *chkfile*, returning it additionally lets callers reuse it
    directly (backward-compatible -- existing callers ignore the return).
    """
    # Geometry given in bohr (note unit='bohr').
    mol = gto.M(atom = "O 0 0 0; H 0 -2.757 2.587; H 0 2.757 2.587", basis='ccecpccpvdz', ecp='ccecp', unit='bohr')
    mf = scf.RHF(mol)
    mf.chkfile = chkfile
    mf.kernel()
    return mf
mean_field("h2o.hdf5")
```
Now we wish to construct a Slater-Jastrow wave function and optimize its energy. This is done using the OPTIMIZE function in pyqmc.recipes. It's often helpful to do the first optimization with only a few configurations, to get close to the minimum cheaply.
```
pyqmc.recipes.OPTIMIZE("h2o.hdf5","h2o_sj_200.hdf5",nconfig=200, **{'max_iterations':10,'verbose':True})
```
* Since we want to start from the previous optimization, we use the `start_from` command in OPTIMIZE.
* Using `**kwargs` to pass a keyword to the line minimization algorithm so it only runs for 10 iterations, just to keep the computation low for this example.
```
# Refine with 800 configurations, warm-starting from the 200-config run.
pyqmc.recipes.OPTIMIZE("h2o.hdf5","h2o_sj_800.hdf5", start_from="h2o_sj_200.hdf5", nconfig=800, **{'max_iterations':10,'verbose':True})
import seaborn as sns
# Compare the optimization traces (energy vs iteration) of both runs.
df = pd.concat([pyqmc.recipes.read_opt(f"h2o_sj_{n}.hdf5") for n in [200,800]])
g = sns.FacetGrid(hue='fname',data=df)
g.map(plt.errorbar,'iteration','energy','error', marker='o')
g.add_legend()
# VMC evaluation of each optimized wave function.
for n in [200,800]:
    pyqmc.recipes.VMC("h2o.hdf5",f"h2o_sj_vmc_{n}.hdf5", start_from=f"h2o_sj_{n}.hdf5", **dict(nblocks=100,verbose=True))
df = pd.DataFrame([pyqmc.recipes.read_mc_output(f"h2o_sj_vmc_{n}.hdf5") for n in [200,800]])
# Recover nconfig from the file name ('h2o_sj_vmc_<n>.hdf5').
df['nconfig'] = [int(x.split('_')[3].replace('.hdf5','')) for x in df['fname']]
print(df)
plt.errorbar("nconfig","energytotal","energytotal_err", data=df, marker='o')
plt.xlabel("nconfig")
plt.ylabel("energy (Ha)")
# Diffusion Monte Carlo on the better (800-config) wave function.
pyqmc.recipes.DMC("h2o.hdf5",f"h2o_sj_dmc_800.hdf5", start_from=f"h2o_sj_800.hdf5",**{'verbose':True})
with h5py.File("h2o_sj_dmc_800.hdf5") as f:
    en = f['energytotal'][...]  # NOTE(review): `en` is not used below -- kept for interactive inspection?
# Energy vs warmup: judge how many initial DMC blocks to discard.
df = pd.DataFrame([pyqmc.recipes.read_mc_output("h2o_sj_dmc_800.hdf5", warmup=warmup) for warmup in [10,20,30,40,50, 60,70, 100, 150]])
plt.errorbar("warmup",'energytotal', 'energytotal_err',data=df, marker='o')
```
| github_jupyter |
# Loading Image Data
So far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural networks.
We'll be using a [dataset of cat and dog photos](https://www.kaggle.com/c/dogs-vs-cats) available from Kaggle. Here are a couple example images:
<img src='assets/dog_cat.png'>
We'll use this dataset to train a neural network that can differentiate between cats and dogs. These days it doesn't seem like a big accomplishment, but five years ago it was a serious challenge for computer vision systems.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import helper
```
The easiest way to load image data is with `datasets.ImageFolder` from `torchvision` ([documentation](http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder)). In general you'll use `ImageFolder` like so:
```python
dataset = datasets.ImageFolder('path/to/data', transform=transform)
```
where `'path/to/data'` is the file path to the data directory and `transform` is a sequence of processing steps built with the [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module from `torchvision`. ImageFolder expects the files and directories to be constructed like so:
```
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
```
where each class has its own directory (`cat` and `dog`) for the images. The images are then labeled with the class taken from the directory name. So here, the image `123.png` would be loaded with the class label `cat`. You can download the dataset already structured like this [from here](https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip). I've also split it into a training set and test set.
### Transforms
When you load in the data with `ImageFolder`, you'll need to define some transforms. For example, the images are different sizes but we'll need them to all be the same size for training. You can either resize them with `transforms.Resize()` or crop with `transforms.CenterCrop()`, `transforms.RandomResizedCrop()`, etc. We'll also need to convert the images to PyTorch tensors with `transforms.ToTensor()`. Typically you'll combine these transforms into a pipeline with `transforms.Compose()`, which accepts a list of transforms and runs them in sequence. It looks something like this to scale, then crop, then convert to a tensor:
```python
transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor()])
```
There are plenty of transforms available, I'll cover more in a bit and you can read through the [documentation](http://pytorch.org/docs/master/torchvision/transforms.html).
### Data Loaders
With the `ImageFolder` loaded, you have to pass it to a [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader). The `DataLoader` takes a dataset (such as you would get from `ImageFolder`) and returns batches of images and the corresponding labels. You can set various parameters like the batch size and if the data is shuffled after each epoch.
```python
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
```
Here `dataloader` is a [generator](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/). To get data out of it, you need to loop through it or convert it to an iterator and call `next()`.
```python
# Looping through it, get a batch on each loop
for images, labels in dataloader:
pass
# Get one batch
images, labels = next(iter(dataloader))
```
>**Exercise:** Load images from the `Cat_Dog_data/train` folder, define a few transforms, then build the dataloader.
```
# Location of the training split of the Kaggle cats-vs-dogs dataset.
data_dir = 'Cat_Dog_data/train'

# Resize the short side to 255 px, take the center 224x224 crop, and convert
# the PIL image to a float tensor in [0, 1].
transform = transforms.Compose([transforms.Resize(255),
                                transforms.CenterCrop(224),
                                transforms.ToTensor()])
# ImageFolder infers each image's class label from its parent directory name.
dataset = datasets.ImageFolder(data_dir, transform=transform)
# Batches of 32 images, reshuffled every epoch.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

# Run this to test your data loader
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
```
If you loaded the data correctly, you should see something like this (your image will be different):
<img src='assets/cat_cropped.png' width=244>
## Data Augmentation
A common strategy for training neural networks is to introduce randomness in the input data itself. For example, you can randomly rotate, mirror, scale, and/or crop your images during training. This will help your network generalize as it's seeing the same images but in different locations, with different sizes, in different orientations, etc.
To randomly rotate, scale and crop, then flip your images you would define your transforms like this:
```python
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])])
```
You'll also typically want to normalize images with `transforms.Normalize`. You pass in a list of means and list of standard deviations, then the color channels are normalized like so
```input[channel] = (input[channel] - mean[channel]) / std[channel]```
Subtracting `mean` centers the data around zero and dividing by `std` squishes the values to be between -1 and 1. Normalizing helps keep the network weights near zero which in turn makes backpropagation more stable. Without normalization, networks will tend to fail to learn.
You can find a list of all [the available transforms here](http://pytorch.org/docs/0.3.0/torchvision/transforms.html). When you're testing however, you'll want to use images that aren't altered other than normalizing. So, for validation/test images, you'll typically just resize and crop.
>**Exercise:** Define transforms for training data and testing data below. Leave off normalization for now.
```
data_dir = 'Cat_Dog_data'

# TODO: Define transforms for the training data and testing data
# Training uses random rotation/crop/flip for augmentation; normalization is
# deliberately left off (see the exercise text above).
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor()])

# Test data is only resized and center-cropped — no randomness.
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=32)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)

# change this to the trainloader or testloader
data_iter = iter(testloader)

# Display the first four images of one batch.
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax, normalize=False)
```
Your transformed images should look something like this.
<center>Training examples:</center>
<img src='assets/train_examples.png' width=500px>
<center>Testing examples:</center>
<img src='assets/test_examples.png' width=500px>
At this point you should be able to load data for training and testing. Now, you should try building a network that can classify cats vs dogs. This is quite a bit more complicated than before with the MNIST and Fashion-MNIST datasets. To be honest, you probably won't get it to work with a fully-connected network, no matter how deep. These images have three color channels and at a higher resolution (so far you've seen 28x28 images which are tiny).
In the next part, I'll show you how to use a pre-trained network to build a model that can actually solve this problem.
```
# Optional TODO: Attempt to build a network to classify cats vs dogs from this dataset
```
| github_jupyter |
```
from IPython.display import Image
```
# CNTK 103: Part B - Feed Forward Network with MNIST
We assume that you have successfully completed CNTK 103 Part A.
In this tutorial we will train a fully connected network on MNIST data. This notebook provides the recipe using Python APIs. If you are looking for this example in BrainScript, please look [here](https://github.com/Microsoft/CNTK/tree/v2.0.beta15.0/Examples/Image/GettingStarted)
## Introduction
**Problem** (recap from the CNTK 101):
The MNIST data comprises hand-written digits with little background noise.
```
# Figure 1
# Sample MNIST digits (remote illustration only — not the training data itself).
Image(url= "http://3.bp.blogspot.com/_UpN7DfJA0j4/TJtUBWPk0SI/AAAAAAAAABY/oWPMtmqJn3k/s1600/mnist_originals.png", width=200, height=200)
```
**Goal**:
Our goal is to train a classifier that will identify the digits in the MNIST dataset.
**Approach**:
The same 5 stages we have used in the previous tutorial are applicable: Data reading, Data preprocessing, Creating a model, Learning the model parameters and Evaluating (a.k.a. testing/prediction) the model.
- Data reading: We will use the CNTK Text reader
- Data preprocessing: Covered in part A (suggested extension section).
Rest of the steps are kept identical to CNTK 102.
```
# Import the relevant components
from __future__ import print_function
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import cntk as C
from cntk import UnitType
from cntk.io import CTFDeserializer, MinibatchSource, StreamDef, StreamDefs
from cntk.io import INFINITELY_REPEAT, FULL_DATA_SWEEP
from cntk.initializer import glorot_uniform
from cntk.layers import default_options, Input, Dense
# Select the right target device when this notebook is being tested:
# (CI-only override; outside testing CNTK picks its default device.)
if 'TEST_DEVICE' in os.environ:
    import cntk
    if os.environ['TEST_DEVICE'] == 'cpu':
        cntk.device.try_set_default_device(cntk.device.cpu())
    else:
        cntk.device.try_set_default_device(cntk.device.gpu(0))

%matplotlib inline
```
## Data reading
In this section, we will read the data generated in CNTK 103 Part A.
```
# Ensure we always get the same amount of randomness
np.random.seed(0)

# Define the data dimensions
input_dim = 784          # 28 x 28 pixels, flattened
num_output_classes = 10  # digits 0-9
```
## Data reading
In this tutorial we are using the MNIST data you have downloaded using CNTK_103A_MNIST_DataLoader notebook. The dataset has 60,000 training images and 10,000 test images with each image being 28 x 28 pixels. Thus the number of features is equal to 784 (= 28 x 28 pixels), 1 per pixel. The variable `num_output_classes` is set to 10 corresponding to the number of digits (0-9) in the dataset.
The data is in the following format:
|labels 0 0 0 0 0 0 0 1 0 0 |features 0 0 0 0 ...
(784 integers each representing a pixel)
In this tutorial we are going to use the image pixels corresponding the integer stream named "features". We define a `create_reader` function to read the training and test data using the [CTF deserializer](https://cntk.ai/pythondocs/cntk.io.html?highlight=ctfdeserializer#cntk.io.CTFDeserializer). The labels are [1-hot encoded](https://en.wikipedia.org/wiki/One-hot).
```
# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file
def create_reader(path, is_training, input_dim, num_label_classes):
    """Build a MinibatchSource over a CTF file exposing 'labels'/'features' streams."""
    streams = StreamDefs(
        labels = StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
        features = StreamDef(field='features', shape=input_dim, is_sparse=False),
    )
    # Training sweeps the data indefinitely with shuffling; evaluation makes a
    # single ordered pass.
    sweeps = INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP
    return MinibatchSource(CTFDeserializer(path, streams),
                           randomize = is_training, epoch_size = sweeps)
# Ensure the training and test data is generated and available for this tutorial.
# We search in two locations in the toolkit for the cached MNIST data set.
data_found = False

for data_dir in [os.path.join("..", "Examples", "Image", "DataSets", "MNIST"),
                 os.path.join("data", "MNIST")]:
    train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
    test_file = os.path.join(data_dir, "Test-28x28_cntk_text.txt")
    # Accept the first candidate directory containing both files.
    if os.path.isfile(train_file) and os.path.isfile(test_file):
        data_found = True
        break

if not data_found:
    raise ValueError("Please generate the data by completing CNTK 103 Part A")

print("Data directory is {0}".format(data_dir))
```
<a id='#Model Creation'></a>
## Model Creation
Our feed forward network will be relatively simple with 2 hidden layers (`num_hidden_layers`) with each layer having 400 hidden nodes (`hidden_layers_dim`).
```
# Figure 2
# Illustration of the feed-forward architecture built below.
Image(url= "http://cntk.ai/jup/feedforward_network.jpg", width=200, height=200)
```
If you are not familiar with the terms *hidden_layer* and *number of hidden layers*, please refer back to CNTK 102 tutorial.
For this tutorial: The number of green nodes (refer to picture above) in each hidden layer is set to 200 and the number of hidden layers (refer to the number of layers of green nodes) is 2. Fill in the following values:
- num_hidden_layers
- hidden_layers_dim
Note: In this illustration, we have not shown the bias node (introduced in the logistic regression tutorial). Each hidden layer would have a bias node.
```
# Two hidden layers of 400 units each (see the description above).
num_hidden_layers = 2
hidden_layers_dim = 400
```
Network input and output:
- **input** variable (a key CNTK concept):
>An **input** variable is a container in which we fill different observations in this case image pixels during model learning (a.k.a.training) and model evaluation (a.k.a. testing). Thus, the shape of the `input_variable` must match the shape of the data that will be provided. For example, when data are images each of height 10 pixels and width 5 pixels, the input feature dimension will be 50 (representing the total number of image pixels). More on data and their dimensions to appear in separate tutorials.
**Question** What is the input dimension of your chosen model? This is fundamental to our understanding of variables in a network or model representation in CNTK.
```
# NOTE(review): `input` shadows the Python builtin; kept here for tutorial parity.
input = Input(input_dim)
label = Input(num_output_classes)
```
## Feed forward network setup
If you are not familiar with the feedforward network, please refer to CNTK 102. In this tutorial we are using the same network.
```
def create_model(features):
    """Stack `num_hidden_layers` ReLU Dense layers on `features`, then a
    linear output layer (softmax is applied later, inside the loss)."""
    with default_options(init = glorot_uniform(), activation = C.ops.relu):
        hidden = features
        for _ in range(num_hidden_layers):
            hidden = Dense(hidden_layers_dim)(hidden)
        # No activation here: cross_entropy_with_softmax expects raw evidences.
        return Dense(num_output_classes, activation = None)(hidden)
z = create_model(input)
```
`z` will be used to represent the output of a network.
We introduced sigmoid function in CNTK 102, in this tutorial you should try different activation functions. You may choose to do this right away and take a peek into the performance later in the tutorial or run the preset tutorial and then choose to perform the suggested activity.
** Suggested Activity **
- Record the training error you get with `sigmoid` as the activation function
- Now change to `relu` as the activation function and see if you can improve your training error
*Quiz*: Different supported activation functions can be [found here][]. Which activation function gives the least training error?
[found here]: https://github.com/Microsoft/CNTK/wiki/Activation-Functions
```
# Scale the input to 0-1 range by dividing each pixel by 256.
# Rebinds z: the model is re-instantiated on the normalized input.
z = create_model(input/256.0)
```
### Learning model parameters
Same as the previous tutorial, we use the `softmax` function to map the accumulated evidences or activations to a probability distribution over the classes (Details of the [softmax function][] and other [activation][] functions).
[softmax function]: http://cntk.ai/pythondocs/cntk.ops.html#cntk.ops.softmax
[activation]: https://github.com/Microsoft/CNTK/wiki/Activation-Functions
## Training
Similar to CNTK 102, we use minimize the cross-entropy between the label and predicted probability by the network. If this terminology sounds strange to you, please refer to the CNTK 102 for a refresher.
```
loss = C.cross_entropy_with_softmax(z, label)
```
#### Evaluation
In order to evaluate the classification, one can compare the output of the network which for each observation emits a vector of evidences (can be converted into probabilities using `softmax` functions) with dimension equal to number of classes.
```
label_error = C.classification_error(z, label)
```
### Configure training
The trainer strives to reduce the `loss` function by different optimization approaches, [Stochastic Gradient Descent][] (`sgd`) being one of the most popular one. Typically, one would start with random initialization of the model parameters. The `sgd` optimizer would calculate the `loss` or error between the predicted label against the corresponding ground-truth label and using [gradient-decent][] generate a new set model parameters in a single iteration.
The aforementioned model parameter update using a single observation at a time is attractive since it does not require the entire data set (all observation) to be loaded in memory and also requires gradient computation over fewer datapoints, thus allowing for training on large data sets. However, the updates generated using a single observation sample at a time can vary wildly between iterations. An intermediate ground is to load a small set of observations and use an average of the `loss` or error from that set to update the model parameters. This subset is called a *minibatch*.
With minibatches we often sample observation from the larger training dataset. We repeat the process of model parameters update using different combination of training samples and over a period of time minimize the `loss` (and the error). When the incremental error rates are no longer changing significantly or after a preset number of maximum minibatches to train, we claim that our model is trained.
One of the key parameters for optimization is called the `learning_rate`. For now, we can think of it as a scaling factor that modulates how much we change the parameters in any iteration. We will be covering more details in a later tutorial.
With this information, we are ready to create our trainer.
[optimization]: https://en.wikipedia.org/wiki/Category:Convex_optimization
[Stochastic Gradient Descent]: https://en.wikipedia.org/wiki/Stochastic_gradient_descent
[gradient-decent]: http://www.statisticsviews.com/details/feature/5722691/Getting-to-the-Bottom-of-Regression-with-Gradient-Descent.html
```
# Instantiate the trainer object to drive the model training
learning_rate = 0.2
# Constant per-minibatch learning-rate schedule.
lr_schedule = C.learning_rate_schedule(learning_rate, UnitType.minibatch)
learner = C.sgd(z.parameters, lr_schedule)
# Trainer minimizes `loss` and additionally tracks `label_error`.
trainer = C.Trainer(z, (loss, label_error), [learner])
```
First let us create some helper functions that will be needed to visualize different functions associated with training.
```
# Define a utility function to compute the moving average sum.
# A more efficient implementation is possible with np.cumsum() function
def moving_average(a, w=5):
    """Smooth `a` with a trailing window of width `w`.

    Element i (for i >= w) becomes the mean of the w preceding values;
    earlier elements pass through unchanged. Inputs shorter than the
    window are returned as a shallow copy.
    """
    if len(a) < w:
        return a[:]  # Need to send a copy of the array
    smoothed = []
    for idx, val in enumerate(a):
        smoothed.append(val if idx < w else sum(a[(idx-w):idx])/w)
    return smoothed
# Defines a utility that prints the training progress
def print_training_progress(trainer, mb, frequency, verbose=1):
    """Report training stats for minibatch index `mb`.

    Every `frequency` minibatches the loss and classification error of the
    most recent minibatch are read from `trainer` (and printed when
    `verbose`); otherwise both are reported as the string "NA".

    Returns:
        Tuple (mb, training_loss, eval_error).
    """
    training_loss = "NA"
    eval_error = "NA"

    if mb % frequency == 0:
        training_loss = trainer.previous_minibatch_loss_average
        # BUG FIX: this previously read previous_minibatch_loss_average again,
        # so the reported "Error" column duplicated the loss instead of the
        # classification error.
        eval_error = trainer.previous_minibatch_evaluation_average
        if verbose:
            print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}%".format(mb, training_loss, eval_error*100))

    return mb, training_loss, eval_error
```
<a id='#Run the trainer'></a>
### Run the trainer
We are now ready to train our fully connected neural net. We want to decide what data we need to feed into the training engine.
In this example, each iteration of the optimizer will work on `minibatch_size` sized samples. We would like to train on all 60000 observations. Additionally we will make multiple passes through the data specified by the variable `num_sweeps_to_train_with`. With these parameters we can proceed with training our simple feed forward network.
```
# Initialize the parameters for the trainer
minibatch_size = 64
num_samples_per_sweep = 60000
num_sweeps_to_train_with = 10
# Total minibatches needed (float; cast to int in the loop below).
num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size

# Create the reader to training data set
reader_train = create_reader(train_file, True, input_dim, num_output_classes)

# Map the data streams to the input and labels.
input_map = {
    label : reader_train.streams.labels,
    input : reader_train.streams.features
}

# Run the trainer on and perform model training
training_progress_output_freq = 500

plotdata = {"batchsize":[], "loss":[], "error":[]}

for i in range(0, int(num_minibatches_to_train)):
    # Read a mini batch from the training data file
    data = reader_train.next_minibatch(minibatch_size, input_map = input_map)

    trainer.train_minibatch(data)
    batchsize, loss, error = print_training_progress(trainer, i, training_progress_output_freq, verbose=1)

    # Only record points at the reporting frequency (others return "NA").
    if not (loss == "NA" or error =="NA"):
        plotdata["batchsize"].append(batchsize)
        plotdata["loss"].append(loss)
        plotdata["error"].append(error)
```
Let us plot the errors over the different training minibatches. Note that as we iterate the training loss decreases though we do see some intermediate bumps.
Hence, we use smaller minibatches and using `sgd` enables us to have a great scalability while being performant for large data sets. There are advanced variants of the optimizer unique to CNTK that enable harnessing computational efficiency for real world data sets and will be introduced in advanced tutorials.
```
# Compute the moving average loss to smooth out the noise in SGD
plotdata["avgloss"] = moving_average(plotdata["loss"])
plotdata["avgerror"] = moving_average(plotdata["error"])

# Plot the training loss and the training error
import matplotlib.pyplot as plt  # NOTE(review): re-import; pyplot is already imported above

plt.figure(1)
plt.subplot(211)
plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
plt.xlabel('Minibatch number')
plt.ylabel('Loss')
plt.title('Minibatch run vs. Training loss')
plt.show()

plt.subplot(212)
plt.plot(plotdata["batchsize"], plotdata["avgerror"], 'r--')
plt.xlabel('Minibatch number')
plt.ylabel('Label Prediction Error')
plt.title('Minibatch run vs. Label Prediction Error')
plt.show()
```
## Evaluation / Testing
Now that we have trained the network, let us evaluate the trained network on the test data. This is done using `trainer.test_minibatch`.
```
# Read the training data
# NOTE(review): despite the comment this reads the *test* file, unshuffled.
reader_test = create_reader(test_file, False, input_dim, num_output_classes)

test_input_map = {
    label : reader_test.streams.labels,
    input : reader_test.streams.features,
}

# Test data for trained model
test_minibatch_size = 512
num_samples = 10000
num_minibatches_to_test = num_samples // test_minibatch_size
test_result = 0.0

for i in range(num_minibatches_to_test):
    # We are loading test data in batches specified by test_minibatch_size
    # Each data point in the minibatch is a MNIST digit image of 784 dimensions
    # with one pixel per dimension that we will encode / decode with the
    # trained model.
    data = reader_test.next_minibatch(test_minibatch_size,
                                      input_map = test_input_map)

    eval_error = trainer.test_minibatch(data)
    test_result = test_result + eval_error

# Average of evaluation errors of all test minibatches
print("Average test error: {0:.2f}%".format(test_result*100 / num_minibatches_to_test))
```
Note, this error is very comparable to our training error indicating that our model has good "out of sample" error a.k.a. generalization error. This implies that our model can very effectively deal with previously unseen observations (during the training process). This is key to avoid the phenomenon of overfitting.
We have so far been dealing with aggregate measures of error. Let us now get the probabilities associated with individual data points. For each observation, the `eval` function returns the probability distribution across all the classes. The classifier is trained to recognize digits, hence has 10 classes. First let us route the network output through a `softmax` function. This maps the aggregated activations across the network to probabilities across the 10 classes.
```
out = C.softmax(z)
```
Let us take a small minibatch sample from the test data.
```
# Read the data for evaluation
reader_eval = create_reader(test_file, False, input_dim, num_output_classes)

eval_minibatch_size = 25
# BUG FIX: the original drew the batch from reader_test (already advanced by
# the testing loop above) and left reader_eval / eval_input_map unused. Map
# both streams here: labels are needed below to compare with the predictions.
eval_input_map = { input : reader_eval.streams.features,
                   label : reader_eval.streams.labels }

data = reader_eval.next_minibatch(eval_minibatch_size, input_map = eval_input_map)

img_label = data[label].value
img_data = data[input].value
# Per-image class-probability vectors from the softmax-wrapped network.
predicted_label_prob = [out.eval(img_data[i,:,:]) for i in range(img_data.shape[0])]

# Find the index with the maximum value for both predicted as well as the ground truth
pred = [np.argmax(predicted_label_prob[i]) for i in range(len(predicted_label_prob))]
gtlabel = [np.argmax(img_label[i,:,:]) for i in range(img_label.shape[0])]

print("Label    :", gtlabel[:25])
print("Predicted:", pred)
```
Let us visualize some of the results
```
# Plot a random image
sample_number = 5
plt.imshow(img_data[sample_number].reshape(28,28), cmap="gray_r")
plt.axis('off')

img_gt, img_pred = gtlabel[sample_number], pred[sample_number]
# BUG FIX: the line labelled "Image Label" previously printed img_pred, so it
# showed the model's prediction instead of the ground truth.
print("Image Label: ", img_gt)
print("Predicted:   ", img_pred)
```
**Exploration Suggestion**
- Try exploring how the classifier behaves with different parameters - suggest changing the `minibatch_size` parameter from 25 to say 64 or 128. What happens to the error rate? How does the error compare to the logistic regression classifier?
- Suggest trying to increase the number of sweeps
- Can you change the network to reduce the training error rate? When do you see *overfitting* happening?
#### Code link
If you want to try running the tutorial from Python command prompt please run the [SimpleMNIST.py](https://github.com/Microsoft/CNTK/tree/v2.0.beta15.0/Examples/Image/Classification/MLP/Python) example.
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func

# create engine to hawaii.sqlite
engine = create_engine("sqlite:///Resources/hawaii.sqlite")

# reflect an existing database into a new model
base = automap_base()
# reflect the tables
base.prepare(engine,reflect=True)

# View all of the classes that automap found
base.classes.keys()

# Save references to each table
# Mapped classes for the two reflected tables.
measurement=base.classes.measurement
station=base.classes.station

# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Precipitation Analysis
```
# Find the most recent date in the data set.
recent_date = session.query(measurement.date).\
    order_by(measurement.date.desc()).first()
recent_date

# Design a query to retrieve the last 12 months of precipitation data and plot the results.
# Starting from the most recent data point in the database.
# Calculate the date one year from the last date in data set.
one_year=dt.date(2017,8,23)-dt.timedelta(days=365)
one_year

# Perform a query to retrieve the data and precipitation scores
pre_scores=session.query(measurement.date,measurement.prcp).\
    filter(measurement.date > one_year).\
    order_by(measurement.date).all()
pre_scores

# Save the query results as a Pandas DataFrame and set the index to the date column
precipitation_df=pd.DataFrame(pre_scores)
precipitation_df.head()

# Sort the dataframe by date
# BUG FIX: set_index() returns a new frame — the original call discarded its
# result, leaving the index (and sort order) unchanged.
precipitation_df = precipitation_df.set_index('date').sort_index()
precipitation_df.head()

# Use Pandas Plotting with Matplotlib to plot the data
precipitation_df.plot(title="Precipitation Over The Last Year")
plt.savefig("Images/Precipitation.png")
plt.show()

# Use Pandas to calculate the summary statistics for the precipitation data
precipitation_df.describe()
```
# Exploratory Station Analysis
```
# Design a query to calculate the total number of stations in the dataset
stations=session.query(measurement).\
    group_by(measurement.station).count()
print(f'stations: {stations}')

# Design a query to find the most active stations (i.e. what stations have the most rows?)
# List the stations and the counts in descending order.
actv_stations = session.query(measurement.station,
    func.count(measurement.station)).\
    group_by(measurement.station).\
    order_by(func.count(measurement.station).desc()).all()
actv_stations

# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.
# First tuple element of the first (highest-count) row is the station id.
most_actv=actv_stations[0][0]
most_actv

temps = session.query(func.min(measurement.tobs), func.max(measurement.tobs),
    func.avg(measurement.tobs)).filter(measurement.station == most_actv).all()
print(f'low: {temps[0][0]}')
print(f'high: {temps[0][1]}')
print(f'average: {temps[0][2]}')

# Using the most active station id
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
# NOTE(review): this filter uses >= while the precipitation query above used > —
# confirm which boundary is intended.
most_actv
temperatures = session.query( measurement.tobs).filter(measurement.date >= one_year).\
    filter(measurement.station == most_actv).all()
temperatures = pd.DataFrame(temperatures, columns=['temperature'])
temperatures_df=temperatures
temperatures_df.head()

plt.hist(temperatures_df['temperature'], bins =12)
plt.xlabel("temperature")
plt.ylabel("frequency")
plt.title("Frequency of Temp in Station USC00519281")
plt.savefig("Images/Temperature Frequency.png")
```
# Close session
```
# Close Session
# Returns the SQLAlchemy connection to the engine's pool.
session.close()
```
| github_jupyter |
# Training of a super simple model for celltype classification
```
import tensorflow as tf
!which python
!python --version
print(tf.VERSION)
print(tf.keras.__version__)
!pwd # start jupyter under notebooks/ for correct relative paths
import datetime
import inspect
import pandas as pd
import numpy as np
import seaborn as sns
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from depiction.models.examples.celltype.celltype import one_hot_encoding, one_hot_decoding
```
## a look at the data
labels are categories 1-20, here's the associated celltype:
```
# Cell-type metadata: maps each integer category to its cell-type name.
meta_series = pd.read_csv('../data/single-cell/metadata.csv', index_col=0)
meta_series
```
There are 13 unbalanced classes, and over 80k samples
```
data_df = pd.read_csv('../data/single-cell/data.csv')
# Per-class sample counts (the classes are unbalanced).
data_df.groupby('category').count()['CD45']
data_df.sample(n=10)

# Show the helper sources: one-hot encoding shifted for 1-indexed classes.
print(inspect.getsource(one_hot_encoding)) # from keras, but taking care of 1 indexed classes
print(inspect.getsource(one_hot_decoding))

classes = data_df['category'].values
labels = one_hot_encoding(classes)

#scale the data from 0 to 1
min_max_scaler = MinMaxScaler(feature_range=(0, 1), copy=True)
data = min_max_scaler.fit_transform(data_df.drop('category', axis=1).values)
data.shape
one_hot_decoding(labels)

# Stratified split keeps the class proportions equal in both sets.
data_train, data_test, labels_train, labels_test = train_test_split(
    data, labels, test_size=0.33, random_state=42, stratify=data_df.category)
labels

batchsize = 32
# Shuffle with a small buffer, batch, and repeat indefinitely for training.
dataset = tf.data.Dataset.from_tensor_slices((data_train, labels_train))
dataset = dataset.shuffle(2 * batchsize).batch(batchsize)
dataset = dataset.repeat()
# Test set: batched only, single pass, no shuffling.
testset = tf.data.Dataset.from_tensor_slices((data_test, labels_test))
testset = testset.batch(batchsize)
```
## I don't know what a simpler network would look like
```
# Single-layer softmax classifier (multinomial logistic regression).
model = tf.keras.Sequential()
# Add a softmax layer with output units per celltype:
# len(meta_series) = number of cell-type categories in the metadata table.
model.add(layers.Dense(
    len(meta_series), activation='softmax',
    batch_input_shape=tf.data.get_output_shapes(dataset)[0]
))
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=[tf.keras.metrics.categorical_accuracy])

# evaluation on testset on every epoch
# log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(
    dataset,
    epochs=20, steps_per_epoch=np.ceil(data_train.shape[0]/batchsize),
    validation_data=testset, # callbacks=[tensorboard_callback]
)
```
## Is such a simple model interpretable?
```
# Save entire model to a HDF5 file
# (HDF5 keeps the architecture, weights and optimizer state together.)
model.save('./celltype_model.h5')
# tensorboard --logdir logs/fit
# To recreate the exact same model, including weights and optimizer.
# model = tf.keras.models.load_model('../data/models/celltype_dnn_model.h5')
```
# What is the effect of increasing model complexity?
Play around by adding some layers, train and save the model under some name to use with the other notebook.

```
# Rebinds `model`: a deeper playground network (not compiled or trained here).
model = tf.keras.Sequential()
# Adds a densely-connected layers with 64 units to the model:
model.add(layers.Dense(64, activation='relu', batch_input_shape=tf.data.get_output_shapes(dataset)[0])) #
# ...
# do whatever you want
# model.add(layers.Dense(64, activation='relu'))
# model.add(layers.Dropout(0.5))
# ...
# Add a softmax layer with output units per celltype:
model.add(layers.Dense(len(meta_series), activation='softmax'))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Import Danych z Filmwebu
```
data_path='https://raw.githubusercontent.com/mateuszrusin/ml-filmweb-score/master/oceny.csv'
marks = pd.read_csv(data_path)
marks.head(10)
```
Scalamy tytuł oryginalny z polskim
```
marks['Tytuł oryginalny']=marks['Tytuł oryginalny'].fillna(marks['Tytuł polski']);
marks.head(10)
```
Tworzenie funkcji, która wybiera pierwszy kraj produkcji/nagrywania do dalszej analizy.
Hipotezą jest sprawdzenie czy jeżeli krajem produkcji są Stany Zjednoczone to użytkownik wyżej je ocenia
```
def firstValue(x, new_column, split_column, delimiter=','):
    """Store in x[new_column] the text of each x[split_column] value up to the
    first delimiter (the whole value when the delimiter is absent or at
    position 0). Mutates *x* in place."""
    def take_head(value):
        cut = value.find(delimiter)
        return value[:cut] if cut > 0 else value
    x[new_column] = [take_head(v) for v in x[split_column]]
firstValue(marks, 'firstCountry', 'Kraj produkcji')
marks.head()
```
<b>Tworzenie lambda funkcji, która będzie tworzyć label. Plus konwersja typu danych</b>
```
converter = lambda x: 1 if x == 'USA' else 0
marks.dtypes
marks_Oceny=marks[marks['Ocena']!='brak oceny']
marks_Oceny['Ocena']=marks_Oceny['Ocena'].astype(int)
marks_Oceny['isUSA']= marks_Oceny['firstCountry'].apply(converter)
marks_Oceny.head()
country_dict= lambda x: 'USA' if x == 1 else 'Other'
```
<b>Pierwszy wykres</b> <br>
Sprawdzamy rozkład ocen dla IsUSA za pomocą barplot
```
a = marks_Oceny[['isUSA','Ocena']].groupby('isUSA').hist(alpha=0.4,by=marks_Oceny.isUSA.apply(country_dict))
```
Sprawdzamy rozkład zmiennej Oceny
```
marks_Oceny[['Ocena','isUSA']].plot(x='isUSA', y='Ocena', kind='kde', sharex=True, sharey=True)
a=marks_Oceny[['Ocena','isUSA']].groupby('isUSA').mean()
a.Ocena.at[1]
```
Wykres barplot dla porówania rozkładów. Ten Tutaj po niżej nie jest dobry, ponieważ nie sumuje się do 1 <br>
Nadal można wnioskować, że rozkłady są do siebie zbliżone
```
plt.hist(marks_Oceny[marks_Oceny.isUSA==1]['Ocena'], 50, density=1, facecolor='g', alpha=1,width=1.0)
plt.xlabel('Marks')
plt.title('Histogram of firstCountry=USA')
plt.text(3,1.2, r'$\mu1=%s$' % a.Ocena.at[1].round(3))
plt.show()
plt.xlabel('Marks')
plt.title('Histogram of firstCountry=Other')
plt.hist(marks_Oceny[marks_Oceny.isUSA==0]['Ocena'], 50, density=1, facecolor='r', alpha=1,width=1.0)
plt.text(3,1.7, r'$\mu0=%s$' % a.Ocena.at[0].round(3))
plt.axis([2, 10, 0, 2])
plt.show()
plt.xlabel('Marks')
plt.title('Histogram of firstCountry=Other')
plt.hist(marks_Oceny[marks_Oceny.isUSA==0]['Ocena'], 50, density=1, facecolor='r', alpha=1,width=1.0)
plt.hist(marks_Oceny[marks_Oceny.isUSA==1]['Ocena'], 50, density=1, facecolor='b', alpha=0.5,width=1.0)
plt.axis([2, 10, 0, 2])
plt.show()
marks_Oceny[['Ocena', 'isUSA']].groupby('isUSA').mean().plot(kind='bar', legend=True, ylim=5.2)
marks_Oceny.Gatunek.groupby(marks_Oceny.Gatunek).agg('count')
```
Wybranie pierwszego gatunku filmowego do dalszych analiz
```
firstValue(marks_Oceny,'firstGenrePL', 'Gatunek')
marks_Oceny[marks_Oceny.Gatunek=='Czarna komedia']
countGenre=marks_Oceny.firstGenrePL.groupby(marks_Oceny.firstGenrePL).agg('count')
countGenre.index
```
Wybranie pierwszych 5 najbardziej popularnych gatunków po zmapowaniu
```
genreMap=pd.DataFrame({'genre':countGenre.index,'value':countGenre.values})
firstFiveGenres=genreMap.sort_values(by='value' ,ascending=False).head()
firstFiveGenres
```
Zaczytanie słownika i łaczenie z naszym zbiorem
```
genres_map=pd.read_csv('genre.txt', sep=':')
genres_map.head()
marks_Oceny=marks_Oceny.merge(genres_map,how='left', left_on='firstGenrePL', right_on='Gatunek', suffixes=('_left','_right') )
marks_Oceny.head()
```
Stworzenie subsetu z pierwszych 5 najpopularniejszych gatunków
```
def subsets_create(x, column, by, by_column):
    """Return the rows of *x* whose *column* value appears in *by[by_column]*.

    Parameters
    ----------
    x : pd.DataFrame
        Source frame to filter.
    column : str
        Column of *x* matched against the lookup values.
    by : pd.DataFrame
        Frame holding the lookup values (e.g. the top-5 genres).
    by_column : str
        Column of *by* containing the values to keep.

    Returns
    -------
    pd.DataFrame
        Concatenation of the per-value subsets with a fresh 0..n-1 index;
        an empty frame with the same columns as *x* when *by* has no rows.
    """
    # Collect one subset per lookup value, then concatenate once instead of
    # re-concatenating inside the loop (the original also raised
    # UnboundLocalError when *by* was empty).
    frames = [x[x[column] == by.at[i, by_column]] for i in by[by_column].index]
    if not frames:
        return x.iloc[0:0].copy()
    return pd.concat(frames, ignore_index=True)
New_set=subsets_create(marks_Oceny,'firstGenrePL',firstFiveGenres,'genre' )
New_set.head()
```
Import seaborn i tworzenie wykresów rozkładów
```
import seaborn as sns
p1=sns.kdeplot(New_set[New_set.Map=='Drama']['Ocena'], shade=True, color='c', label='Drama')
p1=sns.kdeplot(New_set[New_set.Map=='Thriller']['Ocena'], shade=True, color='r', label='Thriller')
p1=sns.kdeplot(New_set[New_set.Map=='Horror']['Ocena'], shade=True, color='b', label='Horror')
p1=sns.kdeplot(New_set[New_set.Map=='Comedy']['Ocena'], shade=True, color='y', label='Comedy')
p1=sns.kdeplot(New_set[New_set.Map=='Action']['Ocena'], shade=True, color='black', label='Action')
#p1=sns.kdeplot(df['sepal_length'], shade=True, color="b")
#sns.plt.show()
```
Merging with IMDB kod pochodzi z poprzedniego spotkania
```
import wget
import gzip
!wget https://datasets.imdbws.com/title.basics.tsv.gz
!wget https://datasets.imdbws.com/title.ratings.tsv.gz
!gzip -d -f title.basics.tsv.gz
!gzip -d -f title.ratings.tsv.gz
imdb_title = pd.read_csv('title.basics.tsv', sep='\t')
imdb_raiting = pd.read_csv('title.ratings.tsv', sep='\t')
imdb = pd.merge(imdb_title, imdb_raiting, how='left',on='tconst')
marks_Oceny['originalTitle'] = marks_Oceny['Tytuł oryginalny']
marks_Oceny['startYear'] =marks_Oceny['Rok produkcji'].astype(str)
# Polish -> English (IMDb) genre translation table used by change_type,
# which looks keys up lower-cased; genres with no IMDb counterpart map to
# themselves (capitalized Polish name).
match = {
    'akcja': 'Action',
    'animacja': 'Animation',
    'biograficzny': 'Biography',
    'czarna komedia': 'Comedy',
    'dramat': 'Drama',
    'dramat historyczny': 'Drama',
    'dramat obyczajowy': 'Drama',
    'dramat sądowy': 'Drama',
    'erotyczny': 'Romance',
    'familijny': 'Family',
    'fantasy': 'Fantasy',
    'gangsterski': 'Crime',
    'horror': 'Horror',
    'katastroficzny': 'Adventure',
    'komedia': 'Comedy',
    'komedia kryminalna': 'Comedy',
    'komedia obycz.': 'Comedy',
    'komedia rom.': 'Comedy',
    'komediarom.': 'Comedy',
    'kostiumowy': 'Kostiumowy',
    'kryminał': 'Crime',
    'melodramat': 'Melodramat',
    'obyczajowy': 'Obyczajowy',
    'przygodowy': 'Adventure',
    # BUG FIX: the key was 'sci-Fi', which change_type's lower-cased lookup
    # ('sci-fi') could never match, so Sci-Fi titles were silently left
    # untranslated.
    'sci-fi': 'Sci-Fi',
    'sensacyjny': 'Sensacyjny',
    'surrealistyczny': 'Surrealistyczny',
    'thriller': 'Thriller',
    'western': 'Western',
    'wojenny': 'War'
}
def to_list(textdata):
    """Lower-case *textdata*, drop all whitespace, then split on commas."""
    compact = "".join(textdata.lower().split())
    return compact.split(',')
def change_type(t):
    """Translate each comma-separated Polish genre in *t* via the
    module-level `match` table, leaving unknown genres lower-cased,
    and rejoin with ', '."""
    translated = []
    for genre in to_list(t):
        key = genre.lower()
        translated.append(match[key] if key in match else key)
    return ", ".join(translated)
marks_Oceny['genre_eng'] = marks_Oceny.apply(lambda x: change_type(x['Gatunek']), axis=1)
marks_Oceny.head()
print(len(imdb))
imdb = imdb.dropna(subset=['startYear','originalTitle'])
imdb = imdb[imdb['titleType']=='movie']
imdb.head()
oceny_imdb = pd.merge(
marks_Oceny,
imdb,
how='inner',
on=['startYear','originalTitle'])
print(len(oceny_imdb))
oceny_imdb.head()
print('Zduplikowane: ', len(oceny_imdb[oceny_imdb.duplicated(subset=['originalTitle'])]))
oceny_imdb[oceny_imdb['originalTitle']=='Joker']
def get_similarity(row):
    """Count the genres shared between the Filmweb (`genre_eng`) and the
    IMDb (`genres`) genre strings of *row* (set intersection size)."""
    filmweb_genres = set(to_list(row['genre_eng']))
    imdb_genres = set(to_list(row['genres']))
    return len(filmweb_genres & imdb_genres)
oceny_imdb['similarity'] = oceny_imdb.apply(get_similarity,axis=1)
oceny_duplicated = oceny_imdb[oceny_imdb.duplicated(subset=['originalTitle'], keep=False)]
oceny_duplicated
top1 = oceny_imdb.groupby(['ID']).apply(lambda x: x.sort_values(["similarity"], ascending = False)).reset_index(drop=True)
oceny_imdb2 = top1.groupby('ID').head(1).copy()
oceny_imdb2[oceny_imdb2['originalTitle']=='Joker']
oceny_imdb2.head()
```
Koniec kodu z poprzedniego Spotkania
Wykresy wiolinowe dla Oceny i średniej. Służą do porównania rozkładów w zależności od Grupy.
Zawiera takie informacje jak Min, Max, Mediana i Kwartyle
```
p1=sns.violinplot(x=oceny_imdb2['isUSA'], y=oceny_imdb2['Ocena'], palette=sns.color_palette("husl", 8) ,linewidth=5, inner='box')
p2=sns.violinplot(x=oceny_imdb2['isUSA'], y=oceny_imdb2['averageRating'], palette=sns.color_palette("Set1", n_colors=8, desat=.5),inner='box' ,linewidth=5)
p1=sns.violinplot(x=oceny_imdb2['isUSA'], y=oceny_imdb2['Ocena'], palette=sns.color_palette("husl", 8), inner=None ,linewidth=5)
p2=sns.violinplot(x=oceny_imdb2['isUSA'], y=oceny_imdb2['averageRating'], palette=sns.color_palette("Set1", n_colors=8, desat=.5), inner=None ,linewidth=5)
oceny_imdb2[['isUSA', 'Ocena', 'averageRating']].groupby('isUSA').agg(['mean','max','min'])
```
Porównanie rozkładów Ocen z filmweb i średniej użytkowników <br>
Patrz dystrybuantę dla Oceny wyżej. Widać, że kształt jest zachowany, choć minimalnie zniwelowany w zależności od gatunku
```
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Drama']['Ocena'], shade=True, color='c', label='Drama_filmweb')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Drama']['averageRating'], shade=True, color='y', label='Drama_average')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Horror']['Ocena'], shade=True, color='g', label='Horror_filmweb')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Horror']['averageRating'], shade=True, color='y', label='Horror_average')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Comedy']['averageRating'], shade=True, color='y', label='Comedy_average')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Comedy']['Ocena'], shade=True, color='r', label='Comedy_filmweb')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Action']['averageRating'], shade=True, color='y', label='Action_average')
p1=sns.kdeplot(oceny_imdb2[oceny_imdb2.Map_right=='Action']['Ocena'], shade=True, color='black', label='Action_filmweb')
```
| github_jupyter |
```
import os
import re
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,plot_confusion_matrix
from sklearn.metrics import accuracy_score,f1_score,classification_report,precision_score,recall_score
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# nltk.download('stopwords')
stopwords = set(stopwords.words('english'))
import spacy
import string
from string import digits
from bs4 import BeautifulSoup
from html import unescape
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
data = pd.read_csv('python_data .csv')
data.info()
```
# 1 Text Cleaning
```
# Data Cleaning
def remove_html(text):
    """Strip anything that looks like an HTML tag (non-greedy <...>) from *text*."""
    return re.sub(r'<.*?>', '', text)
#en = spacy.load('en_core_web_sm')
#sw_spacy = en.Defaults.stop_words
stop_words = stopwords.words('english')
def remove_stopwords(text):
    """Drop every whitespace-separated token of *text* found in the
    module-level `stop_words` list; rejoin the rest with single spaces."""
    kept = [token for token in text.split() if token not in stop_words]
    return ' '.join(kept)
# Setting for digit removing
remove_digits = str.maketrans('', '', digits)
# Setting for punctuation removing
remove_punkt = str.maketrans(string.punctuation,' '*len(string.punctuation))
lemmatizer = WordNetLemmatizer()
def get_lemmatized_text(text):
    """Lemmatize each whitespace-separated token with the module-level
    WordNet `lemmatizer` and rejoin with single spaces."""
    return ' '.join(lemmatizer.lemmatize(token) for token in text.split())
def text_processing(df_text):
    """Run the full cleaning pipeline over a Series of raw documents.

    Steps: lower-case -> decode HTML entities -> strip HTML tags ->
    remove stopwords -> remove digits -> remove punctuation -> lemmatize.
    Returns a new Series; the input Series is not modified.
    """
    cleaned = df_text.str.lower()
    cleaned = cleaned.apply(unescape)
    cleaned = cleaned.apply(remove_html)
    cleaned = cleaned.apply(remove_stopwords)
    # `remove_digits` / `remove_punkt` are module-level translation tables.
    cleaned = cleaned.apply(lambda doc: doc.translate(remove_digits))
    cleaned = cleaned.apply(lambda doc: doc.translate(remove_punkt))
    return cleaned.apply(get_lemmatized_text)
def trial_processing(text):
    """Clean a single string with the same steps the corpus went through:
    lower-case, decode/strip HTML, drop stopwords, digits and punctuation,
    then lemmatize."""
    cleaned = text.lower()
    cleaned = remove_html(unescape(cleaned))
    cleaned = remove_stopwords(cleaned)
    cleaned = cleaned.translate(remove_digits)
    cleaned = cleaned.translate(remove_punkt)
    return get_lemmatized_text(cleaned)
def clean_tags(tags):
    """Replace each of the tag punctuation characters , < > . ? : in *tags*
    with a single space."""
    return tags.translate(str.maketrans(',<>.?:', ' ' * 6))
#clean text
data['clean_question'] = text_processing(data['QuestionBody'])
data['clean_title'] = text_processing(data['Title'])
data['clean_Tags'] = data['Tags'].apply(clean_tags)
#get trial question
tag = data['Tags'][10]
title = data['Title'][10]
question = data['QuestionBody'][10]
```
# 2. Filtering with SimTitle and SimTag
# 2.1 Calculate the similarity from title
```
#get the tf-idf vectorizer for title
tf_title = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0,max_features =500)
tfidf_title = tf_title.fit_transform(data['clean_title'])
def sim_title(title):
    """Score every question in `data` against *title* by TF-IDF cosine
    similarity of the (cleaned) titles.

    Returns a dict mapping question Id -> similarity, inserted from most to
    least similar (relies on dict insertion order).
    """
    # Clean the query exactly like the corpus was cleaned before fitting.
    cleaned = trial_processing(title)
    # Vectorize with the already-fitted title vectorizer.
    query = tf_title.transform(pd.Series(cleaned)).tocsr().todense()
    scores = cosine_similarity(query, tfidf_title)[0]
    # Corpus indices ordered best match first.
    ranked = scores.argsort()[::-1]
    return {data['Id'][idx]: scores[idx] for idx in ranked}
result_title = sim_title(title)
```
# 2.2 Calculate the similarity from tags
```
#get the tf-idf vectorizer for tag
tag_vectorizer = TfidfVectorizer()
count_matrix = tag_vectorizer.fit_transform(data['clean_Tags'])
def sim_tags(tags):
    """Score every question in `data` against *tags* by TF-IDF cosine
    similarity of the (cleaned) tag strings.

    Returns a dict mapping question Id -> similarity, inserted from most to
    least similar (relies on dict insertion order).
    """
    # Normalize the query tags the same way the corpus tags were cleaned.
    cleaned = clean_tags(tags)
    # Vectorize with the already-fitted tag vectorizer.
    query = tag_vectorizer.transform(pd.Series(cleaned)).tocsr().todense()
    scores = cosine_similarity(query, count_matrix)[0]
    # Corpus indices ordered best match first.
    ranked = scores.argsort()[::-1]
    return {data['Id'][idx]: scores[idx] for idx in ranked}
#result_tag
result_tag = sim_tags(tag)
```
# 2.3 Filtering with threshold
```
def filter_score(result_title, result_tags):
    """Combine title and tag similarities and keep the ids above a threshold.

    Parameters
    ----------
    result_title : dict
        Maps question Id -> title cosine similarity.
    result_tags : dict
        Maps question Id -> tag cosine similarity.

    Returns
    -------
    list
        Ids whose weighted score (0.9 * title + 0.1 * tag) is >= 0.2,
        in the iteration order of *result_title*.
    """
    # Weight of the title similarity in the combined score.
    alpha = 0.9
    result_filter = {}
    for qid, title_score in result_title.items():
        # BUG FIX: the original read the module-level `result_tag` instead of
        # its `result_tags` parameter; ids missing from the tag scores now
        # default to 0.0 instead of raising a TypeError on None.
        tag_score = result_tags.get(qid, 0.0)
        result_filter[qid] = alpha * title_score + (1 - alpha) * tag_score
    # Keep only ids whose combined score clears the 0.2 threshold.
    return [qid for qid, score in result_filter.items() if score >= 0.2]
result_ID = filter_score(result_title, result_tag)
len(result_ID)
```
# 2.4 Get Clean Question Body for each ID after filtering
```
def get_clean_question(result_ID):
    """Fetch the cleaned question body for each Id in *result_ID*.

    Returns a Series whose index is each question's positional index in the
    module-level `data` frame and whose values are the cleaned bodies.
    """
    bodies = {}
    for qid in result_ID:
        row = data.loc[data['Id'] == qid]
        # Single-row selection: keep the frame's positional index as the key.
        bodies[int(row.index.values)] = row['clean_question'].values[0]
    return pd.Series(data=bodies)
clean_question = get_clean_question (result_ID)
```
# 3 Final Recommendation based on content of question body
```
#get question body values
def item(id):
    """Return the raw QuestionBody value(s) from `data` for this question Id."""
    return data.loc[data['Id'] == id, 'QuestionBody'].values
def recommend (question, num):
    """Print the top *num* questions most similar to *question*.

    Fits a TF-IDF model on the pre-filtered `clean_question` Series, scores
    *question* against it by cosine similarity, then prints the input Id,
    the recommended Ids, their scores, and the raw question bodies.

    NOTE(review): relies on the module-level `clean_question` and `data`
    built earlier in the notebook; returns None (output is print-only).
    """
    #fit a TF-IDF vectorizer
    tf_question_body = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0,max_features =500)
    #transform to matrixs
    tfidf_matrix = tf_question_body.fit_transform(clean_question)
    #clean input question
    Question_body_clean = trial_processing(question)
    QB_asSeries = pd.Series(Question_body_clean)
    #get transformed matrix
    QB_matrix = tf_question_body.transform(QB_asSeries)
    QB_trial1 = QB_matrix.tocsr().todense()
    #calculate the similarity score
    cosine_similarities = cosine_similarity(QB_trial1, tfidf_matrix)
    # indices of the num+1 highest-scoring questions; the input itself is
    # expected to rank first and is skipped below
    similarity_indices = cosine_similarities[0].argsort()[:-num-2:-1]
    #get the highest scores
    sim_score = [cosine_similarities[0][i] for i in similarity_indices]
    # map positions in `clean_question` back to row indices of the raw data
    sim_index = []
    for i in similarity_indices:
        sim_index.append(clean_question.index.values[i])
    #get ID
    sim_id = [(data['Id'][i]) for i in sim_index]
    # drop the first hit (the query itself); keep the next `num` ids
    result_id = sim_id[1:num+1]
    print("Input ID:")
    print(sim_id[0])
    print("\n")
    print("Recommend ID :")
    print(result_id)
    print("\n")
    print("Cosine Similarity Score:")
    print(sim_score[1:])
    print("\n")
    print("Recommending " + str(num) + " product similar to : \n" + Question_body_clean)
    print('\n')
    for i in result_id:
        print("Recommend: " + str(item(i)))
        print('\n')
recommend(question,5)
```
| github_jupyter |
# Use Decision Optimization to plan your diet with `ibm-watson-machine-learning`
This notebook facilitates Decision Optimization and Watson Machine Learning services. It contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. It also introduces commands for getting model and training data, persisting model, deploying model and scoring it.
Some familiarity with Python is helpful. This notebook uses Python 3.
## Learning goals
The learning goals of this notebook are:
- Load a DO model file into a Watson Machine Learning repository.
- Prepare data for training and evaluation.
- Create a DO machine learning job.
- Persist a DO model in the Watson Machine Learning repository.
- Deploy a model for batch scoring using the Watson Machine Learning API.
## Contents
This notebook contains the following parts:
1. [Setup](#setup)
2. [Download externally created DO model](#download)
3. [Persist externally created DO model](#persistence)
4. [Deploy](#deploy)
5. [Create job](#job)
6. [Clean up](#cleanup)
7. [Summary and next steps](#summary)
<a id="setup"></a>
## 1. Set up the environment
Before you use the sample code in this notebook, you must perform the following setup tasks:
- Contact with your Cloud Pack for Data administrator and ask him for your account credentials
### Connection to WML
Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `password`.
```
username = 'PASTE YOUR USERNAME HERE'
password = 'PASTE YOUR PASSWORD HERE'
url = 'PASTE THE PLATFORM URL HERE'
wml_credentials = {
"username": username,
"password": password,
"url": url,
"instance_id": 'openshift',
"version": '3.5'
}
```
### Install and import the `ibm-watson-machine-learning` package
**Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>.
```
!pip install -U ibm-watson-machine-learning
from ibm_watson_machine_learning import APIClient
client = APIClient(wml_credentials)
```
### Working with spaces
First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one.
- Click New Deployment Space
- Create an empty space
- Go to space `Settings` tab
- Copy `space_id` and paste it below
**Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Space%20management.ipynb).
**Action**: Assign space ID below
```
space_id = 'PASTE YOUR SPACE ID HERE'
```
You can use `list` method to print all existing spaces.
```
client.spaces.list(limit=10)
```
To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using.
```
client.set.default_space(space_id)
```
#### <a id="download"></a>
## 2. Download externally created DO model and data
In this section, you will download externally created DO models and data used for training it.
**Action**: Get your DO model.
```
!wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cpd3.5/models/decision_optimization/do-model.tar.gz \
-O do-model.tar.gz
model_path = 'do-model.tar.gz'
```
<a id="persistence"></a>
## 3. Persist externally created DO model
In this section, you will learn how to store your model in Watson Machine Learning repository by using the Watson Machine Learning Client.
### 3.1: Publish model
#### Publish model in Watson Machine Learning repository on Cloud.
Define the model name, author name and email.
Get software specification for DO model
```
sofware_spec_uid = client.software_specifications.get_uid_by_name("do_12.9")
```
Output data schema for storing model in WML repository
```
output_data_schema = [{'id': 'stest',
'type': 'list',
'fields': [{'name': 'age', 'type': 'float'},
{'name': 'sex', 'type': 'float'},
{'name': 'cp', 'type': 'float'},
{'name': 'restbp', 'type': 'float'},
{'name': 'chol', 'type': 'float'},
{'name': 'fbs', 'type': 'float'},
{'name': 'restecg', 'type': 'float'},
{'name': 'thalach', 'type': 'float'},
{'name': 'exang', 'type': 'float'},
{'name': 'oldpeak', 'type': 'float'},
{'name': 'slope', 'type': 'float'},
{'name': 'ca', 'type': 'float'},
{'name': 'thal', 'type': 'float'}]
}, {'id': 'teste2',
'type': 'test',
'fields': [{'name': 'age', 'type': 'float'},
{'name': 'sex', 'type': 'float'},
{'name': 'cp', 'type': 'float'},
{'name': 'restbp', 'type': 'float'},
{'name': 'chol', 'type': 'float'},
{'name': 'fbs', 'type': 'float'},
{'name': 'restecg', 'type': 'float'},
{'name': 'thalach', 'type': 'float'},
{'name': 'exang', 'type': 'float'},
{'name': 'oldpeak', 'type': 'float'},
{'name': 'slope', 'type': 'float'},
{'name': 'ca', 'type': 'float'},
{'name': 'thal', 'type': 'float'}]}]
model_meta_props = {
client.repository.ModelMetaNames.NAME: "LOCALLY created DO model",
client.repository.ModelMetaNames.TYPE: "do-docplex_12.9",
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid,
client.repository.ModelMetaNames.OUTPUT_DATA_SCHEMA: output_data_schema
}
published_model = client.repository.store_model(model=model_path, meta_props=model_meta_props)
```
**Note:** You can see that model is successfully stored in Watson Machine Learning Service.
### 3.2: Get model details
```
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
```
### 3.3 Get all models
```
client.repository.list_models()
```
<a id="deploy"></a>
## 4. Deploy
In this section you will learn how to create batch deployment to create job using the Watson Machine Learning Client.
You can use the commands below to create a batch deployment for the stored model (web service).
### 4.1: Create model deployment
```
meta_data = {
client.deployments.ConfigurationMetaNames.NAME: "deployment_DO",
client.deployments.ConfigurationMetaNames.BATCH: {},
client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S", "num_nodes": 1}
}
deployment_details = client.deployments.create(published_model_uid, meta_props=meta_data)
```
**Note**: Here we use the deployment URL saved in the `published_model` object. In the next section, we show how to retrieve the deployment URL from the Watson Machine Learning instance.
```
deployment_uid = client.deployments.get_uid(deployment_details)
```
Now, You can list all deployments.
```
client.deployments.list()
```
### 4.2: Get deployment details
```
client.deployments.get_details(deployment_uid)
```
<a id="job"></a>
## 5. Create job
You can create job to web-service deployment using `create_job` method.
Prepare test data
```
import pandas as pd
diet_food = pd.DataFrame([["Roasted Chicken", 0.84, 0, 10],
["Spaghetti W/ Sauce", 0.78, 0, 10],
["Tomato,Red,Ripe,Raw", 0.27, 0, 10],
["Apple,Raw,W/Skin", 0.24, 0, 10],
["Grapes", 0.32, 0, 10],
["Chocolate Chip Cookies", 0.03, 0, 10],
["Lowfat Milk", 0.23, 0, 10],
["Raisin Brn", 0.34, 0, 10],
["Hotdog", 0.31, 0, 10]], columns=["name", "unit_cost", "qmin", "qmax"])
diet_food_nutrients = pd.DataFrame([
["Spaghetti W/ Sauce", 358.2, 80.2, 2.3, 3055.2, 11.6, 58.3, 8.2],
["Roasted Chicken", 277.4, 21.9, 1.8, 77.4, 0, 0, 42.2],
["Tomato,Red,Ripe,Raw", 25.8, 6.2, 0.6, 766.3, 1.4, 5.7, 1],
["Apple,Raw,W/Skin", 81.4, 9.7, 0.2, 73.1, 3.7, 21, 0.3],
["Grapes", 15.1, 3.4, 0.1, 24, 0.2, 4.1, 0.2],
["Chocolate Chip Cookies", 78.1, 6.2, 0.4, 101.8, 0, 9.3, 0.9],
["Lowfat Milk", 121.2, 296.7, 0.1, 500.2, 0, 11.7, 8.1],
["Raisin Brn", 115.1, 12.9, 16.8, 1250.2, 4, 27.9, 4],
["Hotdog", 242.1, 23.5, 2.3, 0, 0, 18, 10.4]
], columns=["Food", "Calories", "Calcium", "Iron", "Vit_A", "Dietary_Fiber", "Carbohydrates", "Protein"])
diet_nutrients = pd.DataFrame([
["Calories", 2000, 2500],
["Calcium", 800, 1600],
["Iron", 10, 30],
["Vit_A", 5000, 50000],
["Dietary_Fiber", 25, 100],
["Carbohydrates", 0, 300],
["Protein", 50, 100]
], columns=["name", "qmin", "qmax"])
job_payload_ref = {
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [
{
"id": "diet_food.csv",
"values": diet_food
},
{
"id": "diet_food_nutrients.csv",
"values": diet_food_nutrients
},
{
"id": "diet_nutrients.csv",
"values": diet_nutrients
}
],
client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [
{
"id": ".*.csv"
}
]
}
```
Create job using Watson Machine Learning client
```
job = client.deployments.create_job(deployment_uid, meta_props=job_payload_ref)
```
Checking created job status and calculated KPI.
```
import time
job_id = client.deployments.get_job_uid(job)
# Poll the batch job every 10 s, giving up after 5 minutes (300 s).
elapsed_time = 0
while client.deployments.get_job_status(job_id).get('state') != 'completed' and elapsed_time < 300:
    elapsed_time += 10
    time.sleep(10)
if client.deployments.get_job_status(job_id).get('state') == 'completed':
    job_details_do = client.deployments.get_job_details(job_id)
    # The solved optimization model reports its objective as a KPI entry
    # in the solve-state details.
    kpi = job_details_do['entity']['decision_optimization']['solve_state']['details']['KPI.Total Calories']
    print(f"KPI: {kpi}")
else:
    print("Job hasn't completed successfully in 5 minutes.")
```
<a id="cleanup"></a>
## 6. Clean up
If you want to clean up all created assets:
- experiments
- trainings
- pipelines
- model definitions
- models
- functions
- deployments
please follow up this sample [notebook](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd3.5/notebooks/python_sdk/instance-management/Machine%20Learning%20artifacts%20management.ipynb).
<a id="summary"></a>
## 7. Summary and next steps
You successfully completed this notebook! You learned how to use DO as well as Watson Machine Learning for model creation and deployment.
Check out our _[Online Documentation](https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html)_ for more samples, tutorials, documentation, how-tos, and blog posts.
### Authors
**Wojciech Jargielo**, Software Engineer
Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
| github_jupyter |
# Kernel Derivatives
**Linear Operators and Stochastic Partial Differential Equations in GPR** - Simo Särkkä - [PDF](https://users.aalto.fi/~ssarkka/pub/spde.pdf)
> Expresses derivatives of GPs as operators
[**Demo Colab Notebook**](https://colab.research.google.com/drive/1pbb0qlypJCqPTN_cu2GEkkKLNXCYO9F2)
He looks at the special case where we have a GP with a zero mean function and a covariance matrix $K$ defined as:
$$
\mathbb{E}[f(\mathbf{x})f^\top(\mathbf{x'})] = K_{ff}(\mathbf{x,x'})
$$
So in GP terminology:
$$
f(\mathbf{x}) \sim \mathcal{GP}(\mathbf{0}, K_{ff}(\mathbf{x,x'}))
$$
We use the rulse for linear transformations of GPs to obtain the different transformations of the kernel matrix.
Let's define the notation for the derivative of a kernel matrix. Let $g(\cdot)$ be the derivative operator on a function $f(\cdot)$. So:
$$
g(\mathbf{x}) = \mathcal{L}_x f(\mathbf{x})
$$
So now, we want to define the cross operators between the derivative $g(\cdot)$ and the function $f(\cdot)$.
**Example**: He draws a distinction between the two operators with an example of how this works in practice. So let's take the linear operator $\mathcal{L}_{x}=(1, \frac{\partial}{\partial x})$. This operator:
* acts on a scalar GP $f(x)$
* a scalar input $x$
* a covariance function $k_{ff}(x,x')$
* outputs a scalar value $y$
We can get the following transformations:
$$
\begin{aligned}
K_{gf}(\mathbf{x,x'})
&= \mathcal{L}_x f(\mathbf{x}) f^\top(\mathbf{x'}) = \mathcal{L}_x K_{ff}(\mathbf{x,x'}) \\
K_{fg}(\mathbf{x,x'})
&= f(\mathbf{x}) f(\mathbf{x'}) \mathcal{L}_{x'} = K_{ff}(\mathbf{x,x'})\mathcal{L}_{x'} \\
K_{gg}(\mathbf{x,x'})
&= \mathcal{L}_x f(\mathbf{x}) f(\mathbf{x'}) \mathcal{L}_{x'}
= \mathcal{L}_xK_{ff}(\mathbf{x,x'})\mathcal{L}_{x'}^\top \\
\end{aligned}
$$
```
#@title Packages
import functools
import jax
import jax.numpy as jnp
import numpy as onp
from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn
# Plotting libraries
import matplotlib.pyplot as plt
plt.style.use(['seaborn-paper'])
#@title Plot Functions
def plot_kernel_mat(K):
    """Display the kernel (Gram) matrix *K* as a red heat map.

    The title is hard-coded for the RBF demo; this is a purely
    side-effecting helper (shows the figure, returns None).
    """
    # plot
    plt.figure()
    plt.imshow(K, cmap='Reds')
    plt.title(r'$K_{ff}$, (rbf)', fontsize=20, weight='bold')
    plt.tight_layout()
    plt.show()
#@title Data
def get_1d_data(N=30, sigma_inputs=0.15, sigma_obs=0.15, N_test=400):
    """Generate a noisy 1-D regression problem.

    Parameters
    ----------
    N : int
        Number of training points.
    sigma_inputs : float
        Std of the Gaussian jitter added to the input locations.
    sigma_obs : float
        Std of the Gaussian observation noise added to the targets.
    N_test : int
        Number of test input locations.

    Returns
    -------
    X : (N, 1) array of training inputs.
    Y : (N,) array of standardized targets.
    X_test : (N_test, 1) array of test inputs.
    """
    onp.random.seed(0)
    X = jnp.linspace(-10, 10, N)
    # Smooth nonlinear target, then observation noise and input jitter.
    Y = jnp.sin(1.0 * jnp.pi / 1.6 * jnp.cos(5 + .5 * X))
    Y += sigma_obs * onp.random.randn(N)
    X += sigma_inputs * onp.random.randn(N)
    # Standardize the targets.
    Y -= jnp.mean(Y)
    Y /= jnp.std(Y)
    X_test = jnp.linspace(-11, 11, N_test)
    X_test += sigma_inputs * onp.random.randn(N_test)
    # Column vectors for the kernel functions.
    X = X[:, None]
    # BUG FIX: the original did `X_test = X[:, None]`, silently returning the
    # training inputs as the test set and ignoring N_test entirely.
    X_test = X_test[:, None]
    assert X.shape == (N, 1)
    assert Y.shape == (N,)
    return X, Y, X_test
def get_2d_data(N=30, sigma_obs=0.15, N_test=400):
    """Generate a noisy regression problem with 2-D inputs.

    Returns
    -------
    X : (N, 2) training inputs (two linspace ranges stacked as columns).
    Y : (N,) standardized targets.
    X_test : (N_test, 2) test inputs over slightly wider ranges.
    """
    onp.random.seed(0)
    first_dim = jnp.linspace(-10, 10, N)
    second_dim = jnp.linspace(-5, 2, N)
    # Nonlinear signal plus Gaussian observation noise, then standardized.
    target = jnp.sin(1.0 * jnp.pi / 1.6 * jnp.cos(5 + .5 * first_dim)) + jnp.exp(second_dim)
    target = target + sigma_obs * onp.random.randn(N)
    target = target - jnp.mean(target)
    target = target / jnp.std(target)
    X = jnp.vstack((first_dim, second_dim)).T
    X_test = jnp.vstack((jnp.linspace(-11, 11, N_test),
                         jnp.linspace(-6, 4, N_test))).T
    assert X.shape == (N, 2)
    assert target.shape == (N,)
    return X, target, X_test
# Get Data
X, Y, X_test = get_1d_data(100, sigma_inputs=0.0, sigma_obs=0.1, N_test=100)
```
## Kernel Function
$$
\text{dist} = \sum_{i=1}^D (\mathbf{x_i} - \mathbf{y_i})^2
$$
```
#@title Kernel Functions
# Squared Euclidean distance between two points.
@jax.jit
def sqeuclidean_distance(x, y):
    """Sum of squared element-wise differences between x and y."""
    return jnp.sum(jnp.square(x - y))

# Gaussian / RBF kernel.
@jax.jit
def rbf_kernel(params, x, y):
    """exp(-gamma * ||x - y||^2) with gamma read from params['gamma']."""
    return jnp.exp(-params['gamma'] * sqeuclidean_distance(x, y))

# Full covariance (Gram) matrix between two sets of points.
def covariance_matrix(kernel_func, x, y):
    """Return K with K[i, j] = kernel_func(x[i], y[j]), built with two
    nested vmaps: over rows of x (axis 0) and rows of y (output axis 1)."""
    over_x = jax.vmap(lambda xi, yj: kernel_func(xi, yj), in_axes=(0, None), out_axes=0)
    over_xy = jax.vmap(lambda xs, yj: over_x(xs, yj), in_axes=(None, 0), out_axes=1)
    return over_xy(x, y)
```
#### RBF Kernel
```
X.shape
X, Y, X_test = get_2d_data(10, sigma_obs=0.1)
test_X = X[:1, :]
test_Y = X[:1, :]
rbf_x_sk = rbf_sklearn(
onp.array(test_X.reshape(1, -1)),
onp.array(test_Y.reshape(1, -1)),
gamma=1.0
)
print(rbf_x_sk.shape, test_X.shape)
params = {'gamma': 1.0, 'var_f': 1.0}
gamma = 1.0
rbf_k_ = functools.partial(rbf_kernel, params)
rbf_x = rbf_k_(
test_X.squeeze(),
test_Y.squeeze()
)
onp.testing.assert_array_almost_equal(onp.array(rbf_x), rbf_x_sk)
```
### Kernel Matrix
We defined all of our functions above with only dimensions in mind, not the number of samples or the batch size. So we need to account for that. So if we wanted to calculate the kernel matrix, we would have to loop through all of the samples and calculate the products individually, which would take a long time; especially for large amounts of data.
> Avoid Loops at all cost in python...
Fortunately, Jax has this incredible function `vmap` which handles batching automatically at apparently, no extra cost. So we can write our functions to account for vectors without having to care about the batch size and then use the `vmap` function to essentially "vectorize" our functions. It essentially allows us to take a product between a matrix and a sample or two vectors of multiple samples. Let's go through an example of how we can construct our kernel matrix.
1. We need to map all points with one vector to another.
We're going to take a single sample from $X'$ and take the rbf kernel between it and all of $X$. So:
$$\text{vmap}_f(\mathbf{X}, \mathbf{x})$$
where $X\in \mathbb{R}^{N \times D}$ is a matrix and $\mathbf{x} \in \mathbb{R}^{D}$ is a vector.
```
# Gram Matrix
def gram(func, x, y):
    """Gram matrix K[i, j] = func(x[i], y[j]) via two nested vmaps."""
    row_of = lambda xi: jax.vmap(lambda yj: func(xi, yj))(y)
    return jax.vmap(row_of)(x)
# map function 1: kernel between every row of the first argument and one fixed vector
mapx1 = jax.vmap(lambda x, y: rbf_kernel(params, x, y), in_axes=(0, None), out_axes=0)
# test the mapping
x1_mapped = mapx1(X, X[0, :])
# Check output shapes, # of dimensions
assert x1_mapped.shape[0] == X.shape[0]
assert jnp.ndim(x1_mapped) == 1
```
That's good: we have an array of size $N$. So we've effectively mapped all points from one array to the other.
So now we can do another vector mapping which allows us to take all samples of $X'$ and map them against all samples of $X$. So it'll be a `vmap` of a `vmap`. Then we'll get the $N\times N$ kernel matrix.
```
# second vmap: broadcast the row-map over every sample of the second argument,
# filling axis 1 of the output -> full N x N kernel matrix
mapx2 = jax.vmap(lambda x, y: mapx1(x, y), in_axes=(None, 0), out_axes=1)
K = mapx2(X, X)
# Check output shapes, # of dimensions
assert K.shape[0] == X.shape[0], X.shape[0]
assert jnp.ndim(K) == 2
# compare against the sklearn kernel matrix
rbf_x_sk = rbf_sklearn(X, X, 1.0)
onp.testing.assert_array_almost_equal(onp.array(rbf_x_sk), K)
```
So great! We now have our kernel matrix. Let's plot it and check to see if it matches the manually constructed kernel matrix.
Great! We have a vectorized kernel function and we were still able to construct our functions in terms of vectors only! This is nice for me personally because I've always struggled with understanding some of the coding when trying to deal with samples/batch-sizes. Most pseudo-code is written in vector format, so the paper $\rightarrow$ code transition has always been painful for me. So now, let's wrap this in a nice function so that we can finally "wrap up" this model.
```
X, Y, X_test = get_2d_data(10, sigma_obs=0.1)
# full dataset this time (not a single sample)
test_X = X.copy()#[:2, :]
test_Y = X.copy() #[:2, :]
# sklearn reference kernel matrix
rbf_x_sk = rbf_sklearn(
    onp.array(test_X),
    onp.array(test_Y),
    gamma=1.0
)
params = {'gamma': 1.0, 'var_f': 1.0}
rbf_k_ = functools.partial(rbf_kernel, params)
# kernel matrix via the vmap-based helper
rbf_x = covariance_matrix(
    rbf_k_,
    test_X,
    test_Y
)
onp.testing.assert_array_almost_equal(onp.array(rbf_x), rbf_x_sk)
plot_kernel_mat(rbf_x)
#@title Tests
# k(x, x) = exp(0) = 1 for the RBF kernel
kx = rbf_kernel(params, X[0], X[0])
# check, the output should be 1.0
assert kx == 1.0, f"Output: {kx}"
# distinct points give a strictly smaller kernel value
kx = rbf_kernel(params, X[0], X[1])
# check, the output should NOT be 1.0
assert kx != 1.0, f"Output: {kx}"
# dk_dx = drbf_kernel(gamma, X[0], X[0])
# # check, the output should be 0.0
# assert dk_dx == 0.0, f"Output: {dk_dx}"
# dk_dx = drbf_kernel(gamma, X[0], X[1])
# # check, the output should NOT be 0.0
# assert dk_dx != 0.0, f"Output: {dk_dx}"
#@title Speed Test
# Covariance Matrix
def covariance_matrix(kernel_func, x, y):
    """Build the kernel matrix K[i, j] = kernel_func(x[i], y[j]).

    The inner vmap evaluates the kernel of a fixed x-row against every
    row of `y`; the outer vmap repeats that for every row of `x`.
    """
    over_y = jax.vmap(kernel_func, in_axes=(None, 0), out_axes=0)
    return jax.vmap(lambda xi: over_y(xi, y), out_axes=0)(x)
def gram(func, x, y):
    """Gram matrix K[i, j] = func(x[i], y[j]).

    Bug fix: the original vmapped with the arguments swapped
    (outer vmap over `y`, inner over `x`), producing K[i, j] = func(y[i], x[j])
    — the transpose for any asymmetric `func` (e.g. derivative kernels) and
    inconsistent with both `covariance_matrix` and the earlier `gram`
    definition. It only agreed with `covariance_matrix` in the speed test
    because test_X == test_Y and the RBF kernel is symmetric.
    """
    return jax.vmap(lambda x1: jax.vmap(lambda y1: func(x1, y1))(y))(x)
# jit-compiled kernel-matrix builders for the speed comparison
rbf_K = functools.partial(rbf_kernel, params)
rbf_cov = jax.jit(functools.partial(covariance_matrix, rbf_K))
rbf_x = rbf_cov(test_X, test_Y)
rbf_cov2 = jax.jit(functools.partial(gram, rbf_K))
rbf_x2 = rbf_cov2(test_X, test_Y)
# both constructions should produce the same kernel matrix here
# (test_X == test_Y and the RBF kernel is symmetric)
onp.testing.assert_array_almost_equal(onp.array(rbf_x), onp.array(rbf_x2))
%timeit _ = rbf_cov(test_X, test_Y)
%timeit _ = rbf_cov2(test_X, test_Y)
```
Seems like they are comparable and there is no real difference.
## 1. Cross-Covariance Term - 1st Derivative
We can calculate the cross-covariance term $K_{fg}(\mathbf{x,x})$. We apply the following operation
$$
K_{fg}(x,x') = k_{ff}(\mathbf{x,x'})(1, \frac{\partial}{\partial x'})
$$
Applying the derivative component of the operator to the kernel gives:
$$
K_{fg}(x,x') = \frac{\partial k_{ff}(\mathbf{x,x'})}{\partial x'}
$$
For the RBF Kernel, it's this:
$$\frac{\partial k(x,y)}{\partial x^j}=-2 \gamma (x^j - y^j) k(x,y)$$
Note: I did the derivations from scratch.
### Single Sample
```
# fresh 1-d dataset; two distinct samples for the derivative checks below
X, Y, X_test = get_1d_data(10, sigma_obs=0.1)
test_X = X[0:1, :]
test_Y = X[1:2, :]
```
#### From Scratch
From scratch, we're going to be using loops. There are more efficient ways to implement this but it's harder to mess up loops and it's also clearer what's going on. Tricks with broadcasting are often hard to read and very hard to interpret because of the change in dimensions.
```
def drbf_kernel_scratch(gamma, X, Y):
    """Jacobian of the RBF kernel w.r.t. x, one entry per feature.

    Implements d k(x, y) / d x_j = -2 * gamma * (x_j - y_j) * k(x, y)
    for a single pair of samples X, Y of shape (1, n_features).
    Returns an array of shape (n_features,).
    """
    # initialize matrix
    dK_fg_ = onp.empty(X.shape[-1])
    constant = - 2 * gamma
    # calculate kernel matrix w. sklearn kernel
    k_val = rbf_sklearn(onp.array(X), onp.array(Y), gamma=gamma)
    # loop through features/dimensions
    for idim in range(X.shape[1]):
        x_val = X[:, idim] - Y[:, idim]
        # size-1 array assigns cleanly into the scalar slot
        dK_fg_[idim] = constant * k_val * x_val
    return dK_fg_
# reference Jacobian for the single-sample pair above
dK_fg_ = drbf_kernel_scratch(gamma, test_X, test_Y)
```
#### Jax
For Jax, we're going to use the built-in Jacobian function. **Note**: this function only allows us to take the derivative of functions that output a scalar value.
```
# evaluate the kernel on a single pair of points
rbf_kernel(params, test_X[0,:], test_Y[0,:])
# define the cross operator K_fg(x, y), dK wrt x
# (the original referenced `drbf_kernel_fg` one line BEFORE this definition,
# which raises NameError on first execution)
drbf_kernel_fg = jax.jacobian(rbf_kernel, argnums=(1))
# calculate for a single sample
dK_fg = drbf_kernel_fg(params, test_X[0,:], test_Y[0,:])
# check they're the same elementwise (the original compared
# dK_fg.all() == dK_fg_.all(), two reduced booleans — a vacuous test)
onp.testing.assert_array_almost_equal(onp.array(dK_fg), dK_fg_)
```
### Multiple Dimensions
Now, we have the same problem but for a vector $\mathbf{x}$ instead of a single sample $x$. In this example, $\mathbf{x}$ has 2 features, $\mathbf{x} \in \mathbb{R}^2$. We're still going to do it for a single sample.
```
# generate some data
X, Y, X_test = get_2d_data(10, sigma_obs=0.1)
# extract a single sample
test_X = X[0:1, :]
test_Y = X[1:2, :]
```
#### From Scratch
```
# reference Jacobian (2 features) from the loop-based implementation
dK_fg_ = drbf_kernel_scratch(gamma, test_X, test_Y)
```
#### Jax
```
# define the cross operator K_fg(x, y), dK wrt x
drbf_kernel_fg = jax.jacobian(rbf_kernel, argnums=(1))
# calculate for a single sample
dK_fg = drbf_kernel_fg(params, test_X[0,:], test_Y[0,:])
# check they're the same elementwise (the original compared
# dK_fg.all() == dK_fg_.all(), two reduced booleans — a vacuous test)
onp.testing.assert_array_almost_equal(onp.array(dK_fg), dK_fg_)
```
### Multiple Samples (Batches)
Now, we're going to input a matrix $\mathbf{X}$ which are stacked samples of multiple features. So $\mathbf{X} \in \mathbb{R}^{N\times D}$.
```
# full dataset: derivative kernel between all pairs of samples
X, Y, X_test = get_2d_data(10, sigma_obs=0.1)
test_X = X
test_Y = X
```
#### From Scratch
```
# reference: loop over all (i, j) sample pairs; output shape (N, N, D)
dK_fg_ = onp.empty((test_X.shape[0], test_X.shape[0], test_X.shape[1]))
for i in range(test_X.shape[0]):
    for j in range(test_Y.shape[0]):
        dK_fg_[i, j, :] = drbf_kernel_scratch(gamma, onp.array(test_X[i, :]).reshape(1,-1), onp.array(test_Y[j, :]).reshape(1, -1))
```
#### Jax
```
# define the cross operator K_fg(x, y), dK wrt x
drbf_kernel_fg = jax.jacobian(rbf_kernel, argnums=(1))
# fix the kernel hyperparameters
K_func = functools.partial(drbf_kernel_fg, params)
# calculate kernel matrix over all sample pairs, shape (N, N, D)
dK_fg = gram(K_func, test_X, test_Y)
# check against the loop-based reference
onp.testing.assert_array_almost_equal(onp.array(dK_fg), dK_fg_)
```
## 2. Cross-Covariance Term - 2nd Derivative
Recall the 1st derivative is:
$$\frac{\partial k(x,y)}{\partial x^j}=-2 \gamma (x^j - y^j) k(x,y)$$
So now we repeat. First we decompose the function using the product rule:
$$
\begin{aligned}
\frac{\partial^2 k(x,y)}{\partial x^{j^2}} &=
-2 \gamma (x^j - y^j) \frac{\partial }{\partial x^j} k(x,y) + k(x,y) \frac{\partial }{\partial x^j} \left[ -2 \gamma (x^j - y^j) \right]\\
\end{aligned}
$$
The first term is basically the 1st Derivative squared and the 2nd term is a constant. So after applying the derivative and simplifying, we get:
$$
\begin{aligned}
\frac{\partial^2 k(x,y)}{\partial x^{j^2}} &=
4 \gamma^2 (x^j - y^j)^2 k(x,y) -2 \gamma k(x,y)\\
&=
\left[ 4\gamma^2(x^j - y^j)^2 - 2\gamma\right] k(\mathbf{x}, \mathbf{y}) \\
&=
2 \gamma \left[ 2\gamma(x^j - y^j)^2 - 1\right] k(\mathbf{x}, \mathbf{y}) \\
\end{aligned}
$$
#### From Scratch
Recall, this is a Jacobian so we have
```
def d2rbf_kernel_scratch_jac(gamma, X, Y):
    """Second derivative (Hessian diagonal) of the RBF kernel w.r.t. x.

    Implements d^2 k / d x_j^2 = 2*gamma * (2*gamma*(x_j - y_j)^2 - 1) * k(x, y)
    for a single pair of samples of shape (1, n_features).
    Returns an array of shape (n_features,).
    """
    d2K_fg2_ = onp.empty(X.shape[-1])
    constant = 2 * gamma
    k_val = rbf_sklearn(onp.array(X), onp.array(Y), gamma=gamma)
    for idim in range(X.shape[1]):
        # compute the xterm: 2 gamma (xj - yj)^2
        x_val = constant * (X[:, idim] - Y[:, idim]) ** 2 - 1
        # compute the derivative term
        d2K_fg2_[idim] = constant * x_val * k_val
    return d2K_fg2_
# initialize matrix: second derivatives for all (i, j) sample pairs, shape (N, N, D)
d2K_fg2_ = onp.empty((test_X.shape[0], test_X.shape[0], test_X.shape[1]))
for i in range(test_X.shape[0]):
    for j in range(test_Y.shape[0]):
        d2K_fg2_[i, j, :] = d2rbf_kernel_scratch_jac(gamma, onp.array(test_X[i, :]).reshape(1,-1), onp.array(test_Y[j, :]).reshape(1, -1))
```
#### Jax
So with jax, we're computing the hessian so we'll get a matrix of size $N \times N \times D \times D$. So the 2nd derivative is just the diagonal terms of $D\times D$ part.
```
# define the cross operator K_fg(x, y), dK wrt x
# jax.hessian gives the full (N, N, D, D) tensor over the batch
dK_fg_func = jax.hessian(rbf_kernel, argnums=(1))
# fix params for kernel function
K_func = functools.partial(dK_fg_func, params)
# calculate kernel matrix
d2K_fg2 = covariance_matrix(K_func, test_X, test_Y)
# get the diagonal terms (the pure second derivatives)
d2K_fg2 = jnp.diagonal(d2K_fg2, axis1=2, axis2=3)
d2K_fg2.shape
# the Hessian diagonal should match the loop-based reference
onp.testing.assert_array_almost_equal(onp.array(d2K_fg2), d2K_fg2_)
```
Awesome, they're the same! So that gives me hope!
## 3. Cross-Covariance Term - 2nd Derivative (Partial Derivatives)
Recall the 1st derivative is:
$$\frac{\partial k(x,y)}{\partial x^j}=-2 \gamma (x^j - y^j) k(x,y)$$
So now we repeat. First we decompose the function using the product rule. But this time, we need to do the product rule first w.r.t. $x^j$ and then w.r.t. $y^k$.
$$
\begin{aligned}
\frac{\partial^2 k(x,y)}{\partial x^j y^k} &=
-2 \gamma (x^j - y^j) \frac{\partial }{\partial y^k} k(x,y) + k(x,y) \frac{\partial }{\partial y^k} \left[ -2 \gamma (x^j - y^j) \right]\\
\end{aligned}
$$
So now let's start expanding and collapsing terms:
$$
\begin{aligned}
\frac{\partial^2 k(x,y)}{\partial x^j y^k} &=
4 \gamma^2 (x^j - y^j)(x^k - y^k) k(x,y) \\
\end{aligned}
$$
The second term should go to zero and the first term is the same except it has different dimensions (w.r.t. $y$ instead of $x$).
$$
\frac{\partial^2 k(x,y)}{\partial x^j \partial y^k} =
4 \gamma^2 (x^k - y^k)(x^j - y^j) k(\mathbf{x}, \mathbf{y})
$$
#### From Scratch
```
def d2rbf_kernel_scratch_hessian(gamma, X, Y):
    """Mixed second derivatives of the RBF kernel, one entry per feature pair.

    Implements 4*gamma^2 * (x_i - y_i) * (x_j - y_j) * k(x, y) for a single
    pair of samples of shape (1, n_features); output shape (D, D).
    NOTE(review): per the discussion below, this disagrees with the jax
    Hessian off the diagonal — confirm the intended operator (d/dx d/dy
    vs d^2/dx^2) before relying on the off-diagonal entries.
    """
    d2K_fg2_ = onp.empty((X.shape[-1], X.shape[-1]))
    constant = 2 * gamma
    constant_sq = constant ** 2
    k_val = rbf_sklearn(onp.array(X), onp.array(Y), gamma=gamma)
    for idim in range(X.shape[1]):
        for jdim in range(X.shape[1]):
            # x_val = constant * (1 - constant * (X[:, idim] - Y[:, idim]) * (X[:, jdim] - Y[:, jdim]))# - constant
            x_val = constant_sq * (X[:, idim] - Y[:, idim]) * (X[:, jdim] - Y[:, jdim])
            d2K_fg2_[idim, jdim] = k_val * x_val
    return d2K_fg2_
# full reference tensor over all (i, j) sample pairs, shape (N, N, D, D)
d2K_fg2_ = onp.empty((test_X.shape[0], test_X.shape[0], test_X.shape[1], test_X.shape[1]))
for i in range(test_X.shape[0]):
    for j in range(test_Y.shape[0]):
        d2K_fg2_[i, j, ...] = d2rbf_kernel_scratch_hessian(gamma, onp.array(test_X[i, :]).reshape(1,-1), onp.array(test_Y[j, :]).reshape(1, -1))
```
#### Jax
```
# define the cross operator K_fg(x, y), dK wrt x
dK_fg_func = jax.hessian(rbf_kernel, argnums=(1))
K_func = functools.partial(dK_fg_func, params)
d2K_fg2 = covariance_matrix(K_func, test_X, test_Y)
d2K_fg2.shape
# NOTE(review): this compares d2K_fg2's diagonal with ITSELF (numpy vs jax
# diagonal of the same array), so it always passes. Presumably one side was
# meant to be the scratch reference d2K_fg2_ — verify the intended check.
onp.testing.assert_array_almost_equal(onp.array(onp.diagonal(d2K_fg2, axis1=2, axis2=3 )), jnp.diagonal(d2K_fg2, axis1=2, axis2=3))
```
So this is good. The diagonal terms are correct but the off-diagonal entries are incorrect. I'm not entirely sure how to fix this. I can't point to the part of the equation where you would actually calculate the off-diagonal entries
```
# NOTE(review): per the surrounding discussion the off-diagonal entries
# differ, so this assertion is expected to fail as written.
onp.testing.assert_array_almost_equal(onp.array(d2K_fg2), d2K_fg2_, decimal=4)
```
| github_jupyter |
```
# %%
import math

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.animation import FuncAnimation
from scipy.stats import bernoulli
from svgpathtools import svg2paths
from svgpath2mpl import parse_path
# matplotlib parameters to ensure correctness of Chinese characters
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif']=['Arial Unicode MS', 'SimHei'] # Chinese font
plt.rcParams['axes.unicode_minus']=False # correct minus sign
plt.rcParams["font.size"] = 20
plt.rcParams["xtick.labelsize"] = 24
plt.rcParams["ytick.labelsize"] = 24
np.random.seed(1901)
xx, yy = np.meshgrid(np.arange(100), np.arange(100)) #(100,100)
indices = np.random.choice(np.arange(100000), 5, replace=False) # sample 5 indices without replacement from a 1-d array
mask = np.zeros(100000).astype(bool)
# mask[indices] = True
# fig, ax = plt.subplots(1,1, figsize=(10,10), dpi=150)
# ax.scatter(xx.flatten()[mask],yy.flatten()[mask], s=40, color='k', marker='x' )
# ax.scatter(xx.flatten()[~mask],yy.flatten()[~mask],s=20, color='k', )
# ax.set_xlim(-1,100)
# ax.set_ylim(-1,100)
# %%
# simulate which vehicles file an insurance claim (probability ~0.05% each)
rvar = np.random.rand(mask.shape[0])
test_res = np.zeros_like(mask)
for i in range(mask.shape[0]):
    test_res[i] = rvar[i] >= 0.9995 # this vehicle files a claim
# force a couple of early claims so they appear in the animation
test_res[77]=True
test_res[333]=True
sum(test_res==True)
def gen_marker(fname):
    """Load an SVG file and turn its first path into a matplotlib marker.

    The path is centered on its centroid, then rotated 180 degrees and
    mirrored horizontally so it renders upright in scatter plots.
    """
    _, svg_attributes = svg2paths(fname)
    marker = parse_path(svg_attributes[0]['d'])
    marker.vertices -= marker.vertices.mean(axis=0)
    marker = marker.transformed(mpl.transforms.Affine2D().rotate_deg(180))
    marker = marker.transformed(mpl.transforms.Affine2D().scale(-1, 1))
    return marker
# build the car marker and preview it on a small 4x4 grid
car_marker = gen_marker('figures/car.svg')
xx, yy = np.meshgrid(np.arange(4), np.arange(4))
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.scatter(xx.flatten(),yy.flatten(), s=2000, color='k', marker=car_marker )
ax.axis('off')
class UpdateDist:
    """Per-frame update callable for the car-insurance FuncAnimation.

    ax0: grid of car markers, recolored as vehicles file claims.
    ax:  horizontal bar chart of log10 claim / no-claim counts.
    ax1: cumulative profit curve (premium income minus claim payouts).
    Reads the module-level `test_res` claim indicators and `car_marker`.
    """
    def __init__(self, ax0, ax, ax1, patient_mask):
        self.success = 0
        self.ax0 = ax0
        # 40 x 10 grid of car markers
        xn, yn = 40, 10
        xx, yy = np.meshgrid(np.arange(xn), np.arange(yn))
        # indices = np.random.choice(np.arange(int(xn*yn)), int(xn*yn/100), replace=False)
        self.mask_plt = patient_mask[:int(xn*yn)]
        #self.sc_patient = ax0.scatter(xx.flatten()[self.mask_plt],yy.flatten()[self.mask_plt], s=2000, facecolor=[1,0,0,1], marker=patient_marker)
        self.sc_car = ax0.scatter(xx.flatten()[~self.mask_plt],yy.flatten()[~self.mask_plt],s=2000, facecolor=[0,32./255,96./255,1], marker=car_marker)
        # per-car RGBA colors; masked entries are drawn red
        self.color = np.tile([0,32./255,96./255,1],(int(xn*yn),1))
        self.color[self.mask_plt,:] = [1,0,0,1]
        self.ax0.set_xlim(-1,xn)
        self.ax0.set_ylim(-1,yn)
        self.ax0.invert_yaxis()
        self.rects = ax.barh([1,2], [0,0], ) # horizontal bar chart
        for rec, color in zip(self.rects, ( [228./255,131./255,18./255,1], [0,176./255,80./255,1] )):
            rec.set_color(color)
        self.ax = ax
        self.ax.set_yticks([1,2])
        self.ax.set_yticklabels(["", ""])
        # x-axis label: "log of the number of insured vehicles"
        self.ax.set_xlabel("投保车辆数对数"+r'$log_{10}(x)$', fontsize=30)
        # self.ax.set_yticklabels(["检测阳性新冠患者","检测阳性健康人群", "检测阴性"], fontsize=20)
        # self.ax.text(-0.130, 0.17, "新冠患者", transform=self.ax.transAxes, fontsize=25, color='r', )
        # self.ax.text(-0.130, 0.27, "检测阳性", transform=self.ax.transAxes, fontsize=25, color=[228./255,131./255,18./255,1], )
        # self.ax.text(-0.130, 0.43, "健康人群", transform=self.ax.transAxes, fontsize=25, color=[0,32./255,96./255,1], )
        # bar labels: "number of claims" / "number of non-claims"
        self.ax.text(-0.170, 0.21, "索赔车辆数", transform=self.ax.transAxes, fontsize=25, color=[228./255,131./255,18./255,1], )
        self.ax.text(-0.190, 0.47, "未索赔车辆数", transform=self.ax.transAxes, fontsize=25, color=[0,176./255,80./255,1], )
        # self.ax.set_yticklabels(["True Positive","False Positive", "Negative"], fontsize=20)
        # self.ax.set_xlabel("Number of people", fontsize=20)
        # Set up plot parameters
        self.ax.set_ylim(0, 4)
        self.ax.set_xlim(0, 6)
        self.ax.spines['top'].set_visible(False)
        self.ax.spines['right'].set_visible(False)
        self.line, =ax1.plot([],[])
        self.ax1 = ax1
        self.ax1.set_xlim([0,100000])
        self.ax1.set_ylim([-200,200])
        # y label: "profit"; x label: "insured vehicles"
        self.ax1.set_ylabel("盈利金额", fontsize=25)
        self.ax1.set_xlabel("投保车辆", fontsize=30)
        # self.ax1.set_xlabel('Number of people tested', fontsize=20)
        # self.ax1.set_ylabel('Test accuracy', fontsize=20)
        self.ax1.spines['top'].set_visible(False)
        self.ax1.spines['right'].set_visible(False)
        #self.ax1.axhline(175.0, linestyle='--', color='black')#0.01*0.9/(0.01*0.9+0.99*0.09)=0.0917
        # This vertical line represents the theoretical value, to
        # which the plotted distribution should converge.
    def __call__(self, i):
        """Advance the animation to frame `i`; returns the bar artists.

        Requires `math` to be importable (used for the log10 bar widths).
        """
        # This way the plot can continuously run and we just keep
        # watching new realizations of the process
        if i == 0:
            # frame 0: reset the bars and start a fresh profit curve
            self.success = 0
            for rect, h in zip(self.rects, [0,0]):
                rect.set_width(h)
            self.line, = self.ax1.plot([], [], lw=5, color='r')
            return self.rects
        # Choose success based on exceed a threshold with a uniform pick
        # if np.random.rand(1,) < self.prob:
        #     self.success += 1
        # y = beta_pdf(self.x, self.success + 1, (i - self.success) + 1)
        # self.line.set_data(self.x, y)
        if i <= 100:
            # early frames: add 4 vehicles per frame
            self.ax.set_xlim([0,6])
            n_inc = 4
            # update histogram
            negative = np.sum(~test_res[:n_inc*(i+1)]) # vehicles without a claim
            positive = np.sum(test_res[:n_inc*(i+1)]) # vehicles with a claim
            for rect, h in zip(self.rects, [math.log(positive+1,10),math.log(negative+1,10)]):
                rect.set_width(h)
            # update curve
            self.ax1.set_xlim([0,500])
            self.ax1.set_ylim([-50000,100000])
            self.ax1.plot([0,500], [0,175*500], lw=3, ls="--",color='black')
            xdata, ydata = self.line.get_data()
            if len(xdata) == 0:
                xdata = [0]
                ydata = [(negative*200.0-50000.0*positive)]
            else:
                xdata = np.append(xdata, xdata[-1]+n_inc)
                ydata = np.append(ydata,(negative*200.0-50000.0*positive) )
            self.line.set_data(xdata, ydata)
        else:
            # later frames: add 1000 vehicles per frame
            self.ax.set_xlim([0,6])
            self.ax1.set_xlim([0,100000])
            self.ax1.set_ylim([0,200*1000*i])
            self.ax1.plot([0,100000], [0,175*100000], lw=3, ls="--",color='black')
            n_inc = 1000
            # update histogram
            negative = np.sum(~test_res[:401+n_inc*(i-99)])
            positive = np.sum(test_res[:401+n_inc*(i-99)])
            for rect, h in zip(self.rects, [math.log(positive,10),math.log(negative,10)]):
                rect.set_width(h)
            # update curve
            xdata, ydata = self.line.get_data()
            xdata = np.append(xdata, xdata[-1]+n_inc)
            ydata = np.append(ydata, (negative*200.0-50000.0*positive))
            self.line.set_data(xdata, ydata)
        # update scatter facecolor
        if i <= 100:
            n_inc = 4
            for j in range(n_inc):
                # idx = i-1
                idx = (i-1)*n_inc+j
                self.color[idx,:] = [228./255,131./255,18./255,1] if test_res[idx] else [0,176./255,80./255,1]
            self.sc_car.set_facecolor(self.color)
        return self.rects
# assemble the figure: car grid on top, bar chart and profit curve below
fig = plt.figure(figsize=(30,17),dpi=100)
spec1 = gridspec.GridSpec(ncols=1, nrows=1, left=0.04, right=0.96, top=0.98, bottom=0.38, figure=fig)
ax0 = fig.add_subplot(spec1[0])
ax0.axis('off')
spec2 = gridspec.GridSpec(ncols=2, nrows=1, left=0.08, right=0.92, top=0.32, bottom=0.08, wspace=0.15, figure=fig)
ax1 = fig.add_subplot(spec2[0])
ax2 = fig.add_subplot(spec2[1])
# render 198 frames and encode to mp4
ud = UpdateDist(ax0, ax1, ax2, mask)
anim = FuncAnimation(fig, ud, frames=198, blit=True)
anim.save('car_movie3.mp4', fps=10, dpi=200, codec='libx264', bitrate=-1, extra_args=['-pix_fmt', 'yuv420p'])
```
| github_jupyter |
(sec:hmm-ex)=
# Hidden Markov Models
In this section, we introduce Hidden Markov Models (HMMs).
## Boilerplate
```
# Install necessary libraries
# import-or-install pattern for notebook environments
# NOTE(review): the bare `except:` clauses swallow any import-time error,
# not just a missing module.
try:
    import jax
except:
    # For cuda version, see https://github.com/google/jax#installation
    %pip install --upgrade "jax[cpu]"
    import jax
try:
    import jsl
except:
    %pip install git+https://github.com/probml/jsl
    import jsl
try:
    import rich
except:
    %pip install rich
    import rich
# Import standard libraries
import abc
from dataclasses import dataclass
import functools
import itertools
from typing import Any, Callable, NamedTuple, Optional, Union, Tuple
import matplotlib.pyplot as plt
import numpy as np
import jax
import jax.numpy as jnp
from jax import lax, vmap, jit, grad
from jax.scipy.special import logit
from jax.nn import softmax
from functools import partial
from jax.random import PRNGKey, split
import inspect
import inspect as py_inspect
from rich import inspect as r_inspect
from rich import print as r_print
def print_source(fname):
    """Pretty-print the source code of a callable using rich formatting."""
    source_text = py_inspect.getsource(fname)
    r_print(source_text)
```
## Utility code
```
def normalize(u, axis=0, eps=1e-15):
    '''
    Normalizes the values within the axis in a way that they sum up to 1.

    Zero entries stay zero; strictly positive entries smaller than `eps`
    are clamped up to `eps` first. A zero column sum is replaced by 1 so
    the division is always safe.

    Parameters
    ----------
    u : array
    axis : int
    eps : float
        Threshold for the alpha values

    Returns
    -------
    * array
        Normalized version of the given matrix
    * array(seq_len, n_hidden) :
        The values of the normalizer
    '''
    clamped = jnp.where(u == 0, 0, jnp.where(u < eps, eps, u))
    normalizer = clamped.sum(axis=axis)
    safe_normalizer = jnp.where(normalizer == 0, 1, normalizer)
    return clamped / safe_normalizer, safe_normalizer
```
(sec:casino-ex)=
## Example: Casino HMM
We first create the "Occasionally dishonest casino" model from {cite}`Durbin98`.
```{figure} /figures/casino.png
:scale: 50%
:name: casino-fig
Illustration of the casino HMM.
```
There are 2 hidden states, each of which emit 6 possible observations.
```
# state transition matrix (rows = current state, cols = next state)
A = np.array([
    [0.95, 0.05],
    [0.10, 0.90]
])
# observation matrix (rows = hidden state, cols = die face probabilities)
B = np.array([
    [1/6, 1/6, 1/6, 1/6, 1/6, 1/6], # fair die
    [1/10, 1/10, 1/10, 1/10, 1/10, 5/10] # loaded die
])
# uniform initial state distribution
pi, _ = normalize(np.array([1, 1]))
pi = np.array(pi)
(nstates, nobs) = np.shape(B)
```
Let's make a little data structure to store all the parameters.
We use NamedTuple rather than dataclass, since we assume these are immutable.
(Also, standard python dataclass does not work well with JAX, which requires parameters to be
pytrees, as discussed in https://github.com/google/jax/issues/2371).
```
class HMM(NamedTuple):
    """Immutable container for HMM parameters (works as a JAX pytree)."""
    trans_mat: jnp.array # A : (n_states, n_states)
    obs_mat: jnp.array # B : (n_states, n_obs)
    init_dist: jnp.array # pi : (n_states)
params = HMM(A, B, pi)
print(params)
print(type(params.trans_mat))
```
## Sampling from the joint
Let's write code to sample from this model. First we code it in numpy using a for loop. Then we rewrite it to use jax.lax.scan, which is faster.
```
def hmm_sample_numpy(params, seq_len, random_state=0):
    """Sample a (state_seq, obs_seq) pair of length `seq_len` from the HMM.

    Plain numpy implementation: at each step draw the hidden state from the
    current state distribution, then draw the observation from that state's
    emission row. Returns two integer arrays of shape (seq_len,).
    """
    np.random.seed(random_state)
    trans_mat, obs_mat, init_dist = params.trans_mat, params.obs_mat, params.init_dist
    n_states, n_obs = obs_mat.shape
    latent_domain = np.arange(n_states)
    obs_domain = np.arange(n_obs)
    states, observations = [], []
    state_dist = init_dist
    for _ in range(seq_len):
        # draw z_t, then x_t | z_t (same RNG call order as the scan version)
        z_t = np.random.choice(a=latent_domain, p=state_dist)
        x_t = np.random.choice(a=obs_domain, p=obs_mat[z_t])
        states.append(z_t)
        observations.append(x_t)
        state_dist = trans_mat[z_t]
    return np.array(states, dtype=int), np.array(observations, dtype=int)
# draw a length-20 sequence with the numpy sampler and inspect it
seq_len = 20
state_seq, obs_seq = hmm_sample_numpy(params, seq_len, random_state=0)
print(state_seq)
print(obs_seq)
```
Now let's write a JAX version.
```
#@partial(jit, static_argnums=(1,))
def hmm_sample(params, seq_len, rng_key):
    """Sample a (state_seq, obs_seq) pair of length `seq_len` from the HMM.

    JAX version of `hmm_sample_numpy`: the state chain is generated with
    `jax.lax.scan`, then all observations are drawn in parallel with `vmap`.
    Returns two arrays of shape (seq_len,).
    """
    trans_mat, obs_mat, init_dist = params.trans_mat, params.obs_mat, params.init_dist
    n_states, n_obs = obs_mat.shape
    # z_1 ~ init_dist
    initial_state = jax.random.categorical(rng_key, logits=logit(init_dist), shape=(1,))
    obs_states = jnp.arange(n_obs)
    def draw_state(prev_state, key):
        # Row `prev_state` of trans_mat holds p(z_t | z_{t-1}).  The original
        # indexed the COLUMN (trans_mat[:, prev_state]), which disagrees with
        # `hmm_sample_numpy` above (it uses trans_mat[zt]) whenever A is not
        # symmetric — e.g. the casino A defined in this notebook.
        logits = logit(trans_mat[prev_state])
        state = jax.random.categorical(key, logits=logits.flatten(), shape=(1,))
        return state, state
    rng_key, rng_state, rng_obs = jax.random.split(rng_key, 3)
    keys = jax.random.split(rng_state, seq_len - 1)
    # scan over the remaining seq_len - 1 transitions
    final_state, states = jax.lax.scan(draw_state, initial_state, keys)
    state_seq = jnp.append(jnp.array([initial_state]), states)
    def draw_obs(z, key):
        # x_t ~ obs_mat[z_t]
        obs = jax.random.choice(key, a=obs_states, p=obs_mat[z])
        return obs
    keys = jax.random.split(rng_obs, seq_len)
    obs_seq = jax.vmap(draw_obs, in_axes=(0, 0))(state_seq, keys)
    return state_seq, obs_seq
# draw a length-20 sequence with the JAX sampler and inspect it
seq_len = 20
state_seq, obs_seq = hmm_sample(params, seq_len, PRNGKey(1))
print(state_seq)
print(obs_seq)
```
| github_jupyter |
```
#default_exp config
#hide
%load_ext autoreload
%autoreload 2
%load_ext line_profiler
#export
import torch
import datetime
import warnings
#hide
from fastcore.test import test_fail
```
# Config
Here we define a class `Config` to hold hyperparameters and global variables.
Design from https://github.com/cswinter/DeepCodeCraft/blob/master/hyper_params.py
```
#export
class Config:
    """Hyperparameters and global settings for a paraphrase-attack run.

    Instantiate with `Config()`; `__init__` sets defaults and then applies
    dataset-specific overrides via the `adjust_config_for_*` methods.
    """
    def __init__(self):
        """Set up default parameters"""
        ### Models and datasets
        # PP options
        # 1. tuner007/pegasus_paraphrase (2.12 GB)
        # 2. prithivida/parrot_paraphraser_on_T5 (850 MB)
        # 3. ramsrigouthamg/t5-large-paraphraser-diverse-high-quality (2.75 GB)
        self.pp_name = "prithivida/parrot_paraphraser_on_T5"
        self.dataset_name = "rotten_tomatoes"
        # STS options
        # 1. sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
        # 2. sentence-transformers/paraphrase-MiniLM-L12-v2
        self.sts_name = "sentence-transformers/paraphrase-MiniLM-L12-v2"
        # NLI options
        # 1. microsoft/deberta-base-mnli (~512 MB)
        # 2. howey/electra-small-mnli
        self.nli_name = "howey/electra-small-mnli"
        self._select_vm_model()
        ### Important parameters
        self.seed = 420
        self.use_small_ds = False
        self.sampling_strategy = "sample" # "sample" or "greedy"
        self.lr = 4e-5
        self.reward_fn = "reward_fn_contradiction_and_letter_diff"
        self.reward_clip_max = 3
        self.reward_clip_min = 0
        self.reward_base = 0
        self.reward_vm_multiplier = 12
        self.sts_threshold = 0.6
        self.contradiction_threshold = 0.2
        self.pp_letter_diff_threshold = 30
        self.reward_penalty_type = "ref_logp" # "kl_div" or "ref_logp"
        self.kl_coef = 0.2 # only used if reward_penalty_type == "kl_div"
        self.ref_logp_coef = 0.05 # only used if reward_penalty_type == "ref_logp"
        # generation settings passed to the paraphrase model
        self.pp = {
            "do_sample": False if self.sampling_strategy == "greedy" else True,
            "min_length": 4,
            "max_length": 48,
            "temperature": 0.7,
            "top_p": 0.98,
            "length_penalty" : 1.,
            "repetition_penalty": 1.
        }
        # Other parameters (usually left untouched)
        self.orig_max_length = 32 # longest for pegasus is 60, longest for Parrot is 32
        self.pin_memory = True
        self.zero_grad_with_none = False
        self.pad_token_embeddings = False
        self.embedding_padding_multiple = 8
        self.orig_padding_multiple = 8 # pad input to multiple of this
        self.bucket_by_length = True
        self.shuffle_train = False
        self.remove_misclassified_examples = True
        self.remove_long_orig_examples = True
        self.unfreeze_last_n_layers = "all" #counting from the back. set to "all" to do no layer freezing, else set to an int
        ### Used for testing
        self.n_shards = None
        self.shard_contiguous = None
        ### Logging parameters
        self.save_model_while_training = False
        self.save_model_freq = 10
        ### W&B parameters
        self.wandb = dict(
            project = "travis_attack",
            entity = "uts_nlp",
            mode = "disabled", # set to "disabled" to turn off wandb, "online" to enable it
            log_grads = False,
            log_grads_freq = 1, # no effect if wandb_log_grads is False
            log_token_entropy = True,
            log_token_probabilities = True,
            run_notes = f""
        )
        ### Devices and GPU settings
        #### TODO: do you need this with accelerator? does this handle the post-processing analytics too?
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.devicenum = torch.cuda.current_device() if self.device.type == 'cuda' else -1
        # When not using Accelerator
        #n_wkrs = 4 * torch.cuda.device_count()
        # When using Accelerator
        self.n_wkrs = 0
        ## Globals
        self.splits = ['train', 'valid', 'test']
        self.metrics = [ 'loss', 'pp_logp', 'ref_logp', 'kl_div', 'reward_with_penalty', 'reward', 'vm_score', "sts_score", 'label_flip', 'contradiction_score', 'pp_letter_diff']
        self.path_data = "./data/"
        self.path_checkpoints = "../model_checkpoints/travis_attack/"
        self.path_run = None # keep as None; this is automatically filled out by Trainer class
        self.path_data_cache = "/data/tproth/.cache/huggingface/datasets/"
        self.path_logs = f"./logs/"
        self.path_logfile = self.path_logs + f"run_{datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.txt"
        # Adjust config depending on dataset.
        if self.dataset_name == "simple": self.adjust_config_for_simple_dataset()
        elif self.dataset_name == "rotten_tomatoes": self.adjust_config_for_rotten_tomatoes_dataset()
        elif self.dataset_name == "financial": self.adjust_config_for_financial_dataset()
        # Checks
        self._validate_n_epochs()
    def _select_vm_model(self):
        """Pick the victim-model checkpoint matching the current dataset."""
        if self.dataset_name in ["rotten_tomatoes", "simple"]: self.vm_name = "textattack/distilbert-base-uncased-rotten-tomatoes"
        elif self.dataset_name == "financial": self.vm_name = "mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis"
    def adjust_config_for_simple_dataset(self):
        """Adjust config for the simple dataset."""
        self.dataset_name = "simple"
        self.orig_cname = "text"
        self.label_cname = 'label'
        self.batch_size_train = 4
        self.batch_size_eval = 4
        self.acc_steps = 2
        self.n_train_epochs = 10
        self.eval_freq = 5
        self._select_vm_model()
        return self
    def adjust_config_for_rotten_tomatoes_dataset(self):
        """Adjust config for the rotten_tomatoes dataset."""
        self.dataset_name = "rotten_tomatoes"
        self.orig_cname = "text"
        self.label_cname = 'label'
        self.batch_size_train = 32
        self.batch_size_eval = 32
        self.acc_steps = 2
        self.n_train_epochs = 10
        self.eval_freq = 1
        self._select_vm_model()
        return self
    def adjust_config_for_financial_dataset(self):
        """Adjust config for the financial dataset."""
        self.dataset_name = "financial"
        self.orig_cname = "sentence"
        self.label_cname = 'label'
        self.batch_size_train = 16
        self.batch_size_eval = 32
        self.acc_steps = 2
        self.n_train_epochs = 4
        self.eval_freq = 1
        self._select_vm_model()
        return self
    def small_ds(self):
        """Adjust the config to use a small dataset (for testing purposes).
        Not possible when using the simple dataset. """
        if self.dataset_name == "simple":
            raise Exception("Don't shard when using the simple dataset (no need)")
        self.use_small_ds = True # for testing purposes
        self.n_shards = 100
        self.shard_contiguous = False
        return self
    def _validate_n_epochs(self):
        """Raise if n_train_epochs is not a whole multiple of eval_freq."""
        if self.n_train_epochs % self.eval_freq != 0:
            raise Exception("Set n_train_epochs to a multiple of eval_freq so there are no leftover epochs.")
    def using_t5(self):
        """Return True when the paraphrase model is one of the T5-based options."""
        return self.pp_name in ["prithivida/parrot_paraphraser_on_T5", "ramsrigouthamg/t5-large-paraphraser-diverse-high-quality"]
#hide
# export the cells tagged #export in this notebook into the library module
from nbdev.export import notebook2script
notebook2script()
```
## Usage
### Basics
The easiest way is to edit the variables in the config object as you please and then initialise the config object. This will first initialise a set of default values as specified in `__init__()`. Next it calls the methods `adjust_config_for_simple_dataset()` or `adjust_config_for_rotten_tomatoes_dataset()` to overwrite some of these defaults with dataset-specific variables.
Once ready, call `cfg = Config()` and access values as attributes of `cfg`. For example:
```
# instantiate the default config and inspect a few values
cfg = Config()
print("Dataset name: ", cfg.dataset_name)
print("Number of train epochs: ", cfg.n_train_epochs)
print("Batch size for train?: ", cfg.batch_size_train)
print("Max paraphrase length?: ", cfg.pp['max_length'])
```
You can also manually specify which dataset to use by calling the `adjust_config_...` functions yourself. This is useful for writing test cases.
```
# manually select the "simple" dataset configuration
cfg = Config().adjust_config_for_simple_dataset()
print("Dataset name: ", cfg.dataset_name)
print("Number of train epochs: ", cfg.n_train_epochs)
print("Batch size for train?: ", cfg.batch_size_train)
print("Max paraphrase length?: ", cfg.pp['max_length'])
# and the rotten_tomatoes configuration
cfg = Config().adjust_config_for_rotten_tomatoes_dataset()
print("Dataset name: ", cfg.dataset_name)
print("Number of train epochs: ", cfg.n_train_epochs)
print("Batch size for train?: ", cfg.batch_size_train)
print("Max paraphrase length?: ", cfg.pp['max_length'])
```
You can use `vars(cfg)` to get all parameters as a dict:
```
# dump every config attribute as a dict for inspection
from pprint import pprint
pprint(vars(cfg))
```
### Using a small dataset for testing
If you want to do testing on a small dataset you can chain on `use_small_ds()` to adjust the config accordingly.
```
# shrink the dataset (sharding) for quick testing runs
cfg = Config().adjust_config_for_rotten_tomatoes_dataset().small_ds()
print("Dataset name: ", cfg.dataset_name)
print("Number of train epochs: ", cfg.n_train_epochs)
print("Batch size for train?: ", cfg.batch_size_train)
print("Max paraphrase length?: ", cfg.pp['max_length'])
print("Using small dataset?", cfg.use_small_ds)
print("How many shards?", cfg.n_shards)
```
This functionality is disabled for the simple dataset because we only have 4 data points for each split.
```
# small_ds() is expected to raise for the simple dataset
test_fail(Config().adjust_config_for_simple_dataset().adjust_config_for_simple_dataset().small_ds)
```
| github_jupyter |
ERROR: type should be string, got "https://docs.python.org/2/library/stdtypes.html\n\n5.1. Truth Value Testing\nAny object can be tested for truth value, for use in an if or while condition or as operand of the Boolean operations below. The following values are considered false:\n\n* None\n\n* False\n\n* zero of any numeric type, for example, 0, 0L, 0.0, 0j.\n\n* any empty sequence, for example, '', (), [].\n\n* any empty mapping, for example, {}.\n\n* instances of user-defined classes, if the class defines a __nonzero__() or __len__() method, when that method returns the integer zero or bool value False. [1]\n\nAll other values are considered true — so objects of many types are always true.\n\nOperations and built-in functions that have a Boolean result always return 0 or False for false and 1 or True for true, unless otherwise stated. (Important exception: the Boolean operations or and and always return one of their operands.)\n\n```\nprint('@when using False and None, they are always refer to same object, see ids below:')\nprint('id(False)', id(False), ', id(None)', id(None))\nprint('id(False)', id(False), ', id(None)', id(None))\nprint('id(False)', id(False), ', id(None)', id(None))\n```\n\n## Mutable vs Immutable Objects in Python\nhttps://medium.com/@meghamohan/mutable-and-immutable-side-of-python-c2145cf72747\n\nmutable object can be changed after it is created, and an immutable object can’t.\n\n* Objects of built-in types like (int, float, bool, str, tuple, unicode) are immutable. \n* Objects of built-in types like (list, set, dict) are mutable. 
\n* Custom classes are generally mutable.\n* To simulate immutability in a class, one should override attribute setting and deletion to raise exceptions.\n\n### Mutable objects\nObjects with same value may have diffent ids\n\n```\nprint('@when using empty mutable data type, they refer to differnt object, see ids below:')\nprint('Run 1: id(list()):', id(list()), ', id(dict()):', id(dict()), ', id(set()):', id(set())) \nprint('Run 2: id(list()):', id(list()), ', id(dict()):', id(dict()), ', id(set()):', id(set())) \nprint('Run 2: id(list()):', id(list()), ', id(dict()):', id(dict()), ', id(set()):', id(set())) \nprint('Run 4: id(list()):', id([]), ', id(dict()):', id({}))\nprint('Run 5: id(list()):', id([]), ', id(dict()):', id({}))\nprint('Run 6: id(list()):', id([]), ', id(dict()):', id({}))\nprint()\nprint('@but python will reuse same object if possible to be more efficient, e.g. :')\nprint('Run 1: id(list()):', id(list()), id([]), ', id(dict()):', id(dict()), id({}))\n```\n\n### Mutable objects evaluated as False\n\n* set() or {}\n* list() or [] \n* dict()\n\n```\n## print the truth values of the empty objects from these data types\nprint('@Truth values:',not not set(), not not {}, not not list(), not not [], not not dict())\nprint('@use two not operators, don\\'t use this: \\nExammple Code: print(1 and set())')\nprint('Output: \\'%s\\' is an object, not boolean value:' %(1 and set()))\n```\n\n### Immutable objects evaluated as False\n\n* zero of any numeric type\n* tuple()\n\n```\nprint('@Truth values:',not not 0, not not 0.0, not not complex(0), bool(0))\nprint('@Truth values:',not not tuple(), not not ())\n## empty tuple has same id\nprint(id(tuple()))\nprint('check follow:')\nprint(id(tuple()))\nprint(id(tuple()))\nprint(id(tuple()))\nprint(id(tuple()))\nprint(id(tuple()))\nprint(id(tuple()))\n```\n\n### Other objects evaluated as False\n* range(0)\n\n```\nprint(not not range(0))\n```\n\n" | github_jupyter |
## self-attention-cv : illustration of a training process with subvolume sampling for 3d segmentation
The dataset can be found here: https://iseg2019.web.unc.edu/ . i uploaded it and mounted from my gdrive
```
# Mount Google Drive so the dataset archive stored there is reachable from Colab.
from google.colab import drive
drive.mount('/gdrive')
import zipfile
# Path to the iSeg-2019 training archive on the mounted drive.
root_path = '/gdrive/My Drive/DATASETS/iSeg-2019-Training.zip'
!echo "Download and extracting folders..."
# Extract the archive into the Colab working directory.
# NOTE(review): a `with zipfile.ZipFile(...)` block would close the handle even on error.
zip_ref = zipfile.ZipFile(root_path, 'r')
zip_ref.extractall("./")
zip_ref.close()
!echo "Finished"
# torchio: patch-based medical-image loading; self-attention-cv: the model library used below.
!pip install torchio
!pip install self-attention-cv
```
## Training example
```
# Build one torchio Subject per case (T1 + T2 inputs, label map), then sample
# random 3-D patches from the transformed volumes through a buffered queue.
import glob
import torchio as tio
import torch
from torch.utils.data import DataLoader
# Sorting keeps the T1/T2/label file lists aligned case-by-case.
paths_t1 = sorted(glob.glob('./iSeg-2019-Training/*T1.img'))
paths_t2 = sorted(glob.glob('./iSeg-2019-Training/*T2.img'))
paths_seg = sorted(glob.glob('./iSeg-2019-Training/*label.img'))
assert len(paths_t1) == len(paths_t2) == len(paths_seg)
subject_list = []
for pat in zip(paths_t1, paths_t2, paths_seg):
    path_t1, path_t2, path_seg = pat
    subject = tio.Subject(t1=tio.ScalarImage(path_t1,),
                          t2=tio.ScalarImage(path_t2,),
                          label=tio.LabelMap(path_seg))
    subject_list.append(subject)
# Intensity rescale to [0, 1] plus random affine augmentation, applied per subject.
transforms = [tio.RescaleIntensity((0, 1)),tio.RandomAffine() ]
transform = tio.Compose(transforms)
subjects_dataset = tio.SubjectsDataset(subject_list, transform=transform)
patch_size = 24          # cubic subvolume edge length fed to the model
queue_length = 300       # max number of patches buffered by the queue
samples_per_volume = 50  # patches drawn per subject per epoch
sampler = tio.data.UniformSampler(patch_size)
patches_queue = tio.Queue(
    subjects_dataset,
    queue_length,
    samples_per_volume,sampler, num_workers=1)
patches_loader = DataLoader(patches_queue, batch_size=16)
from self_attention_cv.Transformer3Dsegmentation import Transformer3dSeg
def crop_target(img, target_size):
    """Crop a target_size^3 region around the volume center of channel 0.

    img is a 5-D tensor (batch, channel, D, H, W); only channel 0 (the label
    volume) is kept. The crop spans [center - target_size//2 - 1,
    center + target_size//2) on each spatial axis, matching the region the
    3-D transformer predicts, and is cast to long for CrossEntropyLoss.
    """
    half = target_size // 2
    mid = img.shape[-1] // 2
    lo, hi = mid - half - 1, mid + half
    cropped = img[:, 0, lo:hi, lo:hi, lo:hi]
    return cropped.long()
# Model/optimizer setup and the patch-based training loop.
target_size = 3 # as in the paper
patch_dim = 8
num_epochs = 50
num_classes = 4
model = Transformer3dSeg(subvol_dim=patch_size, patch_dim=patch_dim,
                         in_channels=2, blocks=2, num_classes=num_classes).cuda()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
print(len(patches_loader))
for epoch_index in range(num_epochs):
    epoch_loss = 0
    for c,patches_batch in enumerate(patches_loader):
        optimizer.zero_grad()
        # T1 and T2 modalities are concatenated on the channel axis -> in_channels=2.
        input_t1 = patches_batch['t1'][tio.DATA]
        input_t2 = patches_batch['t2'][tio.DATA]
        input_tensor = torch.cat([input_t1, input_t2], dim=1).cuda()
        logits = model(input_tensor) # 8x8x8 the 3d transformer-based approach
        # for the 3d transformer-based approach the target must be cropped again to the desired size
        targets = patches_batch['label'][tio.DATA]
        cropped_target = crop_target(targets, target_size).cuda()
        loss = criterion(logits, cropped_target)
        loss.backward()
        optimizer.step()
        epoch_loss = epoch_loss+loss.cpu().item()
    # BUG FIX: `c` ends as the last 0-based batch index, so dividing by `c`
    # over-estimated the mean epoch loss; the batch count is c + 1.
    print(f'epoch {epoch_index} loss {epoch_loss/(c + 1)}')
```
## Inference
```
import torch
import torch.nn as nn
import torchio as tio
patch_overlap = 0
# Two patch sizes: 24^3 subvolumes go into the model, but the model only
# predicts the central 3^3 region, so the output grid uses target_patch_size.
patch_size = 24, 24, 24
target_patch_size = 3
#input sampling
grid_sampler = tio.inference.GridSampler(subject_list[0], patch_size, patch_overlap)
patch_loader = torch.utils.data.DataLoader(grid_sampler, batch_size=4)
# target vol sampling: the aggregator stitches 3^3 label patches back into a full volume
grid_sampler_target = tio.inference.GridSampler(subject_list[0], target_patch_size, patch_overlap)
aggregator = tio.inference.GridAggregator(grid_sampler_target)
target_loader = torch.utils.data.DataLoader(grid_sampler_target, batch_size=4)
model.eval()
with torch.no_grad():
    # Iterate input patches and target locations in lockstep; the two grid
    # samplers were built over the same subject so they stay aligned.
    for patches_batch,target_patches in zip(patch_loader,target_loader):
        input_t1 = patches_batch['t1'][tio.DATA]
        input_t2 = patches_batch['t2'][tio.DATA]
        input_tensor = torch.cat([input_t1, input_t2], dim=1).float().cuda()
        locations = target_patches[tio.LOCATION]
        logits = model(input_tensor)
        # argmax over the class channel -> hard labels for aggregation
        labels = logits.argmax(dim=tio.CHANNELS_DIMENSION, keepdim=True)
        outputs = labels
        aggregator.add_batch(outputs.type(torch.int32), locations)
        print('output tensor shape:',outputs.shape)
output_tensor = aggregator.get_output_tensor()
print(output_tensor.shape)
```
| github_jupyter |
```
"""Simple tutorial following the TensorFlow example of a Convolutional Network.
Parag K. Mital, Jan. 2016"""
# %% Imports
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
from libs.utils import *
# %% Setup input to the network and true output label. These are
# simply placeholders which we'll fill in later.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# %% Since x is currently [batch, height*width], we need to reshape to a
# 4-D tensor to use it in a convolutional graph. If one component of
# `shape` is the special value -1, the size of that dimension is
# computed so that the total size remains constant. Since we haven't
# defined the batch dimension's shape yet, we use -1 to denote this
# dimension should not change size.
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
# %% We'll setup the first convolutional layer
# Weight matrix is [height x width x input_channels x output_channels]
filter_size = 5
n_filters_1 = 16
W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1])
# %% Bias is [output_channels]
b_conv1 = bias_variable([n_filters_1])
# %% Now we can build a graph which does the first layer of convolution:
# we define our stride as batch x height x width x channels
# instead of pooling, we use strides of 2 and more layers
# with smaller filters.
h_conv1 = tf.nn.relu(
tf.nn.conv2d(input=x_tensor,
filter=W_conv1,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv1)
# %% And just like the first layer, add additional layers to create
# a deep net
n_filters_2 = 16
W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2])
b_conv2 = bias_variable([n_filters_2])
h_conv2 = tf.nn.relu(
tf.nn.conv2d(input=h_conv1,
filter=W_conv2,
strides=[1, 2, 2, 1],
padding='SAME') +
b_conv2)
# %% We'll now reshape so we can connect to a fully-connected layer:
h_conv2_flat = tf.reshape(h_conv2, [-1, 7 * 7 * n_filters_2])
# %% Create a fully-connected layer:
n_fc = 1024
W_fc1 = weight_variable([7 * 7 * n_filters_2, n_fc])
b_fc1 = bias_variable([n_fc])
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# %% And finally our softmax layer:
W_fc2 = weight_variable([n_fc, 10])
b_fc2 = bias_variable([10])
y_pred = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)
# %% Monitor accuracy
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
# %% We now create a new session to actually perform the initialization the
# variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% We'll train in minibatches and report accuracy:
batch_size = 100
n_epochs = 5
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={
x: batch_xs, y: batch_ys, keep_prob: 0.5})
print(sess.run(accuracy,
feed_dict={
x: mnist.validation.images,
y: mnist.validation.labels,
keep_prob: 1.0
}))
# %% Let's take a look at the kernels we've learned
W = sess.run(W_conv1)
plt.imshow(montage(W / np.max(W)), cmap='coolwarm')
```
| github_jupyter |
## PHYS 105A: Introduction to Scientific Computing
# Random Numbers and Monte Carlo Methods
Chi-kwan Chan
* In physical science, students very often start with the concept that everything can be done exactly and deterministically.
* This can be one of the biggest misconceptions!
* Many physical processes are non-deterministic by nature. Examples include:
* Radioactive decay
* Quantum mechanics
* Sometimes, even when the governing equations are deterministic, the results are still non-deterministic. Examples include:
* 3-body problem
* Brownian motion
* Thermal noise
* Therefore, in computer we need some way to model these non-deterministic systems.
* For electronic computers, all operations are deterministic.
* Nevertheless, we can approximate random processes by *pseudo* random number generators.
* These pseudo random number generators can then be used to model non-deterministic systems.
* In addition, people started to realize, even for deterministic problems, randomizing them can still be a very powerful numerical method! Applications include
* Numerical integration of high-dimensional space
* Statistical inference
* This has resulted in a large number of random-number-based numerical methods.
* Monte Carlo is an area of Monaco well known for its world-famous Place du Casino.
* Monte Carlo methods are used to refer to random number based numerical methods.

```
# To understand what a pseudo random number generator is, let's build one.
# This is a linear congruential generator (LCG) -- NOT for real applications.
mynext = 1  # generator state; assigning to it re-seeds the sequence

def myrand():
    """Advance the LCG state and return a pseudo-random integer in [0, 32768)."""
    global mynext
    mynext = mynext * 1103515245 + 12345
    # Equivalent to (mynext // 65536) % 32768: shift off the low 16 bits,
    # then mask down to 15 bits.
    return (mynext >> 16) & 0x7FFF

# Largest value myrand() can return, analogous to C's RAND_MAX.
MYRAND_MAX = 32768-1

# The odd-looking multiplier and increment are the classic ANSI C choices; see
# https://en.wikipedia.org/wiki/Linear_congruential_generator
# if you are interested.
# Now, every time we run `myrand()`, we will get a different number
myrand()
# Or we may print many of them at once:
Rs = [myrand() for i in range(100)]
print(Rs)
# We may even plot the random numbers as a 100x100 image
from matplotlib import pyplot as plt
plt.imshow([[myrand() for i in range(100)] for j in range(100)])
# Sometimes it is useful to make sure your random number sequence remains the same.
# In our case, you may notice that we can simply reset the `mynext` global variable to reset the sequence.
# The value you put in `mynext` is often called the "seed".
print('The following two lists are not the same:')
print([myrand() for i in range(10)])
print([myrand() for i in range(10)])
print('We may ensure that they are the same by "seeding" the random number generator with a fixed value:')
mynext = 1234
print([myrand() for i in range(10)])
mynext = 1234
print([myrand() for i in range(10)])
```
* The above random number generator is very simple and is the *sample* implementation in many ANSI C libraries!
* However, because of how the standard was written, this creates a lot of problems.
* The standard only requires RAND_MAX to be at least 32767. If one wants to evaluate 1e6 points (which is pretty small, as we will see below), you will actually be evaluating the same 32768 points about 30 times each!
* Some implementations "tried" to improve the algorithm, e.g., swapping the lower and higher bytes. But these tricks sometimes ruin the generator!
* We mentioned that integrating high-dimension space is an important application of Monte Carlo methods. However, the above random number generator creates correlations in k-space.
* Thankfully, `python`'s random number generator is based on the "more reliable" [Mersenne Twister algorithm](https://en.wikipedia.org/wiki/Mersenne_Twister).
* From now on, unless for demonstration purposes, we will use python's built-in random number generators.
```
# Let's now try python's random number library (Mersenne Twister based)
import random as rnd
print(rnd.random()) # return a random float in the range [0,1)
print(rnd.randrange(100)) # return a random int in the range [0, stop)
print(rnd.randint(a=0,b=99)) # return a random int in the inclusive range [a, b]
print(rnd.gauss(mu=0, sigma=1)) # sample from a Gaussian distribution
# We may plot histograms of the results of these random number generators
Rs = [rnd.random() for i in range(1000)]
plt.hist(Rs)
Rs = [rnd.randrange(100) for i in range(1000)]
plt.hist(Rs)
Rs = [rnd.gauss(0, 1) for i in range(1000)]
plt.hist(Rs)
# There is also a seed() function; seeding makes the sequence reproducible
rnd.seed(1234)
print([rnd.randrange(100) for i in range(10)])
rnd.seed(1234)
print([rnd.randrange(100) for i in range(10)])
```
* Once we have a (pseudo) random number generator, we are ready to develop Monte Carlo methods!
* We will start with a simple example of random walk. The model is very simple:
* We start with a (drunk) person at the center of the street.
* As the person steps forward along +t, they also randomly step left (+1) or right (-1).
* The problem is: after n steps, how far is the person from the center of the street?
```
# We may set up this problem in the following way:
T = range(1, 1000+1)
X = [0] # initial position
for t in T:
    last = X[-1] # last position
    r = rnd.randint(0,1) # we generate 0 or 1 randomly
    if r == 0: # depending on r, we step left or right
        curr = last + 1
    else:
        curr = last - 1
    X.append(curr) # append the current position to the list X
# We may plot this random walk (position vs. step number)
plt.plot(X)
# Awesome!
# But in order to find out how random walk behave statistically,
# we want to be able to run many simulations!
# It is convenient to define a function
def randomwalk(n_steps=1000):
    """Simulate a 1-D random walk of n_steps steps starting from 0.

    Draws one rnd.randint(0, 1) per step and maps 0 -> +1, 1 -> -1.
    Returns the list of n_steps + 1 visited positions (including the start).
    """
    positions = [0]
    for _ in range(n_steps):
        step = 1 if rnd.randint(0, 1) == 0 else -1
        positions.append(positions[-1] + step)
    return positions
# And we can use this function in another loop.
for i in range(10):
    plt.plot(randomwalk())
# We may now ask how far away the person would walk depending on the number of steps.
D = []
for t in T:
    X = randomwalk(t)
    D.append(abs(X[-1]))
plt.plot(D)
# Clearly, the distance gets farther as the number of steps increases.
# But this figure is too noisy to read off the dependency.
# There are multiple ways to make the above figure less noisy.
# One way is to simply do multiple numerical experiments for the same number of steps.
# And obtain the average distance.
n_trials = 100
D = []
for t in T:
    M = 0
    for trial in range(n_trials):
        X = randomwalk(t)
        M += abs(X[-1])
    M /= n_trials
    D.append(M)
plt.plot(D)
# The plot is much better!
# Later in the class, we will learn how to fit a curve.
# But for now, let's simply plot this in log-log scale.
# And compare it with the law of diffusion D ~ sqrt(T)
plt.loglog(T, D)
plt.plot(T, [t**0.5 for t in T])
```
* You may use this simple random walk model to model real physical process.
* For example, the Brownian motion, which describe how pollen is randomly pushed by water molecules.

* Einstein published a paper on Brownian motion in 1905, which is one of his first major scientific contributions.
```
# The simplest model of Brownian motion is simply a two-dimensional random walk.
X = randomwalk()
Y = randomwalk()
plt.figure(figsize=(12,12))
plt.plot(X, Y)
plt.gca().set_aspect('equal')
# The resulting plot looks slightly funny because the random walk forces x and y to each move exactly one step.
# The final outcome is that the particle can only move in diagonal directions.
# But this artifact becomes irrelevant when we model the random walk for many, many more steps.
X = randomwalk(100000)
Y = randomwalk(100000)
plt.figure(figsize=(12,12))
plt.plot(X, Y)
plt.gca().set_aspect('equal')
# Here is a physics question: how far does a Brownian motion particle move as a function of time?
```
| github_jupyter |
<a href="https://colab.research.google.com/github/IMOKURI/wandb-demo/blob/main/WandB_Baseline_Image.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 📔 About this notebook
Image classification baseline.
## 📝 Memo
# Check Environment
```
# Report memory, Python version, GPU and CUDA toolkit of the runtime.
!free -m
!python --version
!nvidia-smi
!nvcc --version
```
# Prepare for Kaggle
- Add dataset [imokuri/wandbtoken](https://www.kaggle.com/imokuri/wandbtoken)
# Prepare for Colab
```
# One-time environment setup: extract the dataset (Colab only) and install
# the extra libraries, guarded by a marker file so re-running the cell is a no-op.
import os
import sys
import zipfile
if os.path.exists('init.txt'):
    print("Already initialized.")
else:
    if 'google.colab' in sys.modules:
        from google.colab import drive
        drive.mount('/content/drive')
        dataset_dir = "/content/drive/MyDrive/Datasets"
        # ====================================================
        # Competition datasets
        # ====================================================
        with zipfile.ZipFile(f"{dataset_dir}/cassava-leaf-disease-classification-2021.zip", "r") as zp:
            zp.extractall(path="./")
    # for StratifiedGroupKFold
    # !pip install -q -U scikit-learn
    # for MultilabelStratifiedKFold
    # !pip install -q iterative-stratification
    # for CosineAnnealingWarmupRestarts
    # !pip install -qU 'git+https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup'
    !pip install -q wandb
    # !pip install -q optuna
    # ====================================================
    # Competition specific libraries
    # ====================================================
    !pip install -q timm
    !pip install -q albumentations==0.4.6
    # marker file: presence means this cell already ran
    !touch init.txt
```
# 📚 Library
```
# General libraries
import collections
import glob
import json
import math
import os
import random
import re
import statistics
import time
import warnings
from contextlib import contextmanager
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import torch.nn.functional as F
import wandb
# from cosine_annealing_warmup import CosineAnnealingWarmupRestarts
# from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold # , StratifiedGroupKFold
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
from torch.utils.data import DataLoader, Dataset
from tqdm.notebook import tqdm
# Competition specific libraries
import albumentations as A
import cv2
import timm
from albumentations.pytorch import ToTensorV2
warnings.filterwarnings("ignore")
# Copy the wandb credentials file and log in.
# NOTE(review): `sys` is not imported in this cell -- this line relies on the
# earlier "Prepare for Colab" cell having run first.
netrc = "/content/drive/MyDrive/.netrc" if 'google.colab' in sys.modules else "../input/wandbtoken/.netrc"
!cp -f {netrc} ~/
!wandb login
wandb_tags = []
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    # Record the GPU model as a wandb run tag.
    wandb_tags.append(torch.cuda.get_device_name(0))
```
# Load Data
```
DATA_DIR = "./" if 'google.colab' in sys.modules else "../input/xxx/"
OUTPUT_DIR = "./"
MODEL_DIR = "./models/"
!rm -rf {MODEL_DIR}
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(MODEL_DIR, exist_ok=True)
train = pd.read_csv(DATA_DIR + "train.csv")
# test = pd.read_csv(DATA_DIR + "test.csv")
sub = pd.read_csv(DATA_DIR + "sample_submission.csv")
TRAIN_IMAGE_PATH = DATA_DIR + "train_images/"
TEST_IMAGE_PATH = DATA_DIR + "test_images/"
```
# 🤔 Config
```
# seed = random.randrange(10000)
seed = 440
print(seed)
class Config:
    # Settings that stay outside the wandb sweep config.
    wandb_entity = "imokuri"
    wandb_project = "baseline"
    print_freq = 100          # log every N batches
    train = True              # run the training phase
    validate = False          # run validation only
    inference = False         # run inference only
    debug = False             # tiny run: subsampled data, fewer folds/epochs
    num_debug_data = 1000     # rows kept per dataframe in debug mode
    amp = True                # use mixed-precision training
# Hyperparameters exposed to wandb (overridable by sweeps).
config_defaults = {
    "seed": seed,
    "n_class": 5,
    "n_fold": 5,
    "epochs": 10,
    "batch_size": 32,
    "gradient_accumulation_steps": 1,
    "max_grad_norm": 1000,
    "criterion": "CrossEntropyLoss",
    "optimizer": "Adam",
    "scheduler": "CosineAnnealingWarmRestarts",
    "lr": 1e-4,
    "min_lr": 5e-6,
    "weight_decay": 1e-6,
    "model_name": "resnext50_32x4d", # "vit_base_patch16_384", "tf_efficientnetv2_m_in21k",
    "size": 512,
}
if Config.debug:
    config_defaults["n_fold"] = 3
    config_defaults["epochs"] = 1
    Config.print_freq = 10
# Job type for the wandb run; precedence is train > inference > validate.
if Config.train:
    wandb_job_type = "training"
elif Config.inference:
    wandb_job_type = "inference"
elif Config.validate:
    wandb_job_type = "validation"
else:
    wandb_job_type = ""
if Config.debug:
    wandb_tags.append("debug")
if Config.amp:
    wandb_tags.append("amp")
# Debug runs disable wandb entirely (mode="disabled"); real runs upload code.
if Config.debug:
    run = wandb.init(
        entity=Config.wandb_entity,
        project=Config.wandb_project,
        config=config_defaults,
        tags=wandb_tags,
        mode="disabled",
    )
else:
    run = wandb.init(
        entity=Config.wandb_entity,
        project=Config.wandb_project,
        config=config_defaults,
        job_type=wandb_job_type,
        tags=wandb_tags,
        save_code=True,
    )
# All hyperparameter reads below go through the wandb config object.
config = wandb.config
```
# EDA 1
```
# for df in [train, test, sub]:
# Quick look at schema and head of each dataframe.
for df in [train, sub]:
    print(f"=" * 120)
    df.info()
    display(df.head())
# Class balance of the target.
# NOTE(review): sns.distplot is deprecated; sns.histplot is the replacement.
sns.distplot(train["label"], kde=False)
```
# Preprocess
```
def get_transforms(*, data):
    """Build the albumentations pipeline for the given split.

    data="train" returns the augmenting pipeline (random crop + flips +
    shift/scale/rotate); data="valid" returns the deterministic resize-only
    pipeline. Both end with ImageNet normalization and tensor conversion.
    Any other value returns None.
    """
    normalize = A.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    )
    if data == "train":
        steps = [
            A.RandomResizedCrop(config.size, config.size),
            A.Transpose(p=0.5),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(p=0.5),
            normalize,
            ToTensorV2(),
        ]
    elif data == "valid":
        steps = [
            A.Resize(config.size, config.size),
            normalize,
            ToTensorV2(),
        ]
    else:
        return None
    return A.Compose(steps)
```
# EDA 2
```
```
# 👑 Load Artifacts
```
# In debug mode, shrink the dataframes to num_debug_data rows for fast iteration.
if Config.debug:
    train = train.sample(n=Config.num_debug_data, random_state=config.seed).reset_index(drop=True)
    if len(sub) > Config.num_debug_data:
        # test = test.sample(n=Config.num_debug_data, random_state=config.seed).reset_index(drop=True)
        sub = sub.sample(n=Config.num_debug_data, random_state=config.seed).reset_index(drop=True)
```
# Utils
```
@contextmanager
def timer(name):
    """Context manager that logs the wall-clock duration of the wrapped block."""
    started = time.time()
    LOGGER.info(f"[{name}] start")
    yield
    LOGGER.info(f"[{name}] done in {time.time() - started:.0f} s.")
def init_logger(log_file=OUTPUT_DIR + "train.log"):
    """Create the notebook logger, writing to stdout and to *log_file*.

    BUG FIX: the original appended new handlers on every call, so re-running
    this cell multiplied every log line. Existing handlers are now removed
    before the stream and file handlers are (re)attached.
    """
    from logging import INFO, FileHandler, Formatter, StreamHandler, getLogger

    logger = getLogger(__name__)
    logger.setLevel(INFO)
    # Drop handlers left over from a previous call/cell re-run.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger
LOGGER = init_logger()
def seed_torch(seed=42):
    """Seed the python, numpy and torch RNGs so runs are reproducible.

    Note: assigning PYTHONHASHSEED here only affects subprocesses; the
    current interpreter's hash randomization was fixed at startup.
    """
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # cudnn.deterministic alone is not enough: benchmark mode still picks
    # convolution algorithms non-deterministically, so disable it too.
    torch.backends.cudnn.benchmark = False
seed_torch(seed=config.seed)
```
# Make Fold
```
# Assign a cross-validation fold to every row, stratified by label so each
# fold keeps the class distribution of the full training set.
# FIX: use config.n_fold instead of a hard-coded 5 -- debug mode sets
# n_fold=3, and the training loop iterates range(config.n_fold), so the split
# must agree with it.
Fold = StratifiedKFold(n_splits=config.n_fold, shuffle=True, random_state=seed)
for n, (train_index, val_index) in enumerate(Fold.split(train, train["label"])):
    train.loc[val_index, "fold"] = int(n)
train["fold"] = train["fold"].astype(np.int8)
print(train.groupby(["fold", "label"]).size())
```
# Dataset
```
class BaseDataset(Dataset):
    """Image dataset reading competition JPEGs from disk.

    df: dataframe with an "image_id" column and, when label=True, a "label"
        column.
    transform: optional albumentations pipeline applied to each image.
    label: True -> read from the train image folder and return (image, label);
           False -> read from the test folder and return the image only.
    """

    def __init__(self, df, transform=None, label=True):
        self.df = df
        self.file_names = df["image_id"].values
        self.transform = transform
        self.use_label = label
        if self.use_label:
            self.path = TRAIN_IMAGE_PATH
            self.labels = df["label"].values
        else:
            self.path = TEST_IMAGE_PATH

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        # cv2 loads BGR; convert to RGB before augmenting.
        image = cv2.imread(f"{self.path}/{self.file_names[idx]}")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform is not None:
            image = self.transform(image=image)["image"]
        if not self.use_label:
            return image
        return image, torch.tensor(self.labels[idx])
# Sanity check: show one raw sample (no transform -> plain HxWx3 array).
train_ds = BaseDataset(train)
image, label = train_ds[0]
plt.imshow(image)
plt.title(f"label: {label}")
plt.show()
# And one augmented sample; after ToTensorV2 the image is CxHxW, so
# image[0] displays just the first (red) channel.
train_ds = BaseDataset(train, transform=get_transforms(data="train"))
image, label = train_ds[0]
plt.imshow(image[0])
plt.title(f"label: {label}")
plt.show()
```
# 🚗 Model
```
class BaseModel(nn.Module):
    """A timm backbone with its classification head replaced by an
    n_class-way linear layer."""

    def __init__(self, model_name, pretrained=True):
        super().__init__()
        self.model_name = model_name
        self.model = timm.create_model(model_name, pretrained=pretrained)
        # Each timm family names its final layer differently; swap the
        # matching one for a fresh Linear producing config.n_class logits.
        if "resnext50_32x4d" in model_name:
            in_features = self.model.fc.in_features
            self.model.fc = nn.Linear(in_features, config.n_class)
        elif model_name.startswith("tf_efficientnet"):
            in_features = self.model.classifier.in_features
            self.model.classifier = nn.Linear(in_features, config.n_class)
        elif model_name.startswith("vit_"):
            in_features = self.model.head.in_features
            self.model.head = nn.Linear(in_features, config.n_class)

    def forward(self, x):
        return self.model(x)
if config.model_name != "":
model = BaseModel(config.model_name)
print(model)
train_ds = BaseDataset(train, transform=get_transforms(data="train"))
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=4, drop_last=True)
for image, label in train_loader:
output = model(image)
print(output)
break
```
# Optimizer
```
```
# Loss
```
```
# Scoring
```
def get_score(y_true, y_pred):
    """Competition metric: plain classification accuracy."""
    return accuracy_score(y_true, y_pred)
def get_result(result_df, fold=config.n_fold):
    """Score an out-of-fold dataframe and log the result to wandb.

    result_df must have "preds" and "label" columns. The default
    fold (config.n_fold, bound at definition time) acts as a sentinel
    meaning "overall CV over all folds"; a real fold index logs a
    per-fold metric key instead.
    """
    preds = result_df["preds"].values
    labels = result_df["label"].values
    score = get_score(labels, preds)
    LOGGER.info(f"Score: {score:<.5f}")
    if fold == config.n_fold:
        wandb.log({"CV": score})
    else:
        wandb.log({f"CV_fold{fold}": score})
```
# Helper functions
```
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def asMinutes(s):
    """Format a duration of *s* seconds as "Xm Ys" (whole minutes/seconds)."""
    minutes, seconds = divmod(s, 60)
    return "%dm %ds" % (minutes, seconds)


def timeSince(since, percent):
    """Format elapsed time since *since* and the estimated time remaining.

    percent is the fraction of the job already done (0 < percent <= 1);
    remaining time is a linear extrapolation of the elapsed time.
    """
    elapsed = time.time() - since
    total_estimate = elapsed / percent
    return "%s (remain %s)" % (asMinutes(elapsed), asMinutes(total_estimate - elapsed))
def compute_grad_norm(parameters, norm_type=2.0):
    """Total gradient norm of *parameters*, without clipping.

    Mirrors the norm computation of torch.nn.utils.clip_grad_norm_: the
    norm_type-norm of the per-parameter gradient norms. Parameters without
    gradients are ignored; a single tensor may be passed directly.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    norm_type = float(norm_type)
    grads = [p.grad for p in parameters if p.grad is not None]
    accum = 0
    for g in grads:
        accum += g.data.norm(norm_type).item() ** norm_type
    return accum ** (1. / norm_type)
def train_fn(train_loader, model, criterion, optimizer, scheduler, scaler, epoch, device):
    """Train *model* for one epoch with AMP and gradient accumulation.

    The optimizer/scheduler step only every config.gradient_accumulation_steps
    batches; the scheduler is therefore stepped per optimizer update, not per
    epoch. Returns the average (unscaled) loss over the epoch.
    """
    losses = AverageMeter()
    # switch to train mode
    model.train()
    start = time.time()
    optimizer.zero_grad()
    for step, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        # Forward under autocast; loss is recorded before the accumulation
        # division so the reported average stays in natural units.
        with amp.autocast(enabled=Config.amp):
            y_preds = model(images)
            loss = criterion(y_preds, labels)
        losses.update(loss.item(), batch_size)
        loss = loss / config.gradient_accumulation_steps
        scaler.scale(loss).backward()
        if (step + 1) % config.gradient_accumulation_steps == 0:
            # Unscale before clipping so the threshold applies to real grads.
            scaler.unscale_(optimizer)
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
            optimizer.zero_grad()
        else:
            # Off-step batches: only measure the (still accumulating) grad norm.
            grad_norm = compute_grad_norm(model.parameters())
        end = time.time()
        if step % Config.print_freq == 0 or step == (len(train_loader) - 1):
            # NOTE(review): scheduler.get_lr() is deprecated in newer torch;
            # get_last_lr() is the replacement -- confirm the installed version.
            print(
                f"Epoch: [{epoch + 1}][{step}/{len(train_loader)}] "
                f"Elapsed {timeSince(start, float(step + 1) / len(train_loader)):s} "
                f"Loss: {losses.avg:.4f} "
                f"Grad: {grad_norm:.4f} "
                f"LR: {scheduler.get_lr()[0]:.6f} "
            )
    return losses.avg
def valid_fn(valid_loader, model, criterion, device):
    """Evaluate *model* on the validation loader.

    Returns (average loss, stacked softmax probabilities as a numpy array
    of shape [n_valid, n_class]).
    """
    losses = AverageMeter()
    # switch to evaluation mode
    model.eval()
    preds = []
    start = time.time()
    for step, (images, labels) in enumerate(valid_loader):
        images = images.to(device)
        labels = labels.to(device)
        batch_size = labels.size(0)
        with torch.no_grad():
            y_preds = model(images)
        loss = criterion(y_preds, labels)
        losses.update(loss.item(), batch_size)
        # Collect class probabilities on CPU for scoring/aggregation later.
        preds.append(y_preds.softmax(1).to("cpu").numpy())
        # preds.append(y_preds.to("cpu").numpy())
        end = time.time()  # NOTE(review): `end` is never used
        if step % Config.print_freq == 0 or step == (len(valid_loader) - 1):
            print(
                f"EVAL: [{step}/{len(valid_loader)}] "
                f"Elapsed {timeSince(start, float(step + 1) / len(valid_loader)):s} "
                f"Loss: {losses.avg:.4f} "
            )
    predictions = np.concatenate(preds)
    return losses.avg, predictions
```
# Postprocess
```
```
# 🏃♂️ Train loop
```
def train_loop(df, fold):
    """Train one CV fold end-to-end and return its out-of-fold results.

    Splits *df* on the "fold" column, trains for config.epochs, keeps the
    checkpoint/predictions of the lowest-validation-loss epoch, and returns
    (valid_folds with per-class probabilities and "preds", best score,
    best validation loss).
    """
    LOGGER.info(f"========== fold: {fold} training ==========")
    # ====================================================
    # Data Loader
    # ====================================================
    trn_idx = df[df["fold"] != fold].index
    val_idx = df[df["fold"] == fold].index
    train_folds = df.loc[trn_idx].reset_index(drop=True)
    valid_folds = df.loc[val_idx].reset_index(drop=True)
    train_dataset = BaseDataset(train_folds, transform=get_transforms(data="train"))
    valid_dataset = BaseDataset(valid_folds, transform=get_transforms(data="valid"))
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
        drop_last=True,
    )
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
    )
    # ====================================================
    # Optimizer
    # ====================================================
    def get_optimizer(model):
        # Build the optimizer named in config.optimizer.
        if config.optimizer == "Adam":
            optimizer = Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
        elif config.optimizer == "AdamW":
            # BUG FIX: was `T.AdamW(...)` but `T` is never imported, so the
            # AdamW branch raised NameError. Use torch.optim.AdamW instead.
            optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
        return optimizer
    # ====================================================
    # Scheduler
    # ====================================================
    def get_scheduler(optimizer, train_dataset):
        # Schedulers are stepped per optimizer update, so size the cycle in steps.
        num_data = len(train_dataset)
        num_steps = num_data // (config.batch_size * config.gradient_accumulation_steps) * config.epochs
        if config.scheduler == "CosineAnnealingWarmRestarts":
            scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=num_steps, T_mult=1, eta_min=config.min_lr, last_epoch=-1)
        elif config.scheduler == "CosineAnnealingLR":
            scheduler = CosineAnnealingLR(optimizer, T_max=num_steps, eta_min=config.min_lr, last_epoch=-1)
        elif config.scheduler == "CosineAnnealingWarmupRestarts":
            # NOTE(review): requires the commented-out cosine_annealing_warmup
            # import at the top of the notebook to be enabled.
            scheduler = CosineAnnealingWarmupRestarts(
                optimizer, first_cycle_steps=num_steps, max_lr=config.lr, min_lr=config.min_lr, warmup_steps=(num_steps // 10)
            )
        return scheduler
    # ====================================================
    # Model
    # ====================================================
    model = BaseModel(config.model_name)
    model.to(device)
    optimizer = get_optimizer(model)
    scaler = amp.GradScaler(enabled=Config.amp)
    scheduler = get_scheduler(optimizer, train_dataset)
    # ====================================================
    # Criterion
    # ====================================================
    def get_criterion():
        # Build the loss named in config.criterion.
        if config.criterion == "CrossEntropyLoss":
            criterion = nn.CrossEntropyLoss()
        elif config.criterion == "BCEWithLogitsLoss":
            criterion = nn.BCEWithLogitsLoss()
        elif config.criterion == "MSELoss":
            criterion = nn.MSELoss()
        return criterion
    criterion = get_criterion()
    # ====================================================
    # Loop
    # ====================================================
    best_score = -1
    best_loss = np.inf
    best_preds = None
    for epoch in range(config.epochs):
        start_time = time.time()
        # train
        avg_loss = train_fn(train_loader, model, criterion, optimizer, scheduler, scaler, epoch, device)
        # eval
        avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)
        valid_labels = valid_folds["label"].values
        # if config.criterion == "BCEWithLogitsLoss":
        #     preds = 1 / (1 + np.exp(-preds))
        # scoring
        score = get_score(valid_labels, preds.argmax(1))
        # score = get_score(valid_labels, preds)
        elapsed = time.time() - start_time
        LOGGER.info(f"Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s")
        LOGGER.info(f"Epoch {epoch+1} - Score: {score}")
        wandb.log({
            "epoch": epoch + 1,
            f"loss/train_fold{fold}": avg_loss,
            f"loss/valid_fold{fold}": avg_val_loss,
            f"score/fold{fold}": score,
        })
        # "Best" is defined by validation loss, not score.
        if avg_val_loss < best_loss:
            best_score = score
            best_loss = avg_val_loss
            best_preds = preds
            LOGGER.info(f"Epoch {epoch+1} - Save Best Model. score: {best_score:.4f}, loss: {best_loss:.4f}")
            torch.save(
                {"model": model.state_dict(), "preds": preds}, MODEL_DIR + f"{config.model_name.replace('/', '-')}_fold{fold}_best.pth"
            )
            # use artifacts instead
            # wandb.save(MODEL_DIR + f"{config.model_name.replace('/', '-')}_fold{fold}_best.pth")
    # Attach per-class probabilities and the hard prediction of the best epoch.
    valid_folds[[str(c) for c in range(config.n_class)]] = best_preds
    valid_folds["preds"] = best_preds.argmax(1)
    # valid_folds["preds"] = best_preds
    return valid_folds, best_score, best_loss
```
# Main function
```
def main():
    """Cross-validated training entry point.

    For each fold: seeds RNGs, runs ``train_loop``, accumulates the
    out-of-fold predictions, and logs per-fold metrics.  Afterwards it
    logs the CV-level result to the logger and to Weights & Biases,
    writes the OOF frame to disk, and uploads the model directory as a
    W&B artifact.  No-op when ``Config.train`` is false.
    """
    if not Config.train:
        return
    all_oof = pd.DataFrame()
    fold_summaries = []
    for fold in range(config.n_fold):
        # Different seed per fold so folds are not trivially correlated.
        seed_torch(seed + fold)
        fold_oof, fold_score, fold_loss = train_loop(train, fold)
        all_oof = pd.concat([all_oof, fold_oof])
        fold_summaries.append([fold, fold_score, fold_loss])
        LOGGER.info(f"========== fold: {fold} result ==========")
        get_result(fold_oof, fold)
    # CV-level result across all folds.
    LOGGER.info(f"========== CV ==========")
    get_result(all_oof)
    mean_loss = statistics.mean(row[2] for row in fold_summaries)
    wandb.log({"loss": mean_loss})
    summary = wandb.Table(data=fold_summaries, columns=["fold", "score", "loss"])
    run.log({"Fold Result": summary})
    # Persist the OOF predictions and upload the trained models.
    all_oof.to_csv(OUTPUT_DIR + "oof_df.csv", index=False)
    wandb.save(OUTPUT_DIR + "oof_df.csv")
    model_artifact = wandb.Artifact(config.model_name, type='model')
    model_artifact.add_dir(MODEL_DIR)
    run.log_artifact(model_artifact)
```
# 🚀 Run
```
# Kick off training, then close the W&B run so buffered logs are flushed.
main()
wandb.finish()
```
| github_jupyter |
# Iris dataset example
Example of a functional keras model with named inputs/outputs for compatibility with the keras/tensorflow toolkit.
```
from sklearn.datasets import load_iris
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import pandas as pd
# Load the iris data and rename columns to the alphanumeric names that
# the keras named Input layers below will use.
iris = load_iris()
data = pd.DataFrame(iris.data, columns=iris.feature_names)
data.columns = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]
data["Species"] = iris.target
data
# 80/20 train/test split with a fixed seed for reproducibility.
train_dataset = data.sample(frac=0.8,random_state=0)
test_dataset = data.drop(train_dataset.index)
# Separate the target column from the feature columns.
train_labels = train_dataset.pop('Species')
test_labels = test_dataset.pop('Species')
train_dataset.keys()
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(train_labels)
encoded_Y = encoder.transform(train_labels)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = to_categorical(encoded_Y)
# define model
def build_model():
    """Build and compile the functional-API iris classifier.

    Four scalar inputs — one per feature, each carrying the feature's
    name — are concatenated, passed through one small dense layer, and
    mapped to a 3-way softmax output named "Species".  The named
    inputs/outputs make the model addressable by name from other
    keras/tensorflow tooling.
    """
    feature_names = ("SepalLength", "SepalWidth", "PetalLength", "PetalWidth")
    inputs = [keras.Input(shape=(1,), name=feature) for feature in feature_names]
    merged = keras.layers.concatenate(inputs)
    hidden = Dense(8, activation='relu')(merged)
    species = Dense(3, activation='softmax', name="Species")(hidden)
    model = keras.Model(inputs=inputs, outputs=[species])
    model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])
    return model
model = build_model()
# Summary statistics of the training features (displayed only; no scaling
# is applied before fitting).
train_stats = train_dataset.describe()
train_stats = train_stats.transpose()
train_stats
# fit() accepts a dict of named series matching the model's named inputs.
train_x = train_dataset.to_dict("series")
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=15)
history = model.fit(train_x, dummy_y, epochs=1000,
validation_split = 0.2, verbose=1, callbacks=[early_stop])
model.save("files/iris_model.h5")
#access input names
model.input_names
#access output names
model.output_names
# Single-row dict of series, the same named-input format used for training.
test_item = train_dataset.iloc[[20]].to_dict("series")
# Output type softmax
model.predict([test_item])
```
| github_jupyter |
# Can we find zero in less than 20 iterations ?
## The Quest for the Ultimate Optimizer - Episode 2
-------------------------------------------------------------------
This notebook is a continuation of the first episode of my Quest for the Ultimate Optimizer series of notebooks, which was inspired by DeepMind’s paper [“Learning to learn by gradient descent by gradient descent”](https://arxiv.org/abs/1606.04474) and [Llion Jones's article on this paper](https://hackernoon.com/learning-to-learn-by-gradient-descent-by-gradient-descent-4da2273d64f2). I encourage you to read all of these if you want to understand how the following is set-up.
Being a continuation of the first episode, it contains quite a lot of setting-up that comes directly from it, and that I have positioned at the end to avoid repeating myself too much. This means that:
1. If you want to run this notebook, you need to start by running the appendix at the end before the rest of the notebook
2. If you haven't read the first notebook yet, go check it out, this one will make more sense if you start there.
### Appendix
I'll start with the same disclaimer as in the first notebook : the code in this section draws heavily from [Llion Jones's article](https://hackernoon.com/learning-to-learn-by-gradient-descent-by-gradient-descent-4da2273d64f2).
I encourage you to read it, if you want to understand how it is set-up.
For the rest you'll find a bit more explanations in the first notebook.
```
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import os
# If you have tensorflow for GPU but want to use your CPU
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=""
DIMS = 2 # Dimensions of the quadratic function, the simplest application problem in DeepMind's paper
scale = tf.random_uniform([DIMS], 0.5, 1.5)
# The scale vector gives a different shape to the quadratic function at each initialization
def quadratic(x):
    """Randomly-scaled quadratic bowl: sum of squares of ``scale * x``.

    ``scale`` is the module-level random vector, so each graph
    initialization yields a differently shaped quadratic.
    """
    return tf.reduce_sum(tf.square(scale * x))
# Some reference optimizers for benchmarking
def g_sgd(gradients, state, learning_rate=0.1):
# Vanilla Stochastic Gradient Descent
return -learning_rate*gradients, state
def g_rms(gradients, state, learning_rate=0.1, decay_rate=0.99):
    """RMSProp step.

    ``state`` holds the exponential moving average of squared gradients
    (initialized to zeros on the first call); the step is the gradient
    normalized by the RMS of that history.
    """
    if state is None:
        state = tf.zeros(DIMS)
    # Update the running mean of squared gradients.
    state = decay_rate * state + (1 - decay_rate) * tf.pow(gradients, 2)
    # Small epsilon keeps the division finite when the history is ~0.
    denom = tf.sqrt(state) + 1e-5
    return -learning_rate * gradients / denom, state
TRAINING_STEPS = 20 # This is 100 in the paper
initial_pos = tf.random_uniform([DIMS], -1., 1.)
def learn(optimizer):
    """Unroll TRAINING_STEPS iterations of ``optimizer`` into one TF graph.

    Starting from the shared random ``initial_pos``, each step evaluates
    the quadratic loss, differentiates it, and applies the optimizer's
    update.  Returns the list of per-step loss tensors.
    """
    losses = []
    position = initial_pos
    opt_state = None
    for _ in range(TRAINING_STEPS):
        step_loss = quadratic(position)
        losses.append(step_loss)
        step_grads, = tf.gradients(step_loss, position)
        delta, opt_state = optimizer(step_grads, opt_state)
        position += delta
    return losses
sgd_losses = learn(g_sgd)
rms_losses = learn(g_rms)
# Now let's define the RNN optimizer
LAYERS = 2
STATE_SIZE = 20
# Two stacked LSTM layers; the input projection maps the (scalar-per-
# coordinate) gradient into STATE_SIZE features and the output projection
# maps the hidden state back to a single update value per coordinate.
cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.LSTMCell(STATE_SIZE) for _ in range(LAYERS)])
cell = tf.contrib.rnn.InputProjectionWrapper(cell, STATE_SIZE)
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, 1)
# make_template shares the RNN's variables across every call site.
cell = tf.make_template('cell', cell)
def optimize(loss, learning_rate=0.1):
    """Meta-optimizer: build one Adam update of the RNN weights w.r.t. ``loss``."""
    meta_opt = tf.train.AdamOptimizer(learning_rate)
    grads, variables = zip(*meta_opt.compute_gradients(loss))
    # Clip to unit global norm to keep meta-training stable.
    clipped, _ = tf.clip_by_global_norm(grads, 1.)
    return meta_opt.apply_gradients(zip(clipped, variables))
def print_dict(*args):
    # Prints variables in a dict format for easier reading
    # NOTE: each arg is a variable *name* (string); eval() resolves it in
    # this function's scope, so it only works for notebook-global names.
    dict_name = dict((name,eval(name)) for name in args)
    print(dict_name)
def graph_optimizers(f1, f2, f3, it=3, training_steps=TRAINING_STEPS):
    """Plot ``it`` freshly sampled loss curves for the three optimizers.

    ``f1``/``f2``/``f3`` are the unrolled loss-list tensors for SGD, RMS
    and the RNN; each sess.run re-samples the random problem instance.
    """
    steps = np.arange(training_steps)
    for _ in range(it):
        sgd_curve, rms_curve, rnn_curve = sess.run([f1, f2, f3])
        h_sgd, = plt.semilogy(steps, sgd_curve, label='SGD')
        h_rms, = plt.semilogy(steps, rms_curve, label='RMS')
        h_rnn, = plt.semilogy(steps, rnn_curve, label='RNN')
        plt.legend(handles=[h_sgd, h_rms, h_rnn])
        plt.title('Losses')
        plt.show()
def rolling_log_average(array, L):
    """Rolling geometric-style average: 10**(mean log10) over the last L points.

    The 1e-38 residual keeps log10 finite when the window contains zeros.
    """
    smoothed = np.empty(array.size)
    for i in range(array.size):
        window = array[max(0, i + 1 - L):i + 1]
        smoothed[i] = 10 ** np.log10(window + 1e-38).mean()
    return smoothed
import warnings
def draw_convergence(*args):
    """Draws the convergence of one or several meta optimizations.

    Each arg is the *name* (string) of a notebook-global numpy array of
    per-iteration results; eval() resolves the names in this module's
    scope.  The transparent area is the raw results; the plain line is
    the 500-point rolling 'log average'.
    """
    # Longest series determines the shared x-axis length.
    it = 0
    for f in args:
        it = max(eval(f).size, it)
    handles = []
    for f in args:
        flist = eval(f)[np.logical_not(np.isnan(eval(f)))] #removes NaN
        flist_rolling = rolling_log_average(flist, 500)
        flist_size = flist.size
        #matplotlib doesn't like graphs of different length so we fill the shorter graphs with None
        if flist_size < it:
            flist = np.append(flist, [None]*(it-flist_size))
            flist_rolling = np.append(flist_rolling, [None]*(it-flist_size))
        # Smoothed line reuses the raw trace's color so pairs read together.
        c1, = plt.semilogy(range(it), flist, alpha=0.3)
        c2, = plt.semilogy(range(it), flist_rolling, color=c1.get_color(), label=f)
        handles = handles + [c2]
    plt.legend(handles=handles)
    plt.title('End result of the optimizer')
    #matplotlib still doesn't like graphs of different length so we filter associated warnings
    warnings.filterwarnings("ignore",category =RuntimeWarning)
    plt.show()
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
```
### Recap of the previous episode
In the first notebook, we reused [Llion Jones's implementation](https://hackernoon.com/learning-to-learn-by-gradient-descent-by-gradient-descent-4da2273d64f2) of [“Learning to learn by gradient descent by gradient descent”](https://arxiv.org/abs/1606.04474) which set-up a RNN (recurrent neural network) in tensorflow for use as an optimizer that works similarly to SGD or RMSProp, then unrolls 20 of its iterations (we'll call those the "base" iterations) within a single tensorflow graph so that we can iteratively modify the parameters of the RNN to minimize the result, thus optimizing the RNN optimizer (we'll call those the "meta" iterations).
The target was to beat RMSProp performance on the most simple problem you can think of : find the zero of a 2 dimension quadratic function.
To do that we tweaked a little bit the objective function to avoid the problem of vanishing gradient as the RNN gets better, finally settling on log of the RNN result as our objective function to minimize.
We also implemented what I will describe as a "liberal" interpretation of the preprocessing scheme for the RNN proposed in DeepMind's paper which is basically to also apply the log function on the gradients of the quadratic function before feeding the result into the RNN.
Let's re-run the last 2 RNNs proposed in the previous notebook.
```
def g_rnn_log15(gradients, state):
    """RNN optimizer step with log-scale gradient pre/post-processing.

    |gradients| in [exp(-15), 1] are mapped (sign-preserving) onto
    [0, 1] before the RNN, and the RNN output is mapped back through the
    inverse transform, so the network sees a roughly uniform scale.
    """
    gradients = tf.expand_dims(gradients, axis=1)
    floor = -15  # log of the smallest gradient magnitude represented
    # Sign-preserving log rescale; the residual avoids log(0).
    log_mag = tf.log(tf.abs(gradients) + np.exp(floor - 5))
    signs = tf.sign(gradients)
    rnn_inputs = tf.multiply(signs, (log_mag - floor) / (-floor))
    if state is None:
        state = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
    rnn_out, state = cell(rnn_inputs, state)
    # Invert the mapping: [0, 1] back to [exp(floor), 1], sign restored.
    update = tf.multiply(tf.sign(rnn_out),
                         tf.exp(tf.abs(rnn_out) * (-floor) + floor))
    return tf.squeeze(update, axis=[1]), state
rnn_losses = learn(g_rnn_log15)
log_loss = tf.log(rnn_losses[-1])
apply_update = optimize(log_loss, learning_rate=0.0003)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
for it in range(50001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
if it % 50000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
average_log_result = '{:.2f}'.format(np.log10(list_result[-5000:]).mean())
print_dict('it', 'optim_result', 'average_log_result')
RNN_log15_end_log_res = list_result
def g_rnn_log30(gradients, state):
    """Same log-cast RNN step as ``g_rnn_log15`` but with the magnitude
    floor lowered to exp(-30), extending the representable gradient range."""
    gradients = tf.expand_dims(gradients, axis=1)
    # Casting of gradients from [exp(-30), 1] to [0, 1] and [-1, -exp(-30)] to [-1, 0]
    min_log_gradient = -30
    log_gradients = tf.log(tf.abs(gradients) + np.exp(min_log_gradient-5))
    sign_gradients = tf.sign(gradients)
    trans_gradients = tf.multiply(sign_gradients,((log_gradients - min_log_gradient) / (- min_log_gradient)))
    if state is None:
        state = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
    update, state = cell(trans_gradients, state)
    # Casting of output from [0, 1] to [exp(-30), 1] and [-1, 0] to [-1, -exp(-30)]
    abs_update = tf.abs(update)
    sign_update = tf.sign(update)
    update = tf.multiply(sign_update, tf.exp(abs_update * (- min_log_gradient) + min_log_gradient))
    return tf.squeeze(update, axis=[1]), state
rnn_losses = learn(g_rnn_log30)
log_loss = tf.log(rnn_losses[-1])
apply_update = optimize(log_loss, learning_rate=0.0003)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
for it in range(100001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
if it % 50000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
average_log_result = '{:.2f}'.format(np.log10(list_result[-5000:]).mean())
print_dict('it', 'optim_result', 'average_log_result')
RNN_log30_end_log_res = list_result
list_rms_errors = np.array([])
for it in range(1000):
sgd_errors, rms_errors = sess.run([sgd_losses, rms_losses])
list_rms_errors = np.append(list_rms_errors, rms_errors[-1])
Target_RMS = np.full(100001, rolling_log_average(list_rms_errors, 1000)[-1])
draw_convergence('Target_RMS', 'RNN_log15_end_log_res', 'RNN_log30_end_log_res')
graph_optimizers(sgd_losses, rms_losses, rnn_losses)
```
We concluded the last episode by declaring victory over RMSProp ... but is it the best we can do ?
### How about actually finding 0 ?
To do that we need to define what is 0 in our context.
It turns out Numpy offers an easy way to do that :
```
print(np.finfo(np.float32).tiny)
```
Now that we know what's our next target, let's give another look to the graph of convergences :
```
draw_convergence('RNN_log15_end_log_res', 'RNN_log30_end_log_res')
```
The obvious way to improve our RNN_log optimizer would be to continue the trend and try RNN_log50; however, we can already see that the RNN_log30 had a hard time starting its convergence, so let's try to see if we can pinpoint what is the exact range of gradient we need to cast to [-1, 1] in the preprocessing of our RNN when we reach y = 1e-38.
We might even be able to explain why RNN_log15 and RNN_log30 both seem to be hitting floors at 1e-14 and 1e-28 respectively
```
# What's the log(gradient) when y = 1e-14, 1e-28 or 1 ? y = x**2 so y' = 2x = 2*(y**0.5)
print("log_gradient for 1e-14 : ", np.log(2*(1e-14**0.5)))
print("log_gradient for 1e-28 : ", np.log(2*(1e-28**0.5)))
print("log_gradient for 1e-38 : ", np.log(2*(1e-38**0.5)))
```
Bingo!
The floor that RNN_log15 is hitting (y = 1e-14) corresponds to when the log(gradient) of x reaches -15. Same for RNN_log30 at y = 1e-28. So it looks like we need to go for -43 as our next min_log_gradient
```
def g_rnn_log43(gradients, state):
    """Log-cast RNN step with the floor at exp(-43), matching the gradient
    magnitude reached when the quadratic bottoms out near float32 tiny.

    NOTE(review): unlike log15/log30, the anti-log(0) residual here is
    np.exp(min_log_gradient) with no extra -5 offset — confirm intentional."""
    gradients = tf.expand_dims(gradients, axis=1)
    # Casting of gradients from [exp(-43), 1] to [0, 1] and [-1, -exp(-43)] to [-1, 0]
    min_log_gradient = -43
    log_gradients = tf.log(tf.abs(gradients) + np.exp(min_log_gradient))
    sign_gradients = tf.sign(gradients)
    trans_gradients = tf.multiply(sign_gradients,((log_gradients - min_log_gradient) / (- min_log_gradient)))
    if state is None:
        state = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
    update, state = cell(trans_gradients, state)
    # Casting of output from [0, 1] to [exp(-43), 1] and [-1, 0] to [-1, -exp(-43)]
    abs_update = tf.abs(update)
    sign_update = tf.sign(update)
    update = tf.multiply(sign_update, tf.exp(abs_update * (- min_log_gradient) + min_log_gradient))
    return tf.squeeze(update, axis=[1]), state
rnn_losses = learn(g_rnn_log43)
log_loss = tf.log(rnn_losses[-1])
apply_update = optimize(log_loss, learning_rate=0.0003)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
for it in range(50001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
if it % 5000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
average_log_result = '{:.2f}'.format(np.log10(list_result[-5000:]).mean())
print_dict('it', 'optim_result', 'average_log_result')
# Let's store the convergence for later comparison
RNN_log43_end_log_res = list_result
draw_convergence('RNN_log15_end_log_res', 'RNN_log30_end_log_res', 'RNN_log43_end_log_res')
```
The RNN is having trouble kick starting the convergence. The result is the same for RNN_log40, so we need some other way that extends the range of log(gradients) being correctly interpreted without completely freezing the convergence.
#### Variable gradient casting
The approach proposed below adapts the gradient range during the optimization, gradually lowering the floor of log(gradients) as the RNN gets more precise.
```
class Log_casting:
    ### Class used to cast logarithmically vectors from a variable range of scales below one to [-1, 1]
    def __init__(self, init):
        """Create the caster with an initial log-magnitude floor of ``init``."""
        # scalar of the minimum log(gradient) encountered, initialized with init
        self.min_log_value = tf.Variable(float(init), name="min_log_value", trainable=False)
        # vector identity multiplied by min_log_value, initialized as None
        self.min_log = None
    def update_min_log(self, vector):
        """Return an op that monotonically lowers the floor toward the
        smallest log-magnitude seen in ``vector`` (slow EMA-style blend)."""
        # This method is called at each iteration of the meta optimizer to adapt the min_log_value based on the
        # last gradient (iteration 20) returned by the learn2 function (defined below)
        log_vector = tf.log(tf.abs(vector) + 1e-38)
        # update proposal based on the gradient, the factor 0.01 is to avoid confusing the RNN with a sudden big shift
        update_proposal = 0.01*tf.reduce_min(log_vector) + 0.99*self.min_log_value
        # the update is applied only if it is lower than the current value
        new_value = tf.assign(self.min_log_value, tf.minimum(update_proposal, self.min_log_value))
        return new_value
    def preprocess(self, gradients):
        """Sign-preserving log rescale of ``gradients`` into [-1, 1]."""
        # Casting of gradients from [exp(min_log_value), 1] to [0, 1] and [-1, -exp(min_log_value)] to [-1, 0]
        self.min_log = tf.ones_like(gradients, name='MIN_LOG')*self.min_log_value
        log_gradients = tf.log(tf.abs(gradients) + 1e-38)
        sign_gradients = tf.sign(gradients)
        inputs = tf.multiply(sign_gradients,((log_gradients - self.min_log) / (- self.min_log)))
        return inputs
    def postprocess(self, outputs):
        """Inverse of ``preprocess``: map RNN outputs back to raw scale."""
        # Casting back RNN output from [0, 1] to [exp(min_log_value), 1] and [-1, 0] to [-1, -exp(min_log_value)]
        self.min_log = tf.ones_like(outputs, name='MIN_LOG')*self.min_log_value
        abs_outputs = tf.abs(outputs)
        sign_outputs = tf.sign(outputs)
        update = tf.multiply(sign_outputs, tf.exp(abs_outputs * (- self.min_log) + self.min_log))
        return update
def learn2(optimizer):
    """Unroll TRAINING_STEPS optimizer iterations like ``learn``, but also
    return the final step's gradient tensor for use by Log_casting."""
    losses = []
    position = initial_pos
    opt_state = None
    for _ in range(TRAINING_STEPS):
        step_loss = quadratic(position)
        losses.append(step_loss)
        step_grads, = tf.gradients(step_loss, position)
        delta, opt_state = optimizer(step_grads, opt_state)
        position += delta
    # step_grads is the gradient from the last unrolled iteration.
    return losses, step_grads
Log_casting_ = Log_casting(-5) # initializes our "log caster"
def g_rnn_logv(gradients, state):
    """RNN optimizer step using the adaptive ``Log_casting_`` instance for
    pre- and post-processing, so the log floor tracks the observed gradients."""
    gradients = tf.expand_dims(gradients, axis=1)
    if state is None:
        state = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
    rnn_out, state = cell(Log_casting_.preprocess(gradients), state)
    update = Log_casting_.postprocess(rnn_out)
    return tf.squeeze(update, axis=[1]), state
rnn_losses, grads = learn2(g_rnn_logv) # grads output added for use by log_casting
log_loss = tf.log(rnn_losses[-1] + 1e-37) # residual added to prevent a log(0)... the price of success
apply_update = optimize(log_loss, learning_rate=0.0003)
# operation below added to gradually adapt the min_log value to the lowest gardient
update_log_casting = Log_casting_.update_min_log(grads)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(100001):
errors, _, min_log = sess.run([rnn_losses, apply_update, update_log_casting])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-38).sum()/20)
if it % 10000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-10000:] + 1e-38).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
min_log = '{:.2f}'.format(min_log)
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res', 'min_log')
RNN_logv_end_log_res = list_result
draw_convergence('RNN_log15_end_log_res', 'RNN_log30_end_log_res', 'RNN_logv_end_log_res')
```
So, on the plus side, the convergence is initially faster. We also seem to have removed the barrier preventing the RNN going lower than 1e-28. This allows the average result to continue improving, albeit very slowly.
On the minus side, well, we are still nowhere near 0 (ie 1e-38) on average.
Before exploring new RNN configurations, let's try one last trick : instead of minimizing the log of the last result, we can minimize the sum of log of all the iteration.
```
def optimize(loss, learning_rate=0.1):
    """Meta-optimizer: Adam step on the RNN weights with gradient clipping
    and NaN scrubbing (one NaN would permanently corrupt the weights)."""
    meta_opt = tf.train.AdamOptimizer(learning_rate)
    grads, variables = zip(*meta_opt.compute_gradients(loss))
    clipped, _ = tf.clip_by_global_norm(grads, 1.)
    # Replace any NaN entries with zeros before applying the update.
    scrubbed = [tf.where(tf.is_nan(g), tf.zeros_like(g), g) for g in clipped]
    return meta_opt.apply_gradients(zip(scrubbed, variables))
rnn_losses, grads = learn2(g_rnn_logv)
sum_log_losses = tf.reduce_sum(tf.log(tf.add(rnn_losses,1e-38)))
apply_update = optimize(sum_log_losses, learning_rate=0.0003)
update_log_casting = Log_casting_.update_min_log(grads)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(100001):
errors, _, min_log = sess.run([rnn_losses, apply_update, update_log_casting])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-38).sum()/20)
if it % 10000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-10000:] + 1e-38).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
min_log = '{:.2f}'.format(min_log)
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res', 'min_log')
RNN_logv_sum_log_res = list_result
draw_convergence('RNN_log15_end_log_res', 'RNN_log30_end_log_res', 'RNN_logv_end_log_res',
'RNN_logv_sum_log_res')
```
We are getting closer. Let's have a look at what the convergence looks like.
```
graph_optimizers(sgd_losses, rms_losses, rnn_losses)
```
Better, but we are still far from 1e-38
#### Scale Invariant RNN
The first problem we highlighted is that we are trying to design an RNN that works as well at y=1 as at y=1e-38, with gradients varying between 1 and exp(-43)≈1e-19 (I should mention that this confusing Python convention of writing small numbers like 10⁻⁵ as 1e-5 is most unfortunate in our context).
The different implementations of logarithmic preprocessing of the gradients proposed above sort of address the problem by rescaling this huge variation of scale into a linear segment between 0 and 1 so that it is more or less interpretable by the RNN, but it's never truly scale invariant.
There is probably a much better implementation of this idea of logarithmic preprocessing, but instead of sinking more time into fine tuning this (or digging into DeepMind's code to see how they cracked this :-), we can try a simpler approach : since the RNN is being fed the past 20 inputs, why not feed it only the ratios of gradients between one step and the next and let it deal with it.
It's actually the first idea I tried. However, I was using the direct result of the RNN as the function to be minimized, and as we have seen, this leads to vanishing gradient if you're not applying log to the function.
```
def g_rnn_div(gradients, state):
    """Scale-invariant RNN step: the network sees the *ratio* of the current
    gradient to the previous gradient's magnitude, and its output is
    rescaled back by that magnitude.  ``state`` is [rnn_state, prev_gradient]."""
    gradients = tf.expand_dims(gradients, axis=1)
    if state is None:
        state_nn = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
        state = [state_nn, gradients]
    # 1e-38 residual avoids division by zero.
    inputs = tf.divide(gradients, tf.abs(state[1]) + 1e-38)
    update, state[0] = cell(inputs, state[0])
    outputs = tf.multiply(update, tf.abs(state[1]) + 1e-38)
    state[1] = gradients
    return tf.squeeze(outputs, axis=[1]), state
rnn_losses = learn(g_rnn_div)
end_loss = rnn_losses[-1]
apply_update = optimize(end_loss, learning_rate=0.0003)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(50001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-38).sum()/20)
if it % 5000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-5000:] + 1e-38).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res')
RNN_div_end_res = list_result
draw_convergence('RNN_logv_sum_log_res', 'RNN_div_end_res')
rnn_losses = learn(g_rnn_div)
sum_log_losses = tf.reduce_sum(tf.log(tf.add(rnn_losses,1e-38)))
apply_update = optimize(sum_log_losses, learning_rate=0.0003)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(100001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-38).sum()/20)
if it % 10000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-10000:] + 1e-38).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res')
RNN_div_sum_log_res = list_result
draw_convergence('RNN_logv_sum_log_res', 'RNN_div_end_res', 'RNN_div_sum_log_res')
graph_optimizers(sgd_losses, rms_losses, rnn_losses)
```
To be noted: the implementation above divides the gradient by the norm of the previous gradient. Dividing by the gradients themselves yields more or less the same results.
```
def optimize(loss, learning_rate=0.1):
    """Meta-optimizer (re-defined identically to the NaN-scrubbing version
    above): Adam step on the RNN weights with clipping and NaN removal."""
    # "Meta optimizer" to be applied on the RNN defined above
    optimizer = tf.train.AdamOptimizer(learning_rate)
    gradients, v = zip(*optimizer.compute_gradients(loss))
    clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.)
    checked_gradients = []
    # Zero out NaN gradient entries so they never reach the RNN weights.
    for g in clipped_gradients:
        checked_g = tf.where(tf.is_nan(g), tf.zeros_like(g), g)
        checked_gradients = checked_gradients + [checked_g]
    return optimizer.apply_gradients(zip(checked_gradients, v))
Log_casting_ = Log_casting(-5)
def g_rnn_logdiv(gradients, state):
    """Combines the ratio trick of ``g_rnn_div`` with the adaptive log
    casting of ``Log_casting_``.  ``state`` is
    [rnn_state, prev_gradient, last_raw_ratio]; slot 2 is read back by
    ``learn3`` so Log_casting can adapt its floor to the ratios."""
    gradients = tf.expand_dims(gradients, axis=1)
    if state is None:
        state_nn = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
        state = [state_nn, gradients, gradients]
    # Ratio of current gradient to previous gradient magnitude.
    inputs_ = tf.divide(gradients, tf.abs(state[1]) + 1e-38)
    inputs = Log_casting_.preprocess(inputs_)
    outputs, state[0] = cell(inputs, state[0])
    outputs_ = Log_casting_.postprocess(outputs)
    # Rescale the RNN output back by the previous gradient magnitude.
    update = tf.multiply(outputs_, tf.abs(state[1]) + 1e-38)
    state[1] = gradients
    state[2] = inputs_
    return tf.squeeze(update, axis=[1]), state
def learn3(optimizer):
    """Unroll TRAINING_STEPS optimizer iterations like ``learn``, but also
    return the optimizer's final raw input ratio (state slot 2) so
    Log_casting can adapt its floor to it."""
    losses = []
    position = initial_pos
    opt_state = None
    for _ in range(TRAINING_STEPS):
        step_loss = quadratic(position)
        losses.append(step_loss)
        step_grads, = tf.gradients(step_loss, position)
        delta, opt_state = optimizer(step_grads, opt_state)
        position += delta
    return losses, opt_state[2]
rnn_losses, RNN_inputs = learn3(g_rnn_logdiv) # grads output added for use by log_casting
sum_log_losses = tf.reduce_sum(tf.log(tf.add(rnn_losses,1e-38)))
apply_update = optimize(sum_log_losses, learning_rate=0.0003)
# the operation below gradually adapts the min_log value to the lowest gardient ratio
update_log_casting = Log_casting_.update_min_log(RNN_inputs)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(100001):
errors, _, min_log = sess.run([rnn_losses, apply_update, update_log_casting])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-38).sum()/20)
if it % 10000 == 0 :
list_result_ = list_result[~np.isnan(list_result)]
list_sum_log_res_ = list_sum_log_res[~np.isnan(list_sum_log_res)]
NaN = list_result.size - list_result_.size
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-10000:] + 1e-38).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
min_log = '{:.2f}'.format(min_log)
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res', 'min_log', 'NaN')
RNN_logdiv_sum_log_res = list_result
draw_convergence('RNN_logv_sum_log_res', 'RNN_div_sum_log_res', 'RNN_logdiv_sum_log_res')
graph_optimizers(sgd_losses, rms_losses, rnn_losses)
def g_rnn_log7div(gradients, state):
    """Ratio-based RNN step with a *fixed* log floor of -7 (instead of the
    adaptive Log_casting).  ``state`` is [rnn_state, prev_gradient].

    NOTE(review): the training cell that follows calls learn(g_rnn_logdiv),
    not this function — confirm which optimizer was meant to be trained."""
    gradients = tf.expand_dims(gradients, axis=1)
    if state is None:
        state_nn = [[tf.zeros([DIMS, STATE_SIZE])] * 2] * LAYERS
        state = [state_nn, gradients]
    inputs_ = tf.divide(gradients, tf.abs(state[1]) + 1e-37)
    # Casting of inputs from [exp(-7), 1] to [0, 1] and [-1, -exp(-7)] to [-1, 0]
    min_log_gradient = -7
    log_inputs_ = tf.log(tf.abs(inputs_) + np.exp(min_log_gradient-2))
    # NOTE(review): the sign here comes from the raw gradients, not the
    # ratio inputs_ — confirm this is intentional.
    sign_gradients = tf.sign(gradients)
    inputs = tf.multiply(sign_gradients,((log_inputs_ - min_log_gradient) / (- min_log_gradient)))
    outputs, state[0] = cell(inputs, state[0])
    # Casting of output from [0, 1] to [exp(-7), 1] and [-1, 0] to [-1, -exp(-7)]
    abs_outputs = tf.abs(outputs)
    sign_outputs = tf.sign(outputs)
    outputs_ = tf.multiply(sign_outputs, tf.exp(abs_outputs * (- min_log_gradient) + min_log_gradient))
    update = tf.multiply(outputs_, tf.abs(state[1]) + 1e-37)
    state[1] = gradients
    return tf.squeeze(update, axis=[1]), state
rnn_losses = learn(g_rnn_logdiv)
sum_log_losses = tf.reduce_sum(tf.log(tf.add(rnn_losses,1e-37)))
apply_update = optimize(sum_log_losses, learning_rate=0.0001)
sess.run(tf.global_variables_initializer())
list_result = np.array([])
list_sum_log_res = np.array([])
for it in range(100001):
errors, _ = sess.run([rnn_losses, apply_update])
list_result = np.append(list_result, errors[-1])
list_sum_log_res = np.append(list_sum_log_res, np.log10(np.array(errors) + 1e-37).sum()/20)
if it % 10000 == 0 :
optim_result = '{:.2E}'.format(errors[-1])
av_log_res = '{:.2f}'.format(np.log10(list_result[-10000:] + 1e-37).mean())
av_sum_log_res = '{:.2f}'.format(list_sum_log_res.mean())
print_dict('it', 'optim_result', 'av_log_res', 'av_sum_log_res')
RNN_log7div_sum_log_res = list_result
draw_convergence('RNN_logv_sum_log_res', 'RNN_div_sum_log_res', 'RNN_logdiv_sum_log_res', 'RNN_log7div_sum_log_res')
```
| github_jupyter |
# Plot Deseq2
```
library("ggplot2")
library(tidyr)
```
## T2 PvL
```
# Load the DESeq2 results table (first column = taxon identifiers).
sigtab = read.csv("pvl_t2.csv", row.names = 1)
sigtab = as.data.frame.matrix(sigtab)
#sigtab <- sigtab %>% drop_na(Genus)
sigtab
summary(sigtab$log2FoldChange)
# Build a label vector per row: use the genus (column 13) when it is
# informative, otherwise fall back to the family (column 12); rows with
# neither are dropped.  NOTE(review): columns 12/13 are assumed to be the
# family/genus taxonomy strings with " f__"/" g__" as empty placeholders
# — confirm against the CSV schema.
x = 1
new = data.frame()
vector = c()
for (i in row.names(sigtab)){
temp = sigtab[i,]
if (temp[13] != " g__"){
new = rbind(new,temp)
name = temp[,13]
vector[x] = name
#new = cbind(new, vector[x])
x = x+1
} else if (temp[13] == " g__" && temp[12] != " f__"){
new = rbind(new,temp)
name = temp[,12]
vector[x] = name
#new = cbind(new,vector[x])
x = x+1
} else {
next
}
}
print(vector)
# Re-run the genus/family filtering, but stop once 25 labelled taxa have
# been collected (x >= 25 break), then attach the labels, save the table,
# and draw the log2FoldChange dot plot.
x = 1
new = data.frame()
vector = c()
for (i in row.names(sigtab)){
temp = sigtab[i,]
if (temp[13] != " g__"){
new = rbind(new,temp)
name = temp[,13]
vector[x] = name
#new = cbind(new, vector[x])
x = x+1
}else if (x>=25){
break
}
# NOTE(review): this `else if` starting on its own line parses only
# because it sits inside the for-loop braces; also the x>=25 check is
# reached only when the genus test fails — confirm the intended cutoff.
else if (temp[13] == " g__" && temp[12] != " f__"){
new = rbind(new,temp)
name = temp[,12]
vector[x] = name
#new = cbind(new,vector[x])
x = x+1
} else if (x>=25){
break
}else {
next
}
}
# Attach the collected labels as a new column.
new[["new.col"]] <- vector
x
write.table(new, file='pvl_t2_all.csv', quote=FALSE, sep=',', col.names = TRUE, row.names = TRUE)
threshold = new
threshold
#write.table(threshold, file='pvl_t2_top10.csv', quote=FALSE, sep=',', col.names = TRUE, row.names = TRUE)
pdf('T2_filtered_PvsL_DESEQ2_top10_family_SV_clustered.pdf', width=7, height=4)
theme_set(theme_bw())
# Dot plot of log2 fold change per taxon, taxa sorted on the y axis.
ggplot(threshold, aes(y=factor(threshold$new.col,levels= rev(levels(factor(threshold$new.col)))), x=threshold$log2FoldChange)) +
geom_vline(xintercept = 0.0, color = "gray", size = 0.5) + coord_fixed(ratio=1) +
geom_point(size=2.3) +
ggtitle("P/L T2 ") +
xlab("log2FoldChange") +
ylab("16s Species") +
theme(text= element_text(size=10), axis.text.x = element_text(angle = -90, hjust = 0, vjust=0.5),
legend.position = 'none', plot.title = element_text(hjust = 0.5), axis.text=element_text(size=12))
dev.off()
```
## T5 PvL
```
# T5 P-vs-L DESeq2 results: same labelling pipeline as the T2 cell above,
# applied to pvl_t5.csv.
sigtab = read.csv("pvl_t5.csv", row.names = 1)
sigtab = as.data.frame.matrix(sigtab)
#sigtab <- sigtab %>% drop_na(Genus)
sigtab
summary(sigtab$log2FoldChange)
# Label rows by genus (column 13), falling back to family (column 12).
# NOTE(review): the `x>=25` break only triggers when the genus test fails,
# so rows with a genus keep being added past 25 -- confirm this is intended.
x = 1
new = data.frame()
vector = c()
for (i in row.names(sigtab)){
  temp = sigtab[i,]
  if (temp[13] != " g__"){
    new = rbind(new,temp)
    name = temp[,13]
    vector[x] = name
    #new = cbind(new, vector[x])
    x = x+1
  }else if (x>=25){
    break
  }
  else if (temp[13] == " g__" && temp[12] != " f__"){
    new = rbind(new,temp)
    name = temp[,12]
    vector[x] = name
    #new = cbind(new,vector[x])
    x = x+1
  } else if (x>=25){
    break
  }else {
    next
  }
}
# Attach labels, export, and plot log2 fold changes to PDF.
new[["new.col"]] <- vector
x
write.table(new, file='pvl_t5_all.csv', quote=FALSE, sep=',', col.names = TRUE, row.names = TRUE)
threshold = new
threshold
pdf('T5_filtered_PvsL_DESEQ2_top10_family_SV_clustered.pdf', width=6.5, height=4)
theme_set(theme_bw())
ggplot(threshold, aes(y=factor(threshold$new.col,levels= rev(levels(factor(threshold$new.col)))), x=threshold$log2FoldChange)) +
  geom_vline(xintercept = 0.0, color = "gray", size = 0.5) + coord_fixed(ratio=1.65) +
  geom_point(size=2.3) +
  ggtitle("P/L T5") +
  xlab("log2FoldChange") +
  ylab("16s Species") +
  theme(text= element_text(size=10), axis.text.x = element_text(angle = -90, hjust = 0, vjust=0.5),
        legend.position = 'none', plot.title = element_text(hjust = 0.5), axis.text=element_text(size=12))
dev.off()
```
## T2 LvCoL
```
# T2 L-vs-CoL DESeq2 results: same labelling pipeline, applied to lvcol_t2.csv.
sigtab = read.csv("lvcol_t2.csv", row.names = 1)
sigtab = as.data.frame.matrix(sigtab)
library(tidyr)
#sigtab <- sigtab %>% drop_na(Genus)
sigtab
summary(sigtab$log2FoldChange)
# Label rows by genus (column 13), falling back to family (column 12).
# NOTE(review): the `x>=25` break only triggers when the genus test fails,
# so rows with a genus keep being added past 25 -- confirm this is intended.
x = 1
new = data.frame()
vector = c()
for (i in row.names(sigtab)){
  temp = sigtab[i,]
  if (temp[13] != " g__"){
    new = rbind(new,temp)
    name = temp[,13]
    vector[x] = name
    #new = cbind(new, vector[x])
    x = x+1
  }else if (x>=25){
    break
  }
  else if (temp[13] == " g__" && temp[12] != " f__"){
    new = rbind(new,temp)
    name = temp[,12]
    vector[x] = name
    #new = cbind(new,vector[x])
    x = x+1
  } else if (x>=25){
    break
  }else {
    next
  }
}
# Attach labels, export, and plot log2 fold changes to PDF.
new[["new.col"]] <- vector
x
write.table(new, file='lvcol_t2_all.csv', quote=FALSE, sep=',', col.names = TRUE, row.names = TRUE)
threshold = new
threshold
pdf('T2_filtered_LvsCoL_DESEQ2_top10_family_clustered.pdf', width=6, height=5)
theme_set(theme_bw())
ggplot(threshold, aes(y=factor(threshold$new.col,levels= rev(levels(factor(threshold$new.col)))), x=threshold$log2FoldChange)) +
  geom_vline(xintercept = 0.0, color = "gray", size = 0.5) + coord_fixed(ratio=1.35) +
  geom_point(size=2.3) +
  ggtitle("L/LCoL T2") +
  xlab("log2FoldChange") +
  ylab("16s Species") +
  theme(text= element_text(size=10), axis.text.x = element_text(angle = -90, hjust = 0, vjust=0.5),
        legend.position = 'none', plot.title = element_text(hjust = 0.5), axis.text=element_text(size=12))
dev.off()
```
## T5 LvCoL
```
# T5 L-vs-CoL DESeq2 results: same labelling pipeline, applied to lvcol_t5.csv.
sigtab = read.csv("lvcol_t5.csv", row.names = 1)
sigtab = as.data.frame.matrix(sigtab)
#sigtab <- sigtab %>% drop_na(Genus)
sigtab
summary(sigtab$log2FoldChange)
# Label rows by genus (column 13), falling back to family (column 12).
# NOTE(review): this cell uses `x>25` while the other three cells use
# `x>=25` (off-by-one inconsistency); also the break only triggers when the
# genus test fails, so rows with a genus keep being added past the limit.
x = 1
new = data.frame()
vector = c()
for (i in row.names(sigtab)){
  temp = sigtab[i,]
  if (temp[13] != " g__"){
    new = rbind(new,temp)
    name = temp[,13]
    vector[x] = name
    #new = cbind(new, vector[x])
    x = x+1
  }else if (x>25){
    break
  }
  else if (temp[13] == " g__" && temp[12] != " f__"){
    new = rbind(new,temp)
    name = temp[,12]
    vector[x] = name
    #new = cbind(new,vector[x])
    x = x+1
  } else if (x>25){
    break
  }else {
    next
  }
}
# Attach labels, export, and plot log2 fold changes to PDF.
new[["new.col"]] <- vector
x
write.table(new, file='lvcol_t5_all.csv', quote=FALSE, sep=',', col.names = TRUE, row.names = TRUE)
threshold = new
threshold
pdf('T5_filtered_LvsCoL_DESEQ2_top10_family_clustered.pdf', width=6, height=5)
theme_set(theme_bw())
ggplot(threshold, aes(y=factor(threshold$new.col,levels= rev(levels(factor(threshold$new.col)))), x=threshold$log2FoldChange)) +
  geom_vline(xintercept = 0.0, color = "gray", size = 0.5) + coord_fixed(ratio=2.35) +
  geom_point(size=2.3) +
  ggtitle("L/LCoL T5") +
  xlab("log2FoldChange") +
  ylab("16s Species") +
  theme(text= element_text(size=11), axis.text.x = element_text(angle = -90, hjust = 0, vjust=0.5),
        legend.position = 'none', plot.title = element_text(hjust = 0.5), axis.text=element_text(size=12))
dev.off()
```
| github_jupyter |
## Vorbereitung
```
import pandas as pd
# Platz für weitere Libraries, die Sie brauchen möchten...
import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
```
Wir scrapen die hundert All-Time High Songs der Schweizer Hitparade
Quelle: https://hitparade.ch/charts/best/singles
Und suchen uns dann die Songtexte dazu:
Quelle: https://www.songtexte.com/
### Song-Infos
```
# Scrape the all-time-high singles chart from hitparade.ch into a DataFrame,
# normalise dtypes and persist it as charts.csv.
url = 'https://hitparade.ch/charts/best/singles'
response = requests.get(url)
doc2 = BeautifulSoup(response.text, "lxml")
# First <table class="content">; skip the two header rows.
trs = doc2.find_all('table', class_='content')[0].find_all('tr')[2:]
songs = []
for tr in trs:
    song = {}
    tds = tr.find_all('td')
    song['Rang'] = tds[0].text
    song['Link'] = tds[2].find('a')['href']
    song['Artist'] = tds[2].find('a').text
    # The title is the trailing text node after the artist link.
    song['Titel'] = tds[2].contents[-1]
    song['Eintritt'] = tds[3].text
    song['Peak'] = tds[4].text
    song['Wochen'] = tds[5].text
    song['Punkte'] = tds[6].text
    songs.append(song)
df = pd.DataFrame(songs)
# Overwrite the scraped rank with a dense 1..N position -- presumably because
# the site's rank column has gaps/ties; TODO confirm.
df['Rang'] = df.index + 1
df['Punkte'] = df['Punkte'].astype(int)
df['Peak'] = df['Peak'].astype(int)
df['Rang'] = df['Rang'].astype(int)
df['Wochen'] = df['Wochen'].astype(int)
# Fix the column order, persist, and reload (so later cells can start from disk).
df = df[['Rang', 'Artist', 'Titel', 'Eintritt', 'Wochen', 'Peak', 'Punkte', 'Link']]
df.head(2)
df.to_csv('charts.csv', index=False)
df = pd.read_csv('charts.csv')
df.head(10)
```
### Song-Texte
```
# songtexte.com search URL pieces; the URL-encoded query goes between them.
url_start = 'https://www.songtexte.com/search?q='
url_end = '&c=all'
```
Ein Probesong
```
# Scrape the lyrics for one probe song to validate the approach.
# The '&' inside the artist name must be percent-encoded as %26, otherwise
# it would terminate the `q=` query parameter.
# FIX: the original cell re-assigned `url` on a second line whose string
# literal was unterminated (a SyntaxError); that abandoned experiment has
# been removed and the properly encoded URL is kept.
url = url_start + "DJ+Ötzi+%26+Nik+P.+Ein+Stern+(...+der+deinen+Namen+trägt)" + url_end
response = requests.get(url)
doc = BeautifulSoup(response.text, "lxml")
# The "top hit" anchor links to the song's lyrics page.
link = doc.find('a', class_='topHitLink')['href']
link
url2 = 'https://www.songtexte.com/' + link
response = requests.get(url2)
doc2 = BeautifulSoup(response.text, "lxml")
# Extract the raw lyrics text from the lyrics <div>.
lyrics = doc2.find('div', {'id': 'lyrics'}).text
lyrics
```
Alle Songtexte:
```
def scrape_songtexts(artist, title):
    """Return the lyrics for *artist* / *title* from songtexte.com.

    Builds a search query (spaces become '+', then the whole string is
    percent-encoded), follows the first "top hit" result and scrapes the
    lyrics <div>. Returns an empty string when either the search yields no
    top hit or the hit page has no lyrics block.
    """
    searchstr = quote((artist + " " + title).replace(' ', '+'))
    url = 'https://www.songtexte.com/search?q=' + searchstr + '&c=all'
    response = requests.get(url)
    doc = BeautifulSoup(response.text, "lxml")
    # Look up the top-hit anchor once instead of querying the soup twice.
    top_hit = doc.find('a', class_='topHitLink')
    if not top_hit:
        return ""
    response = requests.get('https://www.songtexte.com/' + top_hit['href'])
    doc2 = BeautifulSoup(response.text, "lxml")
    lyrics_div = doc2.find('div', {'id': 'lyrics'})
    return lyrics_div.text if lyrics_div else ""
# Fetch lyrics for every charted song (one HTTP round-trip per row),
# then persist and reload the enriched table.
df['Songtext'] = df.apply(lambda row: scrape_songtexts(row['Artist'], row['Titel']), axis=1)
df.head()
# Rows where no lyrics could be found (scraper returned '').
df[df['Songtext'] == '']
df.to_csv('dataprojects/Songtexte/charts_mit_texten.csv', index=False)
df = pd.read_csv('dataprojects/Songtexte/charts_mit_texten.csv')
df.head()
```
| github_jupyter |
```
# Import Libraries
import numpy as np
import pandas as pd
from scipy.stats import iqr
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pickle
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
from sklearn import tree, metrics
import sklearn.metrics as metrics
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBClassifier
from sklearn.inspection import permutation_importance
from sklearn.metrics import plot_confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from pdpbox.pdp import pdp_isolate, pdp_plot, pdp_interact, pdp_interact_plot
import shap
# Read In csv: load the raw Kickstarter 2018 dataset.
df = pd.read_csv('ks-projects-201801.csv')
df.head()
```
## Doing some EDA
```
# Printing information about variables in Dataset (dtypes, null counts).
df.info()
# Checking for unique values for every column
for col in df:
    print(df[col].unique())
# Base accuracy: failure rate is ~62%, success rate is 36%
df['state'].value_counts(normalize=True)
```
## Wrangle function to read and clean data
```
# loading data and cleaning dataset
def wrangle(file_path):
    """Load the Kickstarter CSV and derive the modelling columns.

    Parses the two date columns, drops still-live campaigns, adds a float
    `success` flag (1.0 when state == 'successful', else 0.0), and splits
    the launch timestamp into hour/day/month/year columns.
    """
    # Read in data, parsing the two date columns.
    df = pd.read_csv(file_path, parse_dates=['deadline', 'launched'], na_values=['N,0"'])
    # Drop any live campaigns. FIX: take an explicit copy -- the original
    # assigned new columns straight onto the result of query(), which
    # triggers pandas' SettingWithCopy hazard (silent no-op under
    # copy-on-write semantics).
    df = df.query('state != "live"').copy()
    # 1.0 for successful campaigns, 0.0 otherwise (float, as before).
    df['success'] = (df['state'] == 'successful').astype(float)
    # Break the launch timestamp into separate calendar components.
    df = df.assign(hour=df.launched.dt.hour,
                   day=df.launched.dt.day,
                   month=df.launched.dt.month,
                   year=df.launched.dt.year)
    return df
# Rebuild the working frame from the raw CSV via the cleaning function.
df = wrangle("ks-projects-201801.csv");
```
## Doing EDA on new Dataset
```
df.head()
# Get top 10 most frequent names
n=10
df['name'].value_counts()[:n].index.tolist()
# Summary statistics about Dataset
df.describe()
```
## Checking for outliers
```
# Inspect pledge/goal distributions and drop IQR outliers on the
# inflation-adjusted pledge column.
plt.boxplot(df['pledged'])
# NOTE(review): this creates a fresh (empty) figure *after* the boxplot was
# drawn, so the figsize does not apply to it -- likely unintended order.
fig = plt.figure(figsize =(10, 7))
plt.show()
sns.boxplot(x=df['goal'])
# Interquartile range of the USD-converted pledge amounts.
Q1 = df['usd_pledged_real'].quantile(0.25)
Q3 = df['usd_pledged_real'].quantile(0.75)
IQR = Q3 - Q1
# Filtering values between Q1-1.5IQR and Q3+1.5IQR to exclude outliers
filtered = df.query('(@Q1 - 1.5 * @IQR) <= usd_pledged_real <= (@Q3 + 1.5 * @IQR)')
# Print a measure of the asymmetry of the probability distribution of a real-valued random variable about its mean
print(filtered.skew())
filtered.head()
# Building scatterplot to see the correlation between two variables
fig, ax = plt.subplots(figsize=(20,10))
ax.scatter(filtered['goal'], filtered['usd_pledged_real'])
ax.set_xlabel('Goal')
ax.set_ylabel('Pledged')
plt.show()
#seeing how many unique values are there in the category column
filtered['category'].nunique()
# Base accuracy at this point: failure rate is ~72%, success rate is ~28%
filtered['success'].value_counts(normalize=True)
```
## Creating new columns with seasons and seasons_encoded values
```
def seasons(date_ranges):
    """Map a timestamp to its season name by calendar date.

    Boundaries follow the solstices/equinoxes ("03-21".."06-20" Spring,
    "06-21".."09-20" Summer, "09-21".."12-20" Fall, the remainder Winter).
    Missing timestamps (pd.NaT) map to the placeholder string "NAN".
    """
    if date_ranges is pd.NaT:
        return "NAN"
    # Compare month-day strings lexicographically; zero-padded "%m-%d"
    # makes string order equal calendar order within a year.
    month_day = date_ranges.strftime("%m-%d")
    if "03-21" <= month_day <= "06-20":
        return "Spring"
    if "06-21" <= month_day <= "09-20":
        return "Summer"
    if "09-21" <= month_day <= "12-20":
        return "Fall"
    if month_day >= "12-21" or month_day <= "03-20":
        return "Winter"
    return ""
# Derive the season name for both the launch and deadline dates.
filtered['launch_season'] = filtered['launched'].apply(seasons)
filtered['deadline_season'] = filtered['deadline'].apply(seasons)
def season_encoder(values):
    """Encode a season name as an integer (Spring=1, Summer=2, Fall=3, Winter=4).

    Any other input -- including the "NAN" placeholder produced by
    seasons() -- maps to the string "NAN", matching the original
    if/elif chain's fallthrough behaviour.
    """
    codes = {"Spring": 1, "Summer": 2, "Fall": 3, "Winter": 4}
    return codes.get(values, "NAN")
# Integer-encode the two season columns for modelling.
filtered['launch_season_encode'] = filtered['launch_season'].apply(season_encoder)
filtered['deadline_season_encode'] = filtered['deadline_season'].apply(season_encoder)
```
## Creating new column with duration in days
```
# Campaign duration in whole days (deadline minus launch timestamp).
filtered['launched'] = pd.to_datetime(filtered['launched'])
filtered['deadline'] = pd.to_datetime(filtered['deadline'])
filtered['duration'] = filtered['deadline'] - filtered['launched']
filtered['duration'] = filtered['duration'].dt.days
filtered.head()
```
## After doing some feature selection below, creating new DataFrame
```
# Assemble the modelling table: label-encoded categoricals joined with the
# selected numeric columns, indexed by project name.
#choosing categorical features to be in the model
cat_features = ['country', 'currency', 'main_category']
#label encoding and creating new dataframe with encoded columns
encoder = LabelEncoder()
encoded = filtered[cat_features].apply(encoder.fit_transform)
encoded.head()
#choosing data columns to be in model and joining with categorical col above
data_features = ['goal', 'month', 'year', 'success', 'duration']
baseline = filtered[data_features].join(encoded)
baseline.head()
# Use the project name as the index of the final table.
baseline_index = filtered['name']
baseline = baseline.join(baseline_index).set_index('name')
baseline.head()
# Persist the modelling table for reuse.
# FIX: the original wrote over 'ks-projects-201801.csv' -- the raw dataset
# read at the top of the notebook -- so a re-run would destroy the source
# data. Write to a distinct file instead.
baseline.to_csv(r'ks-projects-baseline.csv')
# Pairwise correlation of all columns in the modelling table.
baseline.corr()
# Split the table into feature matrix X and target vector y.
target = 'success'
X = baseline.drop(columns=target)
y = baseline[target]
# 80/20 train/validation split with a fixed seed for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=.2, random_state=42)
```
## Establishing Baseline
```
# The Baseline accuracy is the majority class in y_val and what percentage of the training observations it represents
# (index 0 selects the proportion of the failure class, 0.0).
baseline_acc = y_train.value_counts(normalize=True)[0]
print('Baseline Accuracy Score:', baseline_acc)
```
## Building Models
### We are building not one but few different models to see which one is the best to make predictions
```
# Creating and fitting model1 = XGBoost.
# FIX: the original passed `label_encoder=False`, which is not an
# XGBClassifier parameter (unknown kwargs are silently forwarded to the
# booster); the intended parameter is `use_label_encoder`.
model = XGBClassifier(use_label_encoder=False, random_state=42, n_estimators=50, n_jobs=-1, max_depth=15)
model.fit(X_train, y_train)
# Training vs validation accuracy to gauge over/under-fitting.
training_accuracy = model.score(X_train, y_train)
val_accuracy = model.score(X_val, y_val)
print('Training Accuracy Score:', training_accuracy)
print('Validation Accuracy Score:', val_accuracy)
# Models 2-5: fit several classifier families on the same split so their
# train/validation accuracies can be compared directly.
# Creating and fitting model2 = LogisticRegression
model2 = LogisticRegression(random_state=42, solver='newton-cg')
model2.fit(X_train, y_train)
# Calculate the training and validation accuracy scores for model2
training_accuracy2 = model2.score(X_train, y_train)
val_accuracy2 = model2.score(X_val, y_val)
print('Training Accuracy Score:', training_accuracy2)
print('Validation Accuracy Score:', val_accuracy2)
# Creating and fitting model3 = DecisionTree
model3 = tree.DecisionTreeClassifier(random_state=42)
model3.fit(X_train, y_train)
# Calculate the training and validation accuracy scores for model3
training_accuracy3 = model3.score(X_train, y_train)
val_accuracy3 = model3.score(X_val, y_val)
print('Training Accuracy Score:', training_accuracy3)
print('Validation Accuracy Score:', val_accuracy3)
# Creating and fitting model4 = RandomForestClassifer
model4 = RandomForestClassifier(random_state=42, n_estimators=50, n_jobs=-1)
model4.fit(X_train, y_train);
# Calculate the training and validation accuracy scores for model4
training_accuracy4 = model4.score(X_train, y_train)
val_accuracy4 = model4.score(X_val, y_val)
print('Training Accuracy Score:', training_accuracy4)
print('Validation Accuracy Score:', val_accuracy4)
# Creating and fitting model5 = GradientBoostingClassifer
model5 = GradientBoostingClassifier(random_state=42, n_estimators=150, min_samples_leaf=5, max_leaf_nodes=350, max_depth=4, learning_rate=0.25)
model5.fit(X_train, y_train);
# Calculate the training and validation accuracy scores for model5
training_accuracy5 = model5.score(X_train, y_train)
val_accuracy5 = model5.score(X_val, y_val)
print('Training Accuracy Score:', training_accuracy5)
print('Validation Accuracy Score:', val_accuracy5)
```
## Models Tuning
### To get better results we did hyperparameter tuning for each model and based on that we picked the model with the best score
```
#RandomizedSearchCV
#instead of choosing the hyperparameters manually, this helps you choose it
# Randomly samples 5 combinations from the grid, scored by 3-fold CV on the
# gradient-boosting model.
param_grid = {
    'max_depth': [3, 4, 5],
    'n_estimators': [150],
    'min_samples_leaf': [3, 4, 5],
    'max_leaf_nodes': [350, 370, 400],
    'learning_rate': [0.25, 0.3,]
}
search = RandomizedSearchCV(model5,
                            param_distributions=param_grid,
                            n_iter=5,
                            n_jobs=-1,
                            cv=3,
                            verbose=1)
search.fit(X_train, y_train);
# Best cross-validated score and the parameter set that produced it.
search.best_score_
search.best_params_
```
### Making some predictions
```
# Predicted class labels for the validation split.
y_pred = model5.predict(X_val)
y_pred
```
## Communicate Results
## Making plot to show feature importances
```
# Horizontal bar chart of the gradient-boosting feature importances.
coef = pd.DataFrame(data=model5.feature_importances_, index=X_train.columns, columns=['coefficients'])
# Interested in the most extreme coefficients
coef['coefficients'].abs().sort_values().plot(kind='barh');
plt.title('Most Important Features')
plt.show()
```
## Classification Report
```
# Precision/recall/F1 per class on the validation split.
print(classification_report(y_val, model5.predict(X_val), target_names=['Not successful', 'Successful']))
```
## Confusion Matrix to see how many predictions were made correct
```
# Confusion matrix of validation predictions (raw counts via '.0f').
plot_confusion_matrix(
    model5,
    X_val,
    y_val,
    values_format = '.0f',
    display_labels = ['Not successful','Successful']
);
```
## Creating a partial dependence plot to show how a model prediction partially depends on two most important values of the input variables of interest
```
# Two-way partial dependence of the prediction on goal x main_category.
features = ['goal', 'main_category']
interact = pdp_interact(model5,
                        dataset=X_val,
                        model_features=X_val.columns,
                        features=features)
pdp_interact_plot(interact, plot_type='grid', feature_names=features);
```
## Shapley Plot
### - Showing the influence of features in *individual* predictions.
```
# SHAP values for the validation split, plus a force plot for one prediction.
explainer = shap.TreeExplainer(model5)
shap_values = explainer.shap_values(X_val)
shap_values
shap.initjs()
# NOTE(review): the SHAP row (index -3) and the feature row (index 0) refer
# to different observations -- confirm whether both should use the same index.
shap.force_plot(explainer.expected_value[0], shap_values[-3], X_val.iloc[0,:])
```
## Picking most important features for model
```
#perm_imp is set to be permutation importance of boosting model on X_val and y_val
perm_imp = permutation_importance(model5, X_val, y_val, random_state=42)
#setting data as dict of the permutation importances mean and std.
data = {'imp_mean':perm_imp['importances_mean'],
        'imp_std':perm_imp['importances_std']}
#setting permutation_importances to be data frame with columns in X val to be index and 'data' to be the other columns. Sorting by the mean importance from data.
permutation_importances5 = pd.DataFrame(data,index=X_val.columns).sort_values(by='imp_mean')
permutation_importances5
# Get feature importances
importances5 = pd.Series(model5.feature_importances_, X_train.columns)
# Plot feature importances
%matplotlib inline
import matplotlib.pyplot as plt
n = 20
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances5.sort_values()[-n:].plot.barh(color='grey');
```
## Saving the model
```
# Persist the trained gradient-boosting model.
# FIX: the original passed a bare open() handle to pickle.dump and never
# closed it; use a context manager so the file is flushed and closed
# deterministically.
with open('Model', 'wb') as model_file:
    pickle.dump(model5, model_file)
```
| github_jupyter |
# Plagiarism Detection Model
Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model.
This task will be broken down into a few discrete steps:
* Upload your data to S3.
* Define a binary classification model and a training script.
* Train your model and deploy it.
* Evaluate your deployed classifier and answer some questions about your approach.
To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.
> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.
It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset.
---
## Load Data to S3
In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data.
>The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3.
Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved.
```
import pandas as pd
import boto3
import sagemaker
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# session and role: the SageMaker session scopes all subsequent API calls;
# the execution role authorises training/deployment on your behalf.
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# create an S3 bucket (the account's default SageMaker bucket)
bucket = sagemaker_session.default_bucket()
```
## EXERCISE: Upload your training data to S3
Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples.
You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file.
```
# Exercise placeholders -- to be filled in by the student.
# should be the name of directory you created to save your features data
data_dir = None
# set prefix, a descriptive name for a directory
prefix = None
# upload all data to S3
```
### Test cell
Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook).
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
empty_check.append(obj.key)
print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes:
* Use a built-in classification algorithm, like LinearLearner.
* Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html).
* Define a custom PyTorch neural network classifier.
It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model.
---
## EXERCISE: Complete a training script
To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model.
A typical training script:
* Loads training data from a specified directory
* Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.)
* Instantiates a model of your design, with any specified hyperparams
* Trains that model
* Finally, saves the model so that it can be hosted/deployed, later
### Defining and training a model
Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will:
1. Import any extra libraries you need
2. Define any additional model training hyperparameters using `parser.add_argument`
2. Define a model in the `if __name__ == '__main__':` section
3. Train the model in that same section
Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments.
**Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library.
```
# Display the starter training script with syntax highlighting.
# directory can be changed to: source_sklearn or source_pytorch
!pygmentize source_sklearn/train.py
```
### Provided code
If you read the code above, you can see that the starter code includes a few things:
* Model loading (`model_fn`) and saving code
* Getting SageMaker's default hyperparameters
* Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y`
If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links.
---
# Create an Estimator
When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments:
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`.
* **role**: Role ARN, which was specified, above.
* **train_instance_count**: The number of training instances (should be left at 1).
* **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* **sagemaker_session**: The session used to train on Sagemaker.
* **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters.
Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`.
## EXERCISE: Define a Scikit-learn or PyTorch estimator
To import your desired estimator, use one of the following lines:
```
from sagemaker.sklearn.estimator import SKLearn
```
```
from sagemaker.pytorch import PyTorch
```
```
# your import and estimator code, here
```
## EXERCISE: Train the estimator
Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console.
```
%%time
# Train your estimator on S3 training data
```
## EXERCISE: Deploy the trained model
After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point.
To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments:
* **initial_instance_count**: The number of deployed instances (1).
* **instance_type**: The type of SageMaker instance for deployment.
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used.
```
%%time
# uncomment, if needed
# from sagemaker.pytorch import PyTorchModel
# deploy your model to create a predictor (exercise placeholder)
predictor = None
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to our test data.
The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
```
## EXERCISE: Determine the accuracy of your model
Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation.
**To pass this project, your model should get at least 90% test accuracy.**
```
# Exercise placeholders plus fixed sanity checks for the evaluation step.
# First: generate predicted, class labels
test_y_preds = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test that your model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
# Second: calculate the test accuracy
accuracy = None
print(accuracy)
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
```
### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is?
**Answer**:
### Question 2: How did you decide on the type of model to use?
**Answer**:
----
## EXERCISE: Clean up Resources
After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below.
```
# uncomment and fill in the line below!
# <name_of_deployed_predictor>.delete_endpoint()
```
### Deleting S3 bucket
When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again.
```
# deleting bucket, uncomment lines below
# bucket_to_delete = boto3.resource('s3').Bucket(bucket)
# bucket_to_delete.objects.all().delete()
```
### Deleting all your models and instances
When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally.
---
## Further Directions
There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below:
* Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0).
* Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism.
* Use language or character-level analysis to find different (and more) similarity features.
* Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not.
* Use API Gateway and a lambda function to deploy your model to a web application.
These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
| github_jupyter |
```
import pathlib
import json
import shutil
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
import pydicom
# Makes it so any changes in pymedphys is automatically
# propagated into the notebook without needing a kernel reset.
from IPython.lib.deepreload import reload
%load_ext autoreload
%autoreload 2
import pymedphys
from pymedphys.labs.autosegmentation import pipeline, filtering, indexing, mask, tfrecord
# Put all of the DICOM data within a directory called 'dicom'
# organised by 'training', 'validation', and 'testing' in here:
data_path_root = pathlib.Path.home().joinpath('.data/dicom-ct-and-structures')
dicom_directory = data_path_root.joinpath('dicom')
training_directory = dicom_directory.joinpath('training')
validation_directory = dicom_directory.joinpath('validation')
testing_directory = dicom_directory.joinpath('testing')
# Of note, the DICOM file directory structure need not have any further
# organisation beyond being placed somewhere within one of the three
# 'training', 'validation', or 'testing'. They can be organised into
# directories by patient but that is not a requirement.
name_mappings_path = data_path_root.joinpath('name_mappings.json')
dicom_paths = pymedphys.zenodo_data_paths("auto-segmentation")
for path in dicom_paths:
if path.suffix == '.dcm':
dataset_id = path.parent.name
parent_and_file = path.parts[-2::]
if int(dataset_id) < 4:
new_path = validation_directory.joinpath(*parent_and_file)
elif int(dataset_id) < 12:
new_path = testing_directory.joinpath(*parent_and_file)
else:
new_path = training_directory.joinpath(*parent_and_file)
elif path.name == 'name_mappings.json':
new_path = name_mappings_path
else:
raise ValueError(f"Unexpected file found. {path}.")
if not new_path.exists():
new_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(path, new_path)
# The following names_map is used to standardise the structure names
names_map = filtering.load_names_mapping(name_mappings_path)
# Create masks for the following structures, in the following order
structures_to_learn = [
'lens_left', 'lens_right', 'eye_left', 'eye_right', 'patient']
# Use the following to filter the slices used for training, validation,
# and testing
filters = {
"study_set_must_have_all_of": structures_to_learn,
"slice_at_least_one_of": [
'lens_left', 'lens_right', 'eye_left', 'eye_right'
],
"slice_must_have": ['patient'],
"slice_cannot_have": []
}
(
ct_image_paths,
structure_set_paths,
ct_uid_to_structure_uid,
structure_uid_to_ct_uids,
) = indexing.get_uid_cache(data_path_root)
(
structure_names_by_ct_uid,
structure_names_by_structure_set_uid,
) = indexing.get_cached_structure_names_by_uids(
data_path_root, structure_set_paths, names_map
)
datasets = pipeline.create_datasets(
data_path_root, names_map, structures_to_learn, filters)
# Create all npz files and build ct_uid to training type map
ct_uid_to_training_type = {}
ct_uids_by_training_type = {}
for dataset_type, dataset in datasets.items():
for ct_uid, x_grid, y_grid, input_array, output_array in dataset:
ct_uid = ct_uid.numpy().decode()
ct_uid_to_training_type[ct_uid] = dataset_type
try:
ct_uids_by_training_type[dataset_type].append(ct_uid)
except KeyError:
ct_uids_by_training_type[dataset_type] = []
ct_uids_by_training_type
structures_to_learn_path = data_path_root.joinpath("structures_to_learn.json")
with open(structures_to_learn_path, "w") as f:
json.dump(structures_to_learn, f)
ct_uids_by_training_type_path = data_path_root.joinpath("ct_uids_by_training_type.json")
with open(ct_uids_by_training_type_path, "w") as f:
json.dump(ct_uids_by_training_type, f)
```
| github_jupyter |
# CASPER example
In this jupyter notebook we will give an example of how some of the functions contained with Casper can be used to predict the concentration and shape parameter for a given cosmology as a function of mass and redshift. Additional functions can be used to plot the resulting density and circular velocity profiles. For a detailed description of the model and how it was developed see the original [paper](arxiv_link).
First let us import the necessary module. For this tutorial you will need to have the following modules installed (if using binder these will automatically be available):
- [numpy](https://numpy.org/)
- [matplotlib](https://matplotlib.org/)
- [PyCAMB](https://camb.readthedocs.io/en/latest/) (Any method that reliably predicts the linear power spectra can be used, here we use CAMB)
- [Casper]()
```
import numpy as np
import matplotlib.pyplot as plt
from casper import casper
import camb
```
First let us define the cosmological variables we are using. In this example we are going to use the Planck 2018 best fit LCDM parameters. Here we have used k_pivot and A_s to normalise the power spectrum correctly; these are the inputs that CAMB requires as well as being the values presented in the Planck 2018 results. However, it is common to normalise the power spectrum using sigma_8. We discuss how to do this in the [Appendix](#appendix).
```
# Planck 2018 best-fit flat-LCDM parameters.
ns = 0.967                    # scalar spectral index
A_s = 2.105*10**(-9)          # primordial power spectrum amplitude
k_pivot = 0.05                # pivot scale at which A_s is defined
omega_ch2 = 0.119             # physical cold dark matter density, Omega_c h^2
omega_bh2 = 0.0224            # physical baryon density, Omega_b h^2
h = 0.677                     # dimensionless Hubble parameter, H0/100
# Total matter density: Omega_m = (Omega_c h^2 + Omega_b h^2) / h^2
omega_m = omega_ch2/h**2 + omega_bh2/h**2
rho_crit = casper.Critical_density() # calculate the critical density, in units of h^2 M_sun Mpc^-1
```
Now that we have defined the appropriate cosmological parameters we can generate the linear power spectra at the redshifts we want. Here we are going to look at z=0, 1 and 2.
Be aware that this cell will take a while to run, due to the high k-scale we have asked CAMB to calculate the linear power spectrum to. For your own code it may be worth running camb once
and writing the power spectrum output (e.g., as a text file).
```
# Configure CAMB with the cosmology defined above and request the linear
# matter power spectrum at z = 2, 1 and 0.
pars = camb.CAMBparams()
pars.set_cosmology(H0 = h*100, ombh2 = omega_bh2, omch2 = omega_ch2)
pars.InitPower.set_params(ns = ns, As = A_s, pivot_scalar = k_pivot)
# The very high kmax is what makes this cell slow to run.
pars.set_matter_power(redshifts = [2.0, 1.0, 0.0],kmax = 1e3/h)
results = camb.get_results(pars)
# kh: wavenumbers [h/Mpc]; z: redshifts; pk: P(k), one row per redshift.
# NOTE(review): rows appear to be ordered by increasing redshift (pk[0] is
# z=0) — confirm against the returned `z` array before relying on it.
kh, z, pk = results.get_matter_power_spectrum(minkh = 1e-3, maxkh = 1e3, npoints = 1000)
```
We can have a quick look at what the power spectra for this cosmology looks like.
```
# Quick visual check of the z=0 linear matter power spectrum on log-log axes.
fig = plt.figure()
ax = fig.gca()
ax.loglog(kh, pk[0, :])
ax.set_xlabel('$k$ [$h$ Mpc$^{-1}$]')
ax.set_ylabel('$P(k)$ [$h^3$ Mpc$^{-3}$]')
```
With the background cosmological parameters set and the power spectra generated we can now use CASPER to predict c and alpha for the masses we want.
```
#define mass range (in M_200c [h^-1 M_sun]) to calculate the density parameters
M=np.logspace(12,14,100)
# casper.casper takes the mass array, the linear P(k) as an (N, 2) array of
# (k, P) pairs, and Omega_m. pk rows are assumed ordered by increasing
# redshift (pk[0] -> z=0, pk[1] -> z=1, pk[2] -> z=2) — TODO confirm via `z`.
c0,alpha0=casper.casper(M, np.vstack((kh, pk[0,:])).T, omega_m, return_peak_height=False)
c1,alpha1=casper.casper(M, np.vstack((kh, pk[1,:])).T, omega_m, return_peak_height=False)
c2,alpha2=casper.casper(M, np.vstack((kh, pk[2,:])).T, omega_m, return_peak_height=False)
```
If we had wanted to know the resulting peak height values used to model c and alpha we just needed to set the keyword 'return_peak_height' to True. The function would then return (c, alpha, nu_c, nu_alpha).
Let us now plot the resulting concentration and shape parameter mass relations for z=0, 1 and 2.
```
# Concentration-mass relation at the three redshifts.
plt.figure()
for concentration, curve_label in ((c0, 'z=0'), (c1, 'z=1'), (c2, 'z=2')):
    plt.plot(M, concentration, label=curve_label)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$M_{200c} [h^{-1} M_{\\odot}]$')
plt.ylabel('$c$')
plt.legend(frameon=False)
# Shape-parameter-mass relation at the same redshifts.
plt.figure()
for shape_param, curve_label in ((alpha0, 'z=0'), (alpha1, 'z=1'), (alpha2, 'z=2')):
    plt.plot(M, shape_param, label=curve_label)
plt.xscale('log')
plt.xlabel('$M_{200c} [h^{-1} M_{\\odot}]$')
plt.ylabel('$\\alpha$')
plt.legend(frameon=False)
```
As well as using CASPER to generate the concentration and shape parameter for the given cosmology, redshift and mass (which is the primary use of the module) we can also plot what the resulting circular velocity and density profiles look like.
Let's compare a 10^12 to a 10^15 h^-1 M_sun mass halo for the same cosmology at z=0.
First we need to calculate the values of c and alpha for these haloes.
```
# Two example haloes: 10^12 and 10^15 h^-1 M_sun.
M200 = np.array([10**12, 10**15])
# Invert M200 = (4/3) pi R200^3 * 200 rho_crit to get R200c.
R200 = (M200 /(200*rho_crit*4/3*np.pi))**(1/3) # need to calculate the size of the halo, which we use later
# Concentration and Einasto shape parameter for these two haloes at z=0.
c,alpha = casper.casper(M200, np.vstack((kh, pk[0,:])).T, omega_m, return_peak_height=False)
```
We also need to define the radial range (in units of R200c) that we want to plot over.
```
r = np.logspace(-1,0,100)
```
To be able to plot the resulting Einasto profiles we need to know the normalisation, rho_2. This can be constrained by ensuring that the integrated mass of the density profile is indeed the mass of the halo. We can calculate the mass within a given radius using the function *Einasto_mass*(r, rho_2, c, alpha, R200).
```
# Normalisation rho_2 for each halo: require that the Einasto profile
# integrated out to R200 (r/R200 = 1 with unit normalisation) equals M200.
rho_2 = np.array([
    M200[halo] / casper.Einasto_mass(1, 1, c[halo], alpha[halo], R200[halo])
    for halo in range(len(M200))
])
```
We can now plot the resulting circular velocity and density profiles.
```
# Density profiles of the two haloes at z=0.
plt.figure()
for i in range(len(M200)):
    # Bug fix: use alpha[i] (computed for M200 above and already used for
    # rho_2), not alpha0[i], which belongs to the 100-point mass array M.
    rho = casper.Einasto_density(r, rho_2[i], c[i], alpha[i])
    plt.plot(r, rho, label='$M_{200c}=%.0e$, $z=0$'% M200[i])
plt.xlabel('$r/R_{200c}$')
plt.ylabel('$\\rho$ $[h^2 M_{\\odot} Mpc^{-3}]$')
plt.legend(frameon=False)
plt.xscale('log')
plt.yscale('log')
# Circular velocity profiles, normalised by their maximum.
plt.figure()
for i in range(len(M200)):
    #We do not need the factor of G here as we will normalise by the maximum circular velocity
    # Bug fix: alpha[i] instead of alpha0[i], as above.
    v_circ = np.sqrt( casper.Einasto_mass(r, rho_2[i], c[i], alpha[i], R200[i]) /r)
    plt.plot(r, v_circ/np.max(v_circ), label='$M_{200c}=%.0e$, $z=0$'% M200[i])
plt.xlabel('$r/R_{200c}$')
plt.ylabel('$v_{circ}/v_{circ,max}$')
plt.legend(frameon=False)
```
The above example covers most of the useful functionality of casper. With the main function being casper(), that predicts c and alpha. The functionality of the module is relatively straightforward and is essentially a collection of functions that can be used to calculate density profiles and peak height values for a given cosmology. A full list of the available functions with basic documentation can be found with the following.
```
help(casper)
```
If you have any issues or questions then please feel free to contact me at S.T.Brown@2018.ljmu.ac.uk.
Additionally, if the results of this model to predict c and alpha are used in any published work can you please cite the [paper](link) appropriately.
<a id='appendix'></a>
## Appendix
In this section we discuss how you can use CAMB to normalise the power spectrum with respect to a given value of sigma_8. We first need to select a choice of A_s and k_pivot and the sigma_8 value we want. These could in principle be any values, though it's best to select something not too extreme and close to those above to avoid rounding errors, etc.
```
A_s_1 = 10**(-9)
k_pivot = 0.05
sigma_8 = 0.8
```
Here we have used the Planck pivot point, and a value of A_s that is roughly the correct order of magnitude for what we expect. Next we need to calculate what sigma_8 is for this particular cosmology.
```
# Run CAMB once with the trial normalisation A_s_1 to measure the
# sigma_8 it produces for this cosmology.
pars = camb.CAMBparams()
pars.set_cosmology(H0 = h*100, ombh2 = omega_bh2, omch2 = omega_ch2)
pars.InitPower.set_params(ns = ns, As = A_s_1, pivot_scalar = k_pivot)
# A modest kmax = 2 suffices here since only sigma_8 is needed.
pars.set_matter_power(redshifts = [0.0],kmax = 2)
results = camb.get_results(pars)
sigma_8_1 = results.get_sigma8()
print(sigma_8_1)
```
Here we see that for the cosmology we've chosen the sigma_8~0.5, representing a universe with less structure than we observe in our own. This value is close to what we want, though a little lower. We can use the fact that sigma_8 is proportional to (A_s)^0.5 to calculate the value of A_s that will give us sigma_8=0.8.
```
A_s = A_s_1 * (sigma_8 /sigma_8_1)**2
```
We can then rerun camb with all the same parameters, except now using the properly normalised value for A_s, to generate the correctly normalised power spectrum. We can also confirm that we do indeed recover the value of sigma_8 we want.
```
pars = camb.CAMBparams()
pars.set_cosmology(H0 = h*100, ombh2 = omega_bh2, omch2 = omega_ch2)
pars.InitPower.set_params(ns = ns, As = A_s, pivot_scalar = k_pivot)
pars.set_matter_power(redshifts = [0.0],kmax = 2)
results = camb.get_results(pars)
print(results.get_sigma8())
```
| github_jupyter |

Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Logging
_**This notebook showcases various ways to use the Azure Machine Learning service run logging APIs, and view the results in the Azure portal.**_
---
---
## Table of Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. Validate Azure ML SDK installation
1. Initialize workspace
1. Set experiment
1. [Logging](#Logging)
1. Starting a run
1. Viewing a run in the portal
1. Viewing the experiment in the portal
1. Logging metrics
1. Logging string metrics
1. Logging numeric metrics
1. Logging vectors
1. Logging tables
1. Uploading files
1. [Analyzing results](#Analyzing-results)
1. Tagging a run
1. [Next steps](#Next-steps)
## Introduction
Logging metrics from runs in your experiments allows you to track results from one run to another, determining trends in your outputs and understand how your inputs correspond to your model and script performance. Azure Machine Learning services (AzureML) allows you to track various types of metrics including images and arbitrary files in order to understand, analyze, and audit your experimental progress.
Typically you should log all parameters for your experiment and all numerical and string outputs of your experiment. This will allow you to analyze the performance of your experiments across multiple runs, correlate inputs to outputs, and filter runs based on interesting criteria.
The experiment's Run History report page automatically creates a report that can be customized to show the KPI's, charts, and column sets that are interesting to you.
|  |  |
|:--:|:--:|
| *Run Details* | *Run History* |
---
## Setup
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) Notebook first if you haven't already to establish your connection to the AzureML Workspace. Also make sure you have tqdm and matplotlib installed in the current kernel.
```
(myenv) $ conda install -y tqdm matplotlib
```
### Validate Azure ML SDK installation and get version number for debugging purposes
```
from azureml.core import Experiment, Workspace, Run
import azureml.core
import numpy as np
from tqdm import tqdm
# Check core SDK version number
print("This notebook was created using SDK version 1.1.5, you are currently running version", azureml.core.VERSION)
```
### Initialize workspace
Initialize a workspace object from persisted configuration.
```
# Load the workspace from the persisted config (written by the configuration
# notebook), then echo its identifying details for a quick sanity check.
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
```
### Set experiment
Create a new experiment (or get the one with the specified name). An *experiment* is a container for an arbitrary set of *runs*.
```
experiment = Experiment(workspace=ws, name='logging-api-test')
```
---
## Logging
In this section we will explore the various logging mechanisms.
### Starting a run
A *run* is a singular experimental trial. In this notebook we will create a run directly on the experiment by calling `run = exp.start_logging()`. If you were experimenting by submitting a script file as an experiment using ``experiment.submit()``, you would call `run = Run.get_context()` in your script to access the run context of your code. In either case, the logging methods on the returned run object work the same.
This cell also stores the run id for use later in this notebook. The run_id is not necessary for logging.
```
# start logging for the run
run = experiment.start_logging()
# access the run id for use later
run_id = run.id
# change the scale factor on different runs to see how you can compare multiple runs
scale_factor = 2
# change the category on different runs to see how to organize data in reports
category = 'Red'
```
#### Viewing a run in the Portal
Once a run is started you can see the run in the portal by simply typing ``run``. Clicking on the "Link to Portal" link will take you to the Run Details page that shows the metrics you have logged and other run properties. You can refresh this page after each logging statement to see the updated results.
```
run
```
### Viewing an experiment in the portal
You can also view an experiment similarly by typing `experiment`. The portal link will take you to the experiment's Run History page that shows all runs and allows you to analyze trends across multiple runs.
```
experiment
```
## Logging metrics
Metrics are visible in the run details page in the AzureML portal and also can be analyzed in experiment reports. The run details page looks as below and contains tabs for Details, Outputs, Logs, and Snapshot.
* The Details page displays attributes about the run, plus logged metrics and images. Metrics that are vectors appear as charts.
* The Outputs page contains any files, such as models, that you uploaded from your run into the "outputs" directory in storage. If you place files in the "outputs" directory locally, the files are automatically uploaded on your behalf when the run is completed.
* The Logs page allows you to view any log files created by your run. Logging runs created in notebooks typically do not generate log files.
* The Snapshot page contains a snapshot of the directory specified in the ''start_logging'' statement, plus the notebook at the time of the ''start_logging'' call. This snapshot and notebook can be downloaded from the Run Details page to continue or reproduce an experiment.
### Logging string metrics
The following cell logs a string metric. A string metric is simply a string value associated with a name. String metrics are useful for labelling runs and organizing your data. Typically you should log all string parameters as metrics for later analysis - even information such as paths can help to understand how individual experiments perform differently.
String metrics can be used in the following ways:
* Plot in histograms
* Group by indicators for numerical plots
* Filtering runs
String metrics appear in the **Tracked Metrics** section of the Run Details page and can be added as a column in Run History reports.
```
# log a string metric
run.log(name='Category', value=category)
```
### Logging numerical metrics
The following cell logs some numerical metrics. Numerical metrics can include metrics such as AUC or MSE. You should log any parameter or significant output measure in order to understand trends across multiple experiments. Numerical metrics appear in the **Tracked Metrics** section of the Run Details page, and can be used in charts or KPI's in experiment Run History reports.
```
# log numerical values
run.log(name="scale factor", value = scale_factor)
run.log(name='Magic Number', value=42 * scale_factor)
```
### Logging vectors
Vectors are good for recording information such as loss curves. You can log a vector by creating a list of numbers, calling ``log_list()`` and supplying a name and the list, or by repeatedly logging a value using the same name.
Vectors are presented in Run Details as a chart, and are directly comparable in experiment reports when placed in a chart.
**Note:** vectors logged into the run are expected to be relatively small. Logging very large vectors into Azure ML can result in reduced performance. If you need to store large amounts of data associated with the run, you can write the data to file that will be uploaded.
```
fibonacci_values = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# Materialise the scaled values as a list: run.log_list expects a list, and a
# generator would in any case be exhausted after a single use.
scaled_values = [i * scale_factor for i in fibonacci_values]
# Log a list of values. Note this will generate a single-variable line chart.
run.log_list(name='Fibonacci', value=scaled_values)
for i in tqdm(range(-10, 10)):
    # log a metric value repeatedly, this will generate a single-variable line chart.
    run.log(name='Sigmoid', value=1 / (1 + np.exp(-i)))
```
### Logging tables
Tables are good for recording related sets of information such as accuracy tables, confusion matrices, etc.
You can log a table in two ways:
* Create a dictionary of lists where each list represents a column in the table and call ``log_table()``
* Repeatedly call ``log_row()`` providing the same table name with a consistent set of named args as the column values
Tables are presented in Run Details as a chart using the first two columns of the table
**Note:** tables logged into the run are expected to be relatively small. Logging very large tables into Azure ML can result in reduced performance. If you need to store large amounts of data associated with the run, you can write the data to file that will be uploaded.
```
# create a dictionary to hold a table of values
sines = {}
sines['angle'] = []
sines['sine'] = []
for i in tqdm(range(-10, 10)):
angle = i / 2.0 * scale_factor
# log a 2 (or more) values as a metric repeatedly. This will generate a 2-variable line chart if you have 2 numerical columns.
run.log_row(name='Cosine Wave', angle=angle, cos=np.cos(angle))
sines['angle'].append(angle)
sines['sine'].append(np.sin(angle))
# log a dictionary as a table, this will generate a 2-variable chart if you have 2 numerical columns
run.log_table(name='Sine Wave', value=sines)
```
### Logging images
You can directly log _matplotlib_ plots and arbitrary images to your run record. This code logs a _matplotlib_ pyplot object. Images show up in the run details page in the Azure ML Portal.
```
%matplotlib inline
# Create a plot
import matplotlib.pyplot as plt
angle = np.linspace(-3, 3, 50) * scale_factor
plt.plot(angle,np.tanh(angle), label='tanh')
plt.legend(fontsize=12)
plt.title('Hyperbolic Tangent', fontsize=16)
plt.grid(True)
# Log the plot to the run. To log an arbitrary image, use the form run.log_image(name, path='./image_path.png')
run.log_image(name='Hyperbolic Tangent', plot=plt)
```
### Uploading files
Files can also be uploaded explicitly and stored as artifacts along with the run record. These files are also visible in the *Outputs* tab of the Run Details page.
```
file_name = 'outputs/myfile.txt'
with open(file_name, "w") as f:
f.write('This is an output file that will be uploaded.\n')
# Upload the file explicitly into artifacts
run.upload_file(name = file_name, path_or_stream = file_name)
```
### Completing the run
Calling `run.complete()` marks the run as completed and triggers the output file collection. If for any reason you need to indicate the run failed or simply need to cancel the run you can call `run.fail()` or `run.cancel()`.
```
run.complete()
```
---
## Analyzing results
You can refresh the run in the Azure portal to see all of your results. In many cases you will want to analyze runs that were performed previously to inspect the contents or compare results. Runs can be fetched from their parent Experiment object using the ``Run()`` constructor or the ``experiment.get_runs()`` method.
```
fetched_run = Run(experiment, run_id)
fetched_run
```
Call ``run.get_metrics()`` to retrieve all the metrics from a run.
```
fetched_run.get_metrics()
```
Call ``run.get_metrics(name = <metric name>)`` to retrieve a metric value by name. Retrieving a single metric can be faster, especially if the run contains many metrics.
```
fetched_run.get_metrics(name = "scale factor")
```
See the files uploaded for this run by calling ``run.get_file_names()``
```
fetched_run.get_file_names()
```
Once you know the file names in a run, you can download the files using the ``run.download_file()`` method
```
import os
# Make a local folder to receive the downloaded artifacts.
os.makedirs('files', exist_ok=True)
# Consistency fix: use fetched_run throughout. The original listed file names
# from the live `run` object, which only worked because both objects point to
# the same run id; this cell is about the fetched run.
for f in fetched_run.get_file_names():
    dest = os.path.join('files', f.split('/')[-1])
    print('Downloading file {} to {}...'.format(f, dest))
    fetched_run.download_file(f, dest)
```
### Tagging a run
Often when you analyze the results of a run, you may need to tag that run with important personal or external information. You can add a tag to a run using the ``run.tag()`` method. AzureML supports valueless and valued tags.
```
fetched_run.tag("My Favorite Run")
fetched_run.tag("Competition Rank", 1)
fetched_run.get_tags()
```
## Next steps
To experiment more with logging and to understand how metrics can be visualized, go back to the *Start a run* section, try changing the category and scale_factor values and going through the notebook several times. Play with the KPI, charting, and column selection options on the experiment's Run History reports page to see how the various metrics can be combined and visualized.
After learning about all of the logging options, go to the [train on remote vm](..\train-on-remote-vm\train-on-remote-vm.ipynb) notebook and experiment with logging from remote compute contexts.
| github_jupyter |
# Federated Learning of a Recurrent Neural Network for text classification
In this tutorial, you are going to learn how to train a Recurrent Neural Network (RNN) in a federated way with the purpose of *classifying* a person's surname to its most likely language of origin.
We will train two Recurrent Neural Networks residing on two remote workers based on a dataset containing approximately 20,000 surnames from 18 languages of origin, and predict to which language a name belongs based on the name's spelling.
A **character-level RNN** treats words as a series of characters - outputting a prediction and “hidden state” per character, feeding its previous hidden state into each next step. We take the final prediction to be the output, i.e. which class the word belongs to. Hence the training process proceeds sequentially character-by-character through the different hidden layers.
Following distributed training, we are going to be able to perform predictions of a surname's language of origin, as in the following example:
```python
predict(model_pointers["bob"], "Qing", alice) #alice is our worker
Qing
(-1.43) Korean
(-1.74) Vietnamese
(-2.18) Arabic
predict(model_pointers["alice"], "Daniele", alice)
Daniele
(-1.58) French
(-2.04) Scottish
(-2.07) Dutch
```
The present example is inspired by an official PyTorch [tutorial](https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html), which I ported to PySyft with the purpose of learning a Recurrent Neural Network in a federated way. The present tutorial is self-contained, so there are no dependencies on external pieces of code apart from a few Python libraries.
**RNN Tutorial's author**: Daniele Gadler. [@DanyEle](https://github.com/danyele) on Github.
## 1. Step: Dependencies!
Make sure you have all the requires packages installed, or install them via the following command (assuming you didn't move the current Jupyter Notebook from its initial directory).
After installing new packages, you may have to restart this Jupyter Notebook from the tool bar Kernel -> Restart
```
!pip install -r "../../../requirements.txt"
from __future__ import unicode_literals, print_function, division
from torch.utils.data import Dataset
import torch
from io import open
import glob
import os
import numpy as np
import unicodedata
import string
import random
import torch.nn as nn
import time
import math
import pandas as pd
import random
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import urllib.request
from zipfile import ZipFile
#hide TF-related warnings in PySyft
import warnings
warnings.filterwarnings("ignore")
import syft as sy
from syft.frameworks.torch.federated import utils
from syft.workers.websocket_client import WebsocketClientWorker
```
## 2. Step: Data pre-processing and transformation
We are going to train our neural network based on a dataset containing surnames from 18 languages of origin. So let's run the following lines to automatically download the dataset and extract it. Afterwards, you'll be able to parse the dataset in Python following the initialization of a few basic functions for parsing the data
```
#Check whether the surname dataset has already been downloaded and extracted.
def dataset_exists():
    #The 18 per-language name files that the zip archive provides.
    expected_languages = [
        'Arabic', 'Chinese', 'Czech', 'Dutch', 'English', 'French',
        'German', 'Greek', 'Irish', 'Italian', 'Japanese', 'Korean',
        'Polish', 'Portuguese', 'Russian', 'Scottish', 'Spanish',
        'Vietnamese',
    ]
    if not os.path.isfile('./data/eng-fra.txt'):
        return False
    #check that the ./data/names/ directory and all 18 files are present
    if not os.path.isdir('./data/names/'):
        return False
    return all(
        os.path.isfile('./data/names/%s.txt' % language)
        for language in expected_languages
    )
#If the dataset does not exist, then proceed to download the dataset anew
if not dataset_exists():
    #If the dataset does not already exist, let's download the dataset directly from the URL where it is hosted
    print('Downloading the dataset with urllib2 to the current directory...')
    url = 'https://download.pytorch.org/tutorial/data.zip'
    # Fetch the archive into the current working directory as ./data.zip
    urllib.request.urlretrieve(url, './data.zip')
    print("The dataset was successfully downloaded")
    print("Unzipping the dataset...")
    with ZipFile('./data.zip', 'r') as zipObj:
        # Extract all the contents of the zip file in current directory
        zipObj.extractall()
    print("Dataset successfully unzipped")
else:
    print("Not downloading the dataset because it was already downloaded")
#Return a list of all filesystem paths matching the given glob pattern.
def findFiles(path):
    matched_paths = list(glob.iglob(path))
    return matched_paths
# Read a UTF-8 file and return its lines, each normalised to ASCII.
# Fix: use a context manager so the file handle is closed deterministically
# (the original left it to the garbage collector).
def readLines(filename):
    with open(filename, encoding='utf-8') as name_file:
        raw_text = name_file.read()
    return [unicodeToAscii(line) for line in raw_text.strip().split('\n')]
#Strip accents from 's': NFD-decompose it, drop combining marks ('Mn'),
#and keep only characters present in the allowed alphabet `all_letters`.
def unicodeToAscii(s):
    kept_chars = []
    for decomposed_char in unicodedata.normalize('NFD', s):
        is_combining_mark = unicodedata.category(decomposed_char) == 'Mn'
        if not is_combining_mark and decomposed_char in all_letters:
            kept_chars.append(decomposed_char)
    return ''.join(kept_chars)
# Allowed alphabet: ASCII letters plus a few punctuation characters that
# occur in surnames.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
#dictionary containing the nation as key and the names as values
#Example: category_lines["italian"] = ["Abandonato","Abatangelo","Abatantuono",...]
category_lines = {}
#List containing the different categories in the data
all_categories = []
# Each data/names/<Language>.txt file becomes one category whose lines are
# the ASCII-normalised surnames of that language.
for filename in findFiles('data/names/*.txt'):
    print(filename)
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines
n_categories = len(all_categories)
print("Amount of categories:" + str(n_categories))
```
Now we are going to format the data so as to make it compliant with the format requested by PySyft and Pytorch. Firstly, we define a dataset class, specifying how batches ought to be extracted from the dataset in order for them to be assigned to the different workers.
```
class LanguageDataset(Dataset):
    """Torch ``Dataset`` pairing each surname (sample) with its language label.

    Parameters:
        text: sequence of input samples (surnames).
        labels: target category for each sample, aligned with ``text``.
        transform: optional callable applied to each sample on access.
    """
    #Constructor is mandatory
    def __init__(self, text, labels, transform=None):
        self.data = text
        self.targets = labels #categories
        #self.to_torchtensor()
        self.transform = transform
    def to_torchtensor(self):
        # Bug fix: the original referenced the non-existent attributes
        # ``self.text``/``self.labels`` and passed ``requires_grad`` to
        # ``torch.from_numpy``, which accepts no such keyword. Convert the
        # stored samples and targets in place instead.
        self.data = torch.from_numpy(np.asarray(self.data))
        self.targets = torch.from_numpy(np.asarray(self.targets))
    def __len__(self):
        #Mandatory
        '''Returns:
                Length [int]: Length of Dataset/batches
        '''
        return len(self.data)
    def __getitem__(self, idx):
        #Mandatory
        '''Returns:
             Data [Torch Tensor]: sample at ``idx`` (transformed if requested)
             Target [Torch Tensor]: matching label
        '''
        sample = self.data[idx]
        target = self.targets[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample,target
#The list of arguments for our program. We will be needing most of them soon.
class Arguments():
    def __init__(self):
        self.batch_size = 1                     # one name per step (names vary in length)
        self.learning_rate = 0.005              # optimiser learning rate
        self.epochs = 10000                     # number of training iterations
        self.federate_after_n_batches = 15000   # batch threshold for federation — see training loop
        self.seed = 1                           # RNG seed for reproducibility
        self.print_every = 200                  # iterations between progress printouts
        self.plot_every = 100                   # iterations between loss-curve samples
        self.use_cuda = False                   # CPU-only by default
args = Arguments()
```
We now need to unwrap data samples so as to have them all in one single list instead of a dictionary,where different categories were addressed by key.From now onwards, **categories** will be the languages of origin (Y) and **names** will be the data points (X).
```
%%latex
\begin{split}
names\_list = [d_1,...,d_n] \\
category\_list = [c_1,...,c_n]
\end{split}
Where $n$ is the total amount of data points
# Set of names (X)
names_list = []
# Set of labels (Y)
category_list = []

# Flatten the {category: [names]} dictionary into two aligned lists.
for nation, names in category_lines.items():
    # iterate over every single name
    for name in names:
        names_list.append(name)       # input data point
        category_list.append(nation)  # label

# Sanity check: each data sample (X) should have its own corresponding category (Y)
print(names_list[1:20])
print(category_list[1:20])
print("\n \n Amount of data points loaded: " + str(len(names_list)))
```
We now need to turn our categories into numbers, as PyTorch cannot really understand plain text
For an example category: "Greek" ---> 0
```
# Assign an integer to every category
categories_numerical = pd.factorize(category_list)[0]
# Wrap the categories in a long tensor, so they can be loaded by LanguageDataset
category_tensor = torch.tensor(np.array(categories_numerical), dtype=torch.long)
# Ready to be processed by torch.from_numpy in LanguageDataset
categories_numpy = np.array(category_tensor)
# Show a few resulting categories
print(names_list[1200:1210])
print(categories_numpy[1200:1210])
```
We now need to turn every single character in each input line string into a one-hot vector, with a "1" marking the position that corresponds to that character in the alphabet.
For example, in the case of a single character, we have:
"a" = array([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32)
A word is just a vector of such character vectors: our Recurrent Neural Network will process every single character vector in the word, producing an output after passing through each of its hidden layers.
This technique, involving the encoding of a word as a vector of character vectors, is known as *word embedding*, as we embed a word into a vector of vectors.
```
def letterToIndex(letter):
    # Position of `letter` in the global `all_letters` alphabet (-1 if absent).
    return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
    """One-hot encode a single character as a <1 x n_letters> tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letterToIndex(letter)] = 1
    return one_hot
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
    """One-hot encode every character of `line`.

    Result shape: <len(line) x 1 x n_letters>.
    """
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, letter in enumerate(line):
        encoded[position][0][letterToIndex(letter)] = 1
    return encoded
def list_strings_to_list_tensors(names_list):
    """Encode every name in `names_list` as a numpy one-hot array."""
    encoded_names = []
    for line in names_list:
        encoded_names.append(lineToTensor(line).numpy())
    return encoded_names


lines_tensors = list_strings_to_list_tensors(names_list)

# Peek at the first name and its encoding
print(names_list[0])
print(lines_tensors[0])
print(lines_tensors[0].shape)
```
Let's now identify the longest word in the dataset, as all tensors need to have the same shape in order to fit into a numpy array. So, we append vectors containing just "0"s into our words up to the maximum word size, such that all word embeddings have the same shape.
```
# Longest word in the dataset: all tensors must share this first dimension.
max_line_size = max(len(x) for x in lines_tensors)


def lineToTensorFillEmpty(line, max_line_size):
    """Like lineToTensor, but always return <max_line_size x 1 x n_letters>.

    Rows past the end of `line` are left as all-zero padding vectors;
    notice the fixed first dimension, which differs from lineToTensor.
    """
    padded = torch.zeros(max_line_size, 1, n_letters)
    for position, letter in enumerate(line):
        padded[position][0][letterToIndex(letter)] = 1
    # Vectors of (0,0,....,0) remain where there are no characters
    return padded
def list_strings_to_list_tensors_fill_empty(names_list):
    """Encode every name as an equally-sized (zero-padded) torch tensor."""
    padded_tensors = []
    for line in names_list:
        padded_tensors.append(lineToTensorFillEmpty(line, max_line_size))
    return padded_tensors


lines_tensors = list_strings_to_list_tensors_fill_empty(names_list)

# Let's take a look at what a word now looks like
print(names_list[0])
print(lines_tensors[0])
print(lines_tensors[0].shape)

# From a list of equally-shaped embeddings we can build one numpy array:
array_lines_tensors = np.stack(lines_tensors)
# Stacking keeps one extra dimension (index=2 has size '1') ...
print(array_lines_tensors.shape)
# ... which we can drop precisely because its size is 1:
array_lines_proper_dimension = np.squeeze(array_lines_tensors, axis=2)
print(array_lines_proper_dimension.shape)
```
### Data unbalancing and batch randomization:
You may have noticed that our dataset is strongly unbalanced and contains a lot of data points in the "russian.txt" dataset. However, we would still like to take a random batch during our training procedure at every iteration. In order to prevent our neural network from classifying a data point as always belonging to the "Russian" category, we first pick a random category and then select a data point from that category. To do that, we construct a dictionary mapping each category to the index of its first data point in the list (e.g. of lines). Afterwards, we will take a data point starting from that identified starting index.
```
def find_start_index_per_category(category_list):
    """Map every category to the index of its first data point.

    `category_list` is expected to be grouped by category (it is built by
    iterating `category_lines`), e.g.:  "Italian" --> 203
                                        "Spanish" --> 19776

    Parameters
    ----------
    category_list : list
        The label of every data point, in dataset order.

    Returns
    -------
    dict
        category name -> index of that category's first occurrence.
    """
    # BUG FIX: the original iterated the global `names_list` while ignoring
    # its own `category_list` parameter, and pre-filled the dictionary with
    # empty lists, leaving list-typed values behind for any category absent
    # from `category_list`.  Iterating the parameter directly removes the
    # hidden global dependency and keeps every value an integer.
    categories_start_index = {}
    for index, category in enumerate(category_list):
        if category not in categories_start_index:
            categories_start_index[category] = index
    return categories_start_index
# Build the category -> first-index lookup once for the whole dataset.
categories_start_index = find_start_index_per_category(category_list)
print(categories_start_index)
```
Let's define a few functions to take a random index from the dataset, so that we'll be able to select a random data point and a random category.
```
def randomChoice(l):
    """Return a uniformly random element of `l` together with its index."""
    position = random.randint(0, len(l) - 1)
    return l[position], position
def randomTrainingIndex():
    """Pick a random category first, then a random name inside it, and
    return that name's absolute index in the flattened dataset.

    Sampling the category first keeps heavily-populated categories
    (e.g. Russian) from dominating the training batches.
    """
    category, _ = randomChoice(all_categories)  # cat = category, it's not a random animal
    # relative index of a data point within the chosen category
    _, rand_line_index = randomChoice(category_lines[category])
    return categories_start_index[category] + rand_line_index
```
## 3. Step: Model - Recurrent Neural Network
Hey, I must admit that was indeed a lot of data preprocessing and transformation, but it was well worth it!
We have defined almost all the functions we'll be needing during the training procedure and our data is ready
to be fed into the neural network, which we're creating now:
```
# Two hidden layers, based on simple linear layers
class RNN(nn.Module):
    """Elman-style recurrent classifier.

    At every step the input vector is concatenated with the hidden state
    and pushed through two linear layers: one producing the next hidden
    state, the other producing log-softmax category scores.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        next_hidden = self.i2h(combined)
        scores = self.softmax(self.i2o(combined))
        return scores, next_hidden

    def initHidden(self):
        """Zero hidden state used at the start of every word."""
        return torch.zeros(1, self.hidden_size)
# Let's instantiate the neural network already:
n_hidden = 128
# Instantiate RNN on the selected device (CPU unless args.use_cuda)
device = torch.device("cuda" if args.use_cuda else "cpu")
model = RNN(n_letters, n_hidden, n_categories).to(device)
# The final softmax layer will produce a probability for each one of our 18 categories
print(model)
# Now let's define our workers. You can either use remote workers or virtual workers
hook = sy.TorchHook(torch)  # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
# charlie = sy.VirtualWorker(hook, id="charlie")
workers_virtual = [alice, bob]
# If you have your workers operating remotely, like on Raspberry PIs
# kwargs_websocket_alice = {"host": "ip_alice", "hook": hook}
# alice = WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket_alice)
# kwargs_websocket_bob = {"host": "ip_bob", "hook": hook}
# bob = WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket_bob)
# workers_virtual = [alice, bob]

# array_lines_proper_dimension = our data points (X)
# categories_numpy = our labels (Y)
langDataset = LanguageDataset(array_lines_proper_dimension, categories_numpy)
# Assign the data points and the corresponding categories to workers:
# .federate() splits the dataset across the virtual workers and the loader
# then yields pointers to the remote batches.
federated_train_loader = sy.FederatedDataLoader(
    langDataset
    .federate(workers_virtual),
    batch_size=args.batch_size)
```
## 4. Step - Model Training!
It's now time to train our Recurrent Neural Network based on the processed data. To do that, we need to define a few more functions
```
def categoryFromOutput(output):
    """Translate the network's log-softmax output into (category name, index)."""
    _, top_index = output.topk(1)
    category_index = top_index[0].item()
    return all_categories[category_index], category_index
def timeSince(since):
    """Elapsed wall-clock time since timestamp `since`, formatted as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes = math.floor(elapsed / 60)
    seconds = elapsed - minutes * 60
    return '%dm %ds' % (minutes, seconds)
def fed_avg_every_n_iters(model_pointers, iter, federate_after_n_batches):
    """Every `federate_after_n_batches` iterations, pull each worker's model
    back, average the weights (FedAvg) and push a copy of the averaged
    model back to every worker.

    Parameters
    ----------
    model_pointers : dict
        worker id -> pointer to the model currently living on that worker.
    iter : int
        Current training iteration (1-based).
    federate_after_n_batches : int
        Averaging period.

    Returns
    -------
    dict
        The (possibly updated) worker id -> model pointer mapping.
    """
    models_local = {}
    # BUG FIX: the original tested `iter % args.federate_after_n_batches`,
    # silently ignoring the `federate_after_n_batches` parameter in favour
    # of the global `args`.
    if iter % federate_after_n_batches == 0:
        # Fetch each remote model back to the local machine.
        for worker_name, model_pointer in model_pointers.items():
            models_local[worker_name] = model_pointer.copy().get()
        model_avg = utils.federated_avg(models_local)
        # Ship a fresh copy of the averaged model to every worker.
        for worker in workers_virtual:
            model_copied_avg = model_avg.copy()
            model_ptr = model_copied_avg.send(worker)
            model_pointers[worker.id] = model_ptr
    return(model_pointers)
def fw_bw_pass_model(model_pointers, line_single, category_single):
    """One forward/backward pass plus a manual SGD update for a single
    remote data point.  Returns (model_pointers, loss, output) where loss
    and output are still pointers/tensors tied to the remote computation."""
    # get the right initialized model: the one on the worker holding this sample
    model_ptr = model_pointers[line_single.location.id]
    line_reshaped = line_single.reshape(max_line_size, 1, len(all_letters))
    line_reshaped, category_single = line_reshaped.to(device), category_single.to(device)
    # Firstly, initialize hidden layer
    hidden_init = model_ptr.initHidden()
    # And now zero grad the model
    model_ptr.zero_grad()
    hidden_ptr = hidden_init.send(line_single.location)
    # Count the real (non-padding) character rows by fetching a local copy.
    # NOTE(review): this counts non-zero *entries*, which equals the number
    # of characters only because each row is one-hot -- confirm.
    amount_lines_non_zero = len(torch.nonzero(line_reshaped.copy().get()))
    # now need to perform forward passes, one per real character
    for i in range(amount_lines_non_zero):
        output, hidden_ptr = model_ptr(line_reshaped[i], hidden_ptr)
    criterion = nn.NLLLoss()
    loss = criterion(output, category_single)
    loss.backward()
    # Bring the model home so the weight update runs locally
    model_got = model_ptr.get()
    # Perform model weights' updates (in-place SGD step: param -= lr * grad)
    for param in model_got.parameters():
        param.data.add_(-args.learning_rate, param.grad.data)
    # Send the updated model back to the worker that owns this sample
    model_sent = model_got.send(line_single.location.id)
    model_pointers[line_single.location.id] = model_sent
    return(model_pointers, loss, output)
def train_RNN(n_iters, print_every, plot_every, federate_after_n_batches, list_federated_train_loader):
    """Train the global RNN for `n_iters` iterations over the federated
    dataset, logging progress every `print_every` iterations and recording
    the averaged loss every `plot_every` iterations.

    Returns (all_losses, model_pointers).
    NOTE(review): relies on the globals `model`, `workers_virtual`,
    `names_list`, `all_categories` and `start` (wall-clock start time).
    """
    current_loss = 0
    all_losses = []
    model_pointers = {}
    # Send the initialized model to every single worker just before the training procedure starts
    for worker in workers_virtual:
        model_copied = model.copy()
        model_ptr = model_copied.send(worker)
        model_pointers[worker.id] = model_ptr
    # extract a random element from the list and perform training on it
    for iter in range(1, n_iters + 1):
        random_index = randomTrainingIndex()
        line_single, category_single = list_federated_train_loader[random_index]
        # print(category_single.copy().get())
        line_name = names_list[random_index]
        model_pointers, loss, output = fw_bw_pass_model(model_pointers, line_single, category_single)
        # model_pointers = fed_avg_every_n_iters(model_pointers, iter, args.federate_after_n_batches)
        # Update the running loss (fetch the scalar back from the remote loss)
        loss_got = loss.get().item()
        current_loss += loss_got
        if iter % plot_every == 0:
            all_losses.append(current_loss / plot_every)
            current_loss = 0
        if(iter % print_every == 0):
            output_got = output.get()  # Without copy()
            guess, guess_i = categoryFromOutput(output_got)
            category = all_categories[category_single.copy().get().item()]
            correct = '✓' if guess == category else '✗ (%s)' % category
            print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss_got, line_name, guess, correct))
    return(all_losses, model_pointers)
```
In order for the defined randomization process to work, we need to wrap the data points and categories into a list, from that we're going to take a batch at a random index.
```
# This may take a few seconds to complete.
print("Generating list of batches for the workers...")
# Materialize the loader so batches can be fetched by random index.
list_federated_train_loader = list(federated_train_loader)
```
And finally, let's launch our training
```
# Wall-clock start time used by timeSince() in the progress log.
start = time.time()
all_losses, model_pointers = train_RNN(args.epochs, args.print_every, args.plot_every, args.federate_after_n_batches, list_federated_train_loader)
# Let's plot the loss we got during the training procedure
plt.figure()
plt.ylabel("Loss")
plt.xlabel('Epochs (100s)')
plt.plot(all_losses)
```
## 5. Step - Predict!
Great! We have successfully created our two models for bob and alice in parallel using federated learning! I experimented with federated averaging of the two models, but it turned out that for a batch size of 1, as in the present case, the model loss was diverging. Let's try using our models for prediction now, shall we? This is the final reward for our endeavours.
```
def predict(model, input_line, worker, n_predictions=3):
    """Classify `input_line` with (a copy of) the given remote model.

    The model pointer is copied and fetched locally, then sent to `worker`
    together with the one-hot encoded input; the forward passes then run
    remotely, one character at a time.

    Parameters
    ----------
    model : pointer to a trained RNN living on some worker.
    input_line : str
        The name to classify.
    worker : the (virtual) worker that should run the evaluation.
    n_predictions : int
        How many top categories to report.

    Returns
    -------
    list
        [log-probability, category] pairs, best first.
    """
    model = model.copy().get()
    print('\n> %s' % input_line)
    model_remote = model.send(worker)
    line_tensor = lineToTensor(input_line)
    line_remote = line_tensor.copy().send(worker)
    # line_tensor = lineToTensor(input_line)
    # output = evaluate(model, line_remote)
    hidden = model_remote.initHidden()
    hidden_remote = hidden.copy().send(worker)
    # Run the recurrent forward passes remotely, character by character.
    with torch.no_grad():
        for i in range(line_remote.shape[0]):
            output, hidden_remote = model_remote(line_remote[i], hidden_remote)
    # Get top N categories
    topv, topi = output.copy().get().topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i].item()
        category_index = topi[0][i].item()
        print('(%.2f) %s' % (value, all_categories[category_index]))
        predictions.append([value, all_categories[category_index]])
    # BUG FIX: the original built `predictions` but never returned it.
    return predictions
```
Notice how the different models learned may perform different predictions, based on the data that was shown to them.
```
# Each worker's model may predict differently, depending on the data it saw.
predict(model_pointers["alice"], "Qing", alice)
predict(model_pointers["alice"], "Daniele", alice)
# NOTE(review): bob's model is evaluated on worker `alice` here -- any worker
# can run a copied model, but confirm `bob` was not intended.
predict(model_pointers["bob"], "Qing", alice)
predict(model_pointers["bob"], "Daniele", alice)
```
You may try experimenting with this example right now, for example by increasing or decreasing the number of epochs and seeing how the two models perform. You may also try un-commenting the part about federated averaging and check the new resulting loss function. There are lots of other optimizations we may think of as well!
# Congratulations!!! - Time to Join the Community!
Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
### Star PySyft on GitHub
The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.
- [Star PySyft](https://github.com/OpenMined/PySyft)
### Join our Slack!
The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
### Join a Code Project!
The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue".
- [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
- [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
### Donate
If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
[OpenMined's Open Collective Page](https://opencollective.com/openmined)
| github_jupyter |
# T81-558: Applications of Deep Neural Networks
**Module 3: Introduction to TensorFlow**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 3 Material
* **Part 3.1: Deep Learning and Neural Network Introduction** [[Video]](https://www.youtube.com/watch?v=zYnI4iWRmpc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_1_neural_net.ipynb)
* Part 3.2: Introduction to Tensorflow & Keras [[Video]](https://www.youtube.com/watch?v=PsE73jk55cE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_2_keras.ipynb)
* Part 3.3: Saving and Loading a Keras Neural Network [[Video]](https://www.youtube.com/watch?v=-9QfbGM1qGw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_3_save_load.ipynb)
* Part 3.4: Early Stopping in Keras to Prevent Overfitting [[Video]](https://www.youtube.com/watch?v=m1LNunuI2fk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_4_early_stop.ipynb)
* Part 3.5: Extracting Weights and Manual Calculation [[Video]](https://www.youtube.com/watch?v=7PWgx16kH8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_5_weights.ipynb)
# Part 3.1: Deep Learning and Neural Network Introduction
Neural networks were one of the first machine learning models. Their popularity has fallen twice and is now on its third rise. Deep learning implies the use of neural networks. The "deep" in deep learning refers to a neural network with many hidden layers. Because neural networks have been around for so long, they have quite a bit of baggage. Many different training algorithms, activation/transfer functions, and structures have been added over the years. This course is only concerned with the latest, most current state of the art techniques for deep neural networks. I am not going to spend any time discussing the history of neural networks. If you would like to learn about some of the more classic structures of neural networks, there are several chapters dedicated to this in your course book. For the latest technology, I wrote an article for the Society of Actuaries on deep learning as the [third generation of neural networks](https://www.soa.org/Library/Newsletters/Predictive-Analytics-and-Futurism/2015/december/paf-iss12.pdf).
Neural networks accept input and produce output. The input to a neural network is called the feature vector. The size of this vector is always a fixed length. Changing the size of the feature vector means recreating the entire neural network. Though the feature vector is called a "vector," this is not always the case. A vector implies a 1D array. Historically the input to a neural network was always 1D. However, with modern neural networks you might see inputs, such as:
* **1D Vector** - Classic input to a neural network, similar to rows in a spreadsheet. Common in predictive modeling.
* **2D Matrix** - Grayscale image input to a convolutional neural network (CNN).
* **3D Matrix** - Color image input to a convolutional neural network (CNN).
* **nD Matrix** - Higher order input to a CNN.
Prior to CNN's, the image input was sent to a neural network simply by squashing the image matrix into a long array by placing the image's rows side-by-side. CNNs are different, as the nD matrix literally passes through the neural network layers.
Initially this course will focus upon 1D input to neural networks. However, later sessions will focus more heavily upon higher dimension input.
**Dimensions** The term dimension can be confusing in neural networks. In the sense of a 1D input vector, dimension refers to how many elements are in that 1D array. For example, a neural network with 10 input neurons has 10 dimensions. However, now that we have CNN's, the input has dimensions too. The input to the neural network will *usually* have 1, 2 or 3 dimensions. 4 or more dimensions is unusual. You might have a 2D input to a neural network that has 64x64 pixels. This would result in 4,096 input neurons. This network is either 2D or 4,096D, depending on which set of dimensions you are talking about!
### Classification or Regression
Like many models, neural networks can function in classification or regression:
* **Regression** - You expect a number as your neural network's prediction.
* **Classification** - You expect a class/category as your neural network's prediction.
The following shows a classification and regression neural network:

Notice that the output of the regression neural network is numeric and the output of the classification is a class. Regression, or two-class classification, networks always have a single output. Classification neural networks have an output neuron for each class.
The following diagram shows a typical neural network:

There are usually four types of neurons in a neural network:
* **Input Neurons** - Each input neuron is mapped to one element in the feature vector.
* **Hidden Neurons** - Hidden neurons allow the neural network to abstract and process the input into the output.
* **Output Neurons** - Each output neuron calculates one part of the output.
* **Context Neurons** - Holds state between calls to the neural network to predict.
* **Bias Neurons** - Work similar to the y-intercept of a linear equation.
These neurons are grouped into layers:
* **Input Layer** - The input layer accepts feature vectors from the dataset. Input layers usually have a bias neuron.
* **Output Layer** - The output from the neural network. The output layer does not have a bias neuron.
* **Hidden Layers** - Layers that occur between the input and output layers. Each hidden layer will usually have a bias neuron.
### Neuron Calculation
The output from a single neuron is calculated according to the following formula:
$ f(x,\theta) = \phi(\sum_i(\theta_i \cdot x_i)) $
The input vector ($x$) represents the feature vector and the vector $\theta$ (theta) represents the weights. To account for the bias neuron, a value of 1 is always appended to the end of the input feature vector. This causes the last weight to be interpreted as a bias value that is simply added to the summation. The $\phi$ (phi) is the transfer/activation function.
Consider using the above equation to calculate the output from the following neuron:

The above neuron has 2 inputs plus the bias as a third. This neuron might accept the following input feature vector:
```
[1,2]
```
To account for the bias neuron, a 1 is appended, as follows:
```
[1,2,1]
```
The weights for a 3-input layer (2 real inputs + bias) will always have an additional weight, for the bias. A weight vector might be:
```
[ 0.1, 0.2, 0.3]
```
To calculate the summation, perform the following:
```
0.1*1 + 0.2*2 + 0.3*1 = 0.8
```
The value of 0.8 is passed to the $\phi$ (phi) function, which represents the activation function.
### Activation Functions
Activation functions, also known as transfer functions, are used to calculate the output of each layer of a neural network. Historically neural networks have used a hyperbolic tangent, sigmoid/logistic, or linear activation function. However, modern deep neural networks primarily make use of the following activation functions:
* **Rectified Linear Unit (ReLU)** - Used for the output of hidden layers.
* **Softmax** - Used for the output of classification neural networks. [Softmax Example](http://www.heatonresearch.com/aifh/vol3/softmax.html)
* **Linear** - Used for the output of regression neural networks (or 2-class classification).
The ReLU function is calculated as follows:
$ \phi(x) = \max(0, x) $
The Softmax is calculated as follows:
$ \phi_i(z) = \frac{e^{z_i}}{\sum\limits_{j \in group}e^{z_j}} $
The Softmax activation function is only useful with more than one output neuron. It ensures that all of the output neurons sum to 1.0. This makes it very useful for classification where it shows the probability of each of the classes as being the correct choice.
To experiment with the Softmax, click [here](http://www.heatonresearch.com/aifh/vol3/softmax.html).
The linear activation function is essentially no activation function:
$ \phi(x) = x $
For regression problems, this is the activation function of choice.
### Why ReLU?
Why is the ReLU activation function so popular? It was one of the key improvements to neural networks that makes deep learning work. Prior to deep learning, the sigmoid activation function was very common:
$ \phi(x) = \frac{1}{1 + e^{-x}} $
The graph of the sigmoid function is shown here:

Neural networks are often trained using gradient descent. To make use of gradient descent, it is necessary to take the derivative of the activation function. This allows the partial derivatives of each of the weights to be calculated with respect to the error function. A derivative is the instantaneous rate of change:

The derivative of the sigmoid function is given here:
$ \phi'(x)=\phi(x)(1-\phi(x)) $
This derivative is often given in other forms. The above form is used for computational efficiency. To see how this derivative was taken, see [this](http://www.heatonresearch.com/aifh/vol3/deriv_sigmoid.html).
The graph of the sigmoid derivative is given here:

The derivative quickly saturates to zero as *x* moves from zero. This is not a problem for the derivative of the ReLU, which is given here:
$ \phi'(x) = \begin{cases} 1 & x > 0 \\ 0 & x \leq 0 \end{cases} $
### Why are Bias Neurons Needed?
The activation functions seen in the previous section specifies the output of a single neuron. Together, the weight and bias of a neuron shape the output of the activation to produce the desired output. To see how this process occurs, consider the following equation. It represents a single-input sigmoid activation neural network.
$ f(x,w,b) = \frac{1}{1 + e^{-(wx+b)}} $
The *x* variable represents the single input to the neural network. The *w* and *b* variables specify the weight and bias of the neural network. The above equation is a combination of the weighted sum of the inputs and the sigmoid activation function. For this section, we will consider the sigmoid function because it clearly demonstrates the effect that a bias neuron has.
The weights of the neuron allow you to adjust the slope or shape of the activation function. The following figure shows the effect on the output of the sigmoid activation function if the weight is varied:

The above diagram shows several sigmoid curves using the following parameters:
```
f(x,0.5,0.0)
f(x,1.0,0.0)
f(x,1.5,0.0)
f(x,2.0,0.0)
```
To produce the curves, we did not use bias, which is evident in the third parameter of 0 in each case. Using four weight values yields four different sigmoid curves in the above figure. No matter the weight, we always get the same value of 0.5 when x is 0 because all of the curves hit the same point when x is 0. We might need the neural network to produce other values when the input is near 0.5.
Bias does shift the sigmoid curve, which allows values other than 0.5 when x is near 0. The following figure shows the effect of using a weight of 1.0 with several different biases:

The above diagram shows several sigmoid curves with the following parameters:
```
f(x,1.0,1.0)
f(x,1.0,0.5)
f(x,1.0,1.5)
f(x,1.0,2.0)
```
We used a weight of 1.0 for these curves in all cases. When we utilized several different biases, sigmoid curves shifted to the left or right. Because all the curves merge together at the top right or bottom left, it is not a complete shift.
When we put bias and weights together, they produced a curve that created the necessary output from a neuron. The above curves are the output from only one neuron. In a complete network, the output from many different neurons will combine to produce complex output patterns.
# Module 3 Assignment
You can find the first assignment here: [assignment 3](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class3.ipynb)
| github_jupyter |
# Initial_t_rad Bug
The purpose of this notebook is to demonstrate the bug associated with setting the initial_t_rad tardis.plasma property.
```
# Show the working directory (IPython magic), then import the packages we need.
pwd
import tardis
import numpy as np
```
# Density and Abundance test files
Below are the density and abundance data from the test files used for demonstrating this bug.
```
# Load the test-model density and abundance profiles used to reproduce the bug.
density_dat = np.loadtxt('data/density.txt',skiprows=1)
abund_dat = np.loadtxt('data/abund.dat', skiprows=1)
print(density_dat)
print(abund_dat)
```
# No initial_t_rad
Below we run a simple tardis simulation where `initial_t_rad` is not set. The simulation has v_inner_boundary = 3350 km/s and v_outer_boundary = 3750 km/s, both within the velocity range in the density file. The simulation runs fine.
```
# Run TARDIS without setting initial_t_rad; this configuration runs fine.
no_init_trad = tardis.run_tardis('data/config_no_init_trad.yml')
# Inspect the resulting model geometry: shell velocities and shell counts.
no_init_trad.model.velocity
no_init_trad.model.no_of_shells, no_init_trad.model.no_of_raw_shells
print('raw velocity: \n',no_init_trad.model.raw_velocity)
print('raw velocity shape: ',no_init_trad.model.raw_velocity.shape)
print('(v_boundary_inner, v_boundary_outer) = (%i, %i)'%
(no_init_trad.model.v_boundary_inner.to('km/s').value, no_init_trad.model.v_boundary_outer.to('km/s').value))
print('v_boundary_inner_index: ', no_init_trad.model.v_boundary_inner_index)
print('v_boundary_outer_index: ', no_init_trad.model.v_boundary_outer_index)
print('t_rad', no_init_trad.model.t_rad)
```
# Debugging
```
%%debug
# Step into the failing initial_t_rad run under the interactive debugger.
init_trad = tardis.run_tardis('data/config_init_trad.yml')
# Same call outside the debugger, to reproduce the error directly.
init_trad = tardis.run_tardis('data/config_init_trad.yml')
```
# Debugging
## Debugging No initial_t_radiative run to compare with Yes initial_t_radiative run
We place two breakpoints:
break 1. tardis/base:37 --> Stops in the run_tardis() function when the simulation is initialized.
break 2. tardis/simulation/base:436 --> Stops after the Radial1DModel has been built from the config file, but before the plasma has been initialized.
# IMPORTANT:
We check the model.t_radiative property INSIDE the assemble_plasma function. Notice that it has len(model.t_radiative) = model.no_of_shells = 5
```
%%debug
# Debug the working (no initial_t_rad) run for comparison; breakpoints listed above.
no_init_trad = tardis.run_tardis('config_no_init_trad.yml')
```
## Debugging Yes initial_t_radiative run
We place the same two breakpoints as above:
break 1. tardis/base:37 --> Stops in the run_tardis() function when the simulation is initialized.
break 2. tardis/simulation/base:436 --> Stops after the Radial1DModel has been built from the config file, but before the plasma has been initialized.
# IMPORTANT:
We check the model.t_radiative property INSIDE the assemble_plasma function. Notice that it has len(model.t_radiative) = 6 which is NOT EQUAL to model.no_of_shells = 5
```
%%debug
# Debug the failing (initial_t_rad) run with the same two breakpoints.
init_trad = tardis.run_tardis('config_init_trad.yml')
```
# Checking model.t_radiative initialization when YES initial_t_rad
In the above debugging blocks, we have identified the following discrepancy INSIDE assemble_plasma():
### len(model.t_radiative) = 6 when YES initial_t_rad
### len(model.t_radiative) = 5 when NO initial_t_rad
Therefore, we investigate in the following debugging block how model.t_radiative is initialized. We place a breakpoint at tardis/simulation/base:432 and step INSIDE the Radial1DModel initialization.
Breakpoints:
break 1. tardis/simulation/base:432 --> Stops so that we can step INSIDE Radial1DModel initialization from_config().
break 2. tardis/model/base:330 --> Where temperature is handled INSIDE Radial1DModel initialization from_config().
break 3. tardis/model/base:337 --> `t_radiative` is initialized. It has the same length as `velocity` which is the raw velocities from the density file.
break 4. tardis/model/base:374 --> init() for Radial1DModel is called. We check values of relevant variables.
break 5. tardis/model/base:76 --> Stops at first line of Radial1DModel init() function.
break 6. tardis/model/base:101 --> self.\_t\_radiative is set.
break 7. tardis/model/base:140 --> Stops at first line of self.t_radiative setter.
break 8. tardis/model/base:132 --> Stops at first line of self.t_radiative getter.
break 9. tardis/model/base:108 --> Stop right after self.\_t\_radiative is set. NOTICE that neither the setter nor the getter was called. __IMPORTANT:__ at line 108, we have len(self.\_t\_radiative) = 10. __TO DO:__ Check len(self.\_t\_radiative) at line 108 in the NO initial\_t\_rad case.
```
%%debug
# Step inside Radial1DModel initialization to trace how t_radiative is set.
init_trad = tardis.run_tardis('config_init_trad.yml')
```
# Checking self.\_t\_radiative initialization when NO initial_t_rad at line 108
__IMPORTANT:__ We find that len(self.\_t\_radiative) = 5. This is a DISCREPANCY with the YES initial_t_rad case.
```
%%debug
# Check len(self._t_radiative) at line 108 in the NO initial_t_rad case.
no_init_trad = tardis.run_tardis('config_no_init_trad.yml')
```
# CODE CHANGE:
We propose the following change to tardis/model/base:106
__Line 106 Before Change:__ `self._t_radiative = t_radiative`
__Line 106 After Change:__ `self._t_radiative = t_radiative[1:1 + self.no_of_shells]`
t_radiative\[0\] corresponds to the temperature within the inner boundary, and so should be ignored.
```
# Re-run the initial_t_rad configuration after the proposed fix to verify it works.
init_trad = tardis.run_tardis('config_init_trad.yml')
# Scratch cells: numpy slicing/membership experiments used while debugging.
import numpy as np
a = np.array([1,2,3,4,5,6,7,8])
a[3:8]
a
2 in a
np.argwhere(a==6)[0][0]
np.searchsorted(a, 6.5)
if (2 in a) and (3.5 in a):
    print('hi')
# NOTE(review): 1 == 1.2 is False, so this assert raises AssertionError --
# presumably a deliberate demo of assert messages; confirm.
assert 1==1.2, "test"
a[3:6]
```
| github_jupyter |
# Release 0.4.1 powered by heart zone metrics!
> New release of runpandas comes with heart training zone metrics and minor changes to the CI build actions.
- toc: false
- badges: true
- comments: true
- author: Marcel Caraciolo
- categories: [general, jupyter, releases]
- image: images/trainingpeaks.png
> This current state of the project is `early beta`, which means that features can be added, removed or changed in backwards incompatible ways.
We published this minor release with some new metrics and changes to better improve our build process. In this release of [RunPandas 0.4.1](https://pypi.org/project/runpandas/), we include:
- The Activity now provides some special methods in `runpandas.acessors` that computes the heart training zones for each record and the time spent for each training zone.
- We published for the first time our package to the [Anaconda](https://anaconda.org/marcelcaraciolo/runpandas) scientific package repository.
- Finally, we have changed our CI build implementation from Travis CI to GitHub Actions. Unfortunately, Travis CI became highly limited for open-source projects, which resulted in several builds no longer running due to lack of credits.
## What is Runpandas?
Runpandas is a python package based on the ``pandas`` data analysis library that makes it easier to analyze your running sessions, stored as tracking files from cellphones, GPS smartwatches, or social sports applications such as Strava, MapMyRun, NikeRunClub, etc. It is designed to enable reading, transforming and computing running metrics analytics from several tracking files and apps.
## Main Features
### Support to heart training zones metrics
Now ``runpandas`` comes with useful methods for runners who want to explore their heart rate data: checking the heart rate range variation, the respective training zones, and the time elapsed in each training zone during the workout.
```
#Disable Warnings for a better visualization
import warnings
warnings.filterwarnings('ignore')
import runpandas
# Read a TCX workout export into a runpandas Activity frame
activity = runpandas.read_file('./data/11km.tcx')
# The frame is indexed by timestamp: show session start/end, then the
# first and last recorded latitudes
print('Start', activity.index[0],'End:', activity.index[-1])
print(activity.iloc[0]['lat'], activity.iloc[-1]['lat'])
```
First, let's perform a QC evaluation on the data, to check if there's any invalid or missing data required for the analysis. As you can see in the cell below, there are 5 records with heart rate data missing. We will replace all these with the first HR sensor data available.
```
import numpy as np
# Count missing heart-rate samples (QC step before zone analysis)
group_hr = activity['hr'].isnull().sum()
print("There are nan records: %d" % group_hr)
#There is 5 missing values in HR. Let's see the positions where they are placed in the frame.
print(activity[activity['hr'].isnull()])
#We will replace all NaN values with the first HR sensor data available
# NOTE(review): row index 5 is presumably the first record with a valid HR
# reading (rows 0-4 being the NaNs shown above) -- confirm against the
# printed output before relying on this hard-coded position.
activity['hr'].fillna(activity.iloc[5]['hr'], inplace=True)
print('Total nan after fill:', activity['hr'].isnull().sum())
```
Let's see how to add a column with the heart rate zone label to the data frame. For this task, we will use the special method `runpandas.compute.heart_zone` . The parameters are the bins argument which contains the left and right bounds for each training zone and the labels argument corresponding to the zone labels
```
# Label every record with its heart-rate training zone: `bins` gives the
# zone boundaries (bpm) and `labels` the zone names (len(labels) == len(bins) - 1)
activity['heartrate_zone'] = activity.compute.heart_zone(
                            labels=["Rest", "Z1", "Z2", "Z3", "Z4", "Z5"],
                            bins=[0, 92, 110, 129, 147, 166, 184])
activity["heartrate_zone"].tail()
```
To calculate the time in zone, there is also a special method `runpandas.compute.time_in_zone` which computes the time spent for each training zone.
```
# Total time spent inside each training zone, using the same zone
# boundaries and labels as the per-record zone computation above
time_in_zone = activity.compute.time_in_zone(
                            labels=["Rest", "Z1", "Z2", "Z3", "Z4", "Z5"],
                            bins=[0, 92, 110, 129, 147, 166, 184])
time_in_zone
```
### Anaconda Package
We decided to publish our runpandas package at one of the most popular scientific package repositories: [Anaconda](https://www.anaconda.com/). There are millions of data science packages published there, focusing on scientific areas. In this release we published to the owner's package repository (https://anaconda.org/marcelcaraciolo/runpandas), but the goal is to publish it to the main conda-forge repository. We will work on this task and submit our package as a release candidate.
### Changing the build script to support GitHub Actions
Since last year, the CI/CD provider Travis CI has placed several limitations on its free tier quotas, especially for open-source projects, which had a specific amount of time-credits for builds. We understand that this was a big decision for them, because building open-source products and maintaining them is extremely difficult. Since runpandas is a fully open-source package, I decided to find another CI provider.
Github Actions came to the rescue, since it remains free for any open-source project hosted in Github. Finally, I moved in this release all the build scripts to the Github actions.
<img src="./images/github-actions.png">
For further information about the Github Actions and see how it works, please check this [article](https://github.com/features/actions) and this [post](https://www.section.io/engineering-education/setting-up-cicd-for-python-packages-using-github-actions/).
## What is coming next ?
The next releases will come with power metrics and support for workout summary visualization. So stay tuned!
## Thanks
We are constantly developing Runpandas improving its existing features and adding new ones. We will be glad to hear from you about what you like or don’t like, what features you may wish to see in upcoming releases. Please feel free to contact us.
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Bonus Tutorial 4: The Kalman Filter, part 2
**Week 3, Day 2: Hidden Dynamics**
**By Neuromatch Academy**
##**Note: This is bonus material, included from NMA 2020. It has not been substantially revised for 2021.**
This means that the notation and standards are slightly different. We include it here because it provides additional information about how the Kalman filter works in two dimensions.
__Content creators:__ Caroline Haimerl and Byron Galbraith
__Content reviewers:__ Jesse Livezey, Matt Krause, Michael Waskom, and Xaq Pitkow
**Useful reference:**
- Roweis, Ghahramani (1998): A unifying review of linear Gaussian Models
- Bishop (2006): Pattern Recognition and Machine Learning
**Acknowledgement**
This tutorial is in part based on code originally created by Caroline Haimerl for Dr. Cristina Savin's *Probabilistic Time Series* class at the Center for Data Science, New York University
```
#@title Video 1: Introduction
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="6f_51L3i5aQ", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# leaving `video` as the cell's last expression renders the embedded player
video
```
---
# Tutorial Objectives
In the previous tutorial we gained intuition for the Kalman filter in one dimension. In this tutorial, we will examine the **two-dimensional** Kalman filter and more of its mathematical foundations.
In this tutorial, you will:
* Review linear dynamical systems
* Implement the Kalman filter
* Explore how the Kalman filter can be used to smooth data from an eye-tracking experiment
```
import sys
!conda install -c conda-forge ipywidgets --yes
!conda install numpy matplotlib scipy requests --yes
# Install PyKalman (https://pykalman.github.io/)
!pip install pykalman --quiet
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pykalman
from scipy import stats
#@title Figure settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Data retrieval and loading
import io
import os
import hashlib
import requests
fname = "W2D3_mit_eyetracking_2009.npz"
url = "https://osf.io/jfk8w/download"
expected_md5 = "20c7bc4a6f61f49450997e381cf5e0dd"

# Download the eye-tracking dataset once; skip entirely if it is already on disk.
if not os.path.isfile(fname):
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        print("!!! Failed to download data !!!")
    else:
        if r.status_code != requests.codes.ok:
            print("!!! Failed to download data !!!")
        elif hashlib.md5(r.content).hexdigest() != expected_md5:
            # checksum mismatch: refuse to write a corrupted file
            print("!!! Data download appears corrupted !!!")
        else:
            with open(fname, "wb") as fid:
                fid.write(r.content)
def load_eyetracking_data(data_fname=fname):
    """Read the eye-tracking archive and decode its stimulus images.

    Args:
        data_fname (str): path to the .npz archive (defaults to the
            module-level `fname` downloaded earlier).

    Returns:
        tuple: (subjects, images) where `subjects` holds the per-subject gaze
        data and `images` is a list of decoded stimulus images.
    """
    # pull everything out of the archive while the file handle is open
    with np.load(data_fname, allow_pickle=True) as archive:
        contents = {key: archive[key] for key in archive}
    # each stimulus is stored as raw JPEG bytes; decode them to pixel arrays
    images = [plt.imread(io.BytesIO(raw_jpeg), format='JPG')
              for raw_jpeg in contents['stimuli']]
    return contents['subjects'], images
#@title Helper functions
np.set_printoptions(precision=3)
def plot_kalman(state, observation, estimate=None, label='filter', color='r-',
                title='LDS', axes=None):
    """Plot a latent trajectory, its observations, and optionally an estimate.

    Left panel: trajectories in (x, y) space.  Right panel: scatter of latent
    vs. measured (or estimated) values, one marker style per dimension.
    Only the first two dimensions of each array are plotted.

    Args:
        state (ndarray): true latent states, shape (T, >=2)
        observation (ndarray): measurements, shape (T, >=2)
        estimate (ndarray or None): filtered/smoothed estimates; when None the
            raw state/observation relationship is shown instead
        label (str): legend label for the estimate trace
        color (str): matplotlib color/format string for the estimate
        title (str): title of the left panel
        axes (tuple or None): (ax1, ax2) to draw into; when None a new figure
            is created and the ground truth/data are drawn first

    Returns:
        tuple: the two axes (ax1, ax2), so callers can overlay more traces.
    """
    if axes is None:
        # fresh figure: draw the ground truth and the raw data once
        fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6))
        ax1.plot(state[:, 0], state[:, 1], 'g-', label='true latent')
        ax1.plot(observation[:, 0], observation[:, 1], 'k.', label='data')
    else:
        # reuse existing axes so successive calls can overlay estimates
        ax1, ax2 = axes
    if estimate is not None:
        ax1.plot(estimate[:, 0], estimate[:, 1], color=color, label=label)
    ax1.set(title=title, xlabel='X position', ylabel='Y position')
    ax1.legend()
    if estimate is None:
        # no estimate: show how well the measurements track the latent state
        ax2.plot(state[:, 0], observation[:, 0], '.k', label='dim 1')
        ax2.plot(state[:, 1], observation[:, 1], '.', color='grey', label='dim 2')
        ax2.set(title='correlation', xlabel='latent', ylabel='measured')
    else:
        # estimate given: show how well the estimate recovers the latent state
        ax2.plot(state[:, 0], estimate[:, 0], '.', color=color,
                 label='latent dim 1')
        ax2.plot(state[:, 1], estimate[:, 1], 'x', color=color,
                 label='latent dim 2')
        ax2.set(title='correlation',
                xlabel='real latent',
                ylabel='estimated latent')
    ax2.legend()
    return ax1, ax2
def plot_gaze_data(data, img=None, ax=None):
    """Scatter-plot gaze samples, optionally over the stimulus image.

    Args:
        data (ndarray): gaze samples in pixel coordinates, shape (T, 2)
        img (ndarray or None): stimulus image to draw underneath the gaze
        ax (Axes or None): axes to draw into; a new figure is made when None

    Returns:
        Axes: the axes the gaze was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=(8, 6))
    xlim, ylim = None, None
    if img is not None:
        ax.imshow(img, aspect='auto')
        # flip the y-axis so pixel row 0 is at the top, matching image coords
        ylim = (img.shape[0], 0)
        xlim = (0, img.shape[1])
    ax.scatter(data[:, 0], data[:, 1], c='m', s=100, alpha=0.7)
    ax.set(xlim=xlim, ylim=ylim)
    return ax
def plot_kf_state(kf, data, ax):
    """Smooth `data` with the Kalman filter `kf` and draw the result on `ax`.

    The filter's initial state mean is reset to the first observation (padded
    with ones for any extra latent dimensions) before smoothing.  The smoothed
    trajectory is drawn in green, with the start marked by a '>' and the end
    by a square marker (both orange).
    """
    init_mean = np.ones(kf.n_dim_state)
    init_mean[:data.shape[1]] = data[0]
    kf.initial_state_mean = init_mean

    smoothed_means, _ = kf.smooth(data)
    xs, ys = smoothed_means[:, 0], smoothed_means[:, 1]
    ax.plot(xs, ys, 'limegreen', linewidth=3, zorder=1)
    ax.scatter(xs[0], ys[0], c='orange', marker='>', s=200, zorder=2)
    ax.scatter(xs[-1], ys[-1], c='orange', marker='s', s=200, zorder=2)
```
---
# Section 1: Linear Dynamical System (LDS)
```
#@title Video 2: Linear Dynamical Systems
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="2SWh639YgEg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# leaving `video` as the cell's last expression renders the embedded player
video
```
## Kalman filter definitions:
The latent state $s_t$ evolves as a stochastic linear dynamical system in discrete time, with a dynamics matrix $D$:
$$s_t = Ds_{t-1}+w_t$$
Just as in the HMM, the structure is a Markov chain where the state at time point $t$ is conditionally independent of previous states given the state at time point $t-1$.
Sensory measurements $m_t$ (observations) are noisy linear projections of the latent state:
$$m_t = Hs_{t}+\eta_t$$
Both states and measurements have Gaussian variability, often called noise: 'process noise' $w_t$ for the states, and 'measurement' or 'observation noise' $\eta_t$ for the measurements. The initial state is also Gaussian distributed. These quantites have means and covariances:
\begin{eqnarray}
w_t & \sim & \mathcal{N}(0, Q) \\
\eta_t & \sim & \mathcal{N}(0, R) \\
s_0 & \sim & \mathcal{N}(\mu_0, \Sigma_0)
\end{eqnarray}
As a consequence, $s_t$, $m_t$ and their joint distributions are Gaussian. This makes all of the math analytically tractable using linear algebra, so we can easily compute the marginal and conditional distributions we will use for inferring the current state given the entire history of measurements.
_**Please note**: we are trying to create uniform notation across tutorials. In some videos created in 2020, measurements $m_t$ were denoted $y_t$, and the Dynamics matrix $D$ was denoted $F$. We apologize for any confusion!_
## Section 1.1: Sampling from a latent linear dynamical system
The first thing we will investigate is how to generate timecourse samples from a linear dynamical system given its parameters. We will start by defining the following system:
```
# task dimensions
n_dim_state = 2
n_dim_obs = 2
# initialize model parameters
params = {
'D': 0.9 * np.eye(n_dim_state), # state transition matrix
'Q': np.eye(n_dim_obs), # state noise covariance
'H': np.eye(n_dim_state), # observation matrix
'R': 1.0 * np.eye(n_dim_obs), # observation noise covariance
'mu_0': np.zeros(n_dim_state), # initial state mean
'sigma_0': 0.1 * np.eye(n_dim_state), # initial state noise covariance
}
```
**Coding note**: We used a parameter dictionary `params` above. As the number of parameters we need to provide to our functions increases, it can be beneficial to condense them into a data structure like this to clean up the number of inputs we pass in. The trade-off is that we have to know what is in our data structure to use those values, rather than looking at the function signature directly.
### Exercise 1: Sampling from a linear dynamical system
In this exercise you will implement the dynamics functions of a linear dynamical system to sample both a latent space trajectory (given parameters set above) and noisy measurements.
```
def sample_lds(n_timesteps, params, seed=0):
    """ Generate samples from a Linear Dynamical System specified by the provided
    parameters.

    Args:
        n_timesteps (int): the number of time steps to simulate
        params (dict): a dictionary of model parameters: (D, Q, H, R, mu_0, sigma_0)
        seed (int): a random seed to use for reproducibility checks

    Returns:
        ndarray, ndarray: the generated state and observation data
    """
    n_dim_state = params['D'].shape[0]
    n_dim_obs = params['H'].shape[0]

    # set seed
    np.random.seed(seed)

    # precompute random samples from the provided covariance matrices
    # mean defaults to 0: mi ~ N(0, Q) is the process noise w_t,
    # eta ~ N(0, R) is the measurement noise
    mi = stats.multivariate_normal(cov=params['Q']).rvs(n_timesteps)
    eta = stats.multivariate_normal(cov=params['R']).rvs(n_timesteps)

    # initialize state and observation arrays
    state = np.zeros((n_timesteps, n_dim_state))
    obs = np.zeros((n_timesteps, n_dim_obs))

    ###################################################################
    ## TODO for students: compute the next state and observation values
    # Fill out function and remove
    raise NotImplementedError("Student excercise: compute the next state and observation values")
    ###################################################################

    # simulate the system
    for t in range(n_timesteps):
        # write the expressions for computing state values given the time step
        if t == 0:
            # hint: the first state is a draw from N(mu_0, sigma_0)
            state[t] = ...
        else:
            # hint: s_t = D s_{t-1} + w_t
            state[t] = ...
        # write the expression for computing the observation
        # hint: m_t = H s_t + eta_t
        obs[t] = ...
    return state, obs
# Uncomment below to test your function
# state, obs = sample_lds(100, params)
# print('sample at t=3 ', state[3])
# plot_kalman(state, obs, title='sample')
# to_remove solution
def sample_lds(n_timesteps, params, seed=0):
    """Draw a latent state trajectory and noisy measurements from a linear
    dynamical system.

    Args:
        n_timesteps (int): number of time steps to simulate
        params (dict): model parameters (D, Q, H, R, mu_0, sigma_0)
        seed (int): seed for numpy's random generator, for reproducibility

    Returns:
        ndarray, ndarray: the generated state and observation arrays, each of
        shape (n_timesteps, dim).
    """
    dim_state = params['D'].shape[0]
    dim_obs = params['H'].shape[0]

    np.random.seed(seed)

    # Pre-draw every zero-mean noise sample so the loop below is fully
    # deterministic given the seed: process noise w_t ~ N(0, Q) and
    # measurement noise eta_t ~ N(0, R).
    process_noise = stats.multivariate_normal(cov=params['Q']).rvs(n_timesteps)
    measurement_noise = stats.multivariate_normal(cov=params['R']).rvs(n_timesteps)

    state = np.zeros((n_timesteps, dim_state))
    obs = np.zeros((n_timesteps, dim_obs))

    for t in range(n_timesteps):
        if t == 0:
            # the first state comes from the initial distribution N(mu_0, sigma_0)
            state[t] = stats.multivariate_normal(
                mean=params['mu_0'], cov=params['sigma_0']).rvs(1)
        else:
            # linear dynamics: s_t = D s_{t-1} + w_t
            state[t] = params['D'] @ state[t - 1] + process_noise[t]
        # measurement: m_t = H s_t + eta_t
        obs[t] = params['H'] @ state[t] + measurement_noise[t]

    return state, obs
# Generate 100 steps from the system defined above and plot them
state, obs = sample_lds(100, params)
print('sample at t=3 ', state[3])
with plt.xkcd():
    plot_kalman(state, obs, title='sample')
```
### Interactive Demo: Adjusting System Dynamics
To test your understanding of the parameters of a linear dynamical system, think about what you would expect if you made the following changes:
1. Reduce observation noise $R$
2. Increase respective temporal dynamics $D$
Use the interactive widget below to vary the values of $R$ and $D$.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(R=widgets.FloatLogSlider(1., min=-2, max=2),
                  D=widgets.FloatSlider(0.9, min=0.0, max=1.0, step=.01))
def explore_dynamics(R=0.1, D=0.5):
    """Re-sample and plot the LDS for slider-chosen observation noise R and
    dynamics strength D (the widget defaults override the signature defaults).
    """
    params = {
        'D': D * np.eye(n_dim_state),  # state transition matrix
        'Q': np.eye(n_dim_obs),  # state noise covariance
                                 # NOTE(review): sized with n_dim_obs although Q
                                 # acts on the state -- fine only because both
                                 # dims are 2 here
        'H': np.eye(n_dim_state),  # observation matrix
        'R': R * np.eye(n_dim_obs),  # observation noise covariance
        'mu_0': np.zeros(n_dim_state),  # initial state mean
        'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
    }
    state, obs = sample_lds(100, params)
    plot_kalman(state, obs, title='sample')
```
---
# Section 2: Kalman Filtering
```
#@title Video 3: Kalman Filtering
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="VboZOV9QMOI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# leaving `video` as the cell's last expression renders the embedded player
video
```
We want to infer the latent state variable $s_t$ given the measured (observed) variable $m_t$.
$$P(s_t|m_1, ..., m_t, m_{t+1}, ..., m_T)\sim \mathcal{N}(\hat{\mu}_t, \hat{\Sigma_t})$$
First we obtain estimates of the latent state by running the filtering from $t=0,....T$.
$$s_t^{\rm pred}\sim \mathcal{N}(\hat{\mu}_t^{\rm pred},\hat{\Sigma}_t^{\rm pred})$$
Where $\hat{\mu}_t^{\rm pred}$ and $\hat{\Sigma}_t^{\rm pred}$ are derived as follows:
\begin{eqnarray}
\hat{\mu}_1^{\rm pred} & = & D\hat{\mu}_{0} \\
\hat{\mu}_t^{\rm pred} & = & D\hat{\mu}_{t-1}
\end{eqnarray}
This is the prediction for $s_t$ obtained simply by taking the expected value of $s_{t-1}$ and projecting it forward one step using the transition matrix $D$.
We do the same for the covariance, taking into account the noise covariance $Q$ and the fact that scaling a variable by $D$ scales its covariance $\Sigma$ as $D\Sigma D^T$:
\begin{eqnarray}
\hat{\Sigma}_0^{\rm pred} & = & D\hat{\Sigma}_{0}D^T+Q \\
\hat{\Sigma}_t^{\rm pred} & = & D\hat{\Sigma}_{t-1}D^T+Q
\end{eqnarray}
We then use a Bayesian update from the newest measurements to obtain $\hat{\mu}_t^{\rm filter}$ and $\hat{\Sigma}_t^{\rm filter}$
Project our prediction to observational space:
$$m_t^{\rm pred}\sim \mathcal{N}(H\hat{\mu}_t^{\rm pred}, H\hat{\Sigma}_t^{\rm pred}H^T+R)$$
update prediction by actual data:
\begin{eqnarray}
s_t^{\rm filter} & \sim & \mathcal{N}(\hat{\mu}_t^{\rm filter}, \hat{\Sigma}_t^{\rm filter}) \\
\hat{\mu}_t^{\rm filter} & = & \hat{\mu}_t^{\rm pred}+K_t(m_t-H\hat{\mu}_t^{\rm pred}) \\
\hat{\Sigma}_t^{\rm filter} & = & (I-K_tH)\hat{\Sigma}_t^{\rm pred}
\end{eqnarray}
Kalman gain matrix:
$$K_t=\hat{\Sigma}_t^{\rm pred}H^T(H\hat{\Sigma}_t^{\rm pred}H^T+R)^{-1}$$
We use the latent-only prediction to project it to the observational space and compute a correction proportional to the error $m_t-HDz_{t-1}$ between prediction and data. The coefficient of this correction is the Kalman gain matrix.
**Interpretations**
If measurement noise is small and dynamics are fast, then estimation will depend mostly on currently observed data.
If the measurement noise is large, then the Kalman filter uses past observations as well, combining them as long as the underlying state is at least somewhat predictable.
In order to explore the impact of filtering, we will use the following noisy oscillatory system:
```
# task dimensions
n_dim_state = 2
n_dim_obs = 2
T=100

# initialize model parameters for a noisy damped-oscillator system
# (period ~20 steps, damping 0.9) observed through very large noise R
params = {
    'D': np.array([[1., 1.], [-(2*np.pi/20.)**2., .9]]),  # state transition matrix
    'Q': np.eye(n_dim_obs),  # state noise covariance
                             # NOTE(review): sized with n_dim_obs although Q
                             # acts on the state -- fine only because both
                             # dims are 2 here
    'H': np.eye(n_dim_state),  # observation matrix
    'R': 100.0 * np.eye(n_dim_obs),  # observation noise covariance (large -> very noisy data)
    'mu_0': np.zeros(n_dim_state),  # initial state mean
    'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
}

state, obs = sample_lds(T, params)
plot_kalman(state, obs, title='sample')
```
## Exercise 2: Implement Kalman filtering
In this exercise you will implement the Kalman filter (forward) process. Your focus will be on writing the expressions for the Kalman gain, filter mean, and filter covariance at each time step (refer to the equations above).
```
def kalman_filter(data, params):
    """ Perform Kalman filtering (forward pass) on the data given the provided
    system parameters.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (D, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the filtered system means and noise covariance values
    """
    # pulled out of the params dict for convenience
    D = params['D']
    Q = params['Q']
    H = params['H']
    R = params['R']
    n_dim_state = D.shape[0]
    n_dim_obs = H.shape[0]
    I = np.eye(n_dim_state)  # identity matrix

    # state tracking arrays: one filtered mean/covariance per time step
    mu = np.zeros((len(data), n_dim_state))
    sigma = np.zeros((len(data), n_dim_state, n_dim_state))

    # filter the data
    for t, y in enumerate(data):
        if t == 0:
            # no dynamics yet: the prediction is the initial-state prior
            mu_pred = params['mu_0']
            sigma_pred = params['sigma_0']
        else:
            # prediction step: propagate the previous estimate through D
            mu_pred = D @ mu[t-1]
            sigma_pred = D @ sigma[t-1] @ D.T + Q
        ###########################################################################
        ## TODO for students: compute the filtered state mean and covariance values
        # Fill out function and remove
        raise NotImplementedError("Student excercise: compute the filtered state mean and covariance values")
        ###########################################################################
        # write the expression for computing the Kalman gain
        K = ...
        # write the expression for computing the filtered state mean
        mu[t] = ...
        # write the expression for computing the filtered state noise covariance
        sigma[t] = ...
    return mu, sigma
# Uncomment below to test your function
# filtered_state_means, filtered_state_covariances = kalman_filter(obs, params)
# plot_kalman(state, obs, filtered_state_means, title="my kf-filter",
# color='r', label='my kf-filter')
# to_remove solution
def kalman_filter(data, params):
    """Run the forward (filtering) pass of the Kalman filter.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (D, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the filtered means (T, n_state) and covariances
        (T, n_state, n_state).
    """
    # unpack the model matrices once, outside the loop
    D, Q = params['D'], params['Q']
    H, R = params['H'], params['R']
    dim_state = D.shape[0]
    identity = np.eye(dim_state)

    # one filtered mean / covariance per time step
    mu = np.zeros((len(data), dim_state))
    sigma = np.zeros((len(data), dim_state, dim_state))

    for t, measurement in enumerate(data):
        # --- predict step: propagate the previous estimate through the dynamics
        if t == 0:
            prior_mean = params['mu_0']
            prior_cov = params['sigma_0']
        else:
            prior_mean = D @ mu[t-1]
            prior_cov = D @ sigma[t-1] @ D.T + Q

        # --- update step: blend the prediction with the new measurement
        # Kalman gain weighs prediction vs. data by their uncertainties
        gain = prior_cov @ H.T @ np.linalg.inv(H @ prior_cov @ H.T + R)
        mu[t] = prior_mean + gain @ (measurement - H @ prior_mean)
        sigma[t] = (identity - gain @ H) @ prior_cov

    return mu, sigma
# Run the filter on the noisy oscillator data and overlay the estimate
filtered_state_means, filtered_state_covariances = kalman_filter(obs, params)
with plt.xkcd():
    plot_kalman(state, obs, filtered_state_means, title="my kf-filter",
                color='r', label='my kf-filter')
```
---
# Section 3: Fitting Eye Gaze Data
```
#@title Video 4: Fitting Eye Gaze Data
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="M7OuXmVWHGI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# leaving `video` as the cell's last expression renders the embedded player
video
```
Tracking eye gaze is used in both experimental and user interface applications. Getting an accurate estimation of where someone is looking on a screen in pixel coordinates can be challenging, however, due to the various sources of noise inherent in obtaining these measurements. A main source of noise is the general accuracy of the eye tracker device itself and how well it maintains calibration over time. Changes in ambient light or subject position can further reduce accuracy of the sensor. Eye blinks introduce a different form of noise as interruptions in the data stream which also need to be addressed.
Fortunately we have a candidate solution for handling noisy eye gaze data in the Kalman filter we just learned about. Let's look at how we can apply these methods to a small subset of data taken from the [MIT Eyetracking Database](http://people.csail.mit.edu/tjudd/WherePeopleLook/index.html) [[Judd et al. 2009](http://people.csail.mit.edu/tjudd/WherePeopleLook/Docs/wherepeoplelook.pdf)]. This data was collected as part of an effort to model [visual saliency](http://www.scholarpedia.org/article/Visual_salience) -- given an image, can we predict where a person is most likely going to look.
```
# load eyetracking data: per-subject gaze traces plus the stimulus images
subjects, images = load_eyetracking_data()
```
## Interactive Demo: Tracking Eye Gaze
We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.
Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(subject_id=widgets.IntSlider(-1, min=-1, max=4),
                  image_id=widgets.IntSlider(0, min=0, max=2))
def plot_subject_trace(subject_id=-1, image_id=0):
    """Overlay one subject's gaze trace on a stimulus image.

    subject_id == -1 shows the bare stimulus (an empty gaze array is used).
    """
    if subject_id == -1:
        # sentinel: 3 images x 0 samples x (x, y) -> nothing to scatter
        subject = np.zeros((3, 0, 2))
    else:
        subject = subjects[subject_id]
    data = subject[image_id]
    img = images[image_id]
    fig, ax = plt.subplots()
    ax.imshow(img, aspect='auto')
    ax.scatter(data[:, 0], data[:, 1], c='m', s=100, alpha=0.7)
    # invert the y-axis so pixel row 0 is at the top (image coordinates)
    ax.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
```
## Section 3.1: Fitting data with `pykalman`
Now that we have data, we'd like to use Kalman filtering to give us a better estimate of the true gaze. Up until this point we've known the parameters of our LDS, but here we need to estimate them from data directly. We will use the `pykalman` package to handle this estimation using the EM algorithm, a useful and influential learning algorithm described briefly in the bonus material.
Before exploring fitting models with `pykalman` it's worth pointing out some naming conventions used by the library:
$$
\begin{align}
D&: \texttt{transition_matrices} &
Q &: \texttt{transition_covariance}\\
H &:\texttt{observation_matrices} &
R &:\texttt{observation_covariance}\\
\mu_0 &: \texttt{initial_state_mean} & \Sigma_0 &: \texttt{initial_state_covariance}
\end{align}
$$
The first thing we need to do is provide a guess at the dimensionality of the latent state. Let's start by assuming the dynamics line-up directly with the observation data (pixel x,y-coordinates), and so we have a state dimension of 2.
We also need to decide which parameters we want the EM algorithm to fit. In this case, we will let the EM algorithm discover the dynamics parameters i.e. the $D$, $Q$, $H$, and $R$ matrices.
We set up our `pykalman` `KalmanFilter` object with these settings using the code below.
```
# set up our KalmanFilter object and tell it which parameters we want to
# estimate
np.random.seed(1)
n_dim_obs = 2
n_dim_state = 2
kf = pykalman.KalmanFilter(
    n_dim_state=n_dim_state,
    n_dim_obs=n_dim_obs,
    # let EM fit the dynamics (D, Q) and observation (H, R) parameters;
    # the initial state mean/covariance are set by hand before fitting
    em_vars=['transition_matrices', 'transition_covariance',
             'observation_matrices', 'observation_covariance']
)
```
Because we know from the reported experimental design that subjects fixated in the center of the screen right before the image appears, we can set the initial starting state estimate $\mu_0$ as being the center pixel of the stimulus image (the first data point in this sample dataset) with a correspondingly low initial noise covariance $\Sigma_0$. Once we have everything set, it's time to fit some data.
```
# Choose a subject and stimulus image
subject_id = 1
image_id = 2
data = subjects[subject_id][image_id]

# Provide the initial states: subjects fixated the screen center before the
# image appeared, so the first gaze sample makes a low-uncertainty prior
kf.initial_state_mean = data[0]
kf.initial_state_covariance = 0.1*np.eye(n_dim_state)

# Estimate the parameters from data using the EM algorithm, then report
# the fitted dynamics and observation matrices/covariances
kf.em(data)
print(f'D=\n{kf.transition_matrices}')
print(f'Q =\n{kf.transition_covariance}')
print(f'H =\n{kf.observation_matrices}')
print(f'R =\n{kf.observation_covariance}')
```
We see that the EM algorithm has found fits for the various dynamics parameters. One thing you will note is that both the state and observation matrices are close to the identity matrix, which means the x- and y-coordinate dynamics are independent of each other and primarily impacted by the noise covariances.
We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.
Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(subject_id=widgets.IntSlider(1, min=0, max=4))
def plot_smoothed_traces(subject_id=0):
    """Show raw gaze (magenta) and the Kalman-smoothed trace (green) for the
    chosen subject across all three stimulus images.
    """
    subject = subjects[subject_id]
    fig, axes = plt.subplots(ncols=3, figsize=(18, 4))
    for data, img, ax in zip(subject, images, axes):
        ax = plot_gaze_data(data, img=img, ax=ax)
        plot_kf_state(kf, data, ax)
```
## Discussion questions:
Why do you think one trace from one subject was sufficient to provide a decent fit across all subjects? If you were to go back and change the subject_id and/or image_id for when we fit the data using EM, do you think the fits would be different?
We don't think the eye is exactly following a linear dynamical system. Nonetheless that is what we assumed for this exercise when we applied a Kalman filter. Despite the mismatch, these algorithms do perform well. Discuss what differences we might find between the true and assumed processes. What mistakes might be likely consequences of these differences?
Finally, recall that the original task was to use this data to help develop models of visual salience. While our Kalman filter is able to provide smooth estimates of observed gaze data, it's not telling us anything about *why* the gaze is going in a certain direction. In fact, if we sample data from our parameters and plot them, we get what amounts to a random walk.
```
# Sample a synthetic trajectory from the fitted model; with no stimulus
# input the latent dynamics amount to a random walk
kf_state, kf_data = kf.sample(len(data))
ax = plot_gaze_data(kf_data, img=images[2])
plot_kf_state(kf, kf_data, ax)
```
This should not be surprising, as we have given the model no other observed data beyond the pixels at which gaze was detected. We expect there is some other aspect driving the latent state of where to look next other than just the previous fixation location.
In summary, while the Kalman filter is a good option for smoothing the gaze trajectory itself, especially if using a lower-quality eye tracker or in noisy environmental conditions, a linear dynamical system may not be the right way to approach the much more challenging task of modeling visual saliency.
## Handling Eye Blinks
In the MIT Eyetracking Database, raw tracking data includes times when the subject blinked. The way this is represented in the data stream is via negative pixel coordinate values.
We could try to mitigate these samples by simply deleting them from the stream, though this introduces other issues. For instance, if each sample corresponds to a fixed time step, and you arbitrarily remove some samples, the integrity of that consistent timestep between samples is lost. It's sometimes better to flag data as missing rather than to pretend it was never there at all, especially with time series data.
Another solution is to use masked arrays. In `numpy`, a [masked array](https://numpy.org/doc/stable/reference/maskedarray.generic.html#what-is-a-masked-array) is an `ndarray` with an additional embedded boolean masking array that indicates which elements should be masked. When computation is performed on the array, the masked elements are ignored. Both `matplotlib` and `pykalman` work with masked arrays, and, in fact, this is the approach taken with the data we explore in this notebook.
In preparing the dataset for this notebook, the original dataset was preprocessed to set all gaze data as masked arrays, with the mask enabled for any pixel with a negative x or y coordinate.
# Bonus
## Review on Gaussian joint, marginal and conditional distributions
Assume
\begin{eqnarray}
z & = & \begin{bmatrix}x \\y\end{bmatrix}\sim N\left(\begin{bmatrix}a \\b\end{bmatrix}, \begin{bmatrix}A & C \\C^T & B\end{bmatrix}\right)
\end{eqnarray}
then the marginal distributions are
\begin{eqnarray}
x & \sim & \mathcal{N}(a, A) \\
y & \sim & \mathcal{N}(b,B)
\end{eqnarray}
and the conditional distributions are
\begin{eqnarray}
x|y & \sim & \mathcal{N}(a+CB^{-1}(y-b), A-CB^{-1}C^T) \\
y|x & \sim & \mathcal{N}(b+C^TA^{-1}(x-a), B-C^TA^{-1}C)
\end{eqnarray}
*important take away: given the joint Gaussian distribution we can derive the conditionals*
## Kalman Smoothing
```
#@title Video 5: Kalman Smoothing and the EM Algorithm
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
# Embed the lecture video (fs=1 enables the fullscreen control)
video = YouTubeVideo(id="4Ar2mYz1Nms", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
Obtain estimates by propagating from $y_T$ back to $y_0$ using results of forward pass ($\hat{\mu}_t^{\rm filter}, \hat{\Sigma}_t^{\rm filter}, P_t=\hat{\Sigma}_{t+1}^{\rm pred}$)
\begin{eqnarray}
s_t & \sim & \mathcal{N}(\hat{\mu}_t^{\rm smooth}, \hat{\Sigma}_t^{\rm smooth}) \\
\hat{\mu}_t^{\rm smooth} & = & \hat{\mu}_t^{\rm filter}+J_t(\hat{\mu}_{t+1}^{\rm smooth}-D\hat{\mu}_t^{\rm filter}) \\
\hat{\Sigma}_t^{\rm smooth} & = & \hat{\Sigma}_t^{\rm filter}+J_t(\hat{\Sigma}_{t+1}^{\rm smooth}-P_t)J_t^T \\
J_t & = & \hat{\Sigma}_t^{\rm filter}D^T P_t^{-1}
\end{eqnarray}
This gives us the final estimate for $z_t$.
\begin{eqnarray}
\hat{\mu}_t & = & \hat{\mu}_t^{\rm smooth} \\
\hat{\Sigma}_t & = & \hat{\Sigma}_t^{\rm smooth}
\end{eqnarray}
### Exercise 3: Implement Kalman smoothing
In this exercise you will implement the Kalman smoothing (backward) process. Again you will focus on writing the expressions for computing the smoothed mean, smoothed covariance, and $J_t$ values.
```
def kalman_smooth(data, params):
    """Perform Kalman smoothing (backward pass) on the data given the provided
    system parameters.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (D, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the smoothed system means and noise covariance values
    """
    # pulled out of the params dict for convenience
    D = params['D']
    Q = params['Q']
    H = params['H']
    R = params['R']
    n_dim_state = D.shape[0]
    n_dim_obs = H.shape[0]

    # first run the forward pass to get the filtered means and covariances
    mu, sigma = kalman_filter(data, params)

    # initialize state mean and covariance estimates; the last smoothed
    # estimate equals the last filtered one (no future data to use)
    mu_hat = np.zeros_like(mu)
    sigma_hat = np.zeros_like(sigma)
    mu_hat[-1] = mu[-1]
    sigma_hat[-1] = sigma[-1]

    # smooth the data, propagating backward from t = T-2 to t = 0
    for t in reversed(range(len(data) - 1)):
        sigma_pred = D @ sigma[t] @ D.T + Q  # sigma_pred at t+1
        ###########################################################################
        ## TODO for students: compute the smoothed state mean and covariance values
        # Fill out function and remove
        raise NotImplementedError("Student excercise: compute the smoothed state mean and covariance values")
        ###########################################################################
        # write the expression to compute the Kalman gain for the backward process
        J = ...
        # write the expression to compute the smoothed state mean estimate
        mu_hat[t] = ...
        # write the expression to compute the smoothed state noise covariance estimate
        sigma_hat[t] = ...
    return mu_hat, sigma_hat


# Uncomment once the kalman_smooth function is complete
# smoothed_state_means, smoothed_state_covariances = kalman_smooth(obs, params)
# axes = plot_kalman(state, obs, filtered_state_means, color="r",
#                    label="my kf-filter")
# plot_kalman(state, obs, smoothed_state_means, color="b",
#             label="my kf-smoothed", axes=axes)
# to_remove solution
def kalman_smooth(data, params):
    """Run the Kalman smoother (backward pass) over a sequence of observations.

    Args:
        data (ndarray): observations, shape (n_timesteps, n_dim_obs)
        params (dict): model parameters (D, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: smoothed state means and smoothed state covariances
    """
    # unpack the model parameters
    D = params['D']
    Q = params['Q']
    H = params['H']
    R = params['R']
    n_dim_state = D.shape[0]
    n_dim_obs = H.shape[0]

    # forward pass: filtered means and covariances for every time step
    mu, sigma = kalman_filter(data, params)

    # the backward pass is seeded with the final filtered estimate,
    # since no future observations exist beyond the last time step
    mu_hat = np.zeros_like(mu)
    sigma_hat = np.zeros_like(sigma)
    mu_hat[-1] = mu[-1]
    sigma_hat[-1] = sigma[-1]

    # walk backward from t = T-2 down to t = 0
    for t in range(len(data) - 2, -1, -1):
        # one-step-ahead (predicted) covariance at t+1
        sigma_pred = D @ sigma[t] @ D.T + Q
        # backward Kalman gain J_t
        J = sigma[t] @ D.T @ np.linalg.inv(sigma_pred)
        # smoothed mean: filtered mean corrected toward the future smoothed estimate
        mu_hat[t] = mu[t] + J @ (mu_hat[t + 1] - D @ mu[t])
        # smoothed covariance: filtered covariance plus the correction term
        sigma_hat[t] = sigma[t] + J @ (sigma_hat[t + 1] - sigma_pred) @ J.T
    return mu_hat, sigma_hat
# Run the completed smoother and overlay it on the forward-filtered estimate
smoothed_state_means, smoothed_state_covariances = kalman_smooth(obs, params)
with plt.xkcd():
    axes = plot_kalman(state, obs, filtered_state_means, color="r",
                       label="my kf-filter")
    plot_kalman(state, obs, smoothed_state_means, color="b",
                label="my kf-smoothed", axes=axes)
```
**Forward vs Backward**
Now that we have implementations for both, let's compare their performance by computing the MSE between the filtered (forward) and smoothed (backward) estimated states and the true latent state.
```
print(f"Filtered MSE: {np.mean((state - filtered_state_means)**2):.3f}")
print(f"Smoothed MSE: {np.mean((state - smoothed_state_means)**2):.3f}")
```
In this example, the smoothed estimate is clearly superior to the filtered one. This makes sense as the forward pass uses only the past measurements, whereas the backward pass can use future measurement too, correcting the forward pass estimates given all the data we've collected.
So why would you ever use Kalman filtering alone, without smoothing? As Kalman filtering only depends on already observed data (i.e. the past) it can be run in a streaming, or on-line, setting. Kalman smoothing relies on future data as it were, and as such can only be applied in a batch, or off-line, setting. So use Kalman filtering if you need real-time corrections and Kalman smoothing if you are considering already-collected data.
This online case is typically what the brain faces.
## The Expectation-Maximization (EM) Algorithm
- want to maximize $\log p(m|\theta)$
- need to marginalize out the latent state *(which is not tractable)*
$$p(m|\theta)=\int p(m,s|\theta)\,ds$$
- introduce a probability distribution $q(s)$ which will approximate the latent state distribution; since $\int_s q(s)\,ds = 1$, we can write
$$\log p(m|\theta) = \log p(m|\theta)\int_s q(s)\,ds$$
- can be rewritten as
$$\mathcal{L}(q,\theta)+KL\left(q(s)\,\|\,p(s|m,\theta)\right)$$
- $\mathcal{L}(q,\theta)$ contains the joint distribution of $m$ and $s$
- $KL(q||p)$ contains the conditional distribution of $s|m$
#### Expectation step
- parameters are kept fixed
- find a good approximation $q(s)$: maximize lower bound $\mathcal{L}(q,\theta)$ with respect to $q(s)$
- (already implemented Kalman filter+smoother)
#### Maximization step
- keep distribution $q(s)$ fixed
- change parameters to maximize the lower bound $\mathcal{L}(q,\theta)$
As mentioned, we have already effectively solved for the E-Step with our Kalman filter and smoother. The M-step requires further derivation, which is covered in the Appendix. Rather than having you implement the M-Step yourselves, let's instead turn to using a library that has already implemented EM for exploring some experimental data from cognitive neuroscience.
### The M-step for a LDS
*(see Bishop, chapter 13.3.2 Learning in LDS)*
Update parameters of the probability distribution
*For the updates in the M-step we will need the following posterior marginals obtained from the Kalman smoothing results* $\hat{\mu}_t^{\rm smooth}, \hat{\Sigma}_t^{\rm smooth}$
$$
\begin{eqnarray}
E(s_t) &=& \hat{\mu}_t \\
E(s_ts_{t-1}^T) &=& J_{t-1}\hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t-1}^T\\
E(s_ts_{t}^T) &=& \hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t}^T
\end{eqnarray}
$$
**Update parameters**
Initial parameters
$$
\begin{eqnarray}
\mu_0^{\rm new}&=& E(s_0)\\
Q_0^{\rm new} &=& E(s_0s_0^T)-E(s_0)E(s_0^T) \\
\end{eqnarray}
$$
Hidden (latent) state parameters
$$
\begin{eqnarray}
D^{\rm new} &=& \left(\sum_{t=2}^N E(s_ts_{t-1}^T)\right)\left(\sum_{t=2}^N E(s_{t-1}s_{t-1}^T)\right)^{-1} \\
Q^{\rm new} &=& \frac{1}{T-1} \sum_{t=2}^N E\big(s_ts_t^T\big) - D^{\rm new}E\big(s_{t-1}s_{t}^T\big) - E\big(s_ts_{t-1}^T\big)D^{\rm new}+D^{\rm new}E\big(s_{t-1}s_{t-1}^T\big)\big(D^{\rm new}\big)^{T}\\
\end{eqnarray}
$$
Observable (measured) space parameters
$$H^{\rm new}=\left(\sum_{t=1}^N y_t E(s_t^T)\right)\left(\sum_{t=1}^N E(s_t s_t^T)\right)^{-1}$$
$$R^{\rm new}=\frac{1}{T}\sum_{t=1}^N y_ty_t^T-H^{\rm new}E(s_t)y_t^T-y_tE(s_t^T)\big(H^{\rm new}\big)^T+H^{\rm new}E(s_ts_t^T)\big(H^{\rm new}\big)^T$$
| github_jupyter |
# K-Means with Intel® Data Analytics Acceleration Library in Amazon SageMaker
## Introduction
Intel® Data Analytics Acceleration Library (Intel® DAAL) is the library of Intel® architecture optimized building blocks covering all stages of data analytics: data acquisition from a data source, preprocessing, transformation, data mining, modeling, validation, and decision making. One of its algorithms is K-Means.
K-Means is among the most popular and simplest clustering methods. It is intended to partition a data set into a small number of clusters such that feature vectors within a cluster have greater similarity with one another than with feature vectors from other clusters. Each cluster is characterized by a representative point, called a centroid, and a cluster radius.
In other words, the clustering methods enable reducing the problem of analysis of the entire data set to the analysis of clusters.
There are numerous ways to define the measure of similarity and centroids. For K-Means, the centroid is defined as the mean of feature vectors within the cluster.
Intel® DAAL developer guide: https://software.intel.com/en-us/daal-programming-guide
Intel® DAAL documentation for K-Means: https://software.intel.com/en-us/daal-programming-guide-k-means-clustering
## K-Means Usage with SageMaker Estimator
Firstly, you need to import SageMaker package, get execution role and create session.
```
import sagemaker

# IAM role the training jobs/endpoints will assume, plus an API session
role = sagemaker.get_execution_role()
sess = sagemaker.Session()
```
Secondly, you can specify parameters of K-Means.
#### Hyperparameters
"nClusters" and "maxIterations" hyperparameters of K-Means algorithm are required, all other - optional.
<table style="border: 1px solid black;">
<tr>
<td><strong>Parameter name</strong></td>
<td><strong>Type</strong></td>
<td><strong>Default value</strong></td>
<td><strong>Description</strong></td>
</tr>
<tr>
<td>fptype</td>
<td>str</td>
<td>"double"</td>
<td>The floating-point type that the algorithm uses for intermediate computations. Can be "float" or "double"</td>
</tr>
<tr>
<td>nClusters</td>
<td>int</td>
<td>2</td>
<td>The number of clusters</td>
</tr>
<tr>
<td>initMethod</td>
<td>str</td>
<td>"defaultDense"</td>
<td>Available initialization methods for K-Means clustering: defaultDense - uses first nClusters points as initial clusters, randomDense - uses random nClusters points as initial clusters, plusPlusDense - uses K-Means++ algorithm; parallelPlusDense - uses parallel K-Means++ algorithm</td>
</tr>
<tr>
<td>oversamplingFactor</td>
<td>float</td>
<td>0.5</td>
<td>A fraction of nClusters in each of nRounds of parallel K-Means++. L=nClusters*oversamplingFactor points are sampled in a round</td>
</tr>
<tr>
<td>nRounds</td>
<td>int</td>
<td>5</td>
<td>The number of rounds for parallel K-Means++. (L*nRounds) must be greater than nClusters</td>
</tr>
<tr>
<td>seed</td>
<td>int</td>
<td>777</td>
<td>The seed for random number generator</td>
</tr>
<tr>
<td>method</td>
<td>str</td>
<td>"lloydDense"</td>
<td>Computation method for K-Means clustering</td>
</tr>
<tr>
<td>maxIterations</td>
<td>int</td>
<td>100</td>
<td>The number of iterations</td>
</tr>
<tr>
<td>accuracyThreshold</td>
<td>float</td>
<td>0</td>
<td>The threshold for termination of the algorithm</td>
</tr>
<tr>
<td>gamma</td>
<td>float</td>
<td>1</td>
<td>The weight to be used in distance calculation for binary categorical features</td>
</tr>
<tr>
<td>distanceType</td>
<td>str</td>
<td>"euclidean"</td>
<td>The measure of closeness between points (observations) being clustered. The only distance type supported so far is the Euclidean distance</td>
</tr>
<tr>
<td>assignFlag</td>
<td>bool</td>
<td>True</td>
<td>A flag that enables computation of assignments, that is, assigning cluster indices to respective observations</td>
</tr>
</table>
Example of hyperparameters dictionary:
```
# Example hyperparameters: 5 clusters, K-Means++ initialization,
# single-precision floats, up to 1000 Lloyd iterations
kmeans_params = {
    "fptype": "float",
    "nClusters": 5,
    "initMethod": "plusPlusDense",
    "maxIterations": 1000
}
```
Then, you need to create SageMaker Estimator instance with following parameters:
<table style="border: 1px solid black;">
<tr>
<td><strong>Parameter name</strong></td>
<td><strong>Description</strong></td>
</tr>
<tr>
<td>image_name</td>
<td>The container image to use for training</td>
</tr>
<tr>
<td>role</td>
<td>An AWS IAM role. The SageMaker training jobs and APIs that create SageMaker endpoints use this role to access training data and models</td>
</tr>
<tr>
<td>train_instance_count</td>
<td>Number of Amazon EC2 instances to use for training. Should be 1, because this is not a distributed version of the algorithm</td>
</tr>
<tr>
<td>train_instance_type</td>
<td>Type of EC2 instance to use for training. See available types on Amazon Marketplace page of algorithm</td>
</tr>
<tr>
<td>input_mode</td>
<td>The input mode that the algorithm supports. May be "File" or "Pipe"</td>
</tr>
<tr>
<td>output_path</td>
<td>S3 location for saving the training result (model artifacts and output files)</td>
</tr>
<tr>
<td>sagemaker_session</td>
<td>Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed</td>
</tr>
<tr>
<td>hyperparameters</td>
<td>Dictionary containing the hyperparameters to initialize this estimator with</td>
</tr>
</table>
Full SageMaker Estimator documentation: https://sagemaker.readthedocs.io/en/latest/estimators.html
```
daal_kmeans_arn = "<algorithm-arn>" # you can find it on algorithm page in your subscriptions
# Build the marketplace-algorithm estimator; train_instance_count must be 1
# because this container is not a distributed implementation
daal_kmeans = sagemaker.algorithm.AlgorithmEstimator(
    algorithm_arn=daal_kmeans_arn,
    role=role,
    base_job_name="<base-job-name>",
    train_instance_count=1,
    train_instance_type='ml.m4.xlarge',
    input_mode="File",
    output_path="s3://<bucket-name>/<output-path>",
    sagemaker_session=sess,
    hyperparameters=kmeans_params
)
```
### Training stage
On training stage, K-Means algorithm consume input data from S3 location and computes centroids.
This container supports only .csv ("comma-separated values") files.
```
# Train: compute centroids from the CSV data in the "training" channel
daal_kmeans.fit({"training": "s3://<bucket-name>/<training-data-path>"})
```
### Real-time prediction
On prediction stage, K-Means algorithm determines assignments for input data using previously computed centroids.
Firstly, you need to deploy SageMaker endpoint that consumes data.
```
# Deploy a real-time inference endpoint (one instance, CSV serialization)
predictor = daal_kmeans.deploy(1, "ml.m4.xlarge", serializer=sagemaker.predictor.csv_serializer)
```
Secondly, you should pass data as numpy array to predictor instance and get assignments.
In this example we are passing random data, but you can use any numpy 2D array.
```
import numpy as np

# Random 10x10 demo input; any 2-D numpy array works here
predict_data = np.random.random(size=(10,10))
# The endpoint returns bytes, so decode before printing
print(predictor.predict(predict_data).decode("utf-8"))
```
Don't forget to delete endpoint if you don't need it anymore.
```
# Tear down the endpooint's resources to stop incurring charges
sess.delete_endpoint(predictor.endpoint)
```
### Batch transform job
If you don't need real-time prediction, you can use transform job. It uses saved model with centroids, compute assignments one time and saves it in specified or auto-generated output path.
More about transform jobs: https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html
Transformer API: https://sagemaker.readthedocs.io/en/latest/transformer.html
```
# Batch transform: compute cluster assignments once and write them to S3
transformer = daal_kmeans.transformer(1, 'ml.m4.xlarge')
transformer.transform("s3://<bucket-name>/<training-data-path>", content_type='text/csv')
transformer.wait()  # block until the job finishes
print(transformer.output_path)
```
| github_jupyter |
# Shor's Algorithm
Shor’s algorithm is famous for factoring integers in polynomial time. Since the best-known classical algorithm requires superpolynomial time to factor the product of two primes, the widely used cryptosystem, RSA, relies on factoring being impossible for large enough integers.
In this chapter we will focus on the quantum part of Shor’s algorithm, which actually solves the problem of _period finding_. Since a factoring problem can be turned into a period finding problem in polynomial time, an efficient period finding algorithm can be used to factor integers efficiently too. For now it's enough to show that if we can compute the period of $a^x\bmod N$ efficiently, then we can also efficiently factor. Since period finding is a worthy problem in its own right, we will first solve this, then discuss how this can be used to factor in section 5.
```
import matplotlib.pyplot as plt
import numpy as np
from qiskit import QuantumCircuit, Aer, transpile, assemble
from qiskit.visualization import plot_histogram
from math import gcd
from numpy.random import randint
import pandas as pd
from fractions import Fraction
```
## 1. The Problem: Period Finding
Let’s look at the periodic function:
$$ f(x) = a^x \bmod{N}$$
<details>
<summary>Reminder: Modulo & Modular Arithmetic (Click here to expand)</summary>
The modulo operation (abbreviated to 'mod') simply means to find the remainder when dividing one number by another. For example:
$$ 17 \bmod 5 = 2 $$
Since $17 \div 5 = 3$ with remainder $2$. (i.e. $17 = (3\times 5) + 2$). In Python, the modulo operation is denoted through the <code>%</code> symbol.
This behaviour is used in <a href="https://en.wikipedia.org/wiki/Modular_arithmetic">modular arithmetic</a>, where numbers 'wrap round' after reaching a certain value (the modulus). Using modular arithmetic, we could write:
$$ 17 = 2 \pmod 5$$
Note that here the $\pmod 5$ applies to the entire equation (since it is in parenthesis), unlike the equation above where it only applied to the left-hand side of the equation.
</details>
where $a$ and $N$ are positive integers, $a$ is less than $N$, and they have no common factors. The period, or order ($r$), is the smallest (non-zero) integer such that:
$$a^r \bmod N = 1 $$
We can see an example of this function plotted on the graph below. Note that the lines between points are to help see the periodicity and do not represent the intermediate values between the x-markers.
```
N = 35
a = 3

# Calculate the plotting data: f(x) = a^x mod N for x = 0..N-1
# (use N rather than a duplicated literal so changing N keeps the plot consistent)
xvals = np.arange(N)
yvals = [np.mod(a**x, N) for x in xvals]

# Use matplotlib to display it nicely
fig, ax = plt.subplots()
ax.plot(xvals, yvals, linewidth=1, linestyle='dotted', marker='x')
ax.set(xlabel='$x$', ylabel='$%i^x$ mod $%i$' % (a, N),
       title="Example of Periodic Function in Shor's Algorithm")

try:  # plot r on the graph: r is the first x > 0 with a^x mod N == 1
    r = yvals[1:].index(1) + 1
    plt.annotate('', xy=(0, 1), xytext=(r, 1), arrowprops=dict(arrowstyle='<->'))
    plt.annotate('$r=%i$' % r, xy=(r / 3, 1.5))
except ValueError:
    print('Could not find period, check a < N and have no common factors.')
```
## 2. The Solution
Shor’s solution was to use [quantum phase estimation](./quantum-phase-estimation.html) on the unitary operator:
$$ U|y\rangle \equiv |ay \bmod N \rangle $$
To see how this is helpful, let’s work out what an eigenstate of U might look like. If we started in the state $|1\rangle$, we can see that each successive application of U will multiply the state of our register by $a \pmod N$, and after $r$ applications we will arrive at the state $|1\rangle$ again. For example with $a = 3$ and $N = 35$:
$$\begin{aligned}
U|1\rangle &= |3\rangle & \\
U^2|1\rangle &= |9\rangle \\
U^3|1\rangle &= |27\rangle \\
& \vdots \\
U^{(r-1)}|1\rangle &= |12\rangle \\
U^r|1\rangle &= |1\rangle
\end{aligned}$$
```
# Reuse the earlier figure, relabeling it to read as successive applications
# of U to the register state
ax.set(xlabel='Number of applications of U', ylabel='End state of register',
       title="Effect of Successive Applications of U")
fig
```
So a superposition of the states in this cycle ($|u_0\rangle$) would be an eigenstate of $U$:
$$|u_0\rangle = \tfrac{1}{\sqrt{r}}\sum_{k=0}^{r-1}{|a^k \bmod N\rangle} $$
<details>
<summary>Click to Expand: Example with $a = 3$ and $N=35$</summary>
$$\begin{aligned}
|u_0\rangle &= \tfrac{1}{\sqrt{12}}(|1\rangle + |3\rangle + |9\rangle \dots + |4\rangle + |12\rangle) \\[10pt]
U|u_0\rangle &= \tfrac{1}{\sqrt{12}}(U|1\rangle + U|3\rangle + U|9\rangle \dots + U|4\rangle + U|12\rangle) \\[10pt]
&= \tfrac{1}{\sqrt{12}}(|3\rangle + |9\rangle + |27\rangle \dots + |12\rangle + |1\rangle) \\[10pt]
&= |u_0\rangle
\end{aligned}$$
</details>
This eigenstate has an eigenvalue of 1, which isn’t very interesting. A more interesting eigenstate could be one in which the phase is different for each of these computational basis states. Specifically, let’s look at the case in which the phase of the $k$th state is proportional to $k$:
$$\begin{aligned}
|u_1\rangle &= \tfrac{1}{\sqrt{r}}\sum_{k=0}^{r-1}{e^{-\tfrac{2\pi i k}{r}}|a^k \bmod N\rangle}\\[10pt]
U|u_1\rangle &= e^{\tfrac{2\pi i}{r}}|u_1\rangle
\end{aligned}
$$
<details>
<summary>Click to Expand: Example with $a = 3$ and $N=35$</summary>
$$\begin{aligned}
|u_1\rangle &= \tfrac{1}{\sqrt{12}}(|1\rangle + e^{-\tfrac{2\pi i}{12}}|3\rangle + e^{-\tfrac{4\pi i}{12}}|9\rangle \dots + e^{-\tfrac{20\pi i}{12}}|4\rangle + e^{-\tfrac{22\pi i}{12}}|12\rangle) \\[10pt]
U|u_1\rangle &= \tfrac{1}{\sqrt{12}}(|3\rangle + e^{-\tfrac{2\pi i}{12}}|9\rangle + e^{-\tfrac{4\pi i}{12}}|27\rangle \dots + e^{-\tfrac{20\pi i}{12}}|12\rangle + e^{-\tfrac{22\pi i}{12}}|1\rangle) \\[10pt]
U|u_1\rangle &= e^{\tfrac{2\pi i}{12}}\cdot\tfrac{1}{\sqrt{12}}(e^{\tfrac{-2\pi i}{12}}|3\rangle + e^{-\tfrac{4\pi i}{12}}|9\rangle + e^{-\tfrac{6\pi i}{12}}|27\rangle \dots + e^{-\tfrac{22\pi i}{12}}|12\rangle + e^{-\tfrac{24\pi i}{12}}|1\rangle) \\[10pt]
U|u_1\rangle &= e^{\tfrac{2\pi i}{12}}|u_1\rangle
\end{aligned}$$
(We can see $r = 12$ appears in the denominator of the phase.)
</details>
This is a particularly interesting eigenvalue as it contains $r$. In fact, $r$ has to be included to make sure the phase differences between the $r$ computational basis states are equal. This is not the only eigenstate with this behaviour; to generalise this further, we can multiply an integer, $s$, to this phase difference, which will show up in our eigenvalue:
$$\begin{aligned}
|u_s\rangle &= \tfrac{1}{\sqrt{r}}\sum_{k=0}^{r-1}{e^{-\tfrac{2\pi i s k}{r}}|a^k \bmod N\rangle}\\[10pt]
U|u_s\rangle &= e^{\tfrac{2\pi i s}{r}}|u_s\rangle
\end{aligned}
$$
<details>
<summary>Click to Expand: Example with $a = 3$ and $N=35$</summary>
$$\begin{aligned}
|u_s\rangle &= \tfrac{1}{\sqrt{12}}(|1\rangle + e^{-\tfrac{2\pi i s}{12}}|3\rangle + e^{-\tfrac{4\pi i s}{12}}|9\rangle \dots + e^{-\tfrac{20\pi i s}{12}}|4\rangle + e^{-\tfrac{22\pi i s}{12}}|12\rangle) \\[10pt]
U|u_s\rangle &= \tfrac{1}{\sqrt{12}}(|3\rangle + e^{-\tfrac{2\pi i s}{12}}|9\rangle + e^{-\tfrac{4\pi i s}{12}}|27\rangle \dots + e^{-\tfrac{20\pi i s}{12}}|12\rangle + e^{-\tfrac{22\pi i s}{12}}|1\rangle) \\[10pt]
U|u_s\rangle &= e^{\tfrac{2\pi i s}{12}}\cdot\tfrac{1}{\sqrt{12}}(e^{-\tfrac{2\pi i s}{12}}|3\rangle + e^{-\tfrac{4\pi i s}{12}}|9\rangle + e^{-\tfrac{6\pi i s}{12}}|27\rangle \dots + e^{-\tfrac{22\pi i s}{12}}|12\rangle + e^{-\tfrac{24\pi i s}{12}}|1\rangle) \\[10pt]
U|u_s\rangle &= e^{\tfrac{2\pi i s}{12}}|u_s\rangle
\end{aligned}$$
</details>
We now have a unique eigenstate for each integer value of $s$ where $$0 \leq s \leq r-1.$$ Very conveniently, if we sum up all these eigenstates, the different phases cancel out all computational basis states except $|1\rangle$:
$$ \tfrac{1}{\sqrt{r}}\sum_{s=0}^{r-1} |u_s\rangle = |1\rangle$$
<details>
<summary>Click to Expand: Example with $a = 7$ and $N=15$</summary>
For this, we will look at a smaller example where $a = 7$ and $N=15$. In this case $r=4$:
$$\begin{aligned}
\tfrac{1}{2}(\quad|u_0\rangle &= \tfrac{1}{2}(|1\rangle \hphantom{e^{-\tfrac{2\pi i}{12}}}+ |7\rangle \hphantom{e^{-\tfrac{12\pi i}{12}}} + |4\rangle \hphantom{e^{-\tfrac{12\pi i}{12}}} + |13\rangle)\dots \\[10pt]
+ |u_1\rangle &= \tfrac{1}{2}(|1\rangle + e^{-\tfrac{2\pi i}{4}}|7\rangle + e^{-\tfrac{\hphantom{1}4\pi i}{4}}|4\rangle + e^{-\tfrac{\hphantom{1}6\pi i}{4}}|13\rangle)\dots \\[10pt]
+ |u_2\rangle &= \tfrac{1}{2}(|1\rangle + e^{-\tfrac{4\pi i}{4}}|7\rangle + e^{-\tfrac{\hphantom{1}8\pi i}{4}}|4\rangle + e^{-\tfrac{12\pi i}{4}}|13\rangle)\dots \\[10pt]
+ |u_3\rangle &= \tfrac{1}{2}(|1\rangle + e^{-\tfrac{6\pi i}{4}}|7\rangle + e^{-\tfrac{12\pi i}{4}}|4\rangle + e^{-\tfrac{18\pi i}{4}}|13\rangle)\quad) = |1\rangle \\[10pt]
\end{aligned}$$
</details>
Since the computational basis state $|1\rangle$ is a superposition of these eigenstates, which means if we do QPE on $U$ using the state $|1\rangle$, we will measure a phase:
$$\phi = \frac{s}{r}$$
Where $s$ is a random integer between $0$ and $r-1$. We finally use the [continued fractions](https://en.wikipedia.org/wiki/Continued_fraction) algorithm on $\phi$ to find $r$. The circuit diagram looks like this (note that this diagram uses Qiskit's qubit ordering convention):
<img src="images/shor_circuit_1.svg">
We will next demonstrate Shor’s algorithm using Qiskit’s simulators. For this demonstration we will provide the circuits for $U$ without explanation, but in section 4 we will discuss how circuits for $U^{2^j}$ can be constructed efficiently.
## 3. Qiskit Implementation
In this example we will solve the period finding problem for $a=7$ and $N=15$. We provide the circuits for $U$ where:
$$U|y\rangle = |ay\bmod 15\rangle $$
without explanation. To create $U^x$, we will simply repeat the circuit $x$ times. In the next section we will discuss a general method for creating these circuits efficiently. The function `c_amod15` returns the controlled-U gate for `a`, repeated `power` times.
```
def c_amod15(a, power):
    """Build a controlled gate implementing |y> -> |a*y mod 15>, repeated `power` times.

    Args:
        a (int): multiplier; must be coprime to 15 (one of 2, 4, 7, 8, 11, 13)
        power (int): how many times the mod-15 multiplication is applied

    Returns:
        Gate: controlled version of U^power
    """
    if a not in (2, 4, 7, 8, 11, 13):
        raise ValueError("'a' must be 2,4,7,8,11 or 13")
    U = QuantumCircuit(4)
    for _ in range(power):
        # Each multiplier corresponds to a fixed permutation (plus possible
        # bit-flips) of the 4-qubit register
        if a in (2, 13):
            U.swap(0, 1)
            U.swap(1, 2)
            U.swap(2, 3)
        if a in (7, 8):
            U.swap(2, 3)
            U.swap(1, 2)
            U.swap(0, 1)
        if a in (4, 11):
            U.swap(1, 3)
            U.swap(0, 2)
        if a in (7, 11, 13):
            for q in range(4):
                U.x(q)
    gate = U.to_gate()
    gate.name = "%i^%i mod 15" % (a, power)
    return gate.control()
```
We will use 8 counting qubits:
```
# Specify variables
n_count = 8 # number of counting qubits
a = 7  # base whose period mod 15 we want to find
```
We also import the circuit for the QFT (you can read more about the QFT in the [quantum Fourier transform chapter](./quantum-fourier-transform.html#generalqft)):
```
def qft_dagger(n):
    """Return an n-qubit inverse quantum Fourier transform circuit.

    Args:
        n (int): number of qubits

    Returns:
        QuantumCircuit: circuit implementing QFT-dagger on n qubits
    """
    qc = QuantumCircuit(n)
    # The forward QFT ends with swaps, so its inverse begins with them
    for q in range(n // 2):
        qc.swap(q, n - q - 1)
    # Inverse controlled-phase ladder followed by a Hadamard on each qubit
    for tgt in range(n):
        for ctl in range(tgt):
            qc.cp(-np.pi / float(2 ** (tgt - ctl)), ctl, tgt)
        qc.h(tgt)
    qc.name = "QFT†"
    return qc
```
With these building blocks we can easily construct the circuit for Shor's algorithm:
```
# Create QuantumCircuit with n_count counting qubits
# plus 4 qubits for U to act on
qc = QuantumCircuit(n_count + 4, n_count)

# Initialize counting qubits
# in state |+>
for q in range(n_count):
    qc.h(q)

# And auxiliary register in state |1>
qc.x(3 + n_count)

# Do controlled-U operations: counting qubit q controls U^(2^q) on the
# 4-qubit work register
for q in range(n_count):
    qc.append(c_amod15(a, 2**q),
              [q] + [i + n_count for i in range(4)])

# Do inverse-QFT on the counting register to read out the phase
qc.append(qft_dagger(n_count), range(n_count))

# Measure circuit
qc.measure(range(n_count), range(n_count))
qc.draw(fold=-1)  # -1 means 'do not fold'
```
Let's see what results we measure:
```
# Simulate the circuit and plot the measurement counts
aer_sim = Aer.get_backend('aer_simulator')
t_qc = transpile(qc, aer_sim)
results = aer_sim.run(t_qc).result()
counts = results.get_counts()
plot_histogram(counts)
```
Since we have 8 qubits, these results correspond to measured phases of:
```
# Convert each measured bitstring into the phase estimate s/r it represents
rows, measured_phases = [], []
for output in counts:
    decimal = int(output, 2)  # Convert (base 2) string to decimal
    phase = decimal/(2**n_count)  # Find corresponding eigenvalue
    measured_phases.append(phase)
    # Add these values to the rows in our table:
    rows.append([f"{output}(bin) = {decimal:>3}(dec)",
                 f"{decimal}/{2**n_count} = {phase:.2f}"])
# Print the rows in a table
headers = ["Register Output", "Phase"]
df = pd.DataFrame(rows, columns=headers)
print(df)
```
We can now use the continued fractions algorithm to attempt to find $s$ and $r$. Python has this functionality built in: We can use the `fractions` module to turn a float into a `Fraction` object, for example:
```
# Floats are stored in binary, so Fraction(0.666) recovers the exact
# (ugly) binary fraction rather than 666/1000
Fraction(0.666)
```
Because `Fraction` represents the stored binary float exactly (and `0.666` cannot be stored exactly in binary), this can give gnarly results like the one above. We can use the `.limit_denominator()` method to get the fraction that most closely resembles our float, with denominator below a certain value:
```
# Get fraction that most closely resembles 0.666
# with denominator < 15
# (limit_denominator finds the closest fraction with a small denominator)
Fraction(0.666).limit_denominator(15)
```
Much nicer! The order (r) must be less than N, so we will set the maximum denominator to be `15`:
```
# Convert each measured phase to a fraction s/r; the denominator is the
# guess for the period r (bounded above by N = 15)
rows = []
for phase in measured_phases:
    frac = Fraction(phase).limit_denominator(15)
    rows.append([phase, f"{frac.numerator}/{frac.denominator}", frac.denominator])
# Print as a table
headers = ["Phase", "Fraction", "Guess for r"]
df = pd.DataFrame(rows, columns=headers)
print(df)
```
We can see that two of the measured eigenvalues provided us with the correct result: $r=4$, and we can see that Shor’s algorithm has a chance of failing. These bad results are because $s = 0$, or because $s$ and $r$ are not coprime and instead of $r$ we are given a factor of $r$. The easiest solution to this is to simply repeat the experiment until we get a satisfying result for $r$.
### Quick Exercise
- Modify the circuit above for values of $a = 2, 8, 11$ and $13$. What results do you get and why?
## 4. Modular Exponentiation
You may have noticed that the method of creating the $U^{2^j}$ gates by repeating $U$ grows exponentially with $j$ and will not result in a polynomial time algorithm. We want a way to create the operator:
$$ U^{2^j}|y\rangle = |a^{2^j}y \bmod N \rangle $$
that grows polynomially with $j$. Fortunately, calculating:
$$ a^{2^j} \bmod N$$
efficiently is possible. Classical computers can use an algorithm known as _repeated squaring_ to calculate an exponential. In our case, since we are only dealing with exponentials of the form $2^j$, the repeated squaring algorithm becomes very simple:
```
def a2jmodN(a, j, N):
    """Compute a^(2^j) (mod N).

    Uses Python's built-in three-argument ``pow``, which performs modular
    exponentiation by repeated squaring in C — the same O(j) squarings as
    the manual loop, without ever building the astronomically large
    intermediate power.

    This also fixes an edge case of the original loop: for ``j == 0`` the
    loop body never ran and the base was returned *unreduced* (e.g.
    a2jmodN(20, 0, 15) gave 20 instead of 5). ``pow`` always reduces mod N.

    :param a: integer base
    :param j: we raise ``a`` to the power ``2**j``
    :param N: modulus
    :returns: ``a ** (2**j) % N`` as a plain int
    """
    # 1 << j == 2**j; pow(base, exp, mod) reduces mod N at every squaring.
    return pow(a, 1 << j, N)
a2jmodN(7, 2049, 53)
```
If an efficient algorithm is possible in Python, then we can use the same algorithm on a quantum computer. Unfortunately, despite scaling polynomially with $j$, modular exponentiation circuits are not straightforward and are the bottleneck in Shor’s algorithm. A beginner-friendly implementation can be found in reference [1].
## 5. Factoring from Period Finding
Not all factoring problems are difficult; we can spot an even number instantly and know that one of its factors is 2. In fact, there are [specific criteria](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf#%5B%7B%22num%22%3A127%2C%22gen%22%3A0%7D%2C%7B%22name%22%3A%22XYZ%22%7D%2C70%2C223%2C0%5D) for choosing numbers that are difficult to factor, but the basic idea is to choose the product of two large prime numbers.
A general factoring algorithm will first check to see if there is a shortcut to factoring the integer (is the number even? Is the number of the form $N = a^b$?), before using Shor’s period finding for the worst-case scenario. Since we aim to focus on the quantum part of the algorithm, we will jump straight to the case in which N is the product of two primes.
### Example: Factoring 15
To see an example of factoring on a small number of qubits, we will factor 15, which we all know is the product of the not-so-large prime numbers 3 and 5.
```
N = 15
```
The first step is to choose a random number, $a$, between $1$ and $N-1$:
```
np.random.seed(1)  # This is to make sure we get reproducible results
# presumably numpy's randint, so the draw is from [2, 15) — TODO confirm import
a = randint(2, 15)
print(a)
```
Next we quickly check it isn't already a non-trivial factor of $N$:
```
from math import gcd # greatest common divisor
gcd(a, N)
```
Great. Next, we do Shor's order finding algorithm for `a = 7` and `N = 15`. Remember that the phase we measure will be $s/r$ where:
$$ a^r \bmod N = 1 $$
and $s$ is a random integer between 0 and $r-1$.
```
def qpe_amod15(a):
    """Run one single-shot quantum phase estimation for U|y> = |a*y mod 15>.

    Builds an 8-qubit counting register plus a 4-qubit work register,
    applies the controlled-U^(2^q) gates, an inverse QFT, and measures
    the counting register once.  Returns the measured phase as a float
    in [0, 1); ideally this approximates s/r where a^r mod 15 = 1 and
    s is some random integer in [0, r-1].

    NOTE(review): relies on the notebook-level helpers `c_amod15` and
    `qft_dagger` defined in earlier cells — not visible here.
    """
    n_count = 8  # counting qubits: phase is resolved to 1/2**8
    qc = QuantumCircuit(4+n_count, n_count)
    for q in range(n_count):
        qc.h(q)  # Initialize counting qubits in state |+>
    qc.x(3+n_count)  # And auxiliary register in state |1>
    for q in range(n_count):  # Do controlled-U operations
        # Controlled-U^(2^q): counting qubit q controls the 4 work qubits.
        qc.append(c_amod15(a, 2**q),
                  [q] + [i+n_count for i in range(4)])
    qc.append(qft_dagger(n_count), range(n_count))  # Do inverse-QFT
    qc.measure(range(n_count), range(n_count))
    # Simulate Results
    aer_sim = Aer.get_backend('aer_simulator')
    # Setting memory=True below allows us to see a list of each sequential reading
    t_qc = transpile(qc, aer_sim)
    result = aer_sim.run(t_qc, shots=1, memory=True).result()
    readings = result.get_memory()
    print("Register Reading: " + readings[0])
    # Interpret the measured bitstring as a binary fraction: reading / 2**n_count.
    phase = int(readings[0],2)/(2**n_count)
    print("Corresponding Phase: %f" % phase)
    return phase
```
From this phase, we can easily find a guess for $r$:
```
phase = qpe_amod15(a)  # Phase = s/r
# Recover r as the denominator of the best rational approximation of the
# phase (denominator capped at 15, since the order r must be < N = 15).
# The original cell computed this Fraction twice and discarded the first
# result; the dead duplicate expression has been removed.
frac = Fraction(phase).limit_denominator(15)
s, r = frac.numerator, frac.denominator
print(r)
```
Now we have $r$, we might be able to use this to find a factor of $N$. Since:
$$a^r \bmod N = 1 $$
then:
$$(a^r - 1) \bmod N = 0 $$
which means $N$ must divide $a^r-1$. And if $r$ is also even, then we can write:
$$a^r -1 = (a^{r/2}-1)(a^{r/2}+1)$$
(if $r$ is not even, we cannot go further and must try again with a different value for $a$). There is then a high probability that the greatest common divisor of $N$ and either $a^{r/2}-1$, or $a^{r/2}+1$ is a proper factor of $N$ [2]:
```
# Candidate factors of N: gcd(a^{r/2} - 1, N) and gcd(a^{r/2} + 1, N).
guesses = [gcd(a**(r//2)-1, N), gcd(a**(r//2)+1, N)]
print(guesses)
```
The cell below repeats the algorithm until at least one factor of 15 is found. You should try re-running the cell a few times to see how it behaves.
```
# Repeatedly run Shor's order finding for a = 7, N = 15 until the recovered
# order r yields a non-trivial factor of N.
a = 7
factor_found = False
attempt = 0
while not factor_found:
    attempt += 1
    print("\nAttempt %i:" % attempt)
    phase = qpe_amod15(a)  # Phase = s/r
    frac = Fraction(phase).limit_denominator(N)  # Denominator should (hopefully!) tell us r
    r = frac.denominator
    print("Result: r = %i" % r)
    # phase == 0 corresponds to s = 0, which carries no information about r.
    if phase != 0:
        # Guesses for factors are gcd(x^{r/2} ±1 , 15)
        # NOTE(review): if r is odd, r//2 rounds down and the guesses are
        # usually trivial — the loop then simply retries.
        guesses = [gcd(a**(r//2)-1, N), gcd(a**(r//2)+1, N)]
        print("Guessed Factors: %i and %i" % (guesses[0], guesses[1]))
        for guess in guesses:
            if guess not in [1,N] and (N % guess) == 0:  # Check to see if guess is a factor
                print("*** Non-trivial factor found: %i ***" % guess)
                factor_found = True
```
## 6. References
1. Stephane Beauregard, _Circuit for Shor's algorithm using 2n+3 qubits,_ [arXiv:quant-ph/0205095](https://arxiv.org/abs/quant-ph/0205095)
2. M. Nielsen and I. Chuang, _Quantum Computation and Quantum Information,_ Cambridge Series on Information and the Natural Sciences (Cambridge University Press, Cambridge, 2000). (Page 633)
```
import qiskit.tools.jupyter
%qiskit_version_table
```
| github_jupyter |
# REINFORCE in TensorFlow
Just like we did before for Q-learning, this time we'll design a TensorFlow network to learn `CartPole-v0` via policy gradient (REINFORCE).
Most of the code in this notebook is taken from approximate Q-learning, so you'll find it more or less familiar and even simpler.
```
import sys, os
if 'google.colab' in sys.modules:
%tensorflow_version 1.x
if not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week5_policy_based/submit.py
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
A caveat: we have received reports that the following cell may crash with `NameError: name 'base' is not defined`. The [suggested workaround](https://www.coursera.org/learn/practical-rl/discussions/all/threads/N2Pw652iEemRYQ6W2GuqHg/replies/te3HpQwOQ62tx6UMDoOt2Q/comments/o08gTqelT9KPIE6npX_S3A) is to install `gym==0.14.0` and `pyglet==1.3.2`.
```
env = gym.make("CartPole-v0")
# gym compatibility: unwrap TimeLimit
if hasattr(env, '_max_episode_steps'):
env = env.env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
```
# Building the network for REINFORCE
For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.
For numerical stability, please __do not include the softmax layer into your network architecture__.
We'll use softmax or log-softmax where appropriate.
```
import tensorflow as tf
sess = tf.InteractiveSession()
# create input variables. We only need <s, a, r> for REINFORCE
ph_states = tf.placeholder('float32', (None,) + state_dim, name="states")
ph_actions = tf.placeholder('int32', name="action_ids")
ph_cumulative_rewards = tf.placeholder('float32', name="cumulative_returns")
from tensorflow import keras
from tensorflow.keras import layers as L
model = keras.models.Sequential()
model.add(L.InputLayer(input_shape=state_dim))
model.add(L.Dense(128, activation='relu'))
model.add(L.Dense(128, activation='relu'))
model.add(L.Dense(n_actions, activation='linear'))
logits = model(ph_states)
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
# Initialize model parameters
sess.run(tf.global_variables_initializer())
def predict_probs(states):
    """
    Predict action probabilities given states.
    :param states: numpy array of shape [batch, state_shape]
    :returns: numpy array of shape [batch, n_actions]

    NOTE(review): despite the docstring, every call site in this notebook
    passes a SINGLE state — it is wrapped into a batch of one and the
    trailing [0] unwraps it, so in practice the return has shape
    [n_actions] for that one state.
    """
    # Evaluate the softmax policy node in the default TF1 session.
    return policy.eval({ph_states: [states]})[0]
```
### Play the game
We can now use our newly built agent to play the game.
```
def generate_session(env, t_max=1000):
    """
    Play a full session with the REINFORCE agent.

    :param env: gym-style environment (reset() -> state,
        step(a) -> (new_state, reward, done, info))
    :param t_max: hard cap on the episode length
    :returns: (states, actions, rewards) — three lists of equal length
    """
    # arrays to record session
    states, actions, rewards = [], [], []
    s = env.reset()

    for t in range(t_max):
        # action probabilities array aka pi(a|s)
        action_probs = predict_probs(s)

        # Sample an action index according to the policy.  The original
        # hard-coded the action list [0, 1]; using len(action_probs) keeps
        # this correct for any number of discrete actions while behaving
        # identically for CartPole's two actions.
        a = np.random.choice(len(action_probs), p=action_probs)
        new_s, r, done, info = env.step(a)

        # record session history to train later
        states.append(s)
        actions.append(a)
        rewards.append(r)

        s = new_s
        if done:
            break

    return states, actions, rewards
# test it
states, actions, rewards = generate_session(env)
```
### Computing cumulative rewards
$$
\begin{align*}
G_t &= r_t + \gamma r_{t + 1} + \gamma^2 r_{t + 2} + \ldots \\
&= \sum_{i = t}^T \gamma^{i - t} r_i \\
&= r_t + \gamma * G_{t + 1}
\end{align*}
$$
```
def get_cumulative_rewards(rewards,  # rewards at each step
                           gamma=0.99  # discount for reward
                           ):
    """
    Take a list of immediate rewards r(s,a) for the whole session and
    compute the cumulative discounted returns G(s,a):

        R_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...

    Computed with the standard backward recurrence
    R_t = r_t + gamma*R_{t+1}, which is O(n).

    :param rewards: iterable of per-step rewards
    :param gamma: discount factor
    :returns: list of cumulative rewards, one per input reward.
        An empty input now yields an empty list (the original indexed
        rewards[0] unconditionally and raised IndexError on []).
    """
    cumulative_rewards = []
    running_return = 0.0
    # Walk the episode backwards, folding each reward into the running return.
    for r in reversed(list(rewards)):
        running_return = float(r) + gamma * running_return
        cumulative_rewards.append(running_return)
    cumulative_rewards.reverse()
    return cumulative_rewards
assert len(get_cumulative_rewards(range(100))) == 100
assert np.allclose(get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9),
[1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
assert np.allclose(get_cumulative_rewards([0, 0, 1, -2, 3, -4, 0], gamma=0.5),
[0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])
assert np.allclose(get_cumulative_rewards([0, 0, 1, 2, 3, 4, 0], gamma=0),
[0, 0, 1, 2, 3, 4, 0])
print("looks good!")
```
#### Loss function and updates
We now need to define objective and update over policy gradient.
Our objective function is
$$ J \approx { 1 \over N } \sum_{s_i,a_i} G(s_i,a_i) $$
REINFORCE defines a way to compute the gradient of the expected reward with respect to policy parameters. The formula is as follows:
$$ \nabla_\theta \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \nabla_\theta \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
We can abuse Tensorflow's capabilities for automatic differentiation by defining our objective function as follows:
$$ \hat J(\theta) \approx { 1 \over N } \sum_{s_i, a_i} \log \pi_\theta (a_i \mid s_i) \cdot G_t(s_i, a_i) $$
When you compute the gradient of that function with respect to network weights $\theta$, it will become exactly the policy gradient.
```
# This code selects the log-probabilities (log pi(a_i|s_i)) for those actions that were actually played.
indices = tf.stack([tf.range(tf.shape(log_policy)[0]), ph_actions], axis=-1)
log_policy_for_actions = tf.gather_nd(log_policy, indices)
# Policy objective as in the last formula. Please use reduce_mean, not reduce_sum.
# You may use log_policy_for_actions to get log probabilities for actions taken.
# Also recall that we defined ph_cumulative_rewards earlier.
J = tf.reduce_mean(log_policy_for_actions * ph_cumulative_rewards)
```
As a reminder, for a discrete probability distribution (like the one our policy outputs), entropy is defined as:
$$ \operatorname{entropy}(p) = -\sum_{i = 1}^n p_i \cdot \log p_i $$
```
# Entropy regularization. If you don't add it, the policy will quickly deteriorate to
# being deterministic, harming exploration.
entropy = -tf.reduce_sum(policy * log_policy, 1, name='entropy')

# Maximizing X is the same as minimizing -X, hence the sign.
# NOTE(review): `entropy` is per-sample (shape [batch]) while J is a scalar,
# so `loss` is a vector; minimize() then effectively SUMS the entropy term,
# scaling it with batch size relative to the mean-reduced J.  Consider
# tf.reduce_mean(entropy) — confirm the intended weighting.
loss = -(J + 0.1 * entropy)

update = tf.train.AdamOptimizer().minimize(loss)
def train_on_session(states, actions, rewards, t_max=1000):
    """Given a full session, train the agent with one policy-gradient step.

    :param states: list of observed states
    :param actions: list of action indices actually taken
    :param rewards: list of immediate rewards
    :param t_max: NOTE(review) unused in the body; kept only for signature
        symmetry with generate_session
    :returns: total (undiscounted) reward of the session
    """
    cumulative_rewards = get_cumulative_rewards(rewards)
    # One Adam step on the surrogate objective, in the default TF1 session.
    update.run({
        ph_states: states,
        ph_actions: actions,
        ph_cumulative_rewards: cumulative_rewards,
    })
    return sum(rewards)
# Initialize optimizer parameters
sess.run(tf.global_variables_initializer())
```
### The actual training
```
for i in range(100):
rewards = [train_on_session(*generate_session(env)) for _ in range(100)] # generate new sessions
print("mean reward: %.3f" % (np.mean(rewards)))
if np.mean(rewards) > 300:
print("You Win!") # but you can train even further
break
```
### Results & video
```
# Record sessions
import gym.wrappers
with gym.wrappers.Monitor(gym.make("CartPole-v0"), directory="videos", force=True) as env_monitor:
sessions = [generate_session(env_monitor) for _ in range(100)]
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from IPython.display import HTML
video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(video_names[-1])) # You can also try other indices
from submit import submit_cartpole
submit_cartpole(generate_session, "rahulpathak263@gmail.com", "xKABD2rwFs5y19Zd")
```
That's all, thank you for your attention!
Not having enough? There's an actor-critic waiting for you in the honor section. But make sure you've seen the videos first.
| github_jupyter |
# TensorFlow Tutorial #03-B
# Layers API
by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)
/ [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ)
## Introduction
It is important to use a builder API when constructing Neural Networks in TensorFlow because it makes it easier to implement and modify the source-code. This also lowers the risk of bugs.
Many of the other tutorials used the TensorFlow builder API called PrettyTensor for easy construction of Neural Networks. But there are several other builder APIs available for TensorFlow. PrettyTensor was used in these tutorials, because at the time in mid-2016, PrettyTensor was the most complete and polished builder API available for TensorFlow. But PrettyTensor is only developed by a single person working at Google and although it has some unique and elegant features, it is possible that it may become deprecated in the future.
This tutorial is about a small builder API that has recently been added to TensorFlow version 1.1. It is simply called *Layers* or the *Layers API* or by its Python name `tf.layers`. This builder API is automatically installed as part of TensorFlow, so you no longer have to install a separate Python package as was needed with PrettyTensor.
This tutorial is very similar to Tutorial #03 on PrettyTensor and shows how to implement the same Convolutional Neural Network using the Layers API. It is recommended that you are familiar with Tutorial #02 on Convolutional Neural Networks.
## Flowchart
The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. See Tutorial #02 for a more detailed description of convolution.

The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled using max-pooling so the image resolution is decreased from 28x28 to 14x14.
These 16 smaller images are then processed in the second convolutional layer. We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are also down-sampled using max-pooling to 7x7 pixels.
The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image.
The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low.
These particular filter-weights and intermediate images are the results of one optimization run and may look different if you re-run this Notebook.
Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow.
## Imports
```
%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import math
```
This was developed using Python 3.6 (Anaconda) and TensorFlow version:
```
tf.__version__
```
## Load Data
The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.
```
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
```
The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial.
```
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
```
The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now.
```
data.test.cls = np.argmax(data.test.labels, axis=1)
```
## Data Dimensions
The data dimensions are used in several places in the source-code below. They are defined once so we can use these variables instead of numbers throughout the source-code below.
```
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1
# Number of classes, one class for each of 10 digits.
num_classes = 10
```
### Helper-function for plotting images
Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.
```
def plot_images(images, cls_true, cls_pred=None):
    """Show 9 images in a 3x3 grid, writing the true class — and, when
    given, the predicted class — beneath each image."""
    assert len(images) == len(cls_true) == 9

    # 3x3 grid of sub-plots with a little breathing room between them.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for idx, axis in enumerate(axes.flat):
        # Images are stored flattened; restore the 2-D shape for display.
        axis.imshow(images[idx].reshape(img_shape), cmap='binary')

        # Label with the true class, plus the prediction when available.
        if cls_pred is not None:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx])
        else:
            xlabel = "True: {0}".format(cls_true[idx])
        axis.set_xlabel(xlabel)

        # Axis ticks carry no information here; drop them.
        axis.set_xticks([])
        axis.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
```
### Plot a few images to see if data is correct
```
# Get the first images from the test-set.
images = data.test.images[0:9]
# Get the true classes for those images.
cls_true = data.test.cls[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
```
## TensorFlow Graph
The entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.
TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.
TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.
A TensorFlow graph consists of the following parts which will be detailed below:
* Placeholder variables used for inputting data to the graph.
* Variables that are going to be optimized so as to make the convolutional network perform better.
* The mathematical formulas for the convolutional neural network.
* A so-called cost-measure or loss-function that can be used to guide the optimization of the variables.
* An optimization method which updates the variables.
In addition, the TensorFlow graph may also contain various debugging statements e.g. for logging data to be displayed using TensorBoard, which is not covered in this tutorial.
## Placeholder variables
Placeholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.
First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.
```
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
```
The convolutional layers expect `x` to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is:
```
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
```
Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case.
```
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
```
We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.
```
y_true_cls = tf.argmax(y_true, dimension=1)
```
## PrettyTensor Implementation
This section shows the implementation of a Convolutional Neural Network using PrettyTensor taken from Tutorial #03 so it can be compared to the implementation using the Layers API below. This code has been enclosed in an `if False:` block so it does not run here.
The basic idea is to wrap the input tensor `x_image` in a PrettyTensor object which has helper-functions for adding new computational layers so as to create an entire Convolutional Neural Network. This is a fairly simple and elegant syntax.
```
if False:
x_pretty = pt.wrap(x_image)
with pt.defaults_scope(activation_fn=tf.nn.relu):
y_pred, loss = x_pretty.\
conv2d(kernel=5, depth=16, name='layer_conv1').\
max_pool(kernel=2, stride=2).\
conv2d(kernel=5, depth=36, name='layer_conv2').\
max_pool(kernel=2, stride=2).\
flatten().\
fully_connected(size=128, name='layer_fc1').\
softmax_classifier(num_classes=num_classes, labels=y_true)
```
## Layers Implementation
We now implement the same Convolutional Neural Network using the Layers API that is included in TensorFlow version 1.1. This requires more code than PrettyTensor, although a lot of the following are just comments.
We use the `net`-variable to refer to the last layer while building the Neural Network. This makes it easy to add or remove layers in the code if you want to experiment. First we set the `net`-variable to the reshaped input image.
```
net = x_image
```
The input image is then input to the first convolutional layer, which has 16 filters each of size 5x5 pixels. The activation-function is the Rectified Linear Unit (ReLU) described in more detail in Tutorial #02.
```
net = tf.layers.conv2d(inputs=net, name='layer_conv1', padding='same',
filters=16, kernel_size=5, activation=tf.nn.relu)
```
One of the advantages of constructing neural networks in this fashion, is that we can now easily pull out a reference to a layer. This was more complicated in PrettyTensor.
Further below we want to plot the output of the first convolutional layer, so we create another variable for holding a reference to that layer.
```
layer_conv1 = net
```
We now do the max-pooling on the output of the convolutional layer. This was also described in more detail in Tutorial #02.
```
net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
```
We now add the second convolutional layer which has 36 filters each with 5x5 pixels, and a ReLU activation function again.
```
net = tf.layers.conv2d(inputs=net, name='layer_conv2', padding='same',
filters=36, kernel_size=5, activation=tf.nn.relu)
```
We also want to plot the output of this convolutional layer, so we keep a reference for later use.
```
layer_conv2 = net
```
The output of the second convolutional layer is also max-pooled for down-sampling the images.
```
net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
```
The tensors that are being output by this max-pooling are 4-rank, as can be seen from this:
```
net
```
Next we want to add fully-connected layers to the Neural Network, but these require 2-rank tensors as input, so we must first flatten the tensors.
The `tf.layers` API was first located in `tf.contrib.layers` before it was moved into TensorFlow Core. But even though it has taken the TensorFlow developers a year to move these fairly simple functions, they have somehow forgotten to move the even simpler `flatten()` function. So we still need to use the one in `tf.contrib.layers`.
```
net = tf.contrib.layers.flatten(net)
# This should eventually be replaced by:
# net = tf.layers.flatten(net)
```
This has now flattened the data to a 2-rank tensor, as can be seen from this:
```
net
```
We can now add fully-connected layers to the neural network. These are called *dense* layers in the Layers API.
```
net = tf.layers.dense(inputs=net, name='layer_fc1',
units=128, activation=tf.nn.relu)
```
We need the neural network to classify the input images into 10 different classes. So the final fully-connected layer has `num_classes=10` output neurons.
```
net = tf.layers.dense(inputs=net, name='layer_fc_out',
units=num_classes, activation=None)
```
The output of the final fully-connected layer are sometimes called logits, so we have a convenience variable with that name.
```
logits = net
```
We use the softmax function to 'squash' the outputs so they are between zero and one, and so they sum to one.
```
y_pred = tf.nn.softmax(logits=logits)
```
This tells us how likely the neural network thinks the input image is of each possible class. The one that has the highest value is considered the most likely so its index is taken to be the class-number.
```
y_pred_cls = tf.argmax(y_pred, dimension=1)
```
We have now created the exact same Convolutional Neural Network in a few lines of code that required many complex lines of code in the direct TensorFlow implementation.
The Layers API is perhaps not as elegant as PrettyTensor, but it has some other advantages, e.g. that we can more easily refer to intermediate layers, and it is also easier to construct neural networks with branches and multiple outputs using the Layers API.
### Loss-Function to be Optimized
To make the model better at classifying the input images, we must somehow change the variables of the Convolutional Neural Network.
The cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the variables of the model.
TensorFlow has a function for calculating the cross-entropy, which uses the values of the `logits`-layer because it also calculates the softmax internally, so as to improve numerical stability.
```
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits)
```
We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.
```
loss = tf.reduce_mean(cross_entropy)
```
### Optimization Method
Now that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the Adam optimizer with a learning-rate of 1e-4.
Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.
```
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
```
### Classification Accuracy
We need to calculate the classification accuracy so we can report progress to the user.
First we create a vector of booleans telling us whether the predicted class equals the true class of each image.
```
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
```
The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.
```
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
```
### Getting the Weights
Further below, we want to plot the weights of the convolutional layers. In the TensorFlow implementation we had created the variables ourselves so we could just refer to them directly. But when the network is constructed using a builder API such as `tf.layers`, all the variables of the layers are created indirectly by the builder API. We therefore have to retrieve the variables from TensorFlow.
First we need a list of the variable names in the TensorFlow graph:
```
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
print(var)
```
Each of the convolutional layers has two variables. For the first convolutional layer they are named `layer_conv1/kernel:0` and `layer_conv1/bias:0`. The `kernel` variables are the ones we want to plot further below.
It is somewhat awkward to get references to these variables, because we have to use the TensorFlow function `get_variable()` which was designed for another purpose; either creating a new variable or re-using an existing variable. The easiest thing is to make the following helper-function.
```
def get_weights_variable(layer_name):
    """Return the existing 'kernel' variable from the given layer's scope.

    tf.get_variable() is really intended for creating or re-using
    variables, which makes this lookup a little awkward, but with
    reuse=True it doubles as a way to fetch an existing variable.
    """
    with tf.variable_scope(layer_name, reuse=True):
        return tf.get_variable('kernel')
```
Using this helper-function we can retrieve the variables. These are TensorFlow objects. In order to get the contents of the variables, you must do something like: `contents = session.run(weights_conv1)` as demonstrated further below.
```
# TensorFlow variable objects for the kernels of the two conv-layers.
# To get their actual contents, run e.g. session.run(weights_conv1).
weights_conv1 = get_weights_variable(layer_name='layer_conv1')
weights_conv2 = get_weights_variable(layer_name='layer_conv2')
```
## TensorFlow Run
### Create TensorFlow session
Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.
```
session = tf.Session()
```
### Initialize variables
The variables for the TensorFlow graph must be initialized before we start optimizing them.
```
# Initialize all graph variables (weights and biases) before optimizing.
session.run(tf.global_variables_initializer())
```
### Helper-function to perform optimization iterations
There are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.
If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to do more optimization iterations.
```
train_batch_size = 64
```
This function performs a number of optimization iterations so as to gradually improve the variables of the neural network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations.
```
# Counter for total number of iterations performed so far.
total_iterations = 0
def optimize(num_iterations):
    """Run num_iterations optimization steps on random training batches.

    Prints the training-set accuracy every 100 iterations and advances
    the module-level iteration counter.
    """
    # Update the module-level counter, not a local copy.
    global total_iterations
    start = total_iterations
    for i in range(start, start + num_iterations):
        # Fetch the next batch of training images and their true labels.
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)
        # Map the batch onto the graph's placeholder variables.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}
        # One gradient-descent step using this batch.
        session.run(optimizer, feed_dict=feed_dict_train)
        # Report progress every 100 iterations.
        if i % 100 == 0:
            # Accuracy on the current training batch only.
            acc = session.run(accuracy, feed_dict=feed_dict_train)
            print("Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}".format(i + 1, acc))
    # Update the total number of iterations performed.
    total_iterations += num_iterations
```
### Helper-function to plot example errors
Function for plotting examples of images from the test-set that have been mis-classified.
```
def plot_example_errors(cls_pred, correct):
    """Plot up to 9 mis-classified images from the test-set.

    Called from print_test_accuracy() below.
    cls_pred: array of predicted class-numbers for all test images.
    correct: boolean array saying whether each prediction matched the
        true class.
    """
    # Negate the boolean array idiomatically (equivalent to the old
    # `correct == False`, which flake8 flags as E712).
    incorrect = np.logical_not(correct)
    # Images from the test-set that were incorrectly classified.
    images = data.test.images[incorrect]
    # Predicted and true classes for those images.
    cls_pred = cls_pred[incorrect]
    cls_true = data.test.cls[incorrect]
    # Plot the first 9 mis-classified images.
    plot_images(images=images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])
```
### Helper-function to plot confusion matrix
```
def plot_confusion_matrix(cls_pred):
    """Print and plot the confusion matrix for the test-set predictions.

    Called from print_test_accuracy() below. cls_pred is an array of
    the predicted class-number for all images in the test-set.
    """
    # Confusion matrix of true vs. predicted classes (via sklearn).
    cm = confusion_matrix(y_true=data.test.cls,
                          y_pred=cls_pred)
    # Show the raw counts as text first.
    print(cm)
    # Then render the matrix as an image with a colour bar.
    plt.matshow(cm)
    plt.colorbar()
    ticks = np.arange(num_classes)
    plt.xticks(ticks, range(num_classes))
    plt.yticks(ticks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
```
### Helper-function for showing the performance
Below is a function for printing the classification accuracy on the test-set.
It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.
Note that this function can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size.
```
# Split the test-set into smaller batches of this size.
test_batch_size = 256
def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):
    """Compute and print the classification accuracy on the test-set.

    Predictions are computed in batches of test_batch_size images to
    limit memory use, and are re-used for the optional error and
    confusion-matrix plots so nothing is classified twice.
    """
    # Number of images in the test-set.
    num_test = len(data.test.images)
    # Pre-allocate the array of predicted classes, filled batch by batch.
    # NOTE: the original used dtype=np.int, a deprecated alias removed in
    # NumPy 1.24; the builtin int is the direct replacement.
    cls_pred = np.zeros(shape=num_test, dtype=int)
    # i is the start index of the next batch.
    i = 0
    while i < num_test:
        # j is the (exclusive) end index of the next batch.
        j = min(i + test_batch_size, num_test)
        # Slice out the images and labels for this batch.
        images = data.test.images[i:j, :]
        labels = data.test.labels[i:j, :]
        # Feed-dict mapping the batch onto the graph placeholders.
        feed_dict = {x: images,
                     y_true: labels}
        # Predicted classes for this batch.
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
        # The next batch starts where this one ended.
        i = j
    # Convenience variable for the true class-numbers of the test-set.
    cls_true = data.test.cls
    # Boolean array: was each image classified correctly?
    correct = (cls_true == cls_pred)
    # Summing a boolean array counts the True entries.
    correct_sum = correct.sum()
    # Fraction of the test-set classified correctly.
    acc = float(correct_sum) / num_test
    # Print the accuracy.
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))
    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)
    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)
```
## Performance before any optimization
The accuracy on the test-set is very low because the variables for the neural network have only been initialized and not optimized at all, so it just classifies the images randomly.
```
print_test_accuracy()
```
## Performance after 1 optimization iteration
The classification accuracy does not improve much from just 1 optimization iteration, because the learning-rate for the optimizer is set very low.
```
optimize(num_iterations=1)
print_test_accuracy()
```
## Performance after 100 optimization iterations
After 100 optimization iterations, the model has significantly improved its classification accuracy.
```
%%time
optimize(num_iterations=99) # We already performed 1 iteration above.
print_test_accuracy(show_example_errors=True)
```
## Performance after 1000 optimization iterations
After 1000 optimization iterations, the model has greatly increased its accuracy on the test-set to more than 90%.
```
%%time
optimize(num_iterations=900) # We performed 100 iterations above.
print_test_accuracy(show_example_errors=True)
```
## Performance after 10,000 optimization iterations
After 10,000 optimization iterations, the model has a classification accuracy on the test-set of about 99%.
```
%%time
optimize(num_iterations=9000) # We performed 1000 iterations above.
print_test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
```
## Visualization of Weights and Layers
### Helper-function for plotting convolutional weights
```
def plot_conv_weights(weights, input_channel=0):
    """Plot the filters of a convolutional layer for one input channel.

    weights is a TensorFlow op for a 4-dim weights variable,
    e.g. weights_conv1 or weights_conv2.
    """
    # Fetch the weight values from TensorFlow. No feed-dict is needed
    # because nothing has to be calculated.
    w = session.run(weights)
    # Shared colour scale so the filter images can be compared.
    w_min, w_max = np.min(w), np.max(w)
    # The last dimension holds the number of filters in the layer.
    num_filters = w.shape[3]
    # Grid size: rounded-up square root of the filter count.
    grid_size = math.ceil(math.sqrt(num_filters))
    # Create figure with a grid of sub-plots.
    fig, axes = plt.subplots(grid_size, grid_size)
    for idx, ax in enumerate(axes.flat):
        # Sub-plots beyond num_filters stay empty.
        if idx < num_filters:
            # Weights for the idx'th filter of the chosen input channel.
            img = w[:, :, input_channel, idx]
            ax.imshow(img, vmin=w_min, vmax=w_max,
                      interpolation='nearest', cmap='seismic')
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
```
### Helper-function for plotting the output of a convolutional layer
```
def plot_conv_layer(layer, image):
    """Plot each filter's output image for one input image.

    layer is a TensorFlow op that outputs a 4-dim tensor,
    e.g. layer_conv1 or layer_conv2.
    """
    # Feed just this one image. y_true is not needed because it is
    # not used in this calculation.
    feed_dict = {x: [image]}
    # Output values of the layer for the given input image.
    values = session.run(layer, feed_dict=feed_dict)
    # The last dimension holds the number of filters in the layer.
    num_filters = values.shape[3]
    # Grid size: rounded-up square root of the filter count.
    grid_size = math.ceil(math.sqrt(num_filters))
    # Create figure with a grid of sub-plots.
    fig, axes = plt.subplots(grid_size, grid_size)
    for idx, ax in enumerate(axes.flat):
        # Sub-plots beyond num_filters stay empty.
        if idx < num_filters:
            # Output image produced by the idx'th filter.
            ax.imshow(values[0, :, :, idx],
                      interpolation='nearest', cmap='binary')
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
```
### Input Images
Helper-function for plotting an image.
```
def plot_image(image):
    """Show a single flattened image, reshaped to img_shape, in greyscale."""
    plt.imshow(image.reshape(img_shape),
               interpolation='nearest',
               cmap='binary')
    plt.show()
```
Plot an image from the test-set which will be used as an example below.
```
image1 = data.test.images[0]
plot_image(image1)
```
Plot another example image from the test-set.
```
image2 = data.test.images[13]
plot_image(image2)
```
### Convolution Layer 1
Now plot the filter-weights for the first convolutional layer.
Note that positive weights are red and negative weights are blue.
```
plot_conv_weights(weights=weights_conv1)
```
Applying each of these convolutional filters to the first input image gives the following output images, which are then used as input to the second convolutional layer.
```
plot_conv_layer(layer=layer_conv1, image=image1)
```
The following images are the results of applying the convolutional filters to the second image.
```
plot_conv_layer(layer=layer_conv1, image=image2)
```
### Convolution Layer 2
Now plot the filter-weights for the second convolutional layer.
There are 16 output channels from the first conv-layer, which means there are 16 input channels to the second conv-layer. The second conv-layer has a set of filter-weights for each of its input channels. We start by plotting the filter-weights for the first channel.
Note again that positive weights are red and negative weights are blue.
```
plot_conv_weights(weights=weights_conv2, input_channel=0)
```
There are 16 input channels to the second convolutional layer, so we can make another 15 plots of filter-weights like this. We just make one more with the filter-weights for the second channel.
```
plot_conv_weights(weights=weights_conv2, input_channel=1)
```
It can be difficult to understand and keep track of how these filters are applied because of the high dimensionality.
Applying these convolutional filters to the images that were output from the first conv-layer gives the following images.
Note that these are down-sampled to 14 x 14 pixels which is half the resolution of the original input images, because the first convolutional layer was followed by a max-pooling layer with stride 2. Max-pooling is also done after the second convolutional layer, but we retrieve these images before that has been applied.
```
plot_conv_layer(layer=layer_conv2, image=image1)
```
And these are the results of applying the filter-weights to the second image.
```
plot_conv_layer(layer=layer_conv2, image=image2)
```
### Close TensorFlow Session
We are now done using TensorFlow, so we close the session to release its resources.
```
# This has been commented out in case you want to modify and experiment
# with the Notebook without having to restart it.
# session.close()
```
## Conclusion
This tutorial showed how to use the so-called *Layers API* for easily building Convolutional Neural Networks in TensorFlow. The syntax is different and more verbose than that of PrettyTensor. Both builder API's have advantages and disadvantages, but since PrettyTensor is only developed by one person and the Layers API is now an official part of TensorFlow Core, it is possible that PrettyTensor will become deprecated in the future. If this happens, we might hope that some of its unique and elegant features will become integrated into TensorFlow Core as well.
I have been trying to get a clear answer from the TensorFlow developers for almost a year, on which of their APIs will be the main builder API for TensorFlow. They still seem to be undecided and very slow to implement it.
## Exercises
These are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.
You may want to backup this Notebook before making any changes.
* Change the activation function to sigmoid for some of the layers.
* Can you find a simple way of changing the activation function for all the layers?
* Add a dropout-layer after the fully-connected layer. If you want a different probability during training and testing then you will need a placeholder variable and set it in the feed-dict.
* Plot the output of the max-pooling layers instead of the conv-layers.
* Replace the 2x2 max-pooling layers with stride=2 in the convolutional layers. Is there a difference in classification accuracy? What if you optimize it again and again? The difference is random, so how would you measure if there really is a difference? What are the pros and cons of using max-pooling vs. stride in the conv-layer?
* Change the parameters for the layers, e.g. the kernel, depth, size, etc. What is the difference in time usage and classification accuracy?
* Add and remove some convolutional and fully-connected layers.
* What is the simplest network you can design that still performs well?
* Retrieve the bias-values for the convolutional layers and print them. See `get_weights_variable()` for inspiration.
* Remake the program yourself without looking too much at this source-code.
* Explain to a friend how the program works.
## License (MIT)
Copyright (c) 2016-2017 by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| github_jupyter |
# bulbea
> Deep Learning based Python Library for Stock Market Prediction and Modelling

A canonical way of importing the `bulbea` module is as follows:
```
import bulbea as bb
```
### `bulbea.Share`
In order to analyse a desired share, we use the `Share` object defined under `bulbea` which considers 2 arguments - *the **source code** for the economic data* and *the **ticker symbol** for a said company*.
```
source, ticker = 'YAHOO', 'INDEX_GSPC'
```
Go ahead and create a `Share` object as follows:
```
share = bb.Share(source, ticker)
```
By default, a `Share` object for a said source and symbol provides you historical data since a company's inception, as a `pandas.DataFrame` object. In order to access the same, use the `Share` object's member variable - `data` as follows:
```
data = share.data
nsamples = 5
data.tail(nsamples)
```
In order to analyse a given attribute, you could plot the same as follows:
```
figsize = (20, 15)
% matplotlib inline
share.plot(figsize = figsize)
share.plot(['Close', 'Adjusted Close'], figsize = figsize)
```
### Statistics
#### Global Mean
In order to plot the **global mean** of the stock, we could do the same as follows:
```
share.plot(figsize = (20, 15), global_mean = True)
```
#### Moving Averages and Bollinger Bands (R)
```
bands = share.bollinger_bands(period = 50, bandwidth = 2)
bands.tail(nsamples)
share.plot(['Close', 'Adjusted Close'], figsize = (20, 15), bollinger_bands = True, period = 100, bandwidth = 2)
```
### Training & Testing
```
from bulbea.learn.evaluation import split
scaler, Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize = True)
import numpy as np
Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
Xtest = np.reshape(Xtest, ( Xtest.shape[0], Xtest.shape[1], 1))
```
### Modelling
```
layers = [1, 100, 100, 1] # number of neurons in each layer
nbatch = 512
epochs = 5
nvalidation = 0.05
from bulbea.learn.models import RNN
from bulbea.learn.models.ann import RNNCell
rnn = RNN(layers, cell = RNNCell.LSTM)
```
#### TRAINING
```
rnn.fit(Xtrain, ytrain,
batch_size = nbatch,
nb_epoch = epochs,
validation_split = nvalidation)
```
#### TESTING
```
predicted = rnn.predict(Xtest)
from sklearn.metrics import mean_squared_error
mean_squared_error(ytest, predicted)
from bulbea.entity.share import _plot_bollinger_bands
import pandas as pd
import matplotlib.pyplot as pplt
figsize = (20, 15)
figure = pplt.figure(figsize = figsize)
axes = figure.add_subplot(111)
series = pd.Series(data = scaler.inverse_transform(ytest))
# axes.plot(scaler.inverse_transform(ytest))
axes.plot(scaler.inverse_transform(predicted))
_plot_bollinger_bands(series, axes, bandwidth = 10)
```
### Sentiment Analysis
```
s = bb.sentiment(share)
s
```
| github_jupyter |
These exercises accompany the tutorial on [lists and tuples](https://www.kaggle.com/colinmorris/lists).
As always be sure to run the setup code below before working on the questions (and if you leave this notebook and come back later, don't forget to run the setup code again).
```
# SETUP. You don't need to worry for now about what this code does or how it works. If you're ever curious about the
# code behind these exercises, it's available under an open source license here: https://github.com/Kaggle/learntools/
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex4 import *
print('Setup complete.')
```
# Exercises
## 1.
Complete the function below according to its docstring.
```
def select_second(L):
    """Return the second element of the given list. If the list has no second
    element, return None.
    """
    # A list with fewer than two items has no second element.
    if len(L) < 2:
        return None
    return L[1]
q1.check()
#q1.hint()
#q1.solution()
```
## 2.
You are analyzing sports teams. Members of each team are stored in a list. The Coach is the first name in the list, the captain is the second name in the list, and other players are listed after that.
These lists are stored in another list, which starts with the best team and proceeds through the list to the worst team last. Complete the function below to select the **captain** of the worst team.
```
def losing_team_captain(teams):
    """Given a list of teams, where each team is a list of names, return the 2nd player (captain)
    from the last listed team
    """
    # The worst team is listed last; its captain is the second name.
    return teams[-1][1]
q2.check()
#q2.hint()
#q2.solution()
```
## 3.
The next iteration of Mario Kart will feature an extra-infuriating new item, the *Purple Shell*. When used, it warps the last place racer into first place and the first place racer into last place. Complete the function below to implement the Purple Shell's effect.
```
def purple_shell(racers):
    """Given a list of racers, set the first place racer (at the front of the list) to last
    place and vice versa.
    >>> r = ["Mario", "Bowser", "Luigi"]
    >>> purple_shell(r)
    >>> r
    ["Luigi", "Bowser", "Mario"]
    """
    # Swap first and last in place; mutates the caller's list, returns None.
    racers[0], racers[-1] = racers[-1], racers[0]
q3.check()
#q3.hint()
#q3.solution()
```
## 4.
What are the lengths of the following lists? Fill in the variable `lengths` with your predictions. (Try to make a prediction for each list *without* just calling `len()` on it.)
```
a = [1, 2, 3]       # three elements
b = [1, [2, 3]]     # two elements: 1 and the nested list [2, 3]
c = []              # empty list
d = [1, 2, 3][1:]   # the slice drops the first element, leaving two
# Put your predictions in the list below. Lengths should contain 4 numbers, the
# first being the length of a, the second being the length of b and so on.
lengths = [3, 2, 0, 2]
q4.check()
# line below provides some explanation
#q4.solution()
```
## 5. <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
We're using lists to record people who attended our party and what order they arrived in. For example, the following list represents a party with 7 guests, in which Adela showed up first and Ford was the last to arrive:
party_attendees = ['Adela', 'Fleda', 'Owen', 'May', 'Mona', 'Gilbert', 'Ford']
A guest is considered 'fashionably late' if they arrived after at least half of the party's guests. However, they must not be the very last guest (that's taking it too far). In the above example, Mona and Gilbert are the only guests who were fashionably late.
Complete the function below which takes a list of party attendees as well as a person, and tells us whether that person is fashionably late.
```
def fashionably_late(arrivals, name):
    """Given an ordered list of arrivals to the party and a name, return whether the guest with that
    name was fashionably late.
    """
    # Fashionably late: arrived after at least half of the guests,
    # but was not the very last to arrive.
    order = arrivals.index(name)
    return order >= len(arrivals) / 2 and order != len(arrivals) - 1
q5.check()
#q5.hint()
#q5.solution()
```
## 6. <span style="color: firebrick" title="This is an optional extra-spicy question">🌶️🌶️🌶️</span>
*This question is intended more as a fun riddle than a test of your programming prowess :)*
Implement the body of the following function **using only tools we've covered so far - NO LOOPS!**
```
def count_negatives(nums):
    """Return the number of negative numbers in the given list.
    >>> count_negatives([5, -1, -2, 0, 3])
    2
    """
    # No explicit loop: booleans are ints, so summing the comparisons
    # counts the negative entries.
    return sum(n < 0 for n in nums)
#q6.check()
#q6.hint()
#q6.solution()
```
That's it for lists and tuples! If you have any questions or feedback (or just want to argue about whether Pluto should be a planet), head over to the [forums](https://kaggle.com/learn-forum).
Remember that your notebook is private by default, and in order to share it with other people or ask for help with it, you'll need to make it public. First, you'll need to save a version of your notebook that shows your current work by hitting the "Commit & Run" button. (Your work is saved automatically, but versioning your work lets you go back and look at what it was like at the point you saved it. It also lets you share a nice compiled notebook instead of just the raw code.) Then, once your notebook is finished running, you can go to the Settings tab in the panel to the left (you may have to expand it by hitting the [<] button next to the "Commit & Run" button) and setting the "Visibility" dropdown to "Public".
# Keep Going
When you're ready to continue, [click here](https://www.kaggle.com/colinmorris/loops-and-list-comprehensions) to continue on to the next tutorial on loops and list comprehensions.
| github_jupyter |
```
%reload_ext nb_black
# creating supervised learning imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.metrics import (
classification_report,
confusion_matrix,
)
# Due to time constraints and such a large data set, I subsampled my data to work with a smaller set for preprocessing
final_df = pd.read_csv("./cleaned_data.csv")
# final_df = final_df.sample(80000, random_state=13)
final_df.head()
```
# Additional EDA
```
# Do not think event_id, app_id are necessary
final_df = final_df.drop(columns=["app_id"])
# Need to convert these values to numeric values
# Considering:
# M22- : 0
# M23-26 : 1
# M27-28 : 2
# M29-31 : 3
# M32-38 : 4
# M39+ : 5
# F23- : 6
# F24-26 : 7
# F27-28 : 8
# F29-32 : 9
# F33-42 : 10
# F43+ : 11
final_df["group"].value_counts()
# Converting object data to numeric data in group column
# Each label encodes gender (M/F) plus an age bracket; 12 classes total.
to_num = {
"M22-": 0,
"M23-26": 1,
"M27-28": 2,
"M29-31": 3,
"M32-38": 4,
"M39+": 5,
"F23-": 6,
"F24-26": 7,
"F27-28": 8,
"F29-32": 9,
"F33-42": 10,
"F43+": 11,
}
# Created new column for numeric group, can now drop old group column
# NOTE(review): labels missing from to_num would map to NaN — the
# value_counts() above should confirm all 12 groups are covered.
final_df["num_group"] = final_df["group"].map(to_num)
final_df = final_df.drop(columns=["group"])
# Preview the transformed frame.
final_df.head()
num_cols = ["device_id", "event_id"]
# splitting up the data and choosing num_group as target variable
X = final_df.drop(columns=["num_group"])
y = final_df["num_group"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=13, stratify=y
)
# set up preprocessing for pipeline (used to scale features)
preprocessing = ColumnTransformer(
[("scale", StandardScaler(), num_cols),], remainder="passthrough",
)
```
# Original Models
### KNearestNeighbor Classifier
```
pipeline = Pipeline(
[("preprocessing", preprocessing), ("knn", KNeighborsClassifier())], verbose=True
)
pipeline.fit(X_train, y_train)
# Due to shortage of time I decreased the amount of parameters and lowered the cv
grid = {
"knn__n_neighbors": [50, 100, 150],
"knn__weights": ["distance"],
"knn__leaf_size": [1, 10, 20],
}
model = GridSearchCV(pipeline, grid, n_jobs=-1, verbose=2, cv=2)
model.fit(X_train, y_train)
# It seems like the higher the amount of n_neighbors the better the score, for next test, will increase n_neighbors
model.best_params_
# Model still appears to be extremely overfitting
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print(f"Train score: {train_score}")
print(f"Test score: {test_score}")
y_pred = model.predict(X_test)
con_mat = pd.DataFrame(
confusion_matrix(y_test, y_pred),
index=[
"Actually M22-",
"Actually M23-26",
"Actually M27-28",
"Actually M29-31",
"Actually M32-38",
"Actually M39+",
"Actually F23-",
"Actually F24-26",
"Actually F27-28",
"Actually F29-32",
"Actually F33-42",
"Actually F43+",
],
columns=[
"Predicted M22-",
"Predicted M23-26",
"Predicted M27-28",
"Predicted M29-31",
"Predicted M32-38",
"Predicted M39+",
"Predicted F23-",
"Predicted F24-26",
"Predicted F27-28",
"Predicted F29-32",
"Predicted F33-42",
"Predicted F43+",
],
)
con_mat.style.background_gradient(axis=None)
print(classification_report(y_test, y_pred))
```
### Random Forest Classifier
```
pipeline = Pipeline(
[("preprocessing", preprocessing), ("rf", RandomForestClassifier())], verbose=True
)
pipeline.fit(X_train, y_train)
# Max depth of 50 provides a better score, this is much better than the previous score of around 0.2.
# To improve score suggest increasing max depth even more
grid = {
"rf__max_depth": [50, 70, 90, 110],
"rf__n_estimators": [1, 10, 100],
"rf__min_samples_leaf": [1, 3, 5, 7],
"rf__criterion": ["gini"],
}
model = GridSearchCV(pipeline, grid, n_jobs=-1, verbose=True)
model.fit(X_train, y_train)
model.best_params_
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print(f"Train score: {train_score}")
print(f"Test score: {test_score}")
y_pred = model.predict(X_test)
con_mat = pd.DataFrame(
confusion_matrix(y_test, y_pred),
index=[
"Actually M22-",
"Actually M23-26",
"Actually M27-28",
"Actually M29-31",
"Actually M32-38",
"Actually M39+",
"Actually F23-",
"Actually F24-26",
"Actually F27-28",
"Actually F29-32",
"Actually F33-42",
"Actually F43+",
],
columns=[
"Predicted M22-",
"Predicted M23-26",
"Predicted M27-28",
"Predicted M29-31",
"Predicted M32-38",
"Predicted M39+",
"Predicted F23-",
"Predicted F24-26",
"Predicted F27-28",
"Predicted F29-32",
"Predicted F33-42",
"Predicted F43+",
],
)
con_mat.style.background_gradient(axis=None)
print(classification_report(y_test, y_pred))
```
# Models with PCA
```
# Since I am performing pca its necessary to standardize data and fit it to data frame.
scaler = StandardScaler()
X_train_std = scaler.fit_transform(X_train)
# Computing the principal components
pca = PCA()
X_pca = pca.fit_transform(X_train_std)
# I plan to use the amount of components that add up to 90% variance.
n_components = np.sum(np.cumsum(pca.explained_variance_ratio_) < 0.90)
X_pca = X_pca[:, :n_components]
# check to see the amount of components being used
n_components
```
### KNearestNeighbor Classifier
```
# Class labels shared by the confusion-matrix index/columns below.
# Order must match the label encoding used by the classifier.
age_groups = [
    "M22-", "M23-26", "M27-28", "M29-31", "M32-38", "M39+",
    "F23-", "F24-26", "F27-28", "F29-32", "F33-42", "F43+",
]
# Scale -> PCA -> KNN as one pipeline, so preprocessing is re-fit on each
# cross-validation training fold (no information leakage across folds).
pipeline = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("pca", PCA(n_components=n_components)),
        ("knn", KNeighborsClassifier()),
    ]
)
grid = {
    "knn__n_neighbors": [50, 100, 150],
    "knn__weights": ["distance"],
    "knn__leaf_size": [1, 10, 20],
}
# NOTE(review): the original pre-fit the pipeline before the grid search;
# GridSearchCV clones and refits it internally, so that step was redundant
# wasted computation and has been removed.
model = GridSearchCV(pipeline, grid, n_jobs=-1, cv=2, verbose=2)
model.fit(X_train, y_train)
model.best_params_
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print(f"Train score: {train_score}")
print(f"Test score: {test_score}")
y_pred = model.predict(X_test)
# Confusion matrix with readable row/column labels.
con_mat = pd.DataFrame(
    confusion_matrix(y_test, y_pred),
    index=[f"Actually {g}" for g in age_groups],
    columns=[f"Predicted {g}" for g in age_groups],
)
con_mat.style.background_gradient(axis=None)
print(classification_report(y_test, y_pred))
```
### Random Forest Classifier
```
# Class labels shared by the confusion-matrix index/columns below.
# Order must match the label encoding used by the classifier.
age_groups = [
    "M22-", "M23-26", "M27-28", "M29-31", "M32-38", "M39+",
    "F23-", "F24-26", "F27-28", "F29-32", "F33-42", "F43+",
]
# Scale -> PCA -> random forest as one pipeline, so preprocessing is re-fit
# on each cross-validation training fold (no information leakage).
pipeline = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("pca", PCA(n_components=n_components)),
        ("rf", RandomForestClassifier()),
    ]
)
grid = {
    "rf__max_depth": [90, 110, 130],
    "rf__n_estimators": [1, 10, 100],
    "rf__min_samples_leaf": [1, 3, 5, 7],
    "rf__criterion": ["gini"],
}
# NOTE(review): the original pre-fit the pipeline before the grid search;
# GridSearchCV clones and refits it internally, so that step was redundant
# wasted computation and has been removed.
model = GridSearchCV(pipeline, grid, n_jobs=-1, cv=2, verbose=2)
model.fit(X_train, y_train)
model.best_params_
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print(f"Train score: {train_score}")
print(f"Test score: {test_score}")
y_pred = model.predict(X_test)
# Confusion matrix with readable row/column labels.
con_mat = pd.DataFrame(
    confusion_matrix(y_test, y_pred),
    index=[f"Actually {g}" for g in age_groups],
    columns=[f"Predicted {g}" for g in age_groups],
)
con_mat.style.background_gradient(axis=None)
print(classification_report(y_test, y_pred))
```
| github_jupyter |
<img src="../../images/banners/python-basics.png" width="600"/>
# <img src="../../images/logos/python.png" width="23"/> Dictionary
Python provides another composite data type called a dictionary, which is similar to a list in that it is a collection of objects.
## <img src="../../images/logos/toc.png" width="20"/> Table of Contents
* [Defining a Dictionary](#defining_a_dictionary)
* [Accessing Dictionary Values](#accessing_dictionary_values)
* [Dictionary Keys vs. List Indices](#dictionary_keys_vs._list_indices)
* [Building a Dictionary Incrementally](#building_a_dictionary_incrementally)
* [Restrictions on Dictionary Keys](#restrictions_on_dictionary_keys)
* [Restrictions on Dictionary Values](#restrictions_on_dictionary_values)
* [Conclusion](#conclusion)
---
Dictionaries and lists share the following characteristics:
- Both are mutable.
- Both are dynamic. They can grow and shrink as needed.
- Both can be nested. A list can contain another list. A dictionary can contain another dictionary. A dictionary can also contain a list, and vice versa.
Dictionaries differ from lists primarily in how elements are accessed:
- List elements are accessed by their position in the list, via indexing.
- Dictionary elements are accessed via keys.
<a class="anchor" id="defining_a_dictionary"></a>
## Defining a Dictionary
Dictionaries are Python’s implementation of a data structure that is more generally known as an associative array. A dictionary consists of a collection of key-value pairs. Each key-value pair maps the key to its associated value.
You can define a dictionary by enclosing a comma-separated list of key-value pairs in curly braces (`{}`). A colon (`:`) separates each key from its associated value:
```python
d = {
<key>: <value>,
<key>: <value>,
.
.
.
<key>: <value>
}
```
The following defines a dictionary that maps a location to the name of its corresponding Major League Baseball team:
```
MLB_team = {
'Colorado' : 'Rockies',
'Boston' : 'Red Sox',
'Minnesota': 'Twins',
'Milwaukee': 'Brewers',
'Seattle' : 'Mariners'
}
```
<img src="./images/dictionary.webp" alt="dictionary" width=400 align="center" />
You can also construct a dictionary with the built-in `dict()` function. The argument to `dict()` should be a sequence of key-value pairs. A list of tuples works well for this:
```python
d = dict([
(<key>, <value>),
(<key>, <value),
.
.
.
(<key>, <value>)
])
```
`MLB_team` can then also be defined this way:
```
MLB_team = dict([
('Colorado', 'Rockies'),
('Boston', 'Red Sox'),
('Minnesota', 'Twins'),
('Milwaukee', 'Brewers'),
('Seattle', 'Mariners')
])
```
If the key values are simple strings, they can be specified as keyword arguments. So here is yet another way to define `MLB_team`:
```
MLB_team = dict(
Colorado='Rockies',
Boston='Red Sox',
Minnesota='Twins',
Milwaukee='Brewers',
Seattle='Mariners'
)
```
Once you’ve defined a dictionary, you can display its contents, the same as you can do for a list. All three of the definitions shown above appear as follows when displayed:
```
type(MLB_team)
MLB_team
```
The entries in the dictionary display in the order they were defined. But that is irrelevant when it comes to retrieving them. Dictionary elements are not accessed by numerical index:
```
MLB_team[1]
```
<a class="anchor" id="accessing_dictionary_values"></a>
## Accessing Dictionary Values
Of course, dictionary elements must be accessible somehow. If you don’t get them by index, then how do you get them?
A value is retrieved from a dictionary by specifying its corresponding key in square brackets (`[]`):
```
MLB_team['Minnesota']
MLB_team['Colorado']
```
If you refer to a key that is not in the dictionary, Python raises an exception:
```
MLB_team['Toronto']
```
Adding an entry to an existing dictionary is simply a matter of assigning a new key and value:
```
MLB_team['Kansas City'] = 'Royals'
MLB_team
```
If you want to update an entry, you can just assign a new value to an existing key:
```
MLB_team['Seattle'] = 'Seahawks'
MLB_team
```
To delete an entry, use the `del` statement, specifying the key to delete:
```
del MLB_team['Seattle']
MLB_team
```
<a class="anchor" id="dictionary_keys_vs._list_indices"></a>
## Dictionary Keys vs. List Indices
You may have noticed that the interpreter raises the same exception, KeyError, when a dictionary is accessed with either an undefined key or by a numeric index:
```
MLB_team['Toronto']
MLB_team[1]
```
In fact, it’s the same error. In the latter case, `[1]` looks like a numerical index, but it isn’t.
You will see later in this tutorial that an object of any immutable type can be used as a dictionary key. Accordingly, there is no reason you can’t use integers:
```
d = {0: 'a', 1: 'b', 2: 'c', 3: 'd'}
d
d[0]
d[2]
```
In the expressions `MLB_team[1]`, `d[0]`, and `d[2]`, the numbers in square brackets appear as though they might be indices. But they have nothing to do with the order of the items in the dictionary. Python is interpreting them as dictionary keys. If you define this same dictionary in reverse order, you still get the same values using the same keys:
```
d = {3: 'd', 2: 'c', 1: 'b', 0: 'a'}
d
d[0]
d[2]
```
The syntax may look similar, but you can’t treat a dictionary like a list:
```
type(d)
d[-1]
d[0:2]
```
> **Note:** Although access to items in a dictionary does not depend on order, Python does guarantee that the order of items in a dictionary is preserved. When displayed, items will appear in the order they were defined, and iteration through the keys will occur in that order as well. Items added to a dictionary are added at the end. If items are deleted, the order of the remaining items is retained.
>
>You can only count on this preservation of order very recently. It was [added as a part of the Python language specification in version 3.7](https://realpython.com/python37-new-features/#the-order-of-dictionaries-is-guaranteed). However, it was true as of version 3.6 as well—by happenstance as a result of the implementation but not guaranteed by the language specification.
<a class="anchor" id="building_a_dictionary_incrementally"></a>
## Building a Dictionary Incrementally
Defining a dictionary using curly braces and a list of key-value pairs, as shown above, is fine if you know all the keys and values in advance. But what if you want to build a dictionary on the fly?
You can start by creating an empty dictionary, which is specified by empty curly braces. Then you can add new keys and values one at a time:
```
person = {}
type(person)
person['fname'] = 'Joe'
person['lname'] = 'Fonebone'
person['age'] = 51
person['spouse'] = 'Edna'
person['children'] = ['Ralph', 'Betty', 'Joey']
person['pets'] = {'dog': 'Fido', 'cat': 'Sox'}
```
Once the dictionary is created in this way, its values are accessed the same way as any other dictionary:
```
person
person['fname']
person['age']
person['children']
```
Retrieving the values in the sublist or subdictionary requires an additional index or key:
```
person['children'][-1]
person['pets']['cat']
```
This example exhibits another feature of dictionaries: the values contained in the dictionary don’t need to be the same type. In person, some of the values are strings, one is an integer, one is a list, and one is another dictionary.
Just as the values in a dictionary don’t need to be of the same type, the keys don’t either:
```
foo = {42: 'aaa', 2.78: 'bbb', True: 'ccc'}
foo
foo[42]
foo[2.78]
foo[True]
```
Here, one of the keys is an integer, one is a float, and one is a Boolean. It’s not obvious how this would be useful, but you never know.
Notice how versatile Python dictionaries are. In MLB_team, the same piece of information (the baseball team name) is kept for each of several different geographical locations. person, on the other hand, stores varying types of data for a single person.
You can use dictionaries for a wide range of purposes because there are so few limitations on the keys and values that are allowed. But there are some. Read on!
<a class="anchor" id="restrictions_on_dictionary_keys"></a>
## Restrictions on Dictionary Keys
Almost any type of value can be used as a dictionary key in Python. You just saw this example, where integer, float, and Boolean objects are used as keys:
```
foo = {42: 'aaa', 2.78: 'bbb', True: 'ccc'}
foo
```
You can even use built-in objects like types and functions:
```
d = {int: 1, float: 2, bool: 3}
d
d[float]
d = {bin: 1, hex: 2, oct: 3}
d[oct]
```
However, there are a couple restrictions that dictionary keys must abide by.
First, a given key can appear in a dictionary only once. Duplicate keys are not allowed. A dictionary maps each key to a corresponding value, so it doesn’t make sense to map a particular key more than once.
You saw above that when you assign a value to an already existing dictionary key, it does not add the key a second time, but replaces the existing value:
```
MLB_team = {
'Colorado' : 'Rockies',
'Boston' : 'Red Sox',
'Minnesota': 'Twins',
'Milwaukee': 'Brewers',
'Seattle' : 'Mariners'
}
MLB_team['Minnesota'] = 'Timberwolves'
MLB_team
```
Similarly, if you specify a key a second time during the initial creation of a dictionary, the second occurrence will override the first:
```
MLB_team = {
'Colorado' : 'Rockies',
'Boston' : 'Red Sox',
'Minnesota': 'Timberwolves',
'Milwaukee': 'Brewers',
'Seattle' : 'Mariners',
'Minnesota': 'Twins'
}
MLB_team
```
Secondly, a dictionary key must be of a type that is immutable. You have already seen examples where several of the immutable types you are familiar with—integer, float, string, and Boolean—have served as dictionary keys.
A tuple can also be a dictionary key, because tuples are immutable:
```
d = {(1, 1): 'a', (1, 2): 'b', (2, 1): 'c', (2, 2): 'd'}
d[(1,1)]
d[(2,1)]
```
> Recall from the discussion on tuples that one rationale for using a tuple instead of a list is that there are circumstances where an immutable type is required. This is one of them.
However, neither a list nor another dictionary can serve as a dictionary key, because lists and dictionaries are mutable:
```
d = {[1, 1]: 'a', [1, 2]: 'b', [2, 1]: 'c', [2, 2]: 'd'}
```
> **Technical Note:** Why does the error message say “unhashable”?
>
> Technically, it is not quite correct to say an object must be immutable to be used as a dictionary key. More precisely, an object must be hashable, which means it can be passed to a hash function. A hash function takes data of arbitrary size and maps it to a relatively simpler fixed-size value called a hash value (or simply hash), which is used for table lookup and comparison.
>
> Python’s built-in hash() function returns the hash value for an object which is hashable, and raises an exception for an object which isn’t:
>
```python
>>> hash('foo')
11132615637596761
>>> hash([1, 2, 3])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unhashable type: 'list'
```
>
> All of the built-in immutable types you have learned about so far are hashable, and the mutable container types (lists and dictionaries) are not. So for present purposes, you can think of hashable and immutable as more or less synonymous.
>
> In future tutorials, you will encounter mutable objects which are also hashable.
<a class="anchor" id="restrictions_on_dictionary_values"></a>
## Restrictions on Dictionary Values
By contrast, there are no restrictions on dictionary values. Literally none at all. A dictionary value can be any type of object Python supports, including mutable types like lists and dictionaries, and user-defined objects, which you will learn about in upcoming tutorials.
There is also no restriction against a particular value appearing in a dictionary multiple times:
```
d = {0: 'a', 1: 'a', 2: 'a', 3: 'a'}
d
d[0] == d[1] == d[2]
```
<a class="anchor" id="conclusion"></a>
## <img src="../../images/logos/checkmark.png" width="20"/> Conclusion
In this tutorial, you covered the basic properties of the Python **dictionary** and learned how to access and manipulate dictionary data.
**Lists** and **dictionaries** are two of the most frequently used Python types. As you have seen, they have several similarities, but differ in how their elements are accessed. List elements are accessed by numerical index based on order, and dictionary elements are accessed by key.
Because of this difference, lists and dictionaries tend to be appropriate for different circumstances. You should now have a good feel for which, if either, would be best for a given situation.
Next you will learn about Python **sets**. The set is another composite data type, but it is quite different from either a list or dictionary.
| github_jupyter |
<a href="https://colab.research.google.com/github/krmiddlebrook/intro_to_deep_learning/blob/master/machine_learning/lesson%202%20-%20logistic%20regression/logistic-regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Classification: Logistic Regression
In the previous lessons, we learned about linear regression and how we can use it to construct a single layer linear neural network to predict a numeric value (i.e., how powerful a Pokemon is given their x features). Regression is great when we want to answer *how much?* or *how many?* questions. In practice, we are often more interested in *classification*: asking *which one?* not *how much?*
- Is this customer more likely to *sign up* or *not* for a subscription service?
- Does this image contain one of the following, a cat or a dog?
- Is this song in the genre of hip hop, pop, or funk?
When we want to distinguish two classes (called *binary classification*), we can use a classification technique called logistic regression.
In this notebook, we will learn the foundations of logistic regression and demonstrate how to solve binary classification problems using an example--building a logistic regression model to predict whether an app on the Google Play Store is free or not. The ideas we introduce here will build on previous material and continue to lay out the fundamental concepts used in deep learning and neural networks, which we will cover in future lessons. Here is the lesson roadmap:
1. Introduction to logistic regression
2. From linear to logistic regression
3. Building a logistic regression classifier: identifying free vs paid apps on the Google Play Store
4. Summary
# Representing categorical data
<figure>
<img src='https://envato-shoebox-0.imgix.net/2718/a008-795b-4376-972d-ed9cbad8ac4f/2015_227_003_0063_A_2018_07_19.jpg?auto=compress%2Cformat&fit=max&mark=https%3A%2F%2Felements-assets.envato.com%2Fstatic%2Fwatermark2.png&markalign=center%2Cmiddle&markalpha=18&w=700&s=e3fbeb220008b297bee64675504ae70c' width='50%'>
<figcaption>Representing data: a Shina Inu, Retriever, and Lab</figcaption>
</figure>
Before we dive into logistic regression, let's consider how machine learning problems generally represents categorical data.
Categorical features represent types of data which may be divided into groups. Examples of categorical features are dog breed, game genre, and educational level. While the latter feature may also be considered in a numerical manner by using exact values for highest grade completed, it is often more informative to categorize such variables into a relatively small number of groups.
Consider an example where we want to distinguish 3 different dog breeds--(golden) retrievers, labs, and shiba inus, given 3 features about each dog: height, weight, and fur color. The numeric features are height ($x_1$) and weight ($x_2$), while the categorical feature is fur color ($x_3$), which we determined has 3 colors: black, red, yellow (golden/light gold). To make this categorical feature useful, we need to convert it into a numerical representation.
There are two general ways to represent categorical data in numeric terms. Perhaps the most natural choice is to choose $x_3 \in \{1, 2, 3\}$, where the integers represent the fur colors {black, red, yellow} respectively. This is a great way to compress and store info on a computer, but it's not great for machine learning. Fortunately, great minds got together long ago and invented a simple method to represent categorical data called *one-hot encoding*. A one-hot encoding is a vector with as many components as we have categories. The component corresponding to particular sample's category is set to 1 and all other components are set to 0. So in our case, this translates to:
$$
x_3 \in \{ (1, 0, 0), (0, 1, 0), (0, 0, 1) \},
$$
where $x_3$ would be a three-dimensional vector representing the fur color feature with $(1, 0, 0)$ corresponding to "black", (0, 1, 0) to "red", and (0, 0, 1) to "yellow" fur.
## Challenge: Representing categorical data
Now that you know how to represent categorical data, consider the dog breed example above. We one-hot encoded the fur color feature $x_3$ so that all the features $x_1, x_2, x_3$ were represented by numeric values. Thus, the features ($\mathbf{x}$) were ready to be passed as input to a machine learning model. On the other hand, are the labels $y$ (the dog breed) ready? Are they in the proper format? How should $y$ be *encoded*? Write your answer in the text cell below.
Hint: currently, $y \in \{\ \text{retrievers}, \text{labs}, \text{shiba inus} \}$ is a one-dimensional vector with categorical values.
# Intro to logistic regression
<figure>
<img src='https://www.elie.net/static/images/images/challenges-faced-while-training-an-ai-to-combat-abuse/dog-vs-cat.jpg' width='70%'>
<figcaption>Classification: Cat vs Dog</figcaption>
</figure>
With a healthy understanding of categorical encoding, let's dive into the logistic regression method.
Logistic regression is perhaps the simplest and most common machine learning algorithm for binary classification tasks. It is a special case of linear regression where the labels variable ($y$) is categorical in nature. It is called "logistic" regression because it uses a *logit* function, called the *sigmoid* function, to estimate the probability of a given class.
To motivate logistic regression, let's consider a simple image classification problem--distinguish between cat and dog photos. Here, each image consists of a $2 \times 2$ grayscale image. We can represent each pixel value with a single scalar (number), giving us four features $x_1,x_2,x_3,x_4$. Further, let's assume that each image belongs to one among the categories “cat” and “dog”. However, as we demonstrated in the previous section, we can't use the labels $y$ in their current format ("cat" and "dog"). We need to convert the labels to discrete numerical values (i.e., 0 and 1). To this end, we map each category to an integer, making $y \in \{0,1\}$, where the integers represent $\{\text{cat}, \text{dog}\}$ respectively. Notice that this is not exactly like *one-hot encoding*, where the one-dimensional vector is converted into a multi-dimensional vector with dimensions equivalent to the number of classes in the labels $y$. Instead, we used the simpler (first) method we discussed in the previous section: encoding each category as a numerical value, in this case $\{0, 1\}$ corresponding to $\{\text{cat}, \text{dog}\}$. When we only need to encode two categories (called binary categorization), we don't have to use one-hot encoding. However, we do need to encode the data numerically. Specifically, among the category labels, we need to assign 0 to one category and 1 to the other.
# From linear to logistic regression
<figure>
<img src='https://miro.medium.com/max/1400/1*dm6ZaX5fuSmuVvM4Ds-vcg.jpeg' width='70%'>
<figcaption>Linear vs Logistic Regression | Source: Datacamp</figcaption>
</figure>
Now that we know how labels are properly *encoded*, let's demonstrate the connection between linear and logistic regression.
When we are doing linear regression the equation is as follows:
$$
\hat{\mathbf{y}} = \mathbf{w} \mathbf{X} + b,\tag{1}
$$
where the linear model learns the most *optimal* parameter values for the *weights* ($\mathbf{w}$) and *bias* term ($b$). The linear regression method is great when we want to predict continuous numerical data, but not so good when we need to distinguish between classes.
To make a binary logistic classifier to distinguish between cat and dog photos, we need to convert the predictions ($\hat{\mathbf{y}}$) into probabilities ($\hat{\mathbf{p}}$). Here, each sample is assigned a corresponding probability $\hat{p}$ that indicates the model's degree of *certainty* that it belongs to a particular class (in our case, cat or dog). Further, we set a threshold, usually 0.5, that the model will use to determine the final class prediction. For our cat ($y=0$) and dog ($y=1$) problem, a sample with a $\hat{p}$ value greater than 0.5 would receive the "dog" label for example.
In order to predict classes, logistic regression maps predictions ($\hat{\mathbf{y}}$) to probabilities ($\hat{\mathbf{p}}$) via the *sigmoid* logit function:
$$
\tag{2}
p = \sigma(y) = \frac{1}{1 + e^{-y}},
$$
which leads us to the equation for logistic regression:
$$
\tag{3}
\hat{\mathbf{p}} = \sigma(\hat{\mathbf{y}}) = \frac{1}{1 + e^{-(\hat{\mathbf{w} \mathbf{X} + b})}},
$$
where the logistic model (binary classifier) learns the most *optimal* parameter values ($\mathbf{w}$ and $b$) by producing probabilities ($\hat{\mathbf{p}}$) that *maximize the likelihood* of predicting the observed data.
Generally, the logistic regression equation from $(3)$ is compressed:
$$
\tag{4}
\hat{\mathbf{p}} = \sigma(\hat{\mathbf{y}}) = \sigma(\hat{\mathbf{w} \mathbf{X} + b}),
$$
where $\sigma$ represents the sigmoid function (eq. $2$) in this case. Does this equation look similar to linear regression yet?
To summarize logistic regression:
- Category labels are converted to discrete integer values (e.g., 0 and 1).
- The *sigmoid* logit function maps input features ($\mathbf{x}$) to probabilities (i.e., a number between 0 and 1).
- A category prediction is determined by the threshold value (usually 0.5) and the probability (i.e., in our cat/dog example, a sample with a probability greater than 0.5 is classified as a dog image).
- Logistic regression classifiers try to maximize *certainty*: predict a particular class with high confidence ($\hat{p}$ closer to 1) and be correct (after thresholding, $\hat{p} = y$), most of the time.
# Logistic Regression: identifying free apps on the Google Play Store
Now that we know about the fundamentals of logistic regression, let's apply this method to a real-world problem--identifying free apps (or not) on the Google Play Store given the features corresponding to each app. In this section, we will demonstrate in an end-to-end fashion the process of creating a logistic regression classifier: from building, to training, and finally evaluating the model to solve the free (or not) Google Play Store app task. This process involves several steps:
1. Find a dataset related to our question.
2. Explore the dataset and prepare it for the model.
3. Build the model.
4. Train the model using an algorithm such as stochastic gradient descent.
5. Evaluate the quality of our model.
6. Draw conclusions.
For step 1, we found the [Google Play Store dataset](https://www.kaggle.com/lava18/google-play-store-apps). The dataset contains approximately 10k rows, each representing an app on the Google Play Store. It provides data about the category, average rating, number of reviews, number of installs, price, and more for each app.
```
# import the libraries we need
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
```
## 2. Explore the dataset and prepare it for our model
In this section we will focus on defining the *features* ($\mathbf{x}$) and *labels* ($\mathbf{y}$) that we will use in our logistic regression classifier to identify free apps. As you will see, this requires us to do some data cleaning and preprocessing.
```
# URL of the raw Google Play Store CSV hosted in the course repository.
data_url = 'https://raw.githubusercontent.com/krmiddlebrook/intro_to_deep_learning/master/datasets/googleplaystore.csv'
apps_data = pd.read_csv(data_url)
# Preview the first five rows.
apps_data.head()
# check the column types and get basic info
apps_data.info()
```
Looks like there are some null values in the data (mainly in the Ratings feature). In addition, most of the numeric features, such as number of "Reviews", "Size", number of "Installs", are labeled as *object* types instead of numeric types like *int* or *float*. We'll need to cast these features to the correct numeric types.
```
# convert the Size column to an integer type using these provided functions
#scaling and cleaning "Size" (of app) column
def change_size(size):
    """Convert a Play Store size string (e.g. '19M', '201k') to a byte count.

    Returns a float number of bytes, or None for unparseable values such as
    'Varies with device' so they can be filled in afterwards.
    """
    # Use a suffix check for both units.  The original tested `'M' in size`
    # (a substring match) but `'k' == size[-1:]` (a suffix match); suffix
    # checks make the two unit branches consistent.
    if size.endswith('M'):
        return float(size[:-1]) * 1000000
    elif size.endswith('k'):
        return float(size[:-1]) * 1000
    else:
        return None
# Apply the size conversion to every row of the "Size" column.
apps_data["Size"] = apps_data["Size"].map(change_size)
#filling Size which had NA
# NOTE(review): fillna(method='ffill') is deprecated in pandas 2.x in favor
# of .ffill(); behavior is the same here — confirm before upgrading pandas.
apps_data.Size.fillna(method = 'ffill', inplace = True)
apps_data.head()
```
After analyzing the data, we found that the Installs feature was defined as a object type because most rows contained string characters like "+" and ",". Let's remove those characters and remove any mislabeled data rows (rows that don't contain numbers).
```
# clean and convert the "Installs" column to int
# regex=False makes the literal replacements explicit — the default value of
# the `regex` parameter of Series.str.replace changed across pandas versions.
apps_data['Installs'] = apps_data['Installs'].str.replace(',', '', regex=False)
apps_data['Installs'] = apps_data['Installs'].str.replace('+', '', regex=False)
# remove mislabeled rows: anything still containing a non-digit character.
# Use a raw string so '\D' is not an invalid escape sequence (a
# SyntaxWarning on modern Python).
apps_data = apps_data.loc[~apps_data['Installs'].str.contains(r'\D+'), :].reset_index(drop=True)
apps_data['Installs'] = apps_data['Installs'].astype(int)
apps_data.head()
```
Now we convert the Reviews feature to an integer type and remove the Price feature, since keeping it would be cheating (the model would learn that 0 means an app is free and any other value means an app is not).
```
# clean and convert the "Reviews" column to int
# fix mislabeled row in the "Reviews" column (disabled: the mislabeled row is
# already removed by the "Installs" cleaning step above)
# apps_data.loc[apps_data.Reviews.str.contains('M'), 'Reviews'] = int(apps_data.loc[apps_data.Reviews.str.contains('M'), 'Reviews'].values.tolist()[0][:-1])*1000000
apps_data['Reviews'] = apps_data['Reviews'].astype('int')
# remove price column so we don't cheat
# (Price would leak the label: a price of 0 means the app is free.)
apps_data = apps_data.drop(columns=['Price'])
# remove columns we won't be using in our analysis
apps_data.info()
```
Looking good. Now we need to *one-hot encode* the categorical features that have more than 2 categories, in this case, Category and Content Rating. We use the `get_dummies` method from Pandas to create multi-dimensional (one-hot encoded) vectors for each feature, and concatenate the vectors to the original dataframe using the `concat` method from Pandas.
```
# One-hot encode the multi-category columns ("Category", "Content Rating"),
# dropping the first level of each to avoid a redundant column.
category_dummies = pd.get_dummies(apps_data['Category'], prefix='catg', drop_first=True)
rating_dummies = pd.get_dummies(apps_data['Content Rating'], prefix='cr', drop_first=True)
# Append the encoded columns, then drop the original categorical ones.
apps_data = pd.concat([apps_data, category_dummies, rating_dummies], axis=1)
apps_data = apps_data.drop(['Category', 'Content Rating'], axis=1)
apps_data.info()
```
Now we are ready to define the features $\mathbf{x}$ and labels $y$. We define the labels as the Type feature and encode. Additionally, we use every feature except App, Genres, Last Updated, Current Ver, and Android Ver for the input features. Finally, we prepare the training and test datasets for the model. In this step we introduce an semi-new concept *normalization*. Specifically, we will normalize the features (of non-categorical features) using their respective mean and standard deviations.
```
# select the x and y variables for the model:
# x = every column except identifier / free-text / date columns, y = "Type".
x = apps_data.loc[:, ~apps_data.columns.isin(['App', 'Genres', 'Last Updated', 'Current Ver', 'Android Ver'])].columns
y = 'Type'
apps_data = apps_data.drop(['App', 'Genres', 'Last Updated', 'Current Ver', 'Android Ver'], axis=1)
# drop rows with missing values
apps_data = apps_data.dropna()
# convert "Type" column to int before creating y variable
def convert_type_to_int(type):
    """Encode the app Type label numerically: 'Free' -> 0, anything else -> 1."""
    return 0 if type == 'Free' else 1
# Encode the label column: 0 = free, 1 = paid.
apps_data[y] = apps_data[y].map(convert_type_to_int)
# normalize the non-categorical (not one-hot encoded) features to zero mean
# and unit variance so they share a common scale
norm_cols = ['Rating', 'Reviews', 'Size', 'Installs']
means = apps_data[norm_cols].mean()
std = apps_data[norm_cols].std()
apps_data[norm_cols] -= means
apps_data[norm_cols] /= std
# split the dataset into a training set and a test set.
# we will use the test set in the final evaluation of our model.
# NOTE(review): the mean/std above were computed on the full dataset before
# this split, so the test set leaks slightly into the normalization — confirm
# whether that is acceptable for this tutorial.
train = apps_data.sample(frac=0.8, random_state=0)
test = apps_data.drop(train.index)
# separate the x (features) and y (labels) in the train/test datasets
train_features = train[x].values
test_features = test[x].values
train_labels = train[y].values.reshape(-1, 1)
test_labels = test[y].values.reshape(-1, 1)
print('train features shape:', train_features.shape)
print('train labels shape:', train_labels.shape)
print('test features shape:', test_features.shape)
print('test labels shape:', test_labels.shape)
print('first 5 test labels:\n', test_labels[:5])
```
The above code returns a training and test dataset. For context, the `training_features` array contains about 7.5k rows (samples) and 42 columns (features).
```
# Count label frequencies: index 0 = free apps, index 1 = paid apps.
counts = np.bincount(train_labels[:, 0])
print(
    "Number of positive samples (paid apps) in training data: {} ({:.2f}% of total)".format(
        counts[1], 100 * float(counts[1]) / len(train_labels)
    )
)
# Inverse-frequency class weights to counter the class imbalance.
# NOTE(review): these weights are computed but never passed to model.fit
# (e.g. via class_weight={0: weight_for_0, 1: weight_for_1}) later in the
# notebook — confirm whether that was intended.
weight_for_0 = 1.0 / counts[0]
weight_for_1 = 1.0 / counts[1]
print('weight for free apps:', weight_for_0)
print('weight for paid apps:', weight_for_1)
```
## 3. Build the model
Now that the data is ready, we can build a model. We will use Tensorflow to define a simple logistic regression model (single-layer neural network) to predict the class of each app (free or not). Given a sample with a corresponding prediction that is above 0.5, the model will assign the "paid" (1) category to it; otherwise it is categorized as "free".
We also define the loss function, optimization algorithm, and metrics and "glue" them together with the model using the `compile` method. We will use *binary cross-entropy* loss, *stochastic gradient descent*, and track the *accuracy* metric.
```
# Build the single-layer (logistic regression) binary classifier.
model = keras.Sequential(name='logistic_regression_model')
# One input per feature column.
model.add(layers.Input((train_features.shape[-1],)))
# A single unit holding the weights and bias, squashed to a probability
# by the sigmoid activation.
model.add(layers.Dense(1, activation='sigmoid'))
print('model summary')
print(model.summary())
# Glue together the loss function, optimization algorithm, and tracked
# metrics for training.
model.compile(loss=keras.losses.BinaryCrossentropy(),
              optimizer=keras.optimizers.SGD(learning_rate=0.01),
              metrics=['accuracy'])
```
## 4. Train the model
Now it's time to train the model. We will train it for 100 *epochs* (iterations) with a *batch size* of 2048 (the number of training examples to evaluate prior to doing gradient descent), and record the training and validation metrics in the `history` object.
```
epochs = 100  # full passes over the training data
batch_size = 2048  # samples evaluated per gradient-descent step
# Hold out 10% of the training data for per-epoch validation.
history = model.fit(train_features, train_labels,
                    epochs=epochs, validation_split=0.1,
                    batch_size=batch_size)
# create a dataframe to store the history
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
print(hist.tail())
# visualize the validation accuracy over the training process
hist.plot.line(x='epoch', y='val_accuracy');
```
As the above plot suggests, our model converges to optimal parameters around the 40th epoch. The validation accuracy peaks around ~92.3%.
## 5. Evaluate the model
Now that we trained our model, it's time to evaluate it using the test dataset, which we did not use when training the model. This gives us a sense of how well our model predicts unseen data, which is the case when we use it in the real world. We will use the `evaluate` method to test the model.
```
# Evaluate on the held-out test set — data the model never saw in training.
loss, accuracy = model.evaluate(test_features, test_labels)
print('Test set accuracy: {}%'.format(round(accuracy, 4)*100))
```
Wow! Our logistic regression model fit the Google Play Store data pretty well, correctly predicting whether an app was free or not around 93% to 94% of the time. However, the distribution of free apps to non-free apps in our datasets is not balanced; this is called *class imbalance*. To give our results more context, we should check the *confusion matrix* to make sure the model wasn't just predicting every app as "free", since there are a lot more of those than there are "paid" apps.
A confusion matrix indicates the number of correct predictions and incorrect predictions for each class. It is particularly useful whenever the data has an imbalanced representation of the classes. The diagonals of a confusion matrix indicate the correct predictions for each class, while the cross-diagonal indicates misclassified predictions. Below is an example of a binary classification confusion matrix.
<figure>
<img src='https://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix_files/confusion_matrix_1.png' width='35%'>
<figcaption>A basic confusion matrix</figcaption>
</figure>
```
from sklearn import metrics
# Threshold the predicted probabilities at 0.5 to get hard 0/1 class labels
test_predictions = (model.predict(test_features) > 0.5).astype('int32')
# display the predictions (notebook cell output)
test_predictions
# measure the accuracy
model_acc = metrics.accuracy_score(test_labels, test_predictions)
print(f'logistic regression model accuracy: {round(model_acc*100, 2)}%')
# plot confusion matrix
labels = ['Free', 'Paid']
cm = metrics.confusion_matrix(test_labels, test_predictions)
print('confusion matrix:\n', cm)
# render the matrix as an image; diagonal cells are correct predictions
plt.imshow(cm, cmap=plt.cm.Blues)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.xticks([0, 1], [0, 1])
plt.yticks([0, 1], [0,1])
plt.title('Confusion matrix ')
plt.colorbar()
plt.show()
```
From the confusion matrix, we can deduce that the model isn't all that great. In fact, it resorted to predicting every app as free. There are a variety of ways to improve upon this issue, but we won't be covering them in this lesson. Nonetheless, it's important to be aware of misleading model results.
Let's take a look at a more comprehensive set of evaluation metrics: accuracy, precision, and recall. Precision indicates the model's ability to return only relevant instances, while recall indicates the model's ability to identify all relevant instances; depending on our data, we may prefer a higher precision score or a higher recall score. If you're curious, here is an in-depth discussion about these metrics: [Beyond Accuracy: Precision and Recall](https://towardsdatascience.com/beyond-accuracy-precision-and-recall-3da06bea9f6c).
```
# Report accuracy alongside precision and recall for more context on the imbalanced classes.
print("Accuracy: {}%".format(round(model_acc*100, 2)))
# NOTE(review): zero_division=True is treated as 1, i.e. these report 1.0 instead of
# raising/warning when there are no positive predictions — confirm that is intended.
print("Precision:", metrics.precision_score(test_labels, test_predictions, zero_division=True))
print("Recall:" ,metrics.recall_score(test_labels, test_predictions, zero_division=True))
```
What's the take away from all this...
**Always, always contextualize the model's results.**
# Summary
- We use *one-hot encoding* to represent categorical data.
- Logistic regression is a popular and foundational algorithm for classification in machine learning and deep learning (neural networks).
- The *sigmoid* logit function maps the input features to a probability distribution.
- Linear and logistic regression are very similar; they differ in two ways. First, the labels are continuous numerical values in linear regression, while in logistic regression they are discrete numerical values (0 and 1), each representing a particular category. Second, logistic regression uses the sigmoid function to transform the input features into a probability space, and the model learns the optimal parameters to maximize the probability of confidently predicting the correct class.
```
```
| github_jupyter |
# Feature Importance and Feature Selection With XGBoost
A benefit of using ensembles of decision tree methods like gradient boosting is that they can automatically provide estimates of feature importance from a trained predictive model.
Here you will discover how you can estimate the importance of features for a predictive modeling problem using the XGBoost library in Python.
By the end you will know:
- How feature importance is calculated using the gradient boosting algorithm?
- How to plot feature importance in Python calculated by the XGBoost model?
- How to use feature importance calculated by XGBoost to perform feature selection?
## Feature Importance in Gradient Boosting
A benefit of using gradient boosting is that after the boosted trees are constructed, it is relatively straightforward to retrieve importance scores for each attribute.
Generally, importance provides a score that indicates how useful or valuable each feature was in the construction of the boosted decision trees within the model. The more an attribute is used to make key decisions with decision trees, the higher its relative importance.
This importance is calculated explicitly for each attribute in the dataset, allowing attributes to be ranked and compared to each other.
Importance is calculated for a single decision tree by the amount that each attribute split point improves the performance measure, weighted by the number of observations the node is responsible for. The performance measure may be the purity (Gini index) used to select the split points or another more specific error function.
The feature importances are then averaged across all of the decision trees within the model.
## Manually Plot Feature Importance
A trained XGBoost model automatically calculates feature importance on your predictive modeling problem.
These importance scores are available in the **feature_importances_** member variable of the trained model. For example, they can be printed directly as follows:
<pre>
print(model.feature_importances_)
</pre>
We can plot these scores on a bar chart directly to get a visual indication of the relative importance of each feature in the dataset. For example:
<pre>
# plot
pyplot.bar(range(len(model.feature_importances_)), model.feature_importances_)
pyplot.show()
</pre>
We can demonstrate this by training an XGBoost model on the Pima Indians onset of diabetes [dataset](https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes) and creating a bar chart from the calculated feature importances.
```
# plot feature importance manually
from numpy import loadtxt
from xgboost import XGBClassifier
from matplotlib import pyplot
# load data (Pima Indians diabetes CSV: 8 numeric features + binary label)
dataset = loadtxt('pima-indians-diabetes.csv', delimiter=",")
# split data into X and y
X = dataset[:,0:8]
y = dataset[:,8]
# fit model no training data
model = XGBClassifier()
model.fit(X, y)
# feature importance scores, ordered by input column index (not by importance)
print(model.feature_importances_)
# plot
pyplot.bar(range(len(model.feature_importances_)), model.feature_importances_)
pyplot.show()
```
A downside of this plot is that the features are ordered by their input index rather than their importance. We could sort the features before plotting.
Thankfully, there is a built in plot function to help us.
## Using the Built-in XGBoost Feature Importance Plot
The XGBoost library provides a built-in function to plot features ordered by their importance.
The function is called **plot_importance()** and can be used as follows:
<pre>
# plot feature importance
plot_importance(model)
pyplot.show()
</pre>
For example, below is a complete code listing plotting the feature importance for the Pima Indians dataset using the built-in plot_importance() function.
```
# plot feature importance using built-in function
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from matplotlib import pyplot
# load data (same Pima Indians diabetes CSV as the manual example above)
dataset = loadtxt('pima-indians-diabetes.csv', delimiter=",")
# split data into X and y
X = dataset[:,0:8]
y = dataset[:,8]
# fit model no training data
model = XGBClassifier()
model.fit(X, y)
# plot feature importance; features are sorted by importance and named F0..F7 by index
plot_importance(model)
pyplot.show()
```
You can see that features are automatically named according to their index in the input array (X) from F0 to F7.
Manually mapping these indices to [names](https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.names) in the problem description, we can see that the plot shows F5 (body mass index) has the highest importance and F3 (skin fold thickness) has the lowest importance.
## Feature Selection with XGBoost Feature Importance Scores
Feature importance scores can be used for feature selection in scikit-learn.
This is done using the [SelectFromModel](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectFromModel.html) class that takes a model and can transform a dataset into a subset with selected features.
This class can take a pre-trained model, such as one trained on the entire training dataset. It can then use a threshold to decide which features to select. This threshold is used when you call the **transform()** method on the **SelectFromModel** instance to consistently select the same features on the training dataset and the test dataset.
In the example below we first train and then evaluate an XGBoost model on the entire training dataset and test datasets respectively.
Using the feature importances calculated from the training dataset, we then wrap the model in a SelectFromModel instance. We use this to select features on the training dataset, train a model from the selected subset of features, then evaluate the model on the testset, subject to the same feature selection scheme.
For example:
<pre>
# select features using threshold
selection = SelectFromModel(model, threshold=thresh, prefit=True)
select_X_train = selection.transform(X_train)
# train model
selection_model = XGBClassifier()
selection_model.fit(select_X_train, y_train)
# eval model
select_X_test = selection.transform(X_test)
y_pred = selection_model.predict(select_X_test)
</pre>
For interest, we can test multiple thresholds for selecting features by feature importance. Specifically, the feature importance of each input variable, essentially allowing us to test each subset of features by importance, starting with all features and ending with a subset with the most important feature.
The complete code listing is provided below.
```
# use feature importance for feature selection
from numpy import loadtxt
from numpy import sort
from xgboost import XGBClassifier
# FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed
# in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectFromModel

# load data (Pima Indians diabetes CSV: 8 numeric features + binary label)
dataset = loadtxt('pima-indians-diabetes.csv', delimiter=",")
# split data into X (features) and Y (label)
X = dataset[:, 0:8]
Y = dataset[:, 8]
# hold out a third of the data for evaluation
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=7)
# fit a baseline model on all training features
model = XGBClassifier()
model.fit(X_train, y_train)
# make predictions for test data and evaluate
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# Fit a model using each importance score as a selection threshold, from the
# least important feature (all features kept) up to the most important one
# (only a single feature kept).
thresholds = sort(model.feature_importances_)
for thresh in thresholds:
    # select features whose importance is >= the current threshold
    selection = SelectFromModel(model, threshold=thresh, prefit=True)
    select_X_train = selection.transform(X_train)
    # train model on the reduced feature subset
    selection_model = XGBClassifier()
    selection_model.fit(select_X_train, y_train)
    # eval model with the same feature-selection scheme applied to the test set
    select_X_test = selection.transform(X_test)
    y_pred = selection_model.predict(select_X_test)
    predictions = [round(value) for value in y_pred]
    accuracy = accuracy_score(y_test, predictions)
    print("Thresh=%.3f, n=%d, Accuracy: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0))
```
We can see that the performance of the model generally decreases with the number of selected features.
On this problem there is a trade-off of features to test set accuracy and we could decide to take a less complex model (fewer attributes such as n=4) and accept a modest decrease in estimated accuracy from 77.95% down to 76.38%.
This is likely to be a wash on such a small dataset, but may be a more useful strategy on a larger dataset and using cross validation as the model evaluation scheme.
| github_jupyter |
ERROR: type should be string, got "https://numpy.org/doc/stable/reference/arrays.datetime.html\n\n创建日期时间的最基本方法是使用ISO 8601日期或日期时间格式的字符串。 内部存储单位是从字符串形式中自动选择的,可以是日期单位或时间单位。\n日期单位是年('Y'),月('M'),周('W')和天('D'),而时间单位是小时('h'),分钟('m' ),秒(“ s”),毫秒(“ ms”)和一些其他基于SI前缀秒的单位。\ndatetime64数据类型还接受字符串“ NAT”(大小写字母的任意组合)作为“ Not A Time”值。\n\n```\nimport numpy as np\n# A simple ISO date:\n# Using months for the unit:\n# Specifying just the month, but forcing a ‘days’ unit:\n# From a date and time:\n# NAT (not a time):\nnp.datetime64('2005-02-25'),\\\nnp.datetime64('2005-02'),\\\nnp.datetime64('2005-02', 'D'),\\\nnp.datetime64('2005-02-25T03:30'),\\\nnp.datetime64('nat')\nnp.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64'),\\\nnp.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64')\n# 一个月的数据\nnp.arange('2005-02', '2005-03', dtype='datetime64[D]')\nnp.datetime64('2005') == np.datetime64('2005-01-01'),\\\nnp.datetime64('2010-03-14T15Z') == np.datetime64('2010-03-14T15:00:00.00Z')\n```\n\n## 日期时间和时间增量算法\n\n```\nnp.timedelta64(1, 'D'),\\\nnp.timedelta64(4, 'h'),\\\nnp.datetime64('2009-01-01') - np.datetime64('2008-01-01'),\\\nnp.datetime64('2009') + np.timedelta64(20, 'D'),\\\n np.datetime64('2011-06-15T00:00') + np.timedelta64(12, 'h'),\\\n np.timedelta64(1,'W') / np.timedelta64(1,'D'),\\\n np.timedelta64(1,'W') % np.timedelta64(10,'D'),\\\n np.datetime64('nat') - np.datetime64('2009-01-01'),\\\n np.datetime64('2009-01-01') + np.timedelta64('nat')\n```\n\n日期单位:\n\n| Code | Meaning | Time span \\(relative\\) | Time span \\(absolute\\) |\n|------|---------|------------------------|----------------------------|\n| Y | year | \\+/\\- 9\\.2e18 years | \\[9\\.2e18 BC, 9\\.2e18 AD\\] |\n| M | month | \\+/\\- 7\\.6e17 years | \\[7\\.6e17 BC, 7\\.6e17 AD\\] |\n| W | week | \\+/\\- 1\\.7e17 years | \\[1\\.7e17 BC, 1\\.7e17 AD\\] |\n| D | day | \\+/\\- 2\\.5e16 years | \\[2\\.5e16 BC, 2\\.5e16 AD\\] |\n\n时间单位:\n\n| Code | Meaning | Time span \\(relative\\) | 
Time span \\(absolute\\) |\n|------|-------------|------------------------|----------------------------|\n| h | hour | \\+/\\- 1\\.0e15 years | \\[1\\.0e15 BC, 1\\.0e15 AD\\] |\n| m | minute | \\+/\\- 1\\.7e13 years | \\[1\\.7e13 BC, 1\\.7e13 AD\\] |\n| s | second | \\+/\\- 2\\.9e11 years | \\[2\\.9e11 BC, 2\\.9e11 AD\\] |\n| ms | millisecond | \\+/\\- 2\\.9e8 years | \\[ 2\\.9e8 BC, 2\\.9e8 AD\\] |\n| us | microsecond | \\+/\\- 2\\.9e5 years | \\[290301 BC, 294241 AD\\] |\n| ns | nanosecond | \\+/\\- 292 years | \\[ 1678 AD, 2262 AD\\] |\n| ps | picosecond | \\+/\\- 106 days | \\[ 1969 AD, 1970 AD\\] |\n| fs | femtosecond | \\+/\\- 2\\.6 hours | \\[ 1969 AD, 1970 AD\\] |\n| as | attosecond | \\+/\\- 9\\.2 seconds | \\[ 1969 AD, 1970 AD\\] |\n\n\n工作日功能:\nbusday功能的默认设置是唯一有效的日期是星期一至星期五(通常的工作日)。\n该实现基于包含7个布尔值标志的“周掩码”,以指示有效日期; 可以使用自定义周掩码来指定其他有效日期集。\n\n“ busday”功能还可以检查“假期”列表,这些日期不是有效的日期。\n\n功能busday_offset使您可以将工作日中指定的偏移量以“ D”(天)为单位应用于日期时间。\n\n```\nnp.busday_offset('2011-06-23', 1),\\\nnp.busday_offset('2011-06-23', 2),\\\nnp.busday_offset('2011-06-25', 0, roll='forward'),\\\nnp.busday_offset('2011-06-25', 2, roll='forward'),\\\nnp.busday_offset('2011-06-25', 0, roll='backward'),\\\nnp.busday_offset('2011-06-25', 2, roll='backward'),\\\nnp.busday_offset('2011-03-20', 0, roll='forward'),\\\nnp.busday_offset('2011-03-22', 0, roll='forward'),\\\nnp.busday_offset('2011-03-20', 1, roll='backward'),\\\nnp.busday_offset('2011-03-22', 1, roll='backward'),\\\nnp.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')\n\"\"\"\n# Positional sequences; positions are Monday through Sunday.\n# Length of the sequence must be exactly 7.\nweekmask = [1, 1, 1, 1, 1, 0, 0]\n# list or other sequence; 0 == invalid day, 1 == valid day\nweekmask = \"1111100\"\n# string '0' == invalid day, '1' == valid day\n\n# string abbreviations from this list: Mon Tue Wed Thu Fri Sat Sun\nweekmask = \"Mon Tue Wed Thu Fri\"\n# any amount of whitespace is allowed; abbreviations are 
case-sensitive.\nweekmask = \"MonTue Wed Thu\\tFri\"\n\"\"\"\na = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18'))\n\nnp.is_busday(np.datetime64('2011-07-15')),\\\n np.is_busday(np.datetime64('2011-07-16')),\\\n np.is_busday(np.datetime64('2011-07-16'), weekmask=\"Sat Sun\"),\\\n np.is_busday(a)\na = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18'))\n\nnp.busday_count(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')),\\\n np.busday_count(np.datetime64('2011-07-18'), np.datetime64('2011-07-11')),\\\n np.count_nonzero(np.is_busday(a))\n```\n\n" | github_jupyter |
# Section 1.2 Model Fitting
```
import pymc3 as pm
import numpy as np
import arviz as az
import matplotlib.pyplot as plt
az.style.use('arviz-white')
```
## Activity 1: Estimate the Proportion of Water
Now it's your turn to work through an example inspired from Richard McElreath's excellent book [Statistical Rethinking](https://www.amazon.com/Statistical-Rethinking-Bayesian-Examples-Chapman/dp/1482253445/)
### How much of a planet is covered in water?
Good news: you're an astronomer that just discovered a new planet. Bad news: your telescope has a small field of view and you can only see one tiny point on the planet at a time. More bad news: you're also a starving grad student and you can only take 5 measurements on your monthly stipend.
**With 5 measurements what is your estimate for how much of the planet is covered in water?**
You are trying to estimate $\theta$ where
$$\theta = \text{Proportion of water on the planet}$$
Your model is formulated as follows
$$
\theta \sim \operatorname{Uniform}(0,1) \\
p_{\text{water}} \sim \operatorname{Binom}(\theta, N)
$$
(Note: the probability density function for $\operatorname{Uniform}(0, 1)$ is the same as for $\operatorname{Beta}(1, 1)$)
### Exercise 1
* What is the prior in this model? What does the prior intuitively mean?
$\theta \sim \operatorname{Uniform}(0,1) $
This means that prior to seeing any data we think that planet could have no surface water, be all water, or anything in between with equal probability. We just have no idea how much of the surface is water we just know it has to be somewhere between 0% and 100%.
In other words our prior is ¯\\\_(ツ)_/¯
### Exercise 2
* What is the likelihood in the model? What does the likelihood intuitively mean?
$p_{\text{water}} \sim \operatorname{Binom}(\theta, N)$
The likelihood is our Binomial model. This one is trickier, what it means is given our observations, how likely is a particular proportion of water. Remember here that $\theta$ is not just one number but a distribution of numbers.
### Exercise 3
Using the data provided below fit your model to estimate the proportion of water on the planet using PyStan or PyMC3. We have provided the PyMC3 model but please feel free to use the PPL you're more comfortable with.
After the fitting the model and plotting the posterior how "certain" are you about the proportion of water on this planet?
```
# A value of 0 signifies a land observation, a value of 1 signifies a water observation
observations = [0, 0, 1, 0, 1]
# number of water observations (sum of the 1s) and total number of trials
water_observations = sum(observations)
total_observations = len(observations)
with pm.Model() as planet_model:
# Prior: uniform over [0, 1] — no idea how much of the surface is water
p_water = pm.Uniform("p_water", 0 ,1)
# Likelihood: binomial count of water sightings out of N observations
w = pm.Binomial("w", p=p_water, n=total_observations, observed=water_observations)
# Inference Run/ Markov chain Monte Carlo (5000 draws on each of 2 chains)
trace_5_obs = pm.sample(5000, chains=2)
# plot the posterior distribution of the water proportion
az.plot_posterior(trace_5_obs)
```
### Exercise 4: Collect more data and get a new posterior
With some extra funding you're now able make 500 observations of this planet.
Using your intuition, with more observations will you be more or less certain about the amount of water on a planet?
Do the results match your expectations?
```
# Repeat the same 5-observation pattern 100 times: 500 observations, same water ratio
one_hundred_times_the_observations = [0, 0, 1, 0, 1]*100
water_observations = sum(one_hundred_times_the_observations)
total_observations = len(one_hundred_times_the_observations)
# Same model as before, refit with the larger sample — the posterior should narrow
with pm.Model() as planet_model_2:
p_water = pm.Uniform("p_water", 0 ,1)
w = pm.Binomial("w", p=p_water, n=total_observations, observed=water_observations)
trace_more_obs = pm.sample(5000, chains=2)
az.plot_posterior(trace_more_obs)
```
### Exercise 5: A new planet
During your research you encountered a new planet. Unfortunately you once again were only able to take 5 measurements, but in those 5 you only observed land, and no water.
Fit your model and see if the results look any different
```
# A planet where all 5 observations were land (0 water sightings)
desert_observations = [0, 0, 0, 0, 0]
water_observations = sum(desert_observations)
total_observations = len(desert_observations)
# Same model, fit to the all-land data — posterior mass should pile up near 0
with pm.Model() as planet_model_3:
p_water = pm.Uniform("p_water", 0 ,1)
w = pm.Binomial("w", p=p_water, n=total_observations, observed=water_observations)
trace_new_planet = pm.sample(5000, chains=2)
az.plot_posterior(trace_new_planet)
```
### Exercise 6: Try out a couple ArviZ functions
Convert your inference data to `az.InferenceData` using the methods `az.from_pymc3` or `az.from_stan`. Then try a couple plots such as
* az.plot_trace
* az.plot_autocorr
* az.plot_forest
Try running a summary function such as
* az.summary
For now don't worry too much about how to interpret these plots and statistics. This will be covered as we continue in the tutorial.
```
# Convert PPL data to az.InferenceData
inference_data = az.from_pymc3(trace_5_obs)
az.summary(inference_data)
az.plot_trace(inference_data)
az.plot_autocorr(inference_data)
az.plot_forest([trace_5_obs, trace_more_obs, trace_new_planet], model_names=["5_observations", "100_observations", "new_planet"])
```
## Bonus:
Explore the ArviZ documentation to see what else is possible.
https://arviz-devs.github.io/arviz/
| github_jupyter |
```
!pip install scikit-learn==1.0.2 statsmodels yellowbrick python-slugify sagemaker==2.88.0 s3fs
```
# Data cleaning and Feature engineering
```
import os
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
from slugify import slugify
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
import datetime as dt
# Load the raw telco churn CSV
churn_data = pd.read_csv("/content/telco-customer-churn.csv")
# TotalCharges uses " " for missing values; normalize to NaN then drop those rows
churn_data['TotalCharges'] = churn_data["TotalCharges"].replace(" ",np.nan)
churn_data = churn_data[churn_data["TotalCharges"].notnull()]
# reset the index (keeping the original column order) after the row drop
churn_data = churn_data.reset_index()[churn_data.columns]
# TotalCharges was read as object/str; make it numeric
churn_data["TotalCharges"] = churn_data["TotalCharges"].astype(float)
def tenure_label(churn_data):
    """Bucket a customer row into a tenure group.

    Parameters
    ----------
    churn_data : mapping or pandas row with a numeric ``"tenure"`` field.

    Returns
    -------
    str
        ``"0-24"``, ``"24-48"``, or ``"48-end"`` (``None`` only for values
        that compare false to every bound, e.g. NaN — preserved from the
        original behavior).
    """
    tenure = churn_data["tenure"]
    if tenure <= 24:
        return "0-24"
    # chained comparison replaces the original bitwise `&` of two booleans
    if 24 < tenure <= 48:
        return "24-48"
    if tenure > 48:
        return "48-end"
# Derive the tenure_group column by applying tenure_label to every row
churn_data["tenure_group"] = churn_data.apply(lambda churn_data:tenure_label(churn_data),
axis = 1)
# These service columns use 'No internet service' as a third value; collapse it to 'No'
replace_cols = [ 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection',
'TechSupport','StreamingTV', 'StreamingMovies']
for i in replace_cols :
churn_data[i] = churn_data[i].replace({'No internet service' : 'No'})
# peek at a few rows and the cardinality of each column (notebook outputs)
churn_data.sample(5)
churn_data.nunique()
# Binary (2-value) columns get label-encoded in place to 0/1
bin_cols = churn_data.nunique()[churn_data.nunique() == 2].keys().tolist()
le = LabelEncoder()
for i in bin_cols :
churn_data[i] = le.fit_transform(churn_data[i])
# Low-cardinality (3-4 value) categorical columns get one-hot encoded
all_categorical_cols = churn_data.nunique()[churn_data.nunique() <=4].keys().tolist()
multi_value_cols = [col for col in all_categorical_cols if col not in bin_cols]
churn_data = pd.get_dummies(data = churn_data, columns=multi_value_cols)
# Standardize the continuous numeric columns (zero mean, unit variance)
numerical_cols = ['tenure', 'MonthlyCharges', 'TotalCharges']
std = StandardScaler()
churn_data[numerical_cols] = std.fit_transform(churn_data[numerical_cols])
# Normalize column names to lowercase snake_case (Feature Store-friendly names)
churn_data.columns = [slugify(col, lowercase=True, separator='_') for col in churn_data.columns]
# Display the engineered columns in chunks (notebook outputs)
sample = churn_data.head()
sample[['customerid', 'gender', 'seniorcitizen', 'partner', 'dependents',
'tenure', 'phoneservice', 'onlinesecurity', 'onlinebackup',
'deviceprotection', 'techsupport', 'streamingtv']]
sample[['streamingmovies', 'paperlessbilling', 'monthlycharges', 'totalcharges', 'churn',
'multiplelines_no', 'multiplelines_no_phone_service',
'multiplelines_yes', 'internetservice_dsl']]
sample[['internetservice_fiber_optic','internetservice_no',
'contract_month_to_month', 'contract_one_year', 'contract_two_year',
'paymentmethod_bank_transfer_automatic']]
sample[['paymentmethod_credit_card_automatic','paymentmethod_electronic_check',
'paymentmethod_mailed_check', 'tenure_group_0_24', 'tenure_group_24_48',
'tenure_group_48_end']]
```
# Feature group creation and ingestion
```
# import os
# os.environ["AWS_ACCESS_KEY_ID"] = "<aws_key_id>"
# os.environ["AWS_SECRET_ACCESS_KEY"] = "<aws_secret>"
# os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
import boto3

# Name of the SageMaker Feature Store group that will hold the churn features.
FEATURE_GROUP_NAME = "telcom-customer-features"

client = boto3.client('sagemaker')
response = client.list_feature_groups(
    NameContains=FEATURE_GROUP_NAME)
# FIX: `FeatureGroupSummaries` is a list of summary dicts, so testing the name
# against the list itself was always False; compare against each summary's
# `FeatureGroupName` instead.
feature_group_exist = any(
    summary["FeatureGroupName"] == FEATURE_GROUP_NAME
    for summary in response["FeatureGroupSummaries"]
)
import sagemaker
from sagemaker.session import Session
import time
from sagemaker.feature_store.feature_group import FeatureGroup
# FIX: FeatureDefinition / FeatureTypeEnum were used below without being
# imported, which raised NameError the first time the group had to be created.
from sagemaker.feature_store.feature_definition import (
    FeatureDefinition,
    FeatureTypeEnum,
)

# IAM role SageMaker assumes to write to S3 and the Feature Store offline store.
role = "arn:aws:iam::<account_number>:role/sagemaker-iam-role"
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_region_name
s3_bucket_name = "feast-demo-mar-2022"

customers_feature_group = FeatureGroup(
    name=FEATURE_GROUP_NAME, sagemaker_session=sagemaker_session
)

# Feature Store requires an event-time column on every record.
churn_data["event_timestamp"] = float(round(time.time()))

if not feature_group_exist:
    # Infer feature definitions from the dataframe dtypes, excluding the
    # record identifier, which must be declared explicitly as a string.
    customers_feature_group.load_feature_definitions(
        churn_data[[col
                    for col in churn_data.columns
                    if col not in ["customerid"]]])
    customer_id_def = FeatureDefinition(feature_name='customerid',
                                        feature_type=FeatureTypeEnum.STRING)
    customers_feature_group.feature_definitions = [customer_id_def] + customers_feature_group.feature_definitions
    # Offline-store-only group backed by the S3 bucket above.
    customers_feature_group.create(
        s3_uri=f"s3://{s3_bucket_name}/{FEATURE_GROUP_NAME}",
        record_identifier_name="customerid",
        event_time_feature_name="event_timestamp",
        role_arn=role,
        enable_online_store=False
    )

ingestion_results = customers_feature_group.ingest(churn_data, max_workers=1)
# Rows that failed to ingest (empty on success) — notebook cell output.
ingestion_results.failed_rows
```
| github_jupyter |
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
```
NAME = ""
COLLABORATORS = ""
```
---
<!--NOTEBOOK_HEADER-->
*This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);
content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*
<!--NAVIGATION-->
< [RosettaAntibody Framework](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.01-RosettaAntibody-Framework-and-SimpleMetrics.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [RosettaCarbohydrates](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.00-RosettaCarbohydrates-Working-with-Glycans.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.02-RosettaAntibodyDesign-RAbD.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
# RosettaAntibodyDesign
## Notes
This tutorial will walk you through how to use `RosettaAntibodyDesign` in PyRosetta. You should also go through the parellel distribution workshop as you will most likely need to create many decoys for some of these design tasks. Note that we are using the XML interface to the code here for simplicity (and because I had a C++ workshop I am converting - truth be told). The code-level interface is as robust as the XML - but will require more knowledge use. You are welcome to play around with it - all functions have descriptions and all options are possible to change through code.
Grab a coffee, take a breath, and lets learn how to design some antibodies!
## Citation
[Rosetta Antibody Design (RAbD): A General Framework for Computational Antibody Design, PLOS Computational Biology, 4/27/2018](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006112)
Jared Adolf-Bryfogle, Oleks Kalyuzhniy, Michael Kubitz, Brian D. Weitzner, Xiaozhen Hu, Yumiko Adachi, William R. Schief, Roland L. Dunbrack Jr.
## Manual
The full RAbD manual can be found here: https://www.rosettacommons.org/docs/latest/application_documentation/antibody/RosettaAntibodyDesign
# Overview
__RosettaAntibodyDesign (RAbD)__ is a generalized framework for the design of antibodies, in which a user can easily tailor the run to their project needs. __The algorithm is meant to sample the diverse sequence, structure, and binding space of an antibody-antigen complex.__ An app is available, and all components can be used within RosettaScripts for easy scripting of antibody design and incorporation into other Rosetta protocols.
The framework is based on rigorous bioinformatic analysis and rooted very much on our [recent clustering](https://www.ncbi.nlm.nih.gov/pubmed/21035459) of antibody CDR regions. It uses the __North/Dunbrack CDR definition__ as outlined in the North/Dunbrack clustering paper. A new clustering paper will be out in the next year, and this new analysis will be incorporated into RAbD.
The supplemental methods section of the published paper has all details of the RosettaAntibodyDesign method. This manual serves to get you started running RAbD in typical use fashions.
# Algorithm
Broadly, the RAbD protocol consists of alternating outer and inner Monte Carlo cycles. Each outer cycle consists of randomly choosing a CDR (L1, L2, etc.) from those CDRs set to design, randomly choosing a cluster and then a structure from that cluster from the database according to the input instructions, and optionally grafting that CDR's structure onto the antibody framework in place of the existing CDR (__GraftDesign__). The program then performs N rounds of the inner cycle, consisting of sequence design (__SeqDesign__) using cluster-based sequence profiles and structural constraints, energy minimization, and optional docking. Each inner cycle structurally optimizes the backbone and repacks side chains of the CDR chosen in the outer cycle as well as optional neighbors in order to optimize interactions of the CDR with the antigen and other CDRs.
__Backbone dihedral angle (CircularHarmonic) constraints__ derived from the cluster data are applied to each CDR to limit deleterious structural perturbations. Amino acid changes are typically sampled from __profiles derived for each CDR cluster in PyIgClassify__. Conservative amino acid substitutions (according to the BLOSUM62 substitution matrix) may be performed when too few sequences are available to produce a profile (e.g., for H3). After each inner cycle is completed, the new sequence and structure are accepted according to the Metropolis Monte Carlo criterion. After N rounds within the inner cycle, the program returns to the outer cycle, at which point the energy of the resulting design is compared to the previous design in the outer cycle. The new design is accepted or rejected according to the Monte Carlo criterion.
If optimizing the antibody-antigen orientation during the design (dock), SiteConstraints are automatically used to keep the CDRs (paratope) facing the antigen surface. These are termed __ParatopeSiteConstraints__. Optionally, one can enable constraints that keep the paratope of the antibody around a target epitope (antigen binding site). These are called __ParatopeEpitopeSiteConstraints__ as the constraints are between the paratope and the epitope. The epitope is automatically determined as the interface residues around the paratope on input into the program, however, any residue(s) can be set as the epitope to limit unwanted movement and sampling of the antibody. See the examples and options below.
More detail on the algorithm can be found in the published paper.
# General Setup and Inputs
1. Antibody Design Database
This app requires the Rosetta Antibody Design Database. A database of antibodies from the original North Clustering paper is included in Rosetta and is used as the default . An updated database (which is currently updated bi-yearly) can be downloaded here: <http://dunbrack2.fccc.edu/PyIgClassify/>.
For C++, It should be placed in `Rosetta/main/database/sampling/antibodies/`. For PyRosetta, use the cmd-line option `antibody_database` and set it to the full path of the downloaded database within the `init()` function as you have done in the past. It is recommended to use this up-to-date database for production runs. For this tutorial, we will use the database within Rosetta.
2. Starting Structure
The protocol begins with the three-dimensional structure of an antibody-antigen complex. Designs should start with an antibody bound to a target antigen (however optimizing just the antibody without the complex is also possible). Camelid antibodies are fully supported. This structure may be an experimental structure of an existing antibody in complex with its antigen, a predicted structure of an existing antibody docked computationally to its antigen, or even the best scoring result of low-resolution docking a large number of unrelated antibodies to a desired epitope on the structure of a target antigen as a prelude to de novo design.
The program CAN computationally design an antibody to anywhere on the target protein, but it is recommended to place the antibody at the target epitope. It is beyond the scope of this program to determine potential epitopes for binding, however servers and programs exist to predict these. Automatic SiteConstraints can be used to further limit the design to target regions.
3. Model Numbering and Light Chain identification
The input PDB file must be renumbered to the AHo Scheme and the light chain gene must be identified. This can be done through the [PyIgClassify Server](http://dunbrack2.fccc.edu/pyigclassify/).
On input into the program, Rosetta assigns our CDR clusters using the same methodology as PyIgClassify. The RosettaAntibodyDesign protocol is then driven by a set of command-line options and a set of design instructions provided as an input file that controls which CDR(s) are designed and how. Details and example command lines and instruction files are provided below.
The gene of the light chain should always be set on the command-line using the option `-light_chain`, these are either lamda or kappa. PyIgClassify will identify the gene of the light chain.
For this tutorial, the starting antibody is renumbered for you.
4. Notes for Tutorial Shortening
Always set the option, `-outer_cycle_rounds` to 5 in order to run these examples quickly. The default is 25. We include this in our common options file that is read in by Rosetta at the start. We will only be outputting a single structure, but typical use of the protocol is with default settings of `-outer_cycle_rounds` and an `nstruct` of at least 1000, with 5000-10000 recommended for jobs that are doing a lot of grafting. For De-novo design runs, one would want to go even higher. Note that the Docking stage increases runtime significantly as well.
The total number of rounds is outer_cycle_rounds * nstruct.
5. General Notes
setenv PATH ${PATH}:${HOME}/rosetta_workshop/rosetta/main/source/tools
We will be using JSON output of the scorefile, as this is much easier to work with in python and pandas.
We use the option `-scorefile_format json`
All of our common options for the tutorial are in the common file that you will copy to your working directory.
Rosetta/PyRosetta will look for this file in your working directory or your home folder in the directory `$HOME/.rosetta/flags`.
See this page for more info on using rosetta with custom config files: <https://www.rosettacommons.org/docs/latest/rosetta_basics/running-rosetta-with-options#common-options-and-default-user-configuration>
All tutorials have generated output in `outputs/rabd` and their approximate time to finish on a single (core i7) processor.
```
# Notebook setup
import sys
# Only install/configure the PyRosetta Colab helper when actually running in Colab.
if 'google.colab' in sys.modules:
# IPython shell magic (not plain Python): install the helper package
!pip install pyrosettacolabsetup
import pyrosettacolabsetup
pyrosettacolabsetup.setup()
print ("Notebook is set for PyRosetta use in Colab. Have fun!")
```
**Make sure you are in the directory with the pdb files:**
`cd google_drive/My\ Drive/student-notebooks/`
```
from typing import *
import pandas
from pathlib import Path
import json
import re
#Functions we will be using. I like to collect any extra functions at the top of my notebook.
def load_json_scorefile(file_path: Path, sort_by: str = "dG_separated") -> pandas.DataFrame:
    """
    Read a Rosetta JSON scorefile (one JSON object per line) into a DataFrame.

    :param file_path: Path to the scorefile written with `-scorefile_format json`.
    :param sort_by: Score term to sort on, ascending (default: dG_separated).
    :return: DataFrame with one row per decoy, NaN values correctly parsed.
    """
    decoys = []
    # Use a context manager so the file handle is always closed (the original
    # left it open for the garbage collector).
    with open(file_path, 'r') as handle:
        for line in handle:
            # Rosetta emits bare "nan" tokens, which are not valid JSON;
            # "NaN" is accepted by Python's json parser.
            decoys.append(json.loads(line.replace("nan", "NaN")))
    local_df = pandas.DataFrame.from_dict(decoys)
    local_df = local_df.infer_objects()
    return local_df.sort_values(sort_by, ascending=True)
def drop_cluster_columns(local_df: pandas.DataFrame, keep_cdrs: List[str]=None) -> pandas.DataFrame:
    """
    Remove the cdr_cluster columns that RAbD writes into scorefiles.

    Cluster columns whose name matches any pattern in keep_cdrs are retained;
    all non-cluster columns are always kept. Returns a new DataFrame.
    """
    def _should_keep(column_name: str) -> bool:
        # Non-cluster columns are never dropped.
        if not re.search("cdr_cluster", column_name):
            return True
        # A cluster column survives only if it matches a requested CDR.
        if keep_cdrs:
            return any(re.search(cdr, column_name) for cdr in keep_cdrs)
        return False

    doomed = [name for name in local_df.columns if not _should_keep(name)]
    return local_df.drop(columns=doomed)
```
## Imports
```
#Python
from pyrosetta import *
from pyrosetta.rosetta import *
from pyrosetta.teaching import *
import os
#Core Includes
from rosetta.protocols.rosetta_scripts import *
from rosetta.protocols.antibody import *
from rosetta.protocols.antibody.design import *
from rosetta.utility import *
```
## Initialization
Since we are sharing the working directory with all other notebooks, instead of using the common-configuration we spoke about in the introduction, we will be using the flags file located in the inputs directory.
```
# Initialize PyRosetta, skipping user config files and reading the tutorial flags file.
init('-no_fconfig @inputs/rabd/common')
#Import a pose
pose = pose_from_pdb("inputs/rabd/my_ab.pdb")
# Keep an untouched copy so later tutorials can restart from the input structure.
original_pose = pose.clone()
```
# Tutorial
## Tutorial A: General Design
In many of these examples, we will use the xml interface to PyRosetta for simplicity with the AntibodyDesignMover - which is the actual C++ application as a mover. <https://www.rosettacommons.org/docs/latest/scripting_documentation/RosettaScripts/Movers/movers_pages/antibodies/AntibodyDesignMover>
Lets copy the files we need first:
cp ../inputs/rabd/color_cdrs.pml .
cp ../inputs/rabd/rabd.xml .
You are starting design on a new antibody that is not bound to the antigen in the crystal. This is difficult and risky, but we review how one could go about this anyway. We start by selecting a framework. Here, we use the trastuzumab framework as it expresses well, is thermodynamically stable with a Tm of 69.5 degrees, and has been shown repeatedly that it can tolerate CDRs of different sequence and structure. Note that the energy of the complex is high as we are starting from a manual placement of the antibody to antigen. If we relax the structure too much, we will fall into an energy well that is hard to escape without significant sampling.
We are using an arbitrary protein at an arbitrary site for design. The PDB of our target is 1qaw. 1qaw is an oligomer of the TRP RNA-Binding Attenuation Protein from Bacillus Stearothermophilus. It is usually a monomer/dimer, but at its multimeric interface is a tryptophan residue by itself.
It's a beautiful protein, with a cool mechanism. We will attempt to build an antibody to bind to two subunits to stabilize the dimeric state of the complex in the absence of TRP. Note that denovo design currently takes a large amount of processing power. Each tutorial below is more complex than the one before it. The examples we have for this tutorial are short runs to show HOW it can be done, but more outer_cycle_rounds and nstruct would produce far better models than the ones you will see here - as we will need to sample the relative orientation of the antibody-antigen complex through docking, the CDR clusters and lengths, the internal backbone degrees of freedom of the CDRs, as well as the sequence of the CDRs and possibly the framework. As you can tell, just the sampling problem alone is difficult. However, this will give you a basis for using RAbD on your own.
### Tut A1. Sequence Design
Using the application is as simple as setting the `-seq_design_cdrs` option.
This simply designs the CDRs of the heavy chain using cdr profiles if they exist for those clusters during flexible-backbone design. If the clusters do not exist (as is the case for H3 at the moment), we use conservative design by default. Note that InterfaceAnalyzer is run on each output decoy in the RAbD mover. Note that you can also set `light_chain` on the command line if you are only working on a single PDB through the rosetta run.
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3" light_chain="kappa"/>
This will take about a minute (50 seconds on my laptop). Output structures and scores are in `outputs/rabd` if you wish to copy them over - these include 4 more structures.
```
# Build the AntibodyDesignMover from its RosettaScripts XML definition.
rabd = XmlObjects.static_get_mover('<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3" light_chain="kappa"/>')
# Skip the (slow) design run when the DEBUG environment variable is set.
if not os.getenv("DEBUG"):
    rabd.apply(pose)
```
Now, for the sake of learning how to do this - how would we do this in code instead of the XML - we just need to use setters.
```
# Start again from the untouched input structure.
pose = original_pose.clone()
ab_info = AntibodyInfo(pose) #We don't need to supply scheme and definition since we do so in the flags file.
rabd2 = AntibodyDesignMover(ab_info)
# Build a vector of CDR name enums (l1, l3) selecting which CDRs to sequence-design.
cdrs = vector1_protocols_antibody_CDRNameEnum()
cdrs.append(l1)
cdrs.append(l3)
rabd2.set_seq_design_cdrs(cdrs)
rabd2.set_light_chain("kappa")
# Skip the (slow) design run when the DEBUG environment variable is set.
if not os.getenv("DEBUG"):
    rabd2.apply(pose)
```
Score the input pose using the InterfaceAnalyzerMover
```
from rosetta.protocols.analysis import InterfaceAnalyzerMover

if not os.getenv("DEBUG"):
    # Run interface analysis on both the designed pose and the original input
    # so their dG_separated terms can be compared directly.
    iam = InterfaceAnalyzerMover("LH_ABCDEFGIJKZ")
    iam.set_pack_separated(True)
    iam.apply(pose)
    iam.apply(original_pose)
    dg_term = "dG_separated"
    # BUG FIX: score terms live in the .scores map; the original indexed the
    # Pose itself (original_pose[dg_term]), which raises at runtime.
    print("dG Diff:", pose.scores[dg_term] - original_pose.scores[dg_term])
```
Has the energy gone down after our sequence design? The `dG_separated` is calculated by scoring the complex, separating the antigen from the antibody, repacking side-chains at the interface, and then taking the difference in score - i.e. the dG.
Lets take a look at scores from a previous run of 5 antibodies. The scorefiles are in json format, so it will be easy to turn them into pandas Dataframes and do some cool stuff. We'll do this often as the runtimes increase for our protocol - but all the scores in them can be accessed using the pose.scores attribute (which is PyRosetta-specific functionality.)
Are any of these better than our input pose?
```
# Load the pre-generated scorefile, sorted by dG_separated (ascending) by default.
df = load_json_scorefile("expected_outputs/rabd/tutA1_score.sc")
# Drop cluster columns except those for the designed CDRs (L1, L3).
df = drop_cluster_columns(df, keep_cdrs=["L1", "L3"])
df
```
### Tut A2. Graft Design
Now we will be enabling graft design AND sequence design on L1 and L3 loops. With an nstruct (n decoys) of 5, we are doing 25 design trials total - IE 25 actual grafts.
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3" graft_design_cdrs="L1,L3">
This will take about 2-3 times as long as sequence design, as grafting a non-breaking loop takes time. This was 738 seconds on my laptop to generate 5. Here, you will generate 1 at about 150 seconds. Output structures and scores are in ../expected_outputs/rabd.
Typically, we require a much higher `-outer_cycle_rounds` and number of decoys to see anything significant. Did this improve energies in your single antibody? How about our pre-generated ones? Load and take a look at the scorefile as a pandas DataFrame as we did above (`expected_outputs/rabd/tutA2_score.sc`).
```
# YOUR CODE HERE
# Hint: load "expected_outputs/rabd/tutA2_score.sc" with load_json_scorefile,
# store it as df_a2 (used in the merge cell below), and trim it with
# drop_cluster_columns as in Tutorial A1.
raise NotImplementedError()
```
Lets merge these dataframes, sort by dG_separated, and see if any of our graft-design models did better.
```
# Combine the A1 and A2 score tables and rank all decoys by interface energy.
df_tut_a12 = pandas.concat([df, df_a2], ignore_index=True).sort_values("dG_separated", ascending=True)
df_tut_a12
```
Take a look at the lowest (dG) scoring pose in pymol - do you see any difference in L1 and L3 loops there? Do they make better contact than what we had before?
Lets take a look in pymol.
pymol inputs/rabd/my_ab.pdb inputs/rabd/tutA2_*
@color_cdrs.pml
center full_epitope
How different are the L1 and L3 loops? Have any changed length?
Lets take a look at the clusters in our dataframe. Have they changed from the native?
```
# Print the CDR cluster identifiers assigned to the input pose for comparison
# with the designed models.
if not os.getenv("DEBUG"):
    print("L1", original_pose.scores["cdr_cluster_ID_L1"])
    print("L3", original_pose.scores["cdr_cluster_ID_L3"])
```
### Tut A3. Basic De-novo run
Here, we want to do a denovo-run (without docking), starting with random CDRs grafted in - instead of whatever we have in the antibody to start with (only for the CDRs that are actually undergoing graft-design). This is useful, as we start the design with very high energy and work our way down. Note that since this is an entirely new interface for our model protein, this interface is already at a very high energy - and so this is less needed, but it should be noted how to do this. (139 seconds on my laptop). Do this below as you have done in other tutorials - either through code or XML.
<AntibodyDesignMover name="RAbD" graft_design_cdrs="L1,L3" seq_design_cdrs="L1,L3"
random_start="1"/>
```
if not os.getenv("DEBUG"):
    # YOUR CODE HERE
    # Hint: build the AntibodyDesignMover described by the XML above
    # (graft_design_cdrs/seq_design_cdrs L1,L3 with random_start) and apply it.
    raise NotImplementedError()
```
Would starting from a random CDR help anywhere? Perhaps if you want an entirely new cluster or length to break a patent or remove some off target effects? We will use it below to start de novo design with docking.
### Tut A4. RAbD Framework Components
This tutorial will give you some experience with an antibody design protocol using the `RosettaAntibodyDesign` components. We will take the light chain CDRs from a malaria antibody and graft them into our antibody. In this tutorial we are interested in stabilizing the grafted CDRs in relation to the whole antibody, instead of interface design to an antigen.
We will graft the CDRs in, minimize the structure with CDR dihedral constraints (that use the CDR clusters) so as not to perturb the CDRs too much, and then design the framework around the CDRs while designing the CDRs and neighbors. The result should be mutations that better accommodate our new CDRs. This can be useful for humanizing potential antibodies or framework switching, where we want the binding properties of certain CDRs, but the stability or immunological profile of a different framework.
We are using an XML here for simplicity - all components are available in PyRosetta, but harder to setup.
#### 1. Copy the Files
cp ../inputs/rabd/ab_design_components.xml .
cp ../inputs/rabd/malaria_cdrs.pdb .
Take a look at the xml.
- We are using the `AntibodyCDRGrafter` to do the grafting of our CDRs.
- We then add constraints using `CDRDihedralConstraintMovers` for each CDR, which use the CDR clusters determined by RosettaAntibody to keep from perturbing the CDRs too much.
- Finally, we do a round of pack/min/pack using the `RestrictToCDRsAndNeighborsOperation` and the `CDRResidueSelector`. This task operation controls what we pack and design. It first limits packing and design to only the CDRs and its neighbors. By specifying the `design_framework=1` option we allow the neighbor framework residues to design, while the CDRs and antigen neighbors will only repack. If we wanted to disable antigen repacking, we would pass the _DisableAntibodyRegionOperation_ task operation. Using this, we can specify any antibody region as `antibody_region`, `cdr_region`, or `antigen_region` and we can disable just design or both packing and design.
These task operations allow us to chisel exactly what we want to design in antibody, sans a residue-specific resfile (though we could combine these with one of them!). All of these tools are available in-code. If you've done the design workshop, you will know how to use them here. Checkout `rosetta.protocols.antibody.task_operations` for a list of them.
Finally, we use the new SimpleMetric system to obtain our final sequence of the CDRs to compare to our native antibody as well as pymol selections of our CDRs - which you have been introduced to in the previous tutorial.
##### PyRosetta Locations
`rosetta.protocols.antibody.task_operations`
`rosetta.protocols.antibody.constraints`
`rosetta.protocols.antibody.residue_selectors`
##### Documentation
- <https://www.rosettacommons.org/docs/latest/scripting_documentation/RosettaScripts/SimpleMetrics/SimpleMetrics>
- More Documentation is available here:
- <https://www.rosettacommons.org/docs/latest/scripting_documentation/RosettaScripts/RosettaScripts>
- <https://www.rosettacommons.org/docs/latest/scripting_documentation/RosettaScripts/TaskOperations/TaskOperations-RosettaScripts#antibody-and-cdr-specific-operations>
- <https://www.rosettacommons.org/docs/latest/scripting_documentation/RosettaScripts/Movers/Movers-RosettaScripts#antibody-modeling-and-design-movers>
#### 2. Run the protocol or copy the output (357 seconds).
#### 3. Look at the score file as you have before. Are the sequences different between what we started with? How about the interaction energies?
```
import shutil

# Stage the malaria CDR structure in the working directory for the XML protocol.
# shutil.copy is portable and raises on failure, unlike the original
# os.system('cp ...') whose return code was silently ignored.
shutil.copy("inputs/rabd/malaria_cdrs.pdb", ".")

if not os.getenv("DEBUG"):
    # Start from the untouched input and run the component-based design XML.
    pose = original_pose.clone()
    parser = RosettaScriptsParser()
    protocol = parser.generate_mover_and_apply_to_pose(pose, "inputs/rabd/ab_design_components.xml")
    protocol.apply(pose)
```
### Challenge: Custom Design Protocol in code
If you want a challenge - try to set these up in-code without RosettaScripts. It can be tricky - which is why I made PyRosetta finally work optionally with RosettaScripts. Its good to know how to use both.
```
# YOUR CODE HERE
# Hint: recreate the steps of ab_design_components.xml in code — grafter,
# dihedral constraints, then pack/min/pack with the antibody task operations.
raise NotImplementedError()
```
## Tutorial B: Optimizing Interface Energy (opt-dG)
### Tut B1. Optimizing dG
Here, we want to set the protocol to optimize the interface energy during Monte Carlo instead of total energy. The interface energy is calculated by the InterfaceAnalyzerMover through a specialized MonteCarlo object called `MonteCarloInterface`. This is useful to improve binding energy and generally results in better interface energies . Resulting models should still be pruned for high total energy. This was benchmarked in the paper, and has been used for real-life designs to the HIV epitope (165 seconds for 1 decoy).
Use the provided XML or set this up through code.
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3" graft_design_cdrs="L1,L3" mc_optimize_dG="1" />
```
if not os.getenv("DEBUG"):
    # YOUR CODE HERE
    # Hint: as in Tutorial A, build the mover from the XML above (mc_optimize_dG
    # enabled) or use the equivalent setter in code, then apply it to the pose.
    raise NotImplementedError()
```
Load the scorefile with nstruct=5 from `expected_outputs/rabd/tutB1_score.sc`
Compare this data to tutorial A2. Are the interface energies better? Has the Shape Complementarity (sc score) improved?
### Tut B2. Optimizing Interface Energy and Total Score (opt-dG and opt-E)
Here, we want to set the protocol to optimize the interface energy during Monte Carlo, but we want to add some total energy to the weight. Because the overall numbers of total energy will dominate the overall numbers, we only add a small weight for total energy. This has not been fully benchmarked, but if your models have very bad total energy when using opt-dG - consider using it. (178 sec for 1 nstruct)
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3" graft_design_cdrs="L1,L3" mc_optimize_dG="1" mc_total_weight="0.01" mc_interface_weight="0.99" light_chain="kappa"/>
```
if not os.getenv("DEBUG"):
    # YOUR CODE HERE
    # Hint: same as Tut B1, but also set the mc_total_weight/mc_interface_weight
    # options shown in the XML above.
    raise NotImplementedError()
```
Use the scorefile from an nstruct=5 run to compare total energies (`total_score`) of this run vs the one right before it located at `expected_outputs/rabd/tutB2_score.sc`. Are the total scores better?
## Tutorial C: Towards DeNovo Design: Integrated Dock/Design
This tutorial takes a long time to run as docking is fairly slow - even with the optimizations that are part of RAbD. PLEASE USE THE PREGENERATED OUTPUT. The top 10 designs from each tutorial and associated scorefiles of a 1000 nstruct cluster run are in the output directory. Note that we are starting these tutorials with a pre-relaxed structure in order to get more reliable rosetta energies. Since we are running a large nstruct, we will escape the local energy well that this leads us into.
### Tut C1. RosettaDocking
In this example, we use integrated RosettaDock (with sequence design during the high-res step) to sample the antibody-antigen orientation, but we don't care where the antibody binds to the antigen. Just that it binds. IE - No Constraints. The RAbD protocol always has at least Paratope SiteConstraints enabled to make sure any docking is contained to the paratope (like most good docking programs).
This takes too long to run, so PLEASE USE THE OUTPUT GENERATED FOR YOU. We will use opt-dG here and for these tutorials, we will be sequence-designing all cdrs to begin to create a better interface. Note that sequence design happens whereever packing occurs - Including during high-resolution docking.
<AntibodyDesignMover name="RAbD" mc_optimize_dG="1" do_dock="1" seq_design_cdrs="L1,L2,L3,H1,H2,H3" graft_design_cdrs="L1,L2,L3,H1,H2" light_chain="kappa"/>
**Use pymol to load the files, and load tutC1_score from the expected_outputs directory as a pandas dataframe.**
pymol my_ab.pdb expected_outputs/rabd/top10_C1/*
@color_cdrs.pml
center full_epitope
Where is the antibody in the resulting designs? Are the interfaces restricted to the Paratope? Has the epitope moved relative to the starting interface?
### Tut C2. Auto Epitope Constraints
Allow Dock-Design, incorporating auto-generated SiteConstraints to keep the antibody around the starting interface residues. These residues are determined by being within 6A to the CDR residues (This interface distance can be customized). Again, these results are provided for you.
<AntibodyDesignMover name="RAbD" mc_optimize_dG="1" do_dock="1" use_epitope_csts="1"
seq_design_cdrs="L1,L2,L3,H1,H2,H3" graft_design_cdrs="L1,L2,L3,H1,H2" light_chain="kappa"/>
**Use pymol to load the files and checkout the scores in `expected_outputs/rabd/tutC2_score.sc` as before.**
pymol my_ab.pdb expected_outputs/rabd/top10_C2/*
@color_cdrs.pml
center full_epitope
How do these compare with with the previous tutorial? Are the antibodies closer to the starting interface? Are the scores better?
### Tut C3. Specific Residue Epitope Constraints
Allow Dock-Design, as above, but specify the Epitope Residues and Paratope CDRs to guide the dock/design to have these interact.
For now, we are more focused on the light chain. We could do this as a two-stage process, where we first optimize positioning and CDRs of the light chain and then the heavy chain or simply add heavy chain CDRs to the paratope CDRs option.
<AntibodyDesignMover name="RAbD" mc_optimize_dG="1" do_dock="1" use_epitope_csts="1"
epitope_residues="38J,52J,34K,37K" paratope_cdrs="L1,L3"
seq_design_cdrs="L1,L2,L3,H1,H2,H3" graft_design_cdrs="L1,L2,L3,H1,H2" light_chain="kappa"/>
**Again, load these into Pymol and take a look at the scorefile in a dataframe.**
pymol my_ab.pdb expected_outputs/rabd/top10_C3/*
@color_cdrs.pml
center full_epitope
Now that we have specified where we want the interface to be and are additionally designing more CDRS, how do the enegies compare? Are we starting to get a decent interface with the lowest energy structure?
How do these compare with the previous runs?
## Tutorial D: Advanced Settings and CDR Instruction File Customization
Once again, all output files are in `expected_outputs`. Please use these if you want - as many of these take around 10 minutes to run.
### Tut D1. CDR Instruction File
More complicated design runs can be created by using the Antibody Design Instruction file. This file allows complete customization of the design run. See below for a review of the syntax of the file and possible customization. An instruction file is provided where we use conservative design on L1 and graft in L1, H2, and H1 CDRs at a longer length to attempt to create a larger interface area. More info on instruction file syntax can be found at the end of this tutorial. (150 seconds on my laptop for nstruct 1)
cp ../inputs/my_instruction_file.txt .
cp ../inputs/default_cdr_instructions.txt .
Take a look at the default CDR instructions. These are loaded by default into Rosetta. There is syntax examples at the end of the file. Run the XML or attempt to use it in-code.
<AntibodyDesignMover name="RAbD" instruction_file="my_instruction_file.txt"
seq_design_cdrs="L1,L3,H1,H2,H3" graft_design_cdrs="L1,H2,H1" random_start="1" light_chain="kappa"/>
```
if not os.getenv("DEBUG"):
    # YOUR CODE HERE
    # Hint: build the mover from the XML above, which supplies
    # instruction_file="my_instruction_file.txt", and apply it to the pose.
    raise NotImplementedError()
```
### Tut D2. Disallow AAs and the Resfile
Here, we will disallow ANY sequence design into Proline residues and Cysteine residues, while giving a resfile to further LIMIT design and packing as specific positions. These can be given as 3 or 1 letter codes and mixed codes such as PRO and C are accepted. Note that the resfile does NOT turn any residues ON, it is simply used to optionally LIMIT design residue types and design and packing positions.
Resfile syntax can be found here: [https://www.rosettacommons.org/docs/wiki/rosetta_basics/file_types/resfiles] Note that specifying a resfile and dissalowing aa are only currently available as cmd-line options that are read by RAbD.
Runtime is less than a minute for nstruct 1.
cp ../inputs/rabd/my_resfile.resfile .
Take a look at the resfile. Can you describe what it is we are doing with it?
Unfortunately, at the moment, resfile setting is only available as a cmd-line option that needs to be set in the `init()` function as `-resfile my_resfile.resfile`
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3,H1,H2,H3" light_chain="kappa"/>
### Tut D3. Mintype
Here, we will change the mintype to relax. This mintype enables Flexible-Backbone design as we have seen in previous workshops. Our default is to use min/pack cycles, but relax typically works better. However, it also takes considerably more time! This tutorial takes about 339 seconds for one struct!
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3,H1,H3" mintype="relax" light_chain="kappa"/>
```
if not os.getenv("DEBUG"):
    # YOUR CODE HERE
    # Hint: as above, but set mintype="relax" — see the XML in this section.
    raise NotImplementedError()
```
### Tut D4. Framework Design
Finally, we want to allow the framework residues AROUND the CDRs we will be designing and any interacting antigen residues to design as well here. In addition, we will disable conservative framework design as we want something funky (this is not typically recommended and is used here to indicate what you CAN do). Note that we will also design the interface of the antigen using the `-design_antigen` option. This can be useful for vaccine design. Note that these design options are cmd-line only options currently (but will be available in a later version of Rosetta). Approx 900 second runtime.
antibody_designer.linuxgccrelease -s my_ab.pdb -seq_design_cdrs L1 L3 H1 H3 \
-light_chain kappa -resfile my_resfile.resfile -disallow_aa PRO CYS \
-mintype relax -design_antigen -design_framework \
-conservative_framework_design false -nstruct 1 -out:prefix tutD4_
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3,H1,H3" mintype="relax" light_chain="kappa"/>
### Tut D5. H3 Stem, kT, and Sequence variablility.
Finally, we want increased variability for our sequence designs. So, we will increase number of sampling rounds for our lovely cluster profiles using the `-seq_design_profile_samples` option, increase kT, and allow H3 stem design.
We will enable H3 Stem design here, which can cause a flipping of the H3 stem type from bulged to non-bulged and vice-versa. Typically, if you do this, you may want to run loop modeling on the top designs to confirm the H3 structure remains in-tact. Note that once again, these sequence-design specific options must be set on the cmd-line.
Description of the `seq_design_profile_samples` option (default 1): "If designing using profiles, this is the number of times the profile is sampled each time packing done. Increase this number to increase variability of designs - especially if not using relax as the mintype."
This tutorial takes approx 450 seconds.
antibody_designer.linuxgccrelease -s my_ab.pdb -seq_design_cdrs L1 L2 H3 \
-graft_design_cdrs L1 L2 -light_chain kappa -design_H3_stem -inner_kt 2.0 \
-outer_kt 2.0 -seq_design_profile_samples 5 -nstruct 5 -out:prefix tutD5_
<AntibodyDesignMover name="RAbD" seq_design_cdrs="L1,L3,H1,H3" mintype="relax"
inner_kt="2.0" outer_kt="2.0"/>
How different is the sequence of L1,L2, and H3 from our starting antibody?
__You should now be ready to explore and use RosettaAntibodyDesign on your own. Congrats! Thanks for going through this tutorial!__
The full reference manual can be found here: https://www.rosettacommons.org/docs/latest/application_documentation/antibody/RosettaAntibodyDesign#antibody-design-cdr-instruction-file
<!--NAVIGATION-->
< [Side Chain Conformations and Dunbrack Energies](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.01-Side-Chain-Conformations-and-Dunbrack-Energies.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Protein Design with a Resfile and FastRelax](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.03-Design-with-a-resfile-and-relax.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/06.02-Packing-design-and-regional-relax.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
<!--NAVIGATION-->
< [RosettaAntibody Framework](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.01-RosettaAntibody-Framework-and-SimpleMetrics.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [RosettaCarbohydrates](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.00-RosettaCarbohydrates-Working-with-Glycans.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.02-RosettaAntibodyDesign-RAbD.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
| github_jupyter |
# Image augmentation strategies:
## Author: Dr. Rahul Remanan
### (CEO and Chief Imagination Officer, [Moad Computer](https://www.moad.computer))
### Demo data: [Kaggle Cats Vs. Dogs Redux](https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition)
## Part 01 - [Using Keras pre-processing:](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)
### Why perform image augmentation?
In order to make the most out of our limited training image data, "augmenting" these images via a number of random transformations is helpful. This process feeds the data to the neural network model so that it never sees the exact same picture twice. The key advantage of implementing such an augmentation strategy is that it helps prevent overfitting and promotes better generalization by the trained model.
In Keras this can be done via the keras.preprocessing.image.ImageDataGenerator class. This class allows you to:
* configure random transformations and normalization operations to be done on your image data during training
* instantiate generators of augmented image batches (and their labels) via .flow(data, labels) or .flow_from_directory(directory). These generators can then be used with the Keras model methods that accept data generators as inputs, fit_generator, evaluate_generator and predict_generator.
### Example implementation of image augmentation in Keras:
```
try:
    import warnings
    warnings.filterwarnings('ignore')
    from keras.preprocessing.image import ImageDataGenerator
except ImportError:
    # Narrowed from a bare except: only a missing Keras install is expected here;
    # other errors should surface instead of being silently swallowed.
    print ("Please install Keras (cmd: $sudo pip3 install keras) to run this notebook ...")

# Random-augmentation configuration used throughout this section
# (rotations, shifts, shear, zoom, flips, rescaling to [0, 1]).
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             rescale=1./255,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')
```
### Image Augmentation in Keras -- Quick start:
For more information, see the [documentation](https://keras.io/preprocessing/image/).
* rotation_range is a value in degrees (0-180), a range within which to randomly rotate pictures
* width_shift and height_shift are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally
* rescale is a value by which we will multiply the data before any other processing. Our original images consist in RGB coefficients in the 0-255, but such values would be too high for our models to process (given a typical learning rate), so we target values between 0 and 1 instead by scaling with a 1/255. factor.
* shear_range is for randomly applying [shearing transformations](https://en.wikipedia.org/wiki/Shear_mapping)
* zoom_range is for randomly zooming inside pictures
* horizontal_flip is for randomly flipping half of the images horizontally --relevant when there are no assumptions of horizontal asymmetry (e.g. real-world pictures).
* fill_mode is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.
```
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

# Configure the random augmentation transforms (see the parameter notes above).
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

img = load_img('./train/cats/cat.1.jpg')  # PIL image
x = img_to_array(img)                     # Numpy array for the image
x = x.reshape((1,) + x.shape)             # add a leading batch axis

# Generate augmented variants and save them to ./preview/.
# The loop must be broken manually — .flow() yields batches indefinitely.
i = 0
for batch in datagen.flow(x, batch_size=1,
                          save_to_dir='./preview/', save_prefix='cat', save_format='jpeg'):
    i += 1
    if i > 20:
        break
```
### Keras pre-processing overview:
* The load_img uses Pillow, a complete fork of PIL. This creates a PIL image.
* The img_to_array creates a Numpy array with shape (3, 150, 150).
* The reshape command creates a Numpy array with shape (1, 3, 150, 150).
* The .flow() command below generates batches of randomly transformed images and saves the results to the `../data/cats_dogs/preview/` directory
* The break function prevents the loop from iterating indefinitely.
```
import os
import matplotlib.image as mpl_image
import matplotlib.pyplot as plt
from IPython.display import Image as PyImage
def load_images(folder):
    """Read every file in *folder* with matplotlib and return the image arrays."""
    paths = (os.path.join(folder, name) for name in os.listdir(folder))
    decoded = (mpl_image.imread(path) for path in paths)
    # Keep the original None guard, skipping any unreadable entries.
    return [image for image in decoded if image is not None]
def stack_plot(stack_size, folder):
    """
    Show up to stack_size*stack_size images from *folder* in a square grid.

    :param stack_size: Number of rows and columns in the grid.
    :param folder: Directory whose image files are plotted.
    """
    rows, cols = stack_size, stack_size
    fig, ax = plt.subplots(rows, cols, figsize=[24, 24])
    i = 0
    try:
        for filename in os.listdir(folder):
            img = mpl_image.imread(os.path.join(folder, filename))
            ax[int(i / rows), int(i % rows)].imshow(img)
            ax[int(i / rows), int(i % rows)].axis('off')
            i += 1
    except Exception as error:
        # Narrowed from a bare except (which also traps KeyboardInterrupt) and
        # report the cause; an IndexError here just means the folder holds more
        # images than the grid has cells.
        print ("Failed to add an image to the stacked plot ...", error)
    plt.show()
```
### Plotting augmented images:
* Using matplotlib library.
* The load_images function return a Numpy array of all the images in the folder specified in the function.
* The stack_plot generates a stack of images contained inside a specific folder of size: stack_size*stack_size
```
stack_plot(5, './preview/')
```
## Part 02 - Implementing a convolutional neural network that uses image augmentation:
### Importing dependent libraries:
```
try:
    from keras.preprocessing.image import ImageDataGenerator
    from keras.models import Sequential
    from keras.layers import Conv2D, MaxPooling2D
    from keras.layers import Activation, Dropout, Flatten, Dense
    from keras import backend as K
except ImportError:
    # Narrowed from a bare except: only a missing/broken Keras install is
    # expected here; unrelated errors should propagate.
    print ("Failed to load Keras modules. Verify if dependency requirements are satisfied ...")
```
* Importing preprocessing.image and models functions from Keras
* Importing layers function
* Importing keras backend
### Initialize some variables:
```
img_width, img_height = 150, 150
train_data_dir = './train/'
validation_data_dir = './validation/'
nb_train_samples = 20000
nb_validation_samples = 5000
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
```
* Using img_width, img_height variables for specifying the dimensions of images to be consumed by the neural network
* Initializing variables for location pointers to training data, validation data, train data sample size, validation data sample size, number of training epochs, number of images to be processed in each batch
* Adjusting the input shape of the tensor depending on whether the image RGB data format is channels-first or channels-last
### Build and compile a neural network:
* Building a neural network model using the Sequential format in Keras
* Compile the model using binary cross entropy as the loss function, RMSProp as the optimizer and accuracy as the evaluation metrics
```
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
```
### Configuring data generators to process and feed the data to the neural network:
```
train_datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
```
* The image augmentation configuration for training
```
test_datagen = ImageDataGenerator(rescale=1. / 255)
```
* Image augmentation configuration to be used for testing
* This generator uses only rescaling
### Creating train and validation generators:
```
train_generator = train_datagen.flow_from_directory(train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
```
### Creating a model fit generator function for training the neural network:
```
model.fit_generator(train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
```
### Saving model weights at the end of the training session:
```
model.save_weights('./model/first_try.h5')
```
## Part 03 - Improving classification accuracy of a neural network using transfer learning:
### Importing dependent libraries:
```
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
```
### Defining and initializing variables:
```
top_model_weights_path = './model/bottleneck_fc_model.h5'
train_data_dir = './train'
validation_data_dir = './validation'
bottleneck_train_path = './model/bottleneck_features_train.npy'
bottleneck_val_path = './model/bottleneck_features_validation.npy'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
```
### Specify the dimensions of images:
```
img_width, img_height = 150, 150
```
### Build the VGG16 network:
```
model = applications.VGG16(include_top=False, weights='imagenet', input_shape = (img_width, img_height,3))
```
### Define data generator:
```
datagen = ImageDataGenerator(rescale=1. / 255)
```
### Creating a function to save bottleneck features:
```
def save_train_bottlebeck_features(bottleneck_train_path=None):
    """Run the training images through the headless VGG16 model and save the
    resulting bottleneck features to *bottleneck_train_path* as a .npy file.

    Relies on the module-level ``datagen``, ``model``, ``train_data_dir``,
    ``img_width``, ``img_height``, ``nb_train_samples`` and ``batch_size``.
    """
    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width, img_height),
                                            batch_size=batch_size,
                                            class_mode="binary",
                                            shuffle=False)  # keep file order so labels can be reconstructed later
    bottleneck_features_train = model.predict_generator(generator, nb_train_samples // batch_size)
    # Bug fix: the original called np.save(open(path), ...); open() defaults to
    # read mode, which cannot write and raises FileNotFoundError for a new file.
    # np.save accepts the path directly and opens it for writing itself.
    np.save(bottleneck_train_path, bottleneck_features_train)
def save_validation_bottlebeck_features(bottleneck_val_path=None):
    """Run the validation images through the headless VGG16 model and save the
    resulting bottleneck features to *bottleneck_val_path* as a .npy file.

    Relies on the module-level ``datagen``, ``model``, ``validation_data_dir``,
    ``img_width``, ``img_height``, ``nb_validation_samples`` and ``batch_size``.
    """
    generator = datagen.flow_from_directory(validation_data_dir,
                                            target_size=(img_width, img_height),
                                            batch_size=batch_size,
                                            class_mode="binary",
                                            shuffle=False)  # keep file order so labels can be reconstructed later
    bottleneck_features_validation = model.predict_generator(generator, nb_validation_samples // batch_size)
    # Bug fix: the original called np.save(open(path), ...); open() defaults to
    # read mode, which cannot write and raises FileNotFoundError for a new file.
    # np.save accepts the path directly and opens it for writing itself.
    np.save(bottleneck_val_path, bottleneck_features_validation)
```
### Saving bottleneck features:
```
save_train_bottlebeck_features(bottleneck_train_path = bottleneck_train_path)
save_validation_bottlebeck_features(bottleneck_val_path = bottleneck_val_path)
```
### Creating a function to train the top model:
```
def train_top_model(save_path=None, bottleneck_train_path=None, bottleneck_val_path=None):
    """Train the small fully-connected classifier on precomputed bottleneck
    features and save its weights to *save_path*.

    The feature files are assumed to hold the cat samples first and the dog
    samples second, in equal halves, so the labels are rebuilt positionally.
    Uses the module-level ``nb_train_samples``, ``nb_validation_samples``,
    ``epochs`` and ``batch_size``.
    """
    top_model_weights_path = save_path
    train_data = np.load(bottleneck_train_path)
    validation_data = np.load(bottleneck_val_path)
    half_train = nb_train_samples // 2
    half_valid = nb_validation_samples // 2
    # First half of each feature file is class 0, second half class 1.
    train_labels = np.array([0] * half_train + [1] * half_train)
    validation_labels = np.array([0] * half_valid + [1] * half_valid)
    model = Sequential([
        Flatten(input_shape=train_data.shape[1:]),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(train_data, train_labels,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(validation_data, validation_labels))
    model.save_weights(top_model_weights_path)
```
### Initialize training session of the top model and save weights at the end of training:
```
train_top_model(save_path=top_model_weights_path, \
bottleneck_train_path = bottleneck_train_path, \
bottleneck_val_path = bottleneck_val_path)
```
### Fine tuning the model:
#### Load dependent libraries:
```
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
```
#### Specify dimensions of the images:
```
img_width, img_height = 150, 150
```
#### Load model weights:
```
weights_path = './model/vgg16_weights.h5'
top_model_weights_path = './model/bottleneck_fc_model.h5'
```
#### Initialize some variables:
```
train_data_dir = './train'
validation_data_dir = './validation'
nb_train_samples = 20000
nb_validation_samples = 5000
epochs = 50
batch_size = 16
checkpointer_savepath = './model/checkpointer.h5'
```
#### Build the VGG16 network:
```
model = Sequential()
model.add(applications.VGG16(weights='imagenet', include_top=False, input_shape = (img_width, img_height,3)))
print('Model loaded ...')
```
#### Build a classifier model to put on top of the VGG16 convolutional model:
```
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
```
#### Generate model summary:
```
model.summary()
from keras.utils import plot_model
import pydot
import graphviz # apt-get install -y graphviz libgraphviz-dev && pip3 install pydot graphviz
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
output_dir = './model'
plot_model(model, to_file= output_dir + '/model_top.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
#### Load model weights:
* It is necessary to start with a fully-trained classifier
* This includes the top classifier
* Initializing model weights from zero may not train the network successfully
```
top_model.load_weights(top_model_weights_path)
```
#### Add the top model on top of the VGG16 convolutional base:
```
model.add(top_model)
```
#### Generate summary with base VGG16 model:
```
model.summary()
output_dir = './model'
plot_model(model, to_file= output_dir + '/model_full.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
#### Freezing layers:
* Freeze the first 25 layers, up to the last conv block
* Weights become non-trainable and will not be updated
```
for layer in model.layers[:25]:
layer.trainable = False
```
#### Compile the model:
* With a SGD/momentum optimizer
* Very slow learning rate.
```
model.compile(loss='binary_crossentropy',
optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
```
#### Prepare data augmentation configuration:
```
train_datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
```
#### Create generator functions to handle data:
```
train_generator = train_datagen.flow_from_directory(train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
```
#### Implement a checkpointing mechanism:
```
from keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint(checkpointer_savepath,\
verbose=1,\
save_best_only=True)
```
#### Load saved model:
```
from keras.models import Model, load_model
load_from_checkpoint = True
load_from_config = False
load_model_weights = False
if load_from_checkpoint == True:
model = load_model(checkpointer_savepath)
elif load_from_config == True:
model = load_prediction_model(args)
model = load_prediction_model_weights(args)
elif load_model_weights == True:
try:
model = load_prediction_model_weights(args)
except:
print ("An exception has occurred, while loading model weights ...")
else:
model = model
```
#### Train the model:
```
model.fit_generator(train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size,
callbacks=[early_stopper, checkpointer])
model.output_shape[1:]
```
#### Save the model:
```
model.save_weights('./model/vgg16_tl.h5')
```
## Part 04 - [Using radial image transformation:](https://arxiv.org/abs/1708.04347)
Deep learning models have a large number of free parameters that must be estimated by efficient training of the models on a large number of training data samples to increase their generalization performance. In real-world applications, the data available to train these networks is often limited or imbalanced. Hojjat Salehinejad et al. propose a sampling method based on the radial transform in a polar coordinate system for image augmentation. This facilitates the training of deep learning models from limited source data. The pixel-wise transformation implemented here provides representations of the original image in the polar coordinate system by generating a new image from each pixel. This technique can generate radial-transformed images up to the number of pixels in the original image, increasing the diversity of poorly represented image classes. Their experiments show improved generalization performance when training deep convolutional neural networks on these radial-transformed images.
```
from skimage import data
from skimage import io
import numpy as np
import math
import matplotlib.pyplot as plt
def to_gray(img):
    """Convert an RGB image to grayscale by averaging its three channels.

    Parameters
    ----------
    img : numpy.ndarray
        Colour image with shape (height, width, channels); only the first
        three channels are used.

    Returns
    -------
    numpy.ndarray
        Grayscale image with shape (height, width) and dtype uint8; the
        per-pixel channel mean is truncated toward zero by the uint8 cast.
    """
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement. The unused
    # pre-allocated `ret` buffer was dead code and has been dropped.
    imgf = img.astype(float)
    gray = (imgf[:, :, 0] + imgf[:, :, 1] + imgf[:, :, 2]) / 3
    return gray.astype(np.uint8)
def radial_transform(img, w, h):
    """Sample *img* along radial lines centred at pixel (w, h).

    Output column ``i`` corresponds to the angle ``2*pi*i/width`` and output
    row ``a`` to the radius ``a``; sampled values are rescaled from [0, 255]
    to roughly [-1, 1] via ``(v - 127.5) / 128``. Pixels whose source falls
    outside the image stay 0.

    Parameters
    ----------
    img : numpy.ndarray
        Grayscale (H, W) or colour (H, W, 3) image.
    w, h : int
        Column (x) and row (y) coordinates of the transform centre.

    Returns
    -------
    numpy.ndarray
        Float array with the same shape as *img*.
    """
    # Bug fix: the original read the module-level variable `im` and silently
    # ignored its `img` parameter, so it only worked on one global image.
    # Duplicate assignments in the grayscale branch and leftover debug
    # prints were also removed. The literal 2*3.14159 (not math.pi) is kept
    # to reproduce the original sampling exactly.
    shape = img.shape
    new_im = np.zeros(shape)
    height, width = shape[0], shape[1]
    has_channels = len(shape) == 3
    for i in range(width):
        theta = 2 * 3.14159 * i / width
        for a in range(height):
            x = int(math.floor(a * math.cos(theta)))
            y = int(math.floor(a * math.sin(theta)))
            new_y = int(h + x)
            new_x = int(w + y)
            if 0 <= new_x < width and 0 <= new_y < height:
                if has_channels:
                    new_im[a, i, 0] = (img[new_y, new_x, 0] - 127.5) / 128
                    new_im[a, i, 1] = (img[new_y, new_x, 1] - 127.5) / 128
                    new_im[a, i, 2] = (img[new_y, new_x, 2] - 127.5) / 128
                else:
                    new_im[a, i] = (img[new_y, new_x] - 127.5) / 128
    return new_im
im = io.imread('./preview/cat_0_1511.jpeg')
im = to_gray(im)
h = im.shape[0]
w = im.shape[1]
rt_im1 = radial_transform(im,(int)(w/2),(int)(h/2))
rt_im2 = radial_transform(im,(int)(w/4),(int)(h/4))
rt_im3 = radial_transform(im,(int)(w*0.5),(int)(h*0.75))
io.imshow(im)
io.imsave('./radial_transform/112.jpg',rt_im1)
io.imsave('./radial_transform/112.jpg',rt_im2)
io.imsave('./radial_transform/112.jpg',rt_im3)
io.show()
plt.figure(num='cats_dogs',figsize=(8,8))
plt.subplot(2,2,1)
plt.title('origin image')
plt.imshow(im,plt.cm.gray)
plt.subplot(2,2,2)
plt.title('0.5')
plt.imshow(rt_im1,plt.cm.gray)
plt.axis('off')
plt.subplot(2,2,3)
plt.title('0.25')
plt.imshow(rt_im2,plt.cm.gray)
plt.axis('off')
plt.subplot(2,2,4)
plt.title('0.75')
plt.imshow(rt_im3,plt.cm.gray)
plt.axis('off')
plt.show()
```
| github_jupyter |
```
from molmap import model as molmodel
import molmap
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from joblib import load, dump
tqdm.pandas(ascii=True)
import numpy as np
import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
np.random.seed(123)
tf.compat.v1.set_random_seed(123)
tmp_feature_dir = './tmpignore'
if not os.path.exists(tmp_feature_dir):
os.makedirs(tmp_feature_dir)
mp1 = molmap.loadmap('../descriptor.mp')
mp2 = molmap.loadmap('../fingerprint.mp')
task_name = 'Tox21'
from chembench import load_data
df, induces = load_data(task_name)
MASK = -1
smiles_col = df.columns[0]
values_col = df.columns[1:]
Y = df[values_col].astype('float').fillna(MASK).values
if Y.shape[1] == 0:
Y = Y.reshape(-1, 1)
X1_name = os.path.join(tmp_feature_dir, 'X1_%s.data' % task_name)
X2_name = os.path.join(tmp_feature_dir, 'X2_%s.data' % task_name)
if not os.path.exists(X1_name):
X1 = mp1.batch_transform(df.smiles, n_jobs = 8)
dump(X1, X1_name)
else:
X1 = load(X1_name)
if not os.path.exists(X2_name):
X2 = mp2.batch_transform(df.smiles, n_jobs = 8)
dump(X2, X2_name)
else:
X2 = load(X2_name)
molmap1_size = X1.shape[1:]
molmap2_size = X2.shape[1:]
def get_pos_weights(trainY):
    """Return per-column class weights for a 0/1 (optionally masked) label matrix.

    Returns a pair of arrays ``(pos_weights, neg_weights)`` where
    ``pos_weights = neg_n / pos_n`` and ``neg_weights = pos_n / neg_n``,
    counting only entries exactly equal to 1 or 0 (mask values are ignored).
    """
    labels = pd.DataFrame(trainY)
    pos_n = (labels == 1).sum(axis=0)
    neg_n = (labels == 0).sum(axis=0)
    return (neg_n / pos_n).values, (pos_n / neg_n).values
prcs_metrics = ['MUV', 'PCBA']
epochs = 800
patience = 50 #early stopping
dense_layers = [256] #12 outputs
batch_size = 128
lr = 1e-4
weight_decay = 0
monitor = 'val_auc'
dense_avf = 'relu'
last_avf = None #sigmoid in loss
if task_name in prcs_metrics:
metric = 'PRC'
else:
metric = 'ROC'
results = []
for i, split_idxs in enumerate(induces):
train_idx, valid_idx, test_idx = split_idxs
print(len(train_idx), len(valid_idx), len(test_idx))
trainX = (X1[train_idx], X2[train_idx])
trainY = Y[train_idx]
validX = (X1[valid_idx], X2[valid_idx])
validY = Y[valid_idx]
testX = (X1[test_idx], X2[test_idx])
testY = Y[test_idx]
pos_weights, neg_weights = get_pos_weights(trainY)
loss = lambda y_true, y_pred: molmodel.loss.weighted_cross_entropy(y_true,y_pred, pos_weights, MASK = -1)
model = molmodel.net.DoublePathNet(molmap1_size, molmap2_size,
n_outputs=Y.shape[-1],
dense_layers=dense_layers,
dense_avf = dense_avf,
last_avf=last_avf)
opt = tf.keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #
#import tensorflow_addons as tfa
#opt = tfa.optimizers.AdamW(weight_decay = 0.1,learning_rate=0.001,beta1=0.9,beta2=0.999, epsilon=1e-08)
model.compile(optimizer = opt, loss = loss)
if i == 0:
performance = molmodel.cbks.CLA_EarlyStoppingAndPerformance((trainX, trainY),
(validX, validY),
patience = patience,
criteria = monitor,
metric = metric,
)
model.fit(trainX, trainY, batch_size=batch_size,
epochs=epochs, verbose= 0, shuffle = True,
validation_data = (validX, validY),
callbacks=[performance])
else:
model.fit(trainX, trainY, batch_size=batch_size,
epochs = performance.best_epoch + 1, verbose = 1, shuffle = True,
validation_data = (validX, validY))
performance.model.set_weights(model.get_weights())
best_epoch = performance.best_epoch
trainable_params = model.count_params()
train_aucs = performance.evaluate(trainX, trainY)
valid_aucs = performance.evaluate(validX, validY)
test_aucs = performance.evaluate(testX, testY)
final_res = {
'task_name':task_name,
'train_auc':np.nanmean(train_aucs),
'valid_auc':np.nanmean(valid_aucs),
'test_auc':np.nanmean(test_aucs),
'metric':metric,
'# trainable params': trainable_params,
'best_epoch': best_epoch,
'batch_size':batch_size,
'lr': lr,
'weight_decay':weight_decay
}
results.append(final_res)
pd.DataFrame(performance.history)[['loss', 'val_loss']].plot()
pd.DataFrame(performance.history)[['auc', 'val_auc']].plot()
pd.DataFrame(results).test_auc.mean()
pd.DataFrame(results).test_auc.std()
pd.DataFrame(results)
pd.DataFrame(results).to_csv('./results/%s.csv' % task_name)
```
| github_jupyter |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2017 L.A. Barba, N.C. Clementi
```
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../style/custom.css'
HTML(open(css_file, "r").read())
```
# Play with data in Jupyter
This is the second lesson of our course in _"Engineering Computations."_ In the first lesson, [_Interacting with Python_](http://nbviewer.jupyter.org/github/engineersCode/EngComp/blob/8693986dbc0554495930c87bae7283bfa78130bd/modules/1_offtheground/1_Interacting_with_Python.ipynb), we used **IPython**, the interactive Python shell. It is really great to type single-line Python expressions and get the outputs, interactively. Yet, believe it or not, there are greater things!
In this lesson, you will continue playing with data using Python, but you will do so in a **Jupyter notebook**. This very lesson is written in a Jupyter notebook. Ready? You will love it.
## What is Jupyter?
Jupyter is a set of open-source tools for interactive and exploratory computing. You work right on your browser, which becomes the user interface through which Jupyter gives you a file explorer (the _dashboard_) and a document format: the **notebook**.
A Jupyter notebook can contain: input and output of code, formatted text, images, videos, pretty math equations, and much more. The computer code is _executable_, which means that you can run the bits of code, right in the document, and get the output of that code displayed for you. This interactive way of computing, mixed with the multi-media narrative, allows you to tell a story (even to yourself) with extra powers!
## Working in Jupyter
Several things will seem counter-intuitive to you at first. For example, most people are used to launching apps in their computers by clicking some icon: this is the first thing to "unlearn." Jupyter is launched from the _command line_ (like when you launched IPython). Next, we have two types of content—code and markdown—that handle a bit differently. The fact that your browser is an interface to a compute engine (called "kernel") leads to some extra housekeeping (like shutting down the kernel). But you'll get used to it pretty quick!
### Start Jupyter
The standard way to start Jupyter is to type the following in the command-line interface:
`jupyter notebook`
Hit enter and tadah!!
After a little set up time, your default browser will open with the Jupyter app. It should look like in the screenshot below, but you may see a list of files and folders, depending on the location of your computer where you launched it.
##### Note:
Don't close the terminal window where you launched Jupyter (while you're still working on Jupyter). If you need to do other tasks on the command line, open a new terminal window.
<img src="images/jupyter-main.png" style="width: 800px;"/>
#### Screenshot of the Jupyter dashboard, open in the browser.
To start a new Jupyter notebook, click on the top-right, where it says **New**, and select `Python 3`. Check out the screenshot below.
<img src="images/create_notebook.png" style="width: 800px;"/>
#### Screenshot showing how to create a new notebook.
A new tab will appear in your browser and you will see an empty notebook, with a single input line, waiting for you to enter some code. See the next screenshot.
<img src="images/new_notebook.png" style="width: 800px;"/>
#### Screenshot showing an empty new notebook.
The notebook opens by default with a single empty code cell. Try to write some Python code there and execute it by hitting `[shift] + [enter]`.
### Notebook cells
The Jupyter notebook uses _cells_: blocks that divide chunks of text and code. Any text content is entered in a *Markdown* cell: it contains text that you can format using simple markers to get headings, bold, italic, bullet points, hyperlinks, and more.
Markdown is easy to learn, check out the syntax in the ["Daring Fireball"](https://daringfireball.net/projects/markdown/syntax) webpage (by John Gruber). A few tips:
* to create a title, use a hash to start the line: `# Title`
* to create the next heading, use two hashes (and so on): `## Heading`
* to italicize a word or phrase, enclose it in asterisks (or underdashes): `*italic*` or `_italic_`
* to make it bold, enclose it with two asterisks: `**bolded**`
* to make a hyperlink, use square and round brackets: `[hyperlinked text](url)`
Computable content is entered in code cells. We will be using the IPython kernel ("kernel" is the name used for the computing engine), but you should know that Jupyter can be used with many different computing languages. It's amazing.
A code cell will show you an input mark, like this:
`In [ ]:`
Once you add some code and execute it, Jupyter will add a number ID to the input cell, and produce an output marked like this:
`Out [1]:`
##### A bit of history:
Markdown was co-created by the legendary but tragic [Aaron Swartz](https://en.wikipedia.org/wiki/Aaron_Swartz). The biographical documentary about him is called ["The Internet's Own Boy,"](https://en.wikipedia.org/wiki/The_Internet%27s_Own_Boy) and you can view it in YouTube or Netflix. Recommended!
### Interactive computing in the notebook
Look at the icons on the menu of Jupyter (see the screenshots above). The first icon on the left (an old floppy disk) is for saving your notebook. You can add a new cell with the big **+** button. Then you have the cut, copy, and paste buttons. The arrows are to move your current cell up or down. Then you have a button to "run" a code cell (execute the code), the square icon means "stop" and the swirly arrow is to "restart" your notebook's kernel (if the computation is stuck, for example). Next to that, you have the cell-type selector: Code or Markdown (or others that you can ignore for now).
You can test-drive a code cell by writing some arithmetic operations. Like we saw in our first lesson, the Python operators are:
```python
+ - * / ** % //
```
There's addition, subtraction, multiplication and division. The last three operators are _exponent_ (raise to the power of), _modulo_ (divide and return remainder) and _floor division_.
Typing `[shift] + [enter]` will execute the cell and give you the output in a new line, labeled `Out[1]` (the numbering increases each time you execute a cell).
##### Try it!
Add a cell with the plus button, enter some operations, and `[shift] + [enter]` to execute.
Everything we did using IPython we can do in code cells within a Jupyter notebook. Try out some of the things we learned in lesson 1:
```
print("Hello World!")
x = 2**8
x < 64
```
### Edit mode and Command mode
Once you click on a notebook cell to select it, you may interact with it in two ways, which are called _modes_. Later on, when you are reviewing this material again, read more about this in Reference 1.
**Edit mode:**
* We enter **edit mode** by pressing `Enter` or double-clicking on the cell.
* We know we are in this mode when we see a green cell border and a prompt in the cell area.
* When we are in edit mode, we can type into the cell, like a normal text editor.
**Command mode:**
* We enter in **command mode** by pressing `Esc` or clicking outside the cell area.
* We know we are in this mode when we see a grey cell border with a left blue margin.
* In this mode, certain keys are mapped to shortcuts to help with
common actions.
You can find a list of the shortcuts by selecting `Help->Keyboard Shortcuts`
from the notebook menu bar. You may want to leave this for later, and come back to it, but it becomes more helpful the more you use Jupyter.
### How to shut down the kernel and exit
Closing the browser tab where you've been working on a notebook does not immediately "shut down" the compute kernel. So you sometimes need to do a little housekeeping.
Once you close a notebook, you will see in the main Jupyter app that your
notebook file has a green book symbol next to it. You should click in the box at the left of that symbol, and then click where it says **Shutdown**. You don't need to do this all the time, but if you have a _lot_ of notebooks running, they will use resources in your machine.
Similarly, Jupyter is still running even after you close the tab that has the Jupyter dashboard open. To exit the Jupyter app, you should go to the terminal that you used to open Jupyter, and type `[Ctrl] + [c]` to exit.
### Nbviewer
[Nbviewer](http://nbviewer.jupyter.org/) is a free web service that allows you to share static versions of hosted notebook files, as if they were a web page. If a notebook file is publicly available on the web, you can view it by entering its URL in the nbviewer web page, and hitting the **Go!** button. The notebook will be rendered as a static page: visitors can read everything, but they cannot interact with the code.
## Play with Python strings
Let's keep playing around with strings, but now coding in a Jupyter notebook (instead of IPython). We recommend that you open a clean new notebook to follow along the examples in this lesson, typing the commands that you see. (If you copy and paste, you will save time, but you will learn little. Type it all out!)
```
str_1 = 'hello'
str_2 = 'world'
```
Remember that we can concatenate strings ("add"), for example:
```
new_string = str_1 + str_2
print(new_string)
```
What if we want to add a space that separates `hello` from `world`? We directly add the string `' '` in the middle of the two variables. A space is a character!
```
my_string = str_1 + ' ' + str_2
print(my_string)
```
##### Exercise:
Create a new string variable that adds three exclamation marks to the end of `my_string`.
### Indexing
We can access each separate character in a string (or a continuous segment of it) using _indices_: integers denoting the position of the character in the string. Indices go in square brackets, touching the string variable name on the right. For example, to access the 1st element of `new_string`, we would enter `new_string[0]`. Yes! in Python we start counting from 0.
```
my_string[0]
#If we want the 3rd element we do:
my_string[2]
```
You might have noticed that in the cell above we have a line before the code that starts with the `#` sign. That line seems to be ignored by Python: do you know why?
It is a _comment_: whenever you want to comment your Python code, you put a `#` in front of the comment. For example:
```
my_string[1] #this is how we access the second element of a string
```
How do we know the index of the last element in the string?
Python has a built-in function called `len()` that gives the information about length of an object. Let's try it:
```
len(my_string)
```
Great! Now we know that `my_string` is eleven characters long. What happens if we enter this number as an index?
```
my_string[11]
```
Oops. We have an error: why? We know that the length of `my_string` is eleven. But the integer 11 doesn't work as an index. If you expected to get the last element, it's because you forgot that Python starts counting at zero. Don't worry: it takes some getting used to.
The error message says that the index is out of range: this is because the index of the _last element_ will always be: ` len(string) - 1`. In our case, that number is 10. Let's try it out.
```
my_string[10]
```
Python also offers a clever way to grab the last element so we don't need to calculate the length and subtract one: it is using a negative 1 for the index. Like this:
```
my_string[-1]
```
What if we use a `-2` as index?
```
my_string[-2]
```
That is the last `l` in the string `hello world`. Python is so clever, it can count backwards!
### Slicing strings
Sometimes, we want to grab more than one single element: we may want a section of the string. We do it using _slicing_ notation in the square brackets. For example, we can use `[start:end]`, where `start` is the index to begin the slice, and `end` is the (non-inclusive) index to finish the slice. For example, to grab the word `hello` from our string, we do:
```
my_string[0:5]
```
You can skip the `start` index, if you want to slice from the beginning of the string, and you can skip the `end` of a slice, indicating you want to go all the way to the end of the string. For example, if we want to grab the word `'world'` from `my_string`, we could do the following:
```
my_string[6:]
```
A helpful way to visualize slices is to imagine that the indices point to the spaces _between_ characters in the string. That way, when you write `my_string[i]`, you would be referring to the "character to the right of `i`" (Reference 2).
Check out the diagram below. We start counting at zero; the letter `'g'` is to the right of index 2. So if we want to grab the sub-string `'gin'` from `'engineer'`, we need `[start:end]=[2:5]`.
<img src="images/slicing.png" style="width: 400px;"/>
Try it yourself!
```
# Define your string
eng_string = 'engineer'
# Grab 'gin'slice
eng_string[2:5]
```
##### Exercises:
1. Define a string called `'banana'` and print out the first and last `'a'`.
2. Using the same string, grab the 2 possible slices that correspond to the word `'ana'` and print them out.
3. Create your own slicing exercise and ask your classmates to give it a try (work in groups of 3).
The following lines contain the solutions; to reveal the answer, select the lines with the mouse:
Solution Exercise 1:
<span style="color:white"> b = 'banana' </span>
<span style="color:white"> print(b[1]) </span>
<span style="color:white"> print(b[-1]) </span>
Solution Exercise 2:
<span style="color:white"> print(b[1:4]) </span>
<span style="color:white"> print(b[3:]) </span>
### What else we can do with strings?
Python has many useful built-in functions for strings. You'll learn a few of them in this section. A technical detail: in Python, some functions are associated with a particular class of objects (e.g., strings). The word **method** is used in this case, and we have a new way to call them: the dot operator. It is a bit counter-intuitive in that the name of the method comes _after the dot_, while the name of the particular object it acts on comes first. Like this: `mystring.method()`.
If you are curious about the many available methods for strings, go to the section "Built-in String Methods" in this [tutorial](https://www.tutorialspoint.com/python3/python_strings.htm).
Let's use a quote by Albert Einstein as a string and apply some useful string methods.
```
AE_quote = "Everybody is a genius. But if you judge a fish by its ability to climb a tree, it will live its whole life believing that it is stupid."
```
The **`count()`** method gives the number of occurrences of a substring in a range. The arguments for the range are optional.
*Syntax:*
`str.count(substring, start, end)`
Here, `start` and `end` are integers that indicate the indices where to start and end the count. For example, if we want to know how many letters `'e'` we have in the whole string, we can do:
```
AE_quote.count('e')
```
If we want to know how many of those `'e'` characters are in the range `[0:20]`, we do:
```
AE_quote.count('e', 0, 20)
```
We can look for more complex strings, for example:
```
AE_quote.count('Everybody')
```
The **find()** method tells us if a string `'substr'` occurs in the string we are applying the method on. The arguments for the range are optional.
*Syntax:*
`str.find(substr, start, end)`
Where `start` and `end` are indices indicating where to start and end the slice to apply the `find()` method on.
If the string `'substr'`is in the original string, the `find()` method will return the index where the substring starts, otherwise it will return `-1`.
For example, let's find the word "fish" in the Albert Einstein quote.
```
AE_quote.find('fish')
```
If we know the length of our sub-string, we can now apply slice notation to grab the word "fish".
```
len('fish')
AE_quote[42: 42 + len('fish')]
```
Let's see what happens when we try to look for a string that is not in the quote.
```
AE_quote.find('albert')
```
It returns `-1`… but careful, that doesn't mean that the position is at the end of the original string! If we read the [documentation](https://docs.python.org/3/library/stdtypes.html#string-methods), we confirm that a returned value of `-1` indicates that the sub-string we are looking for is _not in the string_ we are searching in.
A similar method is **`index()`**: it works like the `find()` method, but throws an error if the string we are searching for is not found.
*Syntax:*
`str.index(substr, start, end)`
```
AE_quote.index('fish')
AE_quote.index('albert')
```
In the example above, we used the `len()` function to calculate the length of the string `'fish'`, and we used the result to calculate the ending index. However, if the string is too long, having a line that calculates the length might be inconvenient or may make your code look messy. To avoid this, we can use the `find()` or `index()` methods to calculate the end position. In the `'fish'` example, we could look for the index of the word `'by'` (the word that follows `'fish'`) and subtract 1 from that index to get the index that corresponds to the space right after `'fish'`. There are many ways to slice strings, only limited by your imagination!
##### Note:
Remember that the ending index is not inclusive, which is why we want the index of the space that follows the string `'fish'`.
```
idx_start = AE_quote.index('fish')
idx_end = AE_quote.index('by') - 1 # -1 to get the index off the space after 'fish'
AE_quote[idx_start:idx_end]
```
##### Exercises:
1. Use the `count()` method to count how many letters `'a'` are in `AE_quote`?
2. Using the same method, how many isolated letters `'a'` are in `AE_quote`?
3. Use the `index()` method to find the position of the words `'genius'`, `'judge'` and `'tree'` in `AE_quote`.
4. Using slice syntax, extract the words in exercise 3 from `AE_quote`.
Two more string methods turn out to be useful when you are working with texts and you need to clean, separate or categorize parts of the text.
Let's work with a different string, a quote by Eleanor Roosevelt:
```
ER_quote = " Great minds discuss ideas; average minds discuss events; small minds discuss people. "
```
Notice that the string we defined above contains extra white spaces at the beginning and at the end. In this case, we did it on purpose, but bothersome extra spaces are often present when reading text from a file (perhaps due to paragraph indentation).
Strings have a method that allows us to get rid of those extra white spaces.
The **`strip()`** method returns a copy of the string in which all characters given as argument are stripped from the beginning and the end of the string.
*Syntax:*
`str.strip([chars])`
The default argument is the space character. For example, if we want to remove the white spaces in the `ER_quote`, and save the result back in `ER_quote`, we can do:
```
ER_quote = ER_quote.strip()
ER_quote
```
Let's suppose you want to strip the period at the end; you could do the following:
`ER_quote = ER_quote.strip('.')`
But if we don't want to keep the changes in our string variable, we don't overwrite the variable as we did above. Let's just see how it looks:
```
ER_quote.strip('.')
```
Check the string variable to confirm that it didn't change (it still has the period at the end):
```
ER_quote
```
Another useful method is **`startswith()`**, to find out if a string starts with a certain character.
Later on in this lesson we'll see a more interesting example; but for now, let's just "check" if our string starts with the word 'great'.
```
ER_quote.startswith('great')
```
The output is `False` because the word is not capitalized! Upper-case and lower-case letters are distinct characters.
```
ER_quote.startswith('Great')
```
It's important to mention that we don't need to match the character until we hit the white space.
```
ER_quote.startswith('Gre')
```
The last string method we'll mention is **`split()`**: it returns a **list** of all the words in a string. We can also define a separator and split our string according to that separator, and optionally we can limit the number of splits to `num`.
*Syntax:*
`str.split(separator, num)`
```
print(AE_quote.split())
print(ER_quote.split())
```
Let's split the `ER_quote` by a different character, a semicolon:
```
print(ER_quote.split(';'))
```
##### Think...
Do you notice something new in the output of the `print()` calls above?
What are those `[ ]`?
## Play with Python lists
The square brackets above indicate a Python **list**. A list is a built-in data type consisting of a sequence of values, e.g., numbers, or strings. Lists work in many ways similarly to strings: their elements are numbered from zero, the number of elements is given by the function `len()`, they can be manipulated with slicing notation, and so on.
The easiest way to create a list is to enclose a comma-separated sequence of values in square brackets:
```
# A list of integers
[1, 4, 7, 9]
# A list of strings
['apple', 'banana', 'orange']
# A list with different element types
[2, 'apple', 4.5, [5, 10]]
```
In the last list example, the last element of the list is actually _another list_. Yes! we can totally do that.
We can also assign lists to variable names, for example:
```
integers = [1, 2, 3, 4, 5]
fruits = ['apple', 'banana', 'orange']
print(integers)
print(fruits)
new_list = [integers, fruits]
print(new_list)
```
Notice that this `new_list` has only 2 elements. We can check that with the `len()` function:
```
len(new_list)
```
Each element of `new_list` is, of course, another list.
As with strings, we access list elements with indices and slicing notation. The first element of `new_list` is the list of integers from 1 to 5, while the second element is the list of three fruit names.
```
new_list[0]
new_list[1]
# Accessing the first two elements of the list fruits
fruits[0:2]
```
##### Exercises:
1. From the `integers` list, grab the slice `[2, 3, 4]` and then `[4, 5]`.
2. Create your own list and design an exercise for grabbing slices, working with your classmates.
### Adding elements to a list
We can add elements to a list using the **append()** method: it appends the object we pass into the existing list. For example, to add the element 6 to our `integers` list, we can do:
```
integers.append(6)
```
Let's check that the `integers` list now has a 6 at the end:
```
print(integers)
```
### List membership
Checking for list membership in Python looks pretty close to plain English!
*Syntax*
To check if an element is **in** a list:
`element in list`
To check if an element is **not in** a list:
`element not in list`
```
'strawberry' in fruits
'strawberry' not in fruits
```
##### Exercises
1. Add two different fruits to the `fruits` list.
2. Check if `'mango'` is in your new `fruits` list.
3. Given the list `alist = [1, 2, 3, '4', [5, 'six'], [7]]` run the following in separate cells and discuss the output with your classmates:
```Python
4 in alist
5 in alist
7 in alist
[7] in alist
```
### Modifying elements of a list
We can not only add elements to a list, we can also modify a specific element.
Let's re-use the list from the exercise above, and replace some elements.
```
alist = [1, 2, 3, '4', [5, 'six'], [7]]
```
We can find the position of a certain element with the `index()` method, just like with strings. For example, if we want to know where the element `'4'` is, we can do:
```
alist.index('4')
alist[3]
```
Let's replace it with the integer value `4`:
```
alist[3] = 4
alist
4 in alist
```
##### Exercise
Replace the last element of `alist` with something different.
Being able to modify elements in a list is a "property" of Python lists; other Python objects we'll see later in the course also behave like this, but not all Python objects do. For example, you cannot modify elements in a string. If we try, Python will complain.
Fine! Let's try it:
```
string = 'This is a string.'
```
Suppose we want to replace the period ('.') by an exclamation mark ('!'). Can we just modify this string element?
```
string[-1]
string[-1] = '!'
```
Told you! Python is confirming that we cannot change the elements of a string by item assignment.
## Next: strings and lists in action
You have learned many things about strings and lists in this lesson, and you are probably eager to see how to apply it all to a realistic situation. We created a [full example](http://nbviewer.jupyter.org/github/engineersCode/EngComp/blob/master/modules/1_offtheground/3_Example_play_with_MAEbulletin.ipynb) in a separate notebook to show you the power of Python with text data.
But before jumping in, we should introduce you to the powerful ideas of **iteration** and **conditionals** in Python.
### Iteration with `for` statements
The idea of _iteration_ (in plain English) is to repeat a process several times. If you have any programming experience with another language (like C or Java, say), you may have an idea of how to create iteration with `for` statements. But these are a little different in Python, as you can read in the [documentation](https://docs.python.org/3/tutorial/controlflow.html#for-statements).
A Python `for` statement iterates over the items of a sequence, naturally. Say you have a list called `fruits` containing a sequence of strings with fruit names; you can write a statement like
```Python
for fruit in fruits:
```
to do something with each item in the list.
Here, for the first time, we will encounter a distinctive feature of the Python language: grouping by **indentation**. To delimit _what_ Python should do with each `fruit` in the list of `fruits`, we place the next statement(s) _indented_ from the left.
How much to indent? This is a style question, and everyone has a preference: two spaces, four spaces, one tab… they are all valid: but pick one and be consistent!
Let's use four spaces:
```
fruits = ['apple', 'banana', 'orange', 'cherry', 'mandarin']
for fruit in fruits:
print("Eat your", fruit)
```
##### Pay attention:
* the `for` statement ends with a colon, `:`
* the variable `fruit` is implicitly defined in the `for` statement
* `fruit` takes the (string) value of each element of the list `fruits`, in order
* the indented `print()` statement is executed for each value of `fruit`
* once Python runs out of `fruits`, it stops
* we don't need to know ahead of time how many items are in the list!
##### Challenge question:
— What is the value of the variable `fruit` after executing the `for` statement above? Discuss with your neighbor. (Confirm your guess in a code cell.)
A very useful function to use with `for` statements is **`enumerate()`**: it adds a counter that you can use as an index while your iteration runs. To use it, you implicitly define _two_ variables in the `for` statement: the counter, and the value of the sequence being iterated on.
Study the following block of code:
```
names = ['sam', 'zoe', 'naty', 'gil', 'tom']
for i, name in enumerate(names):
names[i] = name.capitalize()
print(names)
```
##### Challenge question:
— What is the value of the variable `name` after executing the `for` statement above? Discuss with your neighbor. (Confirm your guess in a code cell.)
##### Exercise:
Say we have a list of lists (a.k.a., a _nested_ list), as follows:
```Python
fullnames = [['sam','jones'], ['zoe','smith'],['joe','cheek'],['tom','perez'] ]
```
Write some code that creates two simple lists: one with the first names, another with the last names from the nested list above, but capitalized.
To start, you need to create two _empty_ lists using the square brackets with nothing inside. We've done that for you below. _Hint_: Use the `append()` list method!
```
fullnames = [ ['sam','jones'], ['zoe','smith'],['joe','cheek'],['tom','perez'] ]
firstnames = []
lastnames = []
# Write your code here
```
### Conditionals with `if` statements
Sometimes we need the ability to check for conditions, and change the behavior of our program depending on the condition. We accomplish it with an `if` statement, which can take one of three forms.
(1) **If** statement on its own:
```
a = 8
b = 3
if a > b:
print('a is bigger than b')
```
(2) **If-else** statement:
```
# We pick a number, but you can change it
x = 1547
if x % 17 == 0:
print('Your number is a multiple of 17.')
else:
print('Your number is not a multiple of 17.')
```
*Note:* The `%` represents a modulo operation: it gives the remainder from division of the first argument by the second
*Tip:* You can uncomment this following cell, and learn a good trick to ask the user to insert a number. You can use this instead of assigning a specific value to `x` above.
```
#x = float(input('Insert your number: '))
```
(3) **If-elif-else** statement:
```
a = 3
b = 5
if a > b:
print('a is bigger than b')
elif a < b:
print('a is smaller than b')
else:
print('a is equal to b')
```
*Note:* We can have as many `elif` lines as we want.
##### Exercise
Using `if`, `elif` and `else` statements write a code where you pick a 4-digit number, if it is divisible by 2 and 3 you print: 'Your number is not only divisible by 2 and 3 but also by 6'. If it is divisible by 2 you print: 'Your number is divisible by 2'. If it is divisible by 3 you print: 'Your number is divisible by 3'. Any other option, you print: 'Your number is not divisible by 2, 3 or 6'
## What we've learned
* How to use the Jupyter environment.
* Playing with strings: accessing values, slicing and string methods.
* Playing with lists: accessing values, slicing and list methods.
* Iteration with `for` statements.
* Conditionals with `if` statements.
## References
1. [Notebook Basics: Modal Editor](http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/Notebook%20Basics.html)
2. ["Indices point between elements,"](https://blog.nelhage.com/2015/08/indices-point-between-elements/) blog post by Nelson Elhage (2015).
3. _Python for Everybody: Exploring Data Using Python 3_ (2016). Charles R. Severance. [PDF available](http://do1.dr-chuck.com/pythonlearn/EN_us/pythonlearn.pdf)
4. _Think Python: How to Think Like a Computer Scientist_ (2012). Allen Downey. Green Tea Press. [PDF available](http://greenteapress.com/thinkpython/thinkpython.pdf)
| github_jupyter |
# Building your First Model in Alteryx
Having built the model from Lesson 3-22 in alteryx, I wanted to try if I can get the same results with Python and statsmodels. It was surprisingly easy to code the model. For the first part, I had to extend the excel data from the given format
<img src="3-22-excel-1.png" width=500>
to a format with dummy variables for the category of Industry.
<img src="3-22-excel-2.png">
As we learned in our lessons, Industry is a nominal value which can't be counted or measured. To use it in a linear regression, we have to convert it into at least an ordinal form. You might also recall that, with three values given in the nominal data, you only have to add two dummy variables. That is, the third value (the missing one) is the baseline the regression is compared to. Having extended the given data, I used most of the code from my last example
https://github.com/jegali/DataScience/blob/main/lesson-3-12-multi-ticket-sample.ipynb
and the statsmodel library to calculate the values for the linear regression.
```
# a reference to the pandas library
import pandas as pd
# To visualize the data
import matplotlib.pyplot as plt
# This is new. Let's try a library which does
# the linear regression for us
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
# To visualize the data
import matplotlib.pyplot as plt
# the excel file must be in the same directory as this notebook
# be sure to use the right excel data file.
# Udacity has some files named linear-example-data with different content
# This one is the enriched excel file
excel_file= 'linear-example-data-6.xlsx'
# via pandas, the contents are read into a variable or data frame named data
data = pd.read_excel(excel_file)
# let's have a look at the data
# print (" Contents of the file ", excel_file)
# print(data)
# We want to calculate the Average number of tickets, so this is our dependent variable
# and has to be put on the Y-Axis
Y = data['Average Number of Tickets']
# We use all other columns as independent values and thus data feed for the X-Axis
# You may notice that column "Manufacturing" is missing. This is the dummy variable I will leave out.
# The column "industry" is also missing - we don't need it as it only contains nominal data.
X = data[['Number of Employees','Value of Contract','Retail','Services']]
# let's do the evaluation with statsmodels
# we have to add a constant to the calculation or
# we do not have a Y-intercept
X = sm.add_constant(X)
# build the model
model = sm.OLS(Y,X).fit()
model_prediction = model.predict(X)
model_details = model.summary()
print(model_details)
```
Here we have the results calculated by Alteryx - you may find them at the end of the video in lesson 3-22:
<img src="3-22-alteryx-1.png">
This solution may not be as convenient as the usage of alteryx as alteryx does a lot of conversion for us, as well as creating the dummy variables - but I think we have a lot more degrees of freedom using python.
```
Intercept = model.params['const']
Number_of_Employees = model.params['Number of Employees']
Value_of_Contract = model.params['Value of Contract']
Industry_Manufacturing=0
Industry_Retail = model.params['Retail']
Industry_Services = model.params['Services']
Help_desk=23
Employees=732
Contract=825000
Tickets_per_Helpdesk = 125
Average_Tickets_per_week = 2800
y = Intercept + Number_of_Employees*Employees + Value_of_Contract*Contract + Industry_Retail + Industry_Services
print("The estimated number of support tickets is:", y)
print("Actual, the Helpdesk handles ",Average_Tickets_per_week, " tickets, which is ", Average_Tickets_per_week/Tickets_per_Helpdesk, " Tickets per Helpdesk agent")
print("The Helpdesk has no more capacities.")
print("Each helpdesk agent can handle ", Tickets_per_Helpdesk, " tickets")
print("To handle ",y," tickets, we need ", y/Tickets_per_Helpdesk, " more agents")
```
| github_jupyter |
# ResNet-50
- Landmark 분류 모델
# GPU 확인
```
import numpy as np
import pandas as pd
import keras
import tensorflow as tf
from IPython.display import display
import PIL
# How to check if the code is running on GPU or CPU?
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
from keras import models, layers
from keras import Input
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers, initializers, regularizers, metrics
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import BatchNormalization, Conv2D, Activation, Dense, GlobalAveragePooling2D, MaxPooling2D, ZeroPadding2D, Add
import os
import matplotlib.pyplot as plt
import numpy as np
import math
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_dir = os.path.join('훈련 클래스 데이터 경로')
val_dir = os.path.join('검증 클래스 데이터 경로')
train_generator = train_datagen.flow_from_directory(train_dir, batch_size=16, target_size=(224, 224), color_mode='rgb')
val_generator = val_datagen.flow_from_directory(val_dir, batch_size=16, target_size=(224, 224), color_mode='rgb')
# 클래스의 총 개수 (학습하는 렌드마크의 장소)
K = 392
input_tensor = Input(shape=(224, 224, 3), dtype='float32', name='input')
def conv1_layer(x):
    """ResNet-50 stem: padded 7x7/2 convolution with BN + ReLU.

    The trailing 1-pixel zero padding prepares the tensor for the
    3x3/2 max-pool applied at the start of conv2_layer.
    """
    stem = [
        ZeroPadding2D(padding=(3, 3)),
        Conv2D(64, (7, 7), strides=(2, 2)),
        BatchNormalization(),
        Activation('relu'),
        ZeroPadding2D(padding=(1, 1)),
    ]
    for layer in stem:
        x = layer(x)
    return x
def conv2_layer(x):
    """ResNet-50 stage 2: 3x3/2 max-pool followed by three 64-64-256
    bottleneck residual blocks.

    Only the first block uses a projection shortcut (1x1 conv + BN) so the
    identity branch matches the 256-channel main path; later blocks add the
    unmodified shortcut.
    """
    x = MaxPooling2D((3, 3), 2)(x)
    shortcut = x
    for block_idx in range(3):
        # Main path: 1x1 reduce -> 3x3 -> 1x1 expand, BN after each conv.
        x = Conv2D(64, (1, 1), strides=(1, 1), padding='valid')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
        x = BatchNormalization()(x)
        if block_idx == 0:
            # Project the shortcut to 256 channels for the first block.
            shortcut = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(shortcut)
            shortcut = BatchNormalization()(shortcut)
        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        shortcut = x
    return x
def conv3_layer(x):
    """ResNet-50 stage 3: four 128-128-512 bottleneck residual blocks.

    The first block downsamples spatially (stride 2 on both the first 1x1
    conv and the projection shortcut); the remaining blocks are
    identity-shortcut blocks at stride 1.
    """
    shortcut = x
    for block_idx in range(4):
        # Stride 2 only on the first block to halve the spatial resolution.
        entry_strides = (2, 2) if block_idx == 0 else (1, 1)
        x = Conv2D(128, (1, 1), strides=entry_strides, padding='valid')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
        x = BatchNormalization()(x)
        if block_idx == 0:
            # Strided 1x1 projection keeps the shortcut shape-compatible.
            shortcut = Conv2D(512, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            shortcut = BatchNormalization()(shortcut)
        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        shortcut = x
    return x
def conv4_layer(x):
    """ResNet-50 stage 4: six 256-256-1024 bottleneck residual blocks.

    Mirrors conv3_layer with wider channels: the first block downsamples
    (stride 2) and projects the shortcut; the rest reuse the running
    identity shortcut.
    """
    shortcut = x
    for block_idx in range(6):
        # Stride 2 only on the first block to halve the spatial resolution.
        entry_strides = (2, 2) if block_idx == 0 else (1, 1)
        x = Conv2D(256, (1, 1), strides=entry_strides, padding='valid')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(1024, (1, 1), strides=(1, 1), padding='valid')(x)
        x = BatchNormalization()(x)
        if block_idx == 0:
            # Strided 1x1 projection keeps the shortcut shape-compatible.
            shortcut = Conv2D(1024, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            shortcut = BatchNormalization()(shortcut)
        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        shortcut = x
    return x
def conv5_layer(x):
    """ResNet-50 stage 5: three 512-512-2048 bottleneck residual blocks.

    Same layout as the previous stages: the first block downsamples with a
    stride-2 entry conv and a projected shortcut; the other two are
    identity-shortcut blocks.
    """
    shortcut = x
    for block_idx in range(3):
        # Stride 2 only on the first block to halve the spatial resolution.
        entry_strides = (2, 2) if block_idx == 0 else (1, 1)
        x = Conv2D(512, (1, 1), strides=entry_strides, padding='valid')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(2048, (1, 1), strides=(1, 1), padding='valid')(x)
        x = BatchNormalization()(x)
        if block_idx == 0:
            # Strided 1x1 projection keeps the shortcut shape-compatible.
            shortcut = Conv2D(2048, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            shortcut = BatchNormalization()(shortcut)
        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        shortcut = x
    return x
x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)
x = GlobalAveragePooling2D()(x)
output_tensor = Dense(K, activation='softmax')(x)
resnet50 = Model(input_tensor, output_tensor)
resnet50.summary()
resnet50.compile(optimizer='adamax',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
# 조기 종료 수행
```
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
earlystop = EarlyStopping(patience=100)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
patience=30,
verbose=1,
factor=0.5,
min_lr=0.00001)
callbacks=[earlystop, learning_rate_reduction]
```
# 모델 학습
```
history=resnet50.fit_generator(
train_generator,
steps_per_epoch=15,
epochs=100000,
validation_data=val_generator,
validation_steps=5,
callbacks=callbacks
)
```
# 학습 결과
```
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training','validation'], loc = 'upper left')
plt.show()
history.history['val_accuracy'].index(max(history.history['val_accuracy']))
max(history.history['val_accuracy'])
```
- epochs 644 수행 (최고 accuracy : 0.97083336 -552 Eopoch / 최고 val_accuracy : 0.987500011920929-641epoch)
# 모델 저장
```
resnet50.save("res_net50modelWpatience_camp7.h5")
resnet50.save_weights("res_net50modelWpatience_weight_camp7.h5")
```
---
# 모델 테스트
```
from keras.models import load_model
model = load_model('res_net50modelWpatience_camp7.h5')
test_datagen = ImageDataGenerator(rescale=1./255)
test_dir = os.path.join('테스트 클래스 데이터 경로')
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=16, target_size=(224, 224), color_mode='rgb')
```
# 모델 예측 (분류)
# 라벨 인덱싱
```
test_generator.class_indices
labels = {value:key for key, value in train_generator.class_indices.items()}
labels
```
## 테스트 이미지 경로 설정
```
filenames = os.path.join('테스트 이미지 경로')
```
## 이미지 출력 확인
```
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import random
# sample=random.choice(filenames)
image=load_img(filenames, target_size=(224,224))
image
```
## 테스트 이미지 전처리
```
img_to_array(image).shape
image=img_to_array(image)
image=image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
```
```
test_datagen = ImageDataGenerator(rescale=1./255)
test_dir = os.path.join('C:/Users/USER/Desktop/여기요/kbs')
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=16, target_size=(224, 224), color_mode='rgb')
```
```
data=img_to_array(image)
data
data.shape
test_datagen = ImageDataGenerator(rescale=1./255)
test_dir = os.path.join('테스트 클래스 데이터 경로')
test_generator = test_datagen.flow_from_directory(test_dir, batch_size=16, target_size=(224, 224), color_mode='rgb')
```
## 예측 결과
```
output=model.predict_generator(test_generator)
print(output)
for out in output:
print(labels.get(out.argmax()))
place=labels.get(out.argmax())
```
| github_jupyter |
# Ens'IA - Session 1 - Introduction to machine learning
```
import keras
from keras.datasets import cifar10
from matplotlib import pyplot as plt
import numpy as np
import math
%matplotlib inline
```
To introduce you to the main notions of Machine Learning, we will present two basic algorithms: KNN and K-MEANS.
They will be applied to the CIFAR 10 dataset, a dataset of 50,000 images belonging to 10 different image classes.
```
# Load the dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Let's observe the dimensions of the dataset
print("Shape of the training samples: {}".format(x_train.shape));
print("Shape of the training targets: {}".format(y_train.shape));
# CIFAR-10 image classes
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Let's visualize an example and its class
img_index = np.random.randint(0, x_train.shape[0])
plt.imshow(x_train[img_index])
plt.show()
# y_train has shape (50000, 1), so index the trailing axis for the scalar label.
class_indx = y_train[img_index, 0]
print("-> class {} ({})".format(class_indx, classes[class_indx]))
# Grid of examples of each class
n_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
    # Randomly select class y samples
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    # Display these examples in columns
    for i, idx in enumerate(idxs):
        # Subplots are numbered row-major starting at 1.
        plt_idx = i * n_classes + y + 1
        plt.subplot(samples_per_class, n_classes, plt_idx)
        plt.imshow(x_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()
```
K-NN (K Nearest Neighbor) is an algorithm that consists in finding, in the training dataset, the K images that most closely resemble the image we want to classify.
To compute the resemblance between two images we can, as a first approximation, simply consider their Euclidean distance (L2 norm). On the K images found, we then look at which class is the most present: we can thus decide the class of our test image.
```
# Resize the images by flattening them to facilitate their manipulation
# We want the following shapes:
# x_train: (50000, 32 * 32 * 3)
# x_test: (10000, 32 * 32 * 3)
# y_train: (training sample count, )
# y_test: (testing sample count, )
# Flatten each image to a 1-D feature vector so that Euclidean distances
# are easy to compute, and squeeze the (n, 1) target columns to (n, ).
# Resulting shapes: x_train (50000, 3072), x_test (10000, 3072).
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
# Parameter value
k = 20
```
Using all 50 000 training images to classify the 10 000 test images would take .. well .. *some time*. Therefore we're going to use subsets of those:
```
n_imgs_train = 5000
n_imgs_test = 1000
# --- Bruteforce method ---
predictions = np.empty((n_imgs_test, ))
for id_img_test, img_test in enumerate(x_test[:n_imgs_test]):
    # "k_nearest" array contains the classes of the k nearest images.
    # "distances" array contains the distances between the test image and the k nearest
    k_nearest, distances = np.full((k, ), -1), np.full((k, ), float("+inf"))
    # Scan the training subset, maintaining the k smallest distances seen
    # so far: whenever a training image beats the current worst of the k,
    # it replaces that slot (class and distance together).
    for id_img_train, img_train in enumerate(x_train[:n_imgs_train]):
        dist = np.linalg.norm(img_test - img_train)
        worst = np.argmax(distances)
        if dist < distances[worst]:
            distances[worst] = dist
            k_nearest[worst] = y_train[id_img_train]
    # Counts the number of items of each class in k_nearest, and puts in
    # predictions that which appears most
    predictions[id_img_test] = np.argmax(np.bincount(k_nearest))
    print("Classified image {}/{} ".format(id_img_test + 1, n_imgs_test))
```
However this code is quite painful to write and read. To simplify and make it
run faster, we'll use the famous package **scikit-learn**.
This package really is *the* toolbox for ML developers. It includes a lot of ready-to-use learning algorithms. We'll let you read about the KNN implementation by yourself [here](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier).
**Once more, we strongly advise against using all samples from the training and test sets.**
```
n_imgs_train = 2000
n_imgs_test = 500
x_test = x_test[:n_imgs_test]
y_test = y_test[:n_imgs_test]
from sklearn.neighbors import KNeighborsClassifier
# Create a KNN model of parameter k = 7 (the original left this TODO, so
# `neigh` was undefined and the cell raised a NameError).
neigh = KNeighborsClassifier(n_neighbors=7)
# Trains the model (KNN is lazy: fit just stores the training samples)
neigh.fit(x_train[:n_imgs_train], y_train[:n_imgs_train])
# Makes the predictions for the test set and evaluates the accuracy
print(neigh.score(x_test,y_test))
```
Let's visualize a few examples of predicted classes:
```
from itertools import product
n_cols = 4
fig, axes = plt.subplots(nrows=n_cols, ncols=n_cols, figsize=(8, 8))
samples = x_test[:n_cols ** 2]
predictions = neigh.predict(samples)
# Reshapes the samples into the image shape
samples = samples.reshape(samples.shape[0], 32, 32, 3)
# Walk every (row, col) cell of the grid and show one predicted label per cell.
for i, j in product(range(n_cols), range(n_cols)):
    axes[i, j].imshow(samples[i * n_cols + j])
    axes[i, j].axis("off")
    axes[i, j].set_title(classes[predictions[i * n_cols + j]])
fig.suptitle("A few predictions...")
plt.show(fig)
```
We obtained a score of 0.29 (this may vary with the amount of training and test samples you used). Moreover you've likely noticed that the algorithm requires quite some time (imagine using the whole datasets !).
We've evaluated the model for $k= 7$, but what's the optimal value for $k$ ?
$k$ is called a **hyperparameter**: it's a value on which the algorithm depends but which is *not* learned.
We'll let you estimate the best value for $k$ on your own:
```
# Sweep the hyperparameter k and record the test accuracy of each model.
results = []
for k in range(1, 16):
    # Train a KNN model of parameter k on the training subset ...
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(x_train[:n_imgs_train], y_train[:n_imgs_train])
    # ... and save its accuracy on the test subset.
    results.append(knn.score(x_test, y_test))
plt.plot(list(range(1, 16)), results, "-+")
plt.xlabel("K")
plt.ylabel("Accuracy")
```
The K-Means method is a **data clustering** algorithm, among the fundamentals of *unsupervised learning*. This means that the algorithm **does not require any target data**.
The algorithm tries to split the samples into separate groups, which can then be interpreted as classes. It starts by taking $K$ random images. Each of those original images is the *representative* of its class. Then for any training image $I$ we find the *representative* $R$ that is closest to $I$, and place $I$ in the class represented by $R$.
After all training images have been placed in a class, we replace the representatives by the **mean** of their classes.
We repeat this process until a convergence criterion is reached (for example, that the representatives have barely evolved between two successive steps).
```
# Number of clusters (one per CIFAR-10 class).
K_VALUE = 10
min_val = 1  # NOTE(review): unused in the visible code — confirm before removing.
# Initializes the K representatives with random flattened "images" in [0, 255).
K_mean = [255 * np.random.rand(32*32*3) for _ in range(K_VALUE)]
# Precedent values of the representatives (used by the convergence test).
K_save = [255 * np.random.rand(32*32*3) for _ in range(K_VALUE)]
def nearest_K(image):
    """
    Returns the class whose representative is closest to the image.
    (First class wins on a distance tie, as with a sequential scan.)
    """
    distances = [np.linalg.norm(image - rep) for rep in K_mean]
    return int(np.argmin(distances))
def mean_point(k, tab):
    """
    Replaces the representative of the k_th class with the mean
    of tab. tab should be a list of images.

    Does nothing when tab is empty, so a cluster that attracted no
    samples keeps its previous representative.
    """
    if tab:  # idiomatic emptiness test (was `tab != []`)
        K_mean[k] = np.mean(tab, axis=0)
def stop_convergence():
    """
    Evaluates whether we should stop iterating: True only once *every*
    representative is unchanged since the previous iteration.

    Fixes two defects in the original: it returned True as soon as ANY
    single representative was unchanged (stopping far too early), and it
    hard-coded range(10) instead of using K_VALUE.
    """
    # We might want a less strict criterion (e.g. np.allclose) !
    return all(np.array_equal(K_mean[k], K_save[k]) for k in range(K_VALUE))
#KMEAN
iteration = 0
while not stop_convergence():
    iteration += 1
    K_nearest = [[] for _ in range(K_VALUE)]
    # Assign every training image to its closest representative.
    # Store the image itself, not its index: mean_point documents that it
    # averages a list of *images*, and the original appended id_image,
    # which made the new representative a meaningless mean of indices.
    for id_image, image in enumerate(x_train):
        K_nearest[nearest_K(image)].append(image)
    for k in range(K_VALUE):
        # Snapshot the representative before moving it to its cluster mean.
        K_save[k] = K_mean[k]
        mean_point(k, K_nearest[k])
    print(iteration)
```
Let's try with a built-in function written by some serious Data Scientists:
```
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=10);
kmeans.fit(x_train);
# NOTE(review): KMeans.score ignores the labels argument and returns the
# negative inertia (not a classification accuracy) — confirm this call is
# really what is intended here.
kmeans.score(x_test,y_test)
```
| github_jupyter |
```
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
import numpy as np
import os
import time
# from lossT import sparse_categorical_crossentropy
```
### Parameters
```
# Spatially discretized data into 20 bins (bin edges from -0.9 to 1.0 in 0.1 steps)
bins=np.arange(-0.9, 1.1, 0.1)
num_bins=len(bins)
# Labels of all possible states in the ranges we considered.
# For 2d systems, this is not the same as the number of representative values.
all_combs = [i for i in range(num_bins)]
vocab=sorted(all_combs)
vocab_size = len(vocab)
# Sequence length and shift in step between past (input) & future (output)
seq_length = 100
shift=1
# Batch size
BATCH_SIZE = 64
# Buffer size to shuffle the dataset.
BUFFER_SIZE = 50000
# Model parameters
embedding_dim = 128
rnn_units = 1024
# Training epochs
EPOCHS=40
# Number of states to sample autoregressively at prediction time
num_generate = 2000000
# Low temperatures result in more predictable text.
# Higher temperatures result in more surprising text.
# Experiment to find the best setting.
temperature = 1.0
def split_input_target(chunk):
    """
    Split a sequence into an (input, target) pair: the target is the
    input sequence advanced by the module-level `shift` steps.
    """
    return chunk[:-shift], chunk[shift:]
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Assemble the embedding -> RNN -> logits stack as a Sequential model.

    `rnn` is the module-level layer class/factory chosen at runtime
    (CuDNNLSTM on GPU, a compatible LSTM otherwise).
    """
    embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                          batch_input_shape=[batch_size, None])
    recurrent = rnn(rnn_units,
                    return_sequences=True,
                    recurrent_initializer='glorot_uniform',
                    stateful=True)
    logits = tf.keras.layers.Dense(vocab_size)
    return tf.keras.Sequential([embedding, recurrent, logits])
def loss(labels, logits):
    """Sparse categorical cross-entropy on raw logits (the final Dense
    layer applies no softmax, hence from_logits=True)."""
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
    # return sparse_categorical_crossentropy(labels, logits, from_logits=True)
def generate_text(pmodel, num_generate, temperature, start_string):
    """
    Generate `num_generate` discretized states autoregressively.

    The seed `start_string` is fed in one pass to warm up the stateful
    model; each subsequent step feeds back the previously sampled state.
    Higher `temperature` flattens the sampling distribution. The returned
    array's first row is an uninitialized np.empty placeholder — callers
    strip it with `prediction[1:]`.
    """
    # Converting the start string to numbers (vectorizing)
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    # Accumulator for the results (row 0 is the placeholder noted above)
    text_generated = np.empty(1)
    # Here batch size = 1
    pmodel.reset_states()
    for i in range(num_generate):
        predictions = pmodel(input_eval)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)
        # using a multinomial distribution to predict the word returned by the model
        # (tf.multinomial is the TF1.x API; TF2 renamed it tf.random.categorical)
        predictions = predictions / temperature
        predicted_id = tf.multinomial(predictions, num_samples=1)[-1,0].numpy()
        # We pass the predicted word as the next input to the model
        # along with the previous hidden state
        input_eval = tf.expand_dims([predicted_id], 0)
        # NOTE(review): np.vstack inside the loop is O(n^2) in num_generate;
        # appending to a list and stacking once would be faster (no behavior
        # change) — worth confirming before altering.
        text_generated = np.vstack((text_generated, idx2char[predicted_id].tolist()))
    return text_generated
```
### Read data
```
# COLVAR file: column 1 = phi, column 2 = psi dihedral angles.
infile = 'DATA_aladip/COLVAR_T450'
phi, psi=np.loadtxt(infile, unpack=True, usecols=(1,2), skiprows=7)
# Work with the bounded sine/cosine transforms of the angles.
cos_phi=np.cos(phi)
sin_phi=np.sin(phi)
cos_psi=np.cos(psi)
sin_psi=np.sin(psi)
# Spatially discretized data: bin index of each sin value within `bins`.
idx_sin_phi=np.digitize(sin_phi, bins)
idx_sin_psi=np.digitize(sin_psi, bins)
```
### Training data
```
# Use the first 10k discretized sin(phi) values as the "text" to model.
idx_2d=list(idx_sin_phi[:10000])
text = idx_2d
char2idx = {u:i for i, u in enumerate(vocab)} # Mapping from characters to indices
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
# Create training examples / targets: fixed-length chunks split into
# (input, target) pairs, then shuffled and batched.
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+shift, drop_remainder=True)
dataset = sequences.map(split_input_target)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
```
### Use the same trajectory as the validation data
```
# NOTE(review): this re-digitizes the same trajectory used for training, so
# the first 10k training states are contained in the validation data —
# confirm this overlap is intentional (the cell title says so).
idx_sin_phi_v=np.digitize(sin_phi, bins)
idx_2dv=list(idx_sin_phi_v)
vali = idx_2dv[:200000]
vali_as_int = np.array([char2idx[c] for c in vali])
# Create validation examples/targets
vali_dataset = tf.data.Dataset.from_tensor_slices(vali_as_int)
sequences = vali_dataset.batch(seq_length+shift, drop_remainder=True)
vdataset = sequences.map(split_input_target)
vdataset = vdataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
```
### Read the same trajectory and use the first few to activate the model for prediction
```
# Seed sequence used to warm up the stateful RNN before free-running prediction.
idx_sin_phi_p=np.digitize(sin_phi, bins)
idx_2dp=list(idx_sin_phi_p)
text4activation = idx_2dp[:100000]
```
### Decide whether to use GPU and build model of training
```
# Pick the RNN implementation: the fused CuDNN kernel on GPU, otherwise a
# standard LSTM configured (sigmoid recurrent activation) to stay
# checkpoint-compatible with CuDNNLSTM.
if tf.test.is_gpu_available():
    rnn = tf.keras.layers.CuDNNLSTM
else:
    import functools
    rnn = functools.partial(
        tf.keras.layers.LSTM, recurrent_activation='sigmoid')
model = build_model(vocab_size = vocab_size,
                    embedding_dim=embedding_dim,
                    rnn_units=rnn_units,
                    batch_size=BATCH_SIZE)
print(model.summary())
# TF1-style optimizer; `loss` is the from_logits cross-entropy defined above.
model.compile(optimizer = tf.train.AdamOptimizer(), loss = loss)
```
### Save checkpoint
```
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files; "{epoch}" is filled in by the callback.
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
# Save weights only (the architecture is rebuilt from build_model later).
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
```
### Training
```
# Steps per epoch = number of full (input, target) sequences // batch size.
examples_per_epoch = len(text)//(seq_length+shift)
steps_per_epoch = examples_per_epoch//BATCH_SIZE
v_examples=len(vali_as_int)//(seq_length+shift)
v_steps_per_epoch=v_examples//BATCH_SIZE
history = model.fit(dataset.repeat(EPOCHS), epochs=EPOCHS, steps_per_epoch=steps_per_epoch, validation_data=vdataset.repeat(EPOCHS), validation_steps=v_steps_per_epoch, callbacks=[checkpoint_callback])
# Rebuild model with batch_size=1 (required for single-sequence generation):
pmodel = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
pmodel.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
pmodel.build(tf.TensorShape([1, None]))
print(pmodel.summary())
# Print the length of seed for activating the model
print('length of seed: {}'.format(len(text4activation)))
```
### Generate prediction sequentially
```
# Time the full autoregressive generation run.
start0 = time.time()
prediction=generate_text(pmodel, num_generate, temperature, start_string=text4activation)
print ('Time taken for total {} sec\n'.format(time.time() - start0))
```
### Save prediction
```
np.savetxt('prediction',prediction[1:])
```
| github_jupyter |
# Introduction to Python SQL Libraries
Source: https://realpython.com/python-sql-libraries/#deleting-table-records
Tools: DB Brower for SQlite, website https://sqlitebrowser.org/
All software applications interact with data, most commonly through a database management system (DBMS). Some programming languages come with modules that you can use to interact with a DBMS, while others require the use of third-party packages. In this tutorial, you’ll explore the different Python SQL libraries that you can use. You’ll develop a straightforward application to interact with SQLite, MySQL, and PostgreSQL databases.
In this tutorial, you’ll learn how to:
- Connect to different database management systems with Python SQL libraries
- Interact with SQLite, MySQL, and PostgreSQL databases
- Perform common database queries using a Python application
- Develop applications across different databases using a Python script
To get the most out of this tutorial, you should have knowledge of basic Python, SQL, and working with database management systems. You should also be able to download and import packages in Python and know how to install and run different database servers locally or remotely.
**This notebook will use the SQLite only, because this is the simplest method on your device.**
Understanding the Database Schema
In this tutorial, you’ll develop a very small database for a social media application. The database will consist of four tables:
- users
- posts
- comments
- likes
A high-level diagram of the database schema is shown below:

## Using Python SQL Libraries to Connect to a Database
Before you interact with any database through a Python SQL Library, you have to connect to that database. In this section, you’ll see how to connect to SQLite, MySQL, and PostgreSQL databases from within a Python application.
> **Note**: You’ll need MySQL and PostgreSQL servers up and running before you execute the scripts in the MySQL and PostgreSQL database sections. For a quick intro on how to start a MySQL server, check out the MySQL section of Starting a Django Project. To learn how to create a database in PostgreSQL, check out the Setting Up a Database section of Preventing SQL Injection Attacks With Python.
It’s recommended that you create three different Python files, so you have one for each of the three databases. You’ll execute the script for each database in its corresponding file.
## SQLite
[SQLite](https://www.sqlite.org/docs.html) is probably the most straightforward database to connect to with a Python application since you don’t need to install any external Python SQL modules to do so. By default, your Python installation contains a Python SQL library named sqlite3 that you can use to interact with an SQLite database.
What’s more, SQLite databases are serverless and self-contained, since they read and write data to a file. This means that, unlike with MySQL and PostgreSQL, you don’t even need to install and run an SQLite server to perform database operations!
### Create / Connect to SQLite Database
Here’s how you use sqlite3 to connect to an SQLite database in Python:
```
import sqlite3
from sqlite3 import Error


def create_connection(path):
    """
    Open the SQLite database at *path*, creating the file if it does not
    exist. Returns the connection object on success, or None when the
    driver raised an Error; the outcome is reported on stdout.
    """
    try:
        connection = sqlite3.connect(path)
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
    print("Connection to SQLite DB successful")
    return connection
```
Here’s how this code works:
- Lines 1 and 2 import sqlite3 and the module’s Error class.
- Line 4 defines a function .create_connection() that accepts the path to the SQLite database.
- Line 7 uses .connect() from the sqlite3 module and takes the SQLite database path as a parameter. If the database exists at the specified location, then a connection to the database is established. Otherwise, a new database is created at the specified location, and a connection is established.
- Line 8 prints the status of the successful database connection.
- Line 9 catches any exception that might be thrown if .connect() fails to establish a connection.
- Line 10 displays the error message in the console.
`sqlite3.connect(path)` returns a connection object, which is in turn returned by create_connection(). This connection object can be used to execute queries on an SQLite database. The following script creates a connection to the SQLite database:
```
connection = create_connection("sm_app.sqlite")
```
Once you execute the above script, you’ll see that a database file sm_app.sqlite is created in the root directory. Note that you can change the location to match your setup.
## SQLite Creating Tables
In the previous section, you saw how to connect to SQLite, MySQL, and PostgreSQL database servers using different Python SQL libraries. You created the sm_app database on all three database servers. In this section, you’ll see how to create tables inside these three databases.
As discussed earlier, you’ll create four tables:
- users
- posts
- comments
- likes
You’ll start with SQLite.
To execute queries in SQLite, use cursor.execute(). In this section, you’ll define a function execute_query() that uses this method. Your function will accept the connection object and a query string, which you’ll pass to cursor.execute().
.execute() can execute any query passed to it in the form of string. You’ll use this method to create tables in this section. In the upcoming sections, you’ll use this same method to execute update and delete queries as well.
> **Note**: This script should be executed in the same file where you created the connection for your SQLite database.
Here’s your function definition:
```
def execute_query(connection, query):
    """
    Execute a single write statement (DDL/INSERT/UPDATE/DELETE) on
    *connection* and commit it, reporting success or failure on stdout.
    """
    cur = connection.cursor()
    try:
        cur.execute(query)
        connection.commit()
    except Error as e:
        print(f"The error '{e}' occurred")
    else:
        print("Query executed successfully")
```
This code tries to execute the given query and prints an error message if necessary.
Next, write your **query**:
```
# DDL for the users table; AUTOINCREMENT means inserts need not supply an id.
create_users_table = """
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
age INTEGER,
gender TEXT,
nationality TEXT
);
"""
```
This says to create a table users with the following five columns:
- id
- name
- age
- gender
- nationality
Finally, you’ll call execute_query() to create the table. You’ll pass in the connection object that you created in the previous section, along with the create_users_table string that contains the create table query:
```
execute_query(connection, create_users_table)
```
The following query is used to create the posts table:
```
# DDL for posts; user_id is a foreign key into users (one user, many posts).
create_posts_table = """
CREATE TABLE IF NOT EXISTS posts(
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL,
description TEXT NOT NULL,
user_id INTEGER NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
"""
```
Since there’s a one-to-many relationship between users and posts, you can see a foreign key user_id in the posts table that references the id column in the users table. Execute the following script to create the posts table:
```
execute_query(connection, create_posts_table)
```
Finally, you can create the comments and likes tables with the following script:
```
# DDL for comments and likes. NOTE: multiple table-level constraints must be
# comma-separated in SQLite; the original omitted the comma between the two
# FOREIGN KEY clauses, which SQLite rejects as a syntax error.
create_comments_table = """
CREATE TABLE IF NOT EXISTS comments (
id INTEGER PRIMARY KEY AUTOINCREMENT,
text TEXT NOT NULL,
user_id INTEGER NOT NULL,
post_id INTEGER NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (post_id) REFERENCES posts (id)
);
"""
create_likes_table = """
CREATE TABLE IF NOT EXISTS likes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id INTEGER NOT NULL,
post_id integer NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (post_id) REFERENCES posts (id)
);
"""
# Create both tables; each statement reports its own success/failure.
execute_query(connection, create_comments_table)
execute_query(connection, create_likes_table)
```
You can see that creating tables in SQLite is very similar to using raw SQL. All you have to do is store the query in a string variable and then pass that variable to cursor.execute().
## SQLite Inserting Records
In the previous section, you saw how to create tables in your SQLite, MySQL, and PostgreSQL databases by using different Python SQL modules. In this section, you’ll see how to insert records into your tables.
To insert records into your SQLite database, you can use the same execute_query() function that you used to create tables. First, you have to store your INSERT INTO query in a string. Then, you can pass the connection object and query string to execute_query(). Let’s insert five records into the users table:
```
# Multi-row INSERT; ids auto-increment, so only the other columns are given.
create_users = """
INSERT INTO
users (name, age, gender, nationality)
VALUES
('James', 25, 'male', 'USA'),
('Leila', 32, 'female', 'France'),
('Brigitte', 35, 'female', 'England'),
('Mike', 40, 'male', 'Denmark'),
('Elizabeth', 21, 'female', 'Canada');
"""
execute_query(connection, create_users)
```
Since you set the id column to auto-increment, you don’t need to specify the value of the id column for these users. The users table will auto-populate these five records with id values from 1 to 5.
Now insert six records into the posts table:
```
# Insert six posts; each user_id must already exist in users.id.
# NOTE(review): these literals use double quotes, which SQLite accepts for
# strings only as a compatibility fallback — single quotes are standard SQL.
create_posts = """
INSERT INTO
posts (title, description, user_id)
VALUES
("Happy", "I am feeling very happy today", 1),
("Hot Weather", "The weather is very hot today", 2),
("Help", "I need some help with my work", 2),
("Great News", "I am getting married", 1),
("Interesting Game", "It was a fantastic game of tennis", 5),
("Party", "Anyone up for a late-night party today?", 3);
"""
execute_query(connection, create_posts)
```
It’s important to mention that the user_id column of the posts table is a foreign key that references the id column of the users table. This means that the user_id column must contain a value that already exists in the id column of the users table. If it doesn’t exist, then you’ll see an error.
Similarly, the following script inserts records into the comments and likes tables:
```
# Seed the comments and likes tables; both reference users.id and posts.id.
create_comments = """
INSERT INTO
comments (text, user_id, post_id)
VALUES
('Count me in', 1, 6),
('What sort of help?', 5, 3),
('Congrats buddy', 2, 4),
('I was rooting for Nadal though', 4, 5),
('Help with your thesis?', 2, 3),
('Many congratulations', 5, 4);
"""
create_likes = """
INSERT INTO
likes (user_id, post_id)
VALUES
(1, 6),
(2, 3),
(1, 5),
(5, 4),
(2, 4),
(4, 2),
(3, 6);
"""
execute_query(connection, create_comments)
execute_query(connection, create_likes)
```
In both cases, you store your INSERT INTO query as a string and execute it with execute_query().
## SQLite Selecting Records
In this section, you’ll see how to select records from database tables using the different Python SQL modules. In particular, you’ll see how to perform SELECT queries on your SQLite, MySQL, and PostgreSQL databases.
To select records using SQLite, you can again use cursor.execute(). However, after you’ve done this, you’ll need to call .fetchall(). This method returns a list of tuples where each tuple is mapped to the corresponding row in the retrieved records.
To simplify the process, you can create a function execute_read_query():
```
def execute_read_query(connection, query):
    """
    Execute a SELECT statement on *connection* and return all fetched
    rows as a list of tuples. Returns None (after printing the error)
    when the query fails.
    """
    cur = connection.cursor()
    try:
        cur.execute(query)
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
    return cur.fetchall()
This function accepts the connection object and the SELECT query and returns the selected record.
### SELECT
Let’s now select all the records from the users table:
```
# Fetch and display every row of users (fine for this tiny demo table).
select_users = "SELECT * from users"
users = execute_read_query(connection, select_users)
for user in users:
    print(user)
```
---
In the above script, the SELECT query selects all the users from the users table. This is passed to the execute_read_query(), which returns all the records from the users table. The records are then traversed and printed to the console.
Note: It’s not recommended to use SELECT * on large tables since it can result in a large number of I/O operations that increase the network traffic.
The output of the above query looks like this:
```bash
(1, 'James', 25, 'male', 'USA')
(2, 'Leila', 32, 'female', 'France')
(3, 'Brigitte', 35, 'female', 'England')
(4, 'Mike', 40, 'male', 'Denmark')
(5, 'Elizabeth', 21, 'female', 'Canada')
```
In the same way, you can retrieve all the records from the posts table with the below script:
```
# Same pattern for the posts table.
select_posts = "SELECT * FROM posts"
posts = execute_read_query(connection, select_posts)
for post in posts:
    print(post)
```
The result shows all the records in the posts table.
### JOIN
You can also execute complex queries involving JOIN operations to retrieve data from two related tables. For instance, the following script returns the user ids and names, along with the description of the posts that these users posted:
```
# INNER JOIN: pair each post's description with its author's id and name.
select_users_posts = """
SELECT
users.id,
users.name,
posts.description
FROM
posts
INNER JOIN users ON users.id = posts.user_id
"""
users_posts = execute_read_query(connection, select_users_posts)
for users_post in users_posts:
    print(users_post)
```
You can also select data from three related tables by implementing multiple JOIN operators. The following script returns all posts, along with the comments on the posts and the names of the users who posted the comments:
```
# Two joins: each comment together with the post it belongs to and the
# name of the user who wrote the comment.
select_posts_comments_users = """
SELECT
posts.description as post,
text as comment,
name
FROM
posts
INNER JOIN comments ON posts.id = comments.post_id
INNER JOIN users ON users.id = comments.user_id
"""
posts_comments_users = execute_read_query(
    connection, select_posts_comments_users
)
for posts_comments_user in posts_comments_users:
    print(posts_comments_user)
```
You can see from the output that the column names are not being returned by .fetchall(). To return column names, you can use the .description attribute of the cursor object. For instance, the following list returns all the column names for the above query:
```
# Column names come from cursor.description, not from fetchall().
cursor = connection.cursor()
cursor.execute(select_posts_comments_users)
cursor.fetchall()
column_names = [description[0] for description in cursor.description]
print(column_names)
```
### WHERE
Now you’ll execute a SELECT query that returns the post, along with the total number of likes that the post received:
```
# Implicit join via WHERE plus GROUP BY: count the likes per post.
select_post_likes = """
SELECT
description as Post,
COUNT(likes.id) as Likes
FROM
likes,
posts
WHERE
posts.id = likes.post_id
GROUP BY
likes.post_id
"""
post_likes = execute_read_query(connection, select_post_likes)
for post_like in post_likes:
    print(post_like)
```
By using a WHERE clause, you’re able to return more specific results.
### SQLite Updating Table Records
In the last section, you saw how to select records from SQLite, MySQL, and PostgreSQL databases. In this section, you’ll cover the process for updating records using the Python SQL libraries for SQLite, PostgreSQL, and MySQL.
Updating records in SQLite is pretty straightforward. You can again make use of execute_query(). As an example, you can update the description of the post with an id of 2. First, SELECT the description of this post:
```
# Show the post's current description, then overwrite it via UPDATE.
select_post_description = "SELECT description FROM posts WHERE id = 2"
post_description = execute_read_query(connection, select_post_description)
for description in post_description:
    print(description)
# NOTE(review): the new description is double-quoted; SQLite treats that as
# a string only as a compatibility fallback — single quotes are standard.
update_post_description = """
UPDATE
posts
SET
description = "The weather has become pleasant now"
WHERE
id = 2
"""
execute_query(connection, update_post_description)
```
Now, if you execute the SELECT query again, you should see the following result:
```
# Re-run the SELECT to confirm the update took effect.
post_description = execute_read_query(connection, select_post_description)
for description in post_description:
    print(description)
```
### SQLite Deleting Table Records
In this section, you’ll see how to delete table records using the Python SQL modules for SQLite, MySQL, and PostgreSQL databases. The process of deleting records is uniform for all three databases since the DELETE query for the three databases is the same.
You can again use execute_query() to delete records from YOUR SQLite database. All you have to do is pass the connection object and the string query for the record you want to delete to execute_query(). Then, execute_query() will create a cursor object using the connection and pass the string query to cursor.execute(), which will delete the records.
As an example, try to delete the comment with an id of 5:
```
# Remove a single comment by primary key.
delete_comment = "DELETE FROM comments WHERE id = 5"
execute_query(connection, delete_comment)
```
Now, if you select all the records from the comments table, you’ll see that the fifth comment has been deleted.
```
# NOTE(review): the variable names say "posts" but this lists the comments
# table (to verify the deletion above) — consider renaming for clarity.
select_posts = "SELECT * FROM comments"
posts = execute_read_query(connection, select_posts)
for post in posts:
    print(post)
```
## Conclusion
In this tutorial, you’ve learned how to use three common Python SQL libraries. sqlite3, mysql-connector-python, and psycopg2 allow you to connect a Python application to SQLite, MySQL, and PostgreSQL databases, respectively.
Now you can:
Interact with SQLite, MySQL, or PostgreSQL databases
Use three different Python SQL modules
Execute SQL queries on various databases from within a Python application
However, this is just the tip of the iceberg! There are also Python SQL libraries for object-relational mapping, such as SQLAlchemy and Django ORM, that automate the task of database interaction in Python. You’ll learn more about these libraries in other tutorials in our Python databases section.
## Appendix
### MySQL
Unlike SQLite, there’s no default Python SQL module that you can use to connect to a MySQL database. Instead, you’ll need to install a Python SQL driver for MySQL in order to interact with a MySQL database from within a Python application. One such driver is mysql-connector-python. You can download this Python SQL module with pip:
```bash
$ pip install mysql-connector-python
```
Note that MySQL is a server-based database management system. One MySQL server can have multiple databases. Unlike SQLite, where creating a connection is tantamount to creating a database, a MySQL database has a two-step process for database creation:
1. Make a connection to a MySQL server.
2. Execute a separate query to create the database.
Define a function that connects to the MySQL database server and returns the connection object:
```python
import mysql.connector
from mysql.connector import Error
def create_connection(host_name, user_name, user_password):
    """Connect to a MySQL server and return the connection object.

    Returns None when the connection attempt fails; the error is printed
    rather than raised so the tutorial flow can continue.
    """
    connection = None
    try:
        # NOTE(review): `passwd` is a legacy alias for `password` in
        # mysql-connector-python; both are accepted.
        connection = mysql.connector.connect(
            host=host_name,
            user=user_name,
            passwd=user_password
        )
        print("Connection to MySQL DB successful")
    except Error as e:
        print(f"The error '{e}' occurred")
    return connection
connection = create_connection("localhost", "root", "")
```
In the above script, you define a function create_connection() that accepts three parameters:
1. host_name
2. user_name
3. user_password
The mysql.connector Python SQL module contains a method .connect() that you use in line 7 to connect to a MySQL database server. Once the connection is established, the connection object is returned to the calling function. Finally, in line 18 you call create_connection() with the host name, username, and password.
So far, you’ve only established the connection. The database is not yet created. To do this, you’ll define another function create_database() that accepts two parameters:
1. connection is the connection object to the database server that you want to interact with.
2. query is the query that creates the database.
Here’s what this function looks like:
```
def create_database(connection, query):
    """Execute a database-creation query on an open server connection.

    connection -- connection object returned by create_connection()
    query      -- a CREATE DATABASE statement, passed as a string

    Errors are printed, not raised, matching the tutorial's style.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        print("Database created successfully")
    except Error as e:
        print(f"The error '{e}' occurred")
```
To execute queries, you use the cursor object. The query to be executed is passed to cursor.execute() in string format.
Create a database named sm_app for your social media app in the MySQL database server:
```python
create_database_query = "CREATE DATABASE sm_app"
create_database(connection, create_database_query)
```
Now you’ve created a database sm_app on the database server. However, the connection object returned by the create_connection() is connected to the MySQL database server. You need to connect to the sm_app database. To do so, you can modify create_connection() as follows:
```
def create_connection(host_name, user_name, user_password, db_name):
    """Connect to a specific database on a MySQL server.

    Same as the earlier create_connection(), plus db_name, which selects
    the database to attach to. Returns the connection object, or None if
    the attempt fails (the error is printed, not raised).
    """
    connection = None
    try:
        connection = mysql.connector.connect(
            host=host_name,
            user=user_name,
            passwd=user_password,
            database=db_name
        )
        print("Connection to MySQL DB successful")
    except Error as e:
        print(f"The error '{e}' occurred")
    return connection
```
You can see in line 8 that create_connection() now accepts an additional parameter called db_name. This parameter specifies the name of the database that you want to connect to. You can pass in the name of the database you want to connect to when you call this function:
```python
connection = create_connection("localhost", "root", "", "sm_app")
```
The above script successfully calls create_connection() and connects to the sm_app database.
| github_jupyter |
# Advanced SQL II: Subqueries
_**Author**: Boom Devahastin Na Ayudhya_
***
## Additional Learning Tools after the course
The dataset I've used for this lesson is from [Udemy's Master SQL for Data Science](https://www.udemy.com/master-sql-for-data-science/learn/lecture/9790570#overview) course. In the repo, you should copy and paste the database construction queries from the `employees_udemy.txt` script into PostgreSQL if you wish to explore the dataset on your own.
## What is a subquery?
Exactly what it sounds like: literally inception because **it's a query within a query**!
...What?! Sounds complicated...why do we need this?
**Motivation:** The `employees` table has a department column amongst other employee-specific information. The `departments` table shows information on each of the departments. However, some departments have recently turned over their entire team and so there may not be any employees listed in those departments. How can we figure out which departments did this?
TL;DR - How do we determine which departments exist in the `employees` table but not the `departments` table? Think through the logic in English first before you attempt to convert it to code.
_**DO NOT USE JOINS - we'll talk about why not in a bit!**_
_Answer:_
```MySQL
```
### Subqueries in `WHERE`
How did we think about this?
- The output of a subquery is a "dataframe" (or rather a subset of a table).
- If we choose to extract just one column from a table using a query, we essentially have a list
- We've written WHERE statements before with `IN` and `NOT IN` and compared results to a list
- Connecting the dots: we can replace the list in a WHERE clause with a subquery to make things more dynamic
**Exercise 1:** Write a query that returns all information about employees who work in the Electronics division.
_Answer:_
```MySQL
```
**Exercise 2:** Switching back to tables in the `GoT_schema.txt` file now. Write a query that shows the name of characters (in the `people` table) who are not from any of the great noble houses (in the `houses` table).
_Answer:_
```MySQL
```
**Exercise 3:** You might have noticed there are some noble houses that do not have any bannermen. Write a query that shows the name of the great noble houses without any bannermen (vassal houses) serving them.
_Answer:_
```MySQL
```
_**Short Note on Efficient Queries**_
Some `JOIN` commands (especially `INNER JOIN`) can be very computationally intensive. This is why sometimes we would prefer to write subqueries.
_Example:_ Without using any kind of`JOIN`, find all employees who work in the Asia and Canada regions who make more than 13,000 dollars.
```MySQL
SELECT * from employees
WHERE salary > 13000
AND region_id IN (SELECT region_id
FROM regions
WHERE country IN ('Asia', 'Canada'))
```
### Subqueries in `SELECT`
Subqueries can show up almost anywhere in the query! If we want to compare values to a single value, we could include the result of a subquery in the `SELECT` clause. This is especially important when you want to construct some sort of **_benchmark_** (e.g. how much you have missed/beaten a sales target by, what the active returns of a mutual fund is compared to its benchmark index, etc.)
_Example:_ Show me the first_name, department, and salary of all employees next to the salary of the employee who earns the least at the company.
```MySQL
SELECT first_name,
department,
salary,
(SELECT MIN(salary) FROM employees) AS "lowest_salary"
FROM employees
```
#### _Short Note on Order of Execution in SQL Queries_
Across clauses, there is a sequence that queries follow. SQL queries will run FROM first, then WHERE and other filters, and then SELECT last. So in the exercise **below**, the `lowest_salary` is already going to be calculated based on Asia and Canada employees because WHERE executes before SELECT
However, within a clause (e.g. within SELECT) everything runs _**simultaneously**_, not sequentially! So you cannot use `lowest_salary` in say a calculation for "difference" -- you will need to use the actual subquery in the calculation.
**Exercise 4:** Among all employees who work in Asia and Canada, calculate the how much less each employee makes compared to the highest earner across those regions.
_Answer:_
```MySQL
```
### Subqueries using `ALL` keyword
**Motivation:** We've learned convenient functions like `MAX` and `MIN` which helps us find the highest or lowest value in a field/column.
```MySQL
SELECT MAX(salary) FROM employees
```
What if your interviewer asked you to find the highest salary of all employees in the company **WITHOUT** using any built in SQL functions though?
```MySQL
SELECT salary
FROM employees
WHERE salary >= ALL(SELECT salary
FROM employees)
```
Interview aside though, here's a more practical problem. You're not going to be able to use MAX or MIN when it comes to this situation:
**Exercise 5:** Find the mode salary (or salaries, in case of ties) of all employees in the company.
_Answer:_
```MySQL
```
### Challenge Interview Question \#1
A retailer stores information about all of its products in a `Products` table, which contains the following columns:
- `id`: the unique identification number for the product
- `name`: the name of the product
- `manuf_id`: the identification number of the manufacturer we acquired this from
- `grade`: the quality score on a scale of 1 (bad) to 100 (good) of the product according to reviews.
Write a SQL query that returns the names of all products (there are ties) that have the **_SECOND_ lowest** score.
_Answer:_
### Challenge Interview Question \#2
A table called `eval` has 3 columns: <br>
- case_id (int) <br>
- timestamp (datetime) <br>
- score (int) <br>
But case_id is not unique. For a given case_id, there may be scores on different dates.
Write a query to get the score for each case_id at most recent date.
_Answer:_
```MySQL
```
**_Need some help?_** While it is probably better that you do this under interview conditions (i.e. no help from pgAdmin), the option is there if you want to use this code to construct the database and visualize the outputs of your queries
```MySQL
create table eval (
case_id int,
timestamp date,
score int);
insert into eval values (123, '2019-05-09', 7);
insert into eval values (123, '2019-05-03', 6);
insert into eval values (456, '2019-05-07', 1);
insert into eval values (789, '2019-05-06', 3);
insert into eval values (456, '2019-05-02', 9);
insert into eval values (789, '2019-05-08', 2);```
| github_jupyter |
#Spectral clustering para documentos
El clustering espectral es una técnica de agrupamiento basada en la topología de gráficas. Es especialmente útil cuando los datos no son convexos o cuando se trabaja, directamente, con estructuras de grafos.
##Preparación de los documentos
Trabajaremos con documentos textuales. Estos se limpiarán y se convertirán en vectores. Posteriormente, podremos aplicar el método de spectral clustering.
```
#Se importan las librerías necesarias
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
```
La librería de Natural Language Toolkit (nltk) proporciona algunos corpus con los que se puede trabajar. Por ejemplo, el corpus Gutenberg (https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html) del que usaremos algunos datos. Asimismo, obtendremos de esta librería herramientas de preprocesamiento: stemmer y lista de stopwords.
```
import nltk
#Descarga del corpus
nltk.download('gutenberg')
#Descarga de la lista de stopwords
nltk.download('stopwords')
from nltk.corpus import gutenberg
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
```
Definimos los nombres de los archivos (ids) y la lista de paro
```
#Obtiene ids de los archivos del corpus gutenberg
doc_labels = gutenberg.fileids()
#Lista de stopwords para inglés
lista_paro = stopwords.words('english')
```
Definiremos una función que se encargará de preprocesar los textos. Se eliminan símbolos, se quitan elementos de la lista de stopwords y se pasa todo a minúsculas.
```
def preprocess(document):
    """Clean a tokenized document for clustering.

    Lowercases every token, drops stopwords and non-alphabetic tokens,
    and stems the remaining words with the Porter stemmer.

    document -- iterable of word tokens
    Returns the list of stems, preserving the original order.
    """
    # Create the stemmer once instead of once per word (loop-invariant;
    # the original rebuilt a PorterStemmer on every token).
    stemmer = PorterStemmer()
    text = []
    for word in document:
        word = word.lower()
        # isalpha() first: it is a cheap check, while membership in the
        # stopword list is a linear scan.
        if word.isalpha() and word not in lista_paro:
            text.append(stemmer.stem(word))
    return text
```
Por cada documento, obtenemos la lista de sus palabras (stems) aplicando un preprocesado. Cada documento, entonces, es de la forma $d_i = \{w_1, w_2, ..., w_{N_i}\}$, donde $w_k$ son los stems del documento.
```
docs = []
for doc in doc_labels:
#Lista de palabras del documentos
arx = gutenberg.words(doc)
#Aplica la función de preprocesado
arx_prep = preprocess(arx)
docs.append(arx_prep)
#Imprime el nombre del documento, su longitud original y su longitud con preproceso
print(doc,len(arx), len(arx_prep))
```
Posteriormente, convertiremos cada documento en un vector en $\mathbb{R}^d$. Para esto, utilizaremos el algoritmo Doc2Vec.
```
# Dimensionality of the learned document vectors
dim = 300
# Context-window size used by Doc2Vec
windows_siz = 15
# Doc2Vec requires each document to carry a tag; use its integer index
documents = [TaggedDocument(doc_i, [i]) for i, doc_i in enumerate(docs)]
# Train the Doc2Vec model on the tagged corpus
model = Doc2Vec(documents, vector_size=dim, window=windows_siz, min_count=1)
# Data matrix: one row per document, `dim` columns
X = np.zeros((len(doc_labels),dim))
for j in range(0,len(doc_labels)):
    # Copy the learned vector of document j into row j
    X[j] = model.docvecs[j]
print(X)
```
###Visualización
```
#Función para plotear
def plot_words(Z, ids, color='blue'):
    """Scatter-plot the rows of Z in 2-D (via PCA) and label each point.

    Z     -- array-like, shape (n_samples, n_features)
    ids   -- one label per row of Z
    color -- a single color, or per-point values accepted by plt.scatter
    """
    # Project the data down to two dimensions for plotting.
    Z = PCA(n_components=2).fit_transform(Z)
    plt.scatter(Z[:, 0], Z[:, 1], marker='o', c=color)
    for label, x, y in zip(ids, Z[:, 0], Z[:, 1]):
        # Annotate each point just above its marker.
        plt.annotate(label, xy=(x, y), xytext=(-1, 1),
                     textcoords='offset points', ha='center', va='bottom')
    # (Removed an unused counter variable `r` that was incremented but
    # never read.)
plot_words(X, doc_labels)
plt.show()
```
##Aplicación de spectral clustering
Una vez obtenidos los vectores de los documentos, podemos aplicar el algoritmo de spectral clustering. Lo primero que tenemos que hacer es crear un grafo a partir de los documentos.
```
#Importamos las librerías necesarias
from scipy import linalg
from itertools import combinations
from operator import itemgetter
import pandas as pd
import networkx as nx
```
Necesitamos definir un graph kernel:
```
# Gaussian kernel: turns a distance into an edge weight in (0, 1]
kernel = lambda weight: np.exp(-(weight**2)/2)
# Alternative: inverse-Euclidean kernel
#kernel = lambda weight: 1./(1.+weight**2)
# Number of nodes (one per document vector)
n = X.shape[0]
# Weighted adjacency matrix of the graph
M = np.zeros((n,n))
for i,x in enumerate(X):
    # Distance from node i to every other node (candidate neighbours)
    candidates_for_x = {}
    for j,y in enumerate(X):
        # Euclidean distance between document vectors
        dist = linalg.norm(x-y)
        candidates_for_x[j] = dist
        # Selection criterion: connect nodes closer than a fixed radius.
        # NOTE(review): the threshold 3 is data-dependent — confirm it
        # suits other corpora before reusing.
        if dist < 3:
            M[i,j] = kernel(dist)
    # Alternative: connect each node to its k nearest neighbours instead
    #closest_neighs = sorted(candidates_for_x.items(), key=itemgetter(1), reverse=False)[:3]
    #for neigh, weight in closest_neighs:
    #    M[i,neigh] = kernel(weight)
    #    M[neigh,i] = kernel(weight)
# Remove the diagonal (equivalent to removing self-loops; each diagonal
# entry is kernel(0) = 1)
M = M-np.identity(n)
# Sanity check: the adjacency matrix must be symmetric
print((M == M.T).all())
print(M.shape)
```
####Visualización del grafo
Visualización en tabla:
```
df = pd.DataFrame(M, index=doc_labels, columns=doc_labels)
print(df.to_string())
```
Visualización en red:
```
#Indexado de labels
edges = {i:dat for i,dat in enumerate(doc_labels)}
nx.draw_networkx(nx.from_numpy_array(M), with_labels=True, labels=edges, font_size=8)
```
Obtenido el grafo, se obtiene la matriz Laplaciana, así como la descomposición espectral de ésta. Además, se ordenan los eigenvalores y eigenvectores.
```
# Unnormalized graph Laplacian: L = D - M, where D is the degree matrix.
L = np.diag(M.sum(0)) - M
# Eigendecomposition of L. scipy.linalg.eig returns complex values with
# negligible imaginary parts for this symmetric matrix, so keep .real.
eig_vals, eig_vecs = linalg.eig(L)
# BUG FIX: eigenvectors are the *columns* of eig_vecs, but zip() iterates
# rows. Transpose so each eigenvalue is paired with its own eigenvector
# before sorting by eigenvalue.
values = sorted(zip(eig_vals.real, eig_vecs.T), key=itemgetter(0))
# Separate the sorted eigenvalues and eigenvectors
vals, vecs = zip(*values)
# Matrix whose rows are the eigenvectors, ordered by eigenvalue
matrix = np.array(vecs)
# Plot the sorted eigenvalue spectrum (gaps hint at the cluster count)
plt.plot(np.array(vals),'o')
plt.show()
```
Finalmente, obtenemos la matriz a partir de los $k$ eigenvectores con eigenvalores más pequeños.
```
#Dimensión de los vectores resultantes
vec_siz = 2
#Obtiene la matriz
M_hat = matrix[1:vec_siz+1].T.real
print(M_hat.shape)
plot_words(M_hat,doc_labels)
```
Finalmente, aplicamos el algoritmo de k-means para clusterizar los datos.
```
from sklearn.cluster import KMeans
#Número de centroides
centroids=5
#Aplicación de kmenas
kmeans = KMeans(n_clusters=centroids).fit(M_hat)
#Obtención de los clusters
pred_lables = kmeans.predict(M_hat)
#Plot de clusters
plot_words(M_hat, doc_labels, color=pred_lables)
plt.show()
```
| github_jupyter |
# Qiskit Aer: Noise Transformation
The latest version of this notebook is available on https://github.com/Qiskit/qiskit-iqx-tutorials.
## Introduction
This notebook shows how to use the Qiskit Aer utility functions `approximate_quantum_error` and `approximate_noise_model` to transform quantum noise channels into a different, more suitable, noise channel.
Our guiding example is Clifford simulation. A Clifford simulator can efficiently simulate quantum computations which include gates only from a limited, non-universal set of gates (the Clifford gates). Not all quantum noises can be added to such simulations; hence, we aim to find a "close" noise channel which can be simulated in a Clifford simulator.
We begin by importing the transformation functions from the Aer provider utilities
```
from qiskit.providers.aer.utils import approximate_quantum_error
from qiskit.providers.aer.utils import approximate_noise_model
```
The name "approximate" suggests that these functions generate the closest (in the Hilbert-Schmidt metric) error possible to the given one.
We demonstrate the approximation using several standard error channels defined in Qiskit.
```
import numpy as np
# Import Aer QuantumError functions that will be used
from qiskit.providers.aer.noise import amplitude_damping_error
from qiskit.providers.aer.noise import reset_error
from qiskit.providers.aer.noise import pauli_error
```
## Overview
A 1-qubit quantum channel is a function $\mathcal{C}:\mathbb{C}^{2\times2}\to\mathbb{C}^{2\times2}$ mapping density operators to density operators (to ensure the image is a density operator $\mathcal{C}$ is required to be completely positive and trace preserving, **CPTP**).
Given quantum channels $\mathcal{E}_{1},\dots,\mathcal{E}_{r}$, and probabilities $p_1, p_2, \dots, p_r$ such that $0\le p_i \le 1$ and $p_1+\dots +p_r = 1$, a new quantum channel $\mathcal{C}_\mathcal{E}$ can be constructed such that $\mathcal{C}_\mathcal{E}(\rho)$ has the effect of choosing the channel $\mathcal{E}_i$ with probability $p_i$ and applying it to $\rho$.
The noise transformation function solves the following optimization problem: Given a channel $\mathcal{C}$ ("goal") and a list of channels $\mathcal{E}_{1},\dots,\mathcal{E}_{r}$, find the probabilities $p_1, p_2, \dots, p_r$ minimizing $D(\mathcal{C}, \mathcal{C}_\mathcal{E})$ according to some distance metric $D$ (the Hilbert-Schmidt metric is currently used).
To ensure the approximation is honest, in the sense that the approximate error channel serves as an "upper bound" for the actual error channel, we add the additional honesty constraint:
$$\text{F}(I,\mathcal{C})\ge F(I,\mathcal{C}_\mathcal{E})$$
Where $\text{F}$ is a fidelity measure and $I$ is the identity channel.
## Example: Approximating amplitude damping noise with reset noise.
**Amplitude damping** noise is described by a single parameter $0\le \gamma \le 1$ and given by the Kraus operators:
$$\left(\begin{array}{cc}
1 & 0\\
0 & \sqrt{1-\gamma}
\end{array}\right),\left(\begin{array}{cc}
0 & \sqrt{\gamma}\\
0 & 0
\end{array}\right)$$
**Reset** error is described by probabilities $0\le p, q\le 1$ such that $p+q\le 1$ and given by the Kraus operators:
$$\left(\begin{array}{cc}
\sqrt{p} & 0\\
0 & 0
\end{array}\right),\left(\begin{array}{cc}
0 & \sqrt{p}\\
0 & 0
\end{array}\right),\left(\begin{array}{cc}
0 & 0\\
\sqrt{q} & 0
\end{array}\right),\left(\begin{array}{cc}
0 & 0\\
0 & \sqrt{q}
\end{array}\right)$$
This can be thought of as "resetting" the quantum state of the affected qubit to $\left|0\right\rangle$ with probability $p$, to $\left|1\right\rangle$ with probability $q$, and do nothing with probability $1-(p+q)$.
It is not too difficult to determine analytically the best values of $p,q$ to approximate a $\gamma$ amplitude damping channel, see the details __[here](https://arxiv.org/abs/1207.0046)__. The best approximation is:
$$p=\frac{1}{2}\left(1+\gamma-\sqrt{1-\gamma}\right), q=0$$
```
gamma = 0.23
error = amplitude_damping_error(gamma)
results = approximate_quantum_error(error, operator_string="reset")
```
We only needed the above code to perform the actual approximation.
```
print(results)
p = (1 + gamma - np.sqrt(1 - gamma)) / 2
q = 0
print("")
print("Expected results:")
print("P(0) = {}".format(1-(p+q)))
print("P(1) = {}".format(p))
print("P(2) = {}".format(q))
```
We got the results predicted analytically.
## Different input types
The approximation function is given two inputs: The error channel to approximate, and a set of error channels that can be used in constructing the approximation.
The **error channel** to approximate can be given as any input that can be converted to the `QuantumError` object.
As an example, we explicitly construct the Kraus matrices of amplitude damping and pass to the same approximation function as before:
```
gamma = 0.23
K0 = np.array([[1,0],[0,np.sqrt(1-gamma)]])
K1 = np.array([[0,np.sqrt(gamma)],[0,0]])
results = approximate_quantum_error((K0, K1), operator_string="reset")
print(results)
```
The **error operators** that are used to construct the approximating channel can be either given as a list, a dictionary or a string indicating hard-coded channels.
Any channel can be either a list of Kraus operators, or 'QuantumError' objects.
The identity channel does not need to be passed directly; it is always implicitly used.
As an example, we approximate amplitude damping using an explicit Kraus representation for reset noises:
```
reset_to_0 = [np.array([[1,0],[0,0]]), np.array([[0,1],[0,0]])]
reset_to_1 = [np.array([[0,0],[1,0]]), np.array([[0,0],[0,1]])]
reset_kraus = (reset_to_0, reset_to_1)
gamma = 0.23
error = amplitude_damping_error(gamma)
results = approximate_quantum_error(error, operator_list=reset_kraus)
print(results)
```
Note the difference in the output channel: The probabilities are the same, but the input Kraus operators were converted to general Kraus channels, which cannot be used in a Clifford simulator. Hence, it is always better to pass a `QuantumError` object instead of the Kraus matrices, when possible.
```
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
| github_jupyter |
# Lab 9 Quantum Simulation as a Search Algorithm
Prerequisites:
- [Ch.3.8 Grover's Algorithm](https://qiskit.org/textbook/ch-algorithms/grover.html)
- [Ch.2.5 Proving Universality](https://qiskit.org/textbook/ch-gates/proving-universality.html#2.2-Unitary-and-Hermitian-matrices-)
Other relevant materials:
- [Ch 6.2 in QCQI] Michael A. Nielsen and Isaac L. Chuang. Quantum Computation and Quantum Information, p255
```
from qiskit import *
from qiskit.quantum_info import Statevector, partial_trace
from qiskit.visualization import plot_state_qsphere, plot_histogram
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
sim = Aer.get_backend('qasm_simulator')
```
<h2 style="font-size:24px;">Part 1: Hamiltonian Simulation</h2>
<br>
<div style="background: #E8E7EB; border-radius: 5px;
-moz-border-radius: 5px;">
<p style="background: #800080;
border-radius: 5px 5px 0px 0px;
padding: 10px 0px 10px 10px;
font-size:18px;
color:white;
"><b>Goal</b></p>
<p style=" padding: 0px 0px 10px 10px;
font-size:16px;"> In this lab, we consider changes to a quantum state veiwed as an evolution process generated by a given Hamiltonian. For a specified Hamiltonian, there is a corresponding unitary operator that determines the final state for any given initial state.
</p>
</div>
For an initial state, $|\psi(0)\rangle$ and a time independent Hamiltonian $H$ , the final state $|\psi(t)\rangle$ is $|\psi(t)\rangle = e^{-iHt}|\psi(0)\rangle$. Therefore, by constructing an appropriate gate for the unitary operator $e^{-iHt}$, we can build a quantum circuit that simulates the evolution of the quantum state $|\psi\rangle$.
<h3 style="font-size: 20px">1. Build a quantum circuit for a given Hamiltonian. </h3>
When the hamiltonian $H$ and the initial state of the system, $|\psi(0)\rangle$, are given by
$H = |0\rangle\langle0| + |+\rangle\langle+|, ~~~~ |\psi(0)\rangle = |+\rangle = \frac{1}{\sqrt 2}(|0\rangle + |1\rangle)$.
Build the circuit with two qubits to evolve the state, $|\psi(0)\rangle$, by $H$ for a time $\Delta t = \theta$, where the state of the system is encoded on the 0th qubit and the 1st qubit is an auxiliary. Then, the final state $|\psi(\theta)\rangle$ is $|\psi(\theta)\rangle = e^{-i\theta ~ ( |0\rangle\langle0| ~ + ~ |+\rangle\langle+| )}~|\psi(0)\rangle$.
<h4 style="font-size: 17px">📓Step A. Show that the gate H1 from the following circuit performs the operation $e^{-i\frac{\pi}{9}|0\rangle\langle0|}$ on the 0th qubit when the state of the system is encoded on the 0th qubit and the 1st qubit, auxiliary, is set to the $|0\rangle$ state.</h4>
```
h1 = QuantumCircuit(2, name = 'H1')
h1.cnot(0, 1)
h1.p(np.pi/9, 1)
h1.cnot(0, 1)
H1 = h1.to_gate()
h1.draw()
```
**Your Solution**:
<h4 style="font-size: 17px">📓Step B. Construct the gate H2 by completing the following code for the circuit `h2` to performs the operation $e^{-i\frac{\pi}{9}|+\rangle\langle+|}$ on the 0th qubit when the state of the system is encoded on the 0th qubit and the 1st qubit, auxiliary, is set to the $|0\rangle$ state. </h4>
```
h2 = QuantumCircuit(2, name='H2')
#### Your code goes here ###
#############################
H2 = h2.to_gate()
h2.draw()
```
<h3 style="font-size: 20px">2. Execute the cell below to generate the state of the 0th qubit after every iteration.</h3>
The circuit performs $(H1H2)^7|+\rangle = (~ e^{-i\frac{\pi}{9} ~ |0\rangle\langle0|}e^{-i\frac{\pi}{9}~|+\rangle\langle+|} ~)^7~|+\rangle$ on the 0th qubit. The state of the 0th qubit after each `H1H2` operation is stored in the list variable 'myst'.
```
from qiskit.quantum_info import Statevector, partial_trace
def st_out(qc):
    """Return the pure state of qubit 0 of circuit `qc` as a 1-D array.

    Traces out qubit 1 and extracts the eigenvector of the reduced
    density matrix whose eigenvalue is ~1. Valid only when qubit 0 is
    left in a (near-)pure state, as in this lab.
    """
    out = Statevector.from_instruction(qc)
    out_red = partial_trace(out, [1])
    prob, st_all = la.eig(out_red.data)
    # Eigenvalues of a density matrix are real; scipy returns them as
    # complex, and comparing complex values with </> raises a TypeError
    # in NumPy, so drop the (zero) imaginary parts first.
    prob = prob.real
    cond = (prob > 0.99) & (prob < 1.01)
    st = st_all[:, cond].ravel()
    return st
myst = []
circ = QuantumCircuit(2)
circ.h(0)
st = st_out(circ)
myst.append(Statevector(st))
for _ in range(7):
circ.append(H1, range(2))
circ.append(H2, range(2))
st = st_out(circ)
myst.append(Statevector(st))
circ.draw()
```
The following Bloch sphere picture shows the evolution of the 0th qubit state. As it shows, the state starts from the $|+\rangle$ state, rotates toward, and passes the $|0\rangle$ state. Therefore, with the appropriate angle for the `H1` and `H2` operations, the $|+\rangle$ state evolves to the $|0\rangle$ state by applying $H1H2 = e^{-i\theta ~ |0\rangle\langle0|}e^{-i\theta~|+\rangle\langle+|}$ the proper number of times.
<img src="images/L9_bloch_sphere.png" alt="Drawing" style="width: 300px; float: left!important;">
If you have installed `kaleidoscope` or run this lab on [IQX](https://quantum-computing.ibm.com), you can execute the cell below to visualize the state evolution through the interactive Bloch sphere.
```
from kaleidoscope import bloch_sphere
from matplotlib.colors import LinearSegmentedColormap, rgb2hex
cm = LinearSegmentedColormap.from_list('graypurple', ["#999999", "#AA00FF"])
vectors_color = [rgb2hex(cm(kk)) for kk in np.linspace(-1,1,len(myst))]
bloch_sphere(myst, vectors_color = vectors_color)
```
<h2 style="font-size:24px;">Part 2: Quantum Search as a Quantum Simulation</h2>
<br>
<div style="background: #E8E7EB; border-radius: 5px;
-moz-border-radius: 5px;">
<p style="background: #800080;
border-radius: 5px 5px 0px 0px;
padding: 10px 0px 10px 10px;
font-size:18px;
color:white;
"><b>Goal</b></p>
<p style=" padding: 0px 0px 10px 10px;
font-size:16px;"> In this part of the lab, we solve a search problem through quantum simulation.
</p>
</div>
In Part1, we showed that the Hamiltonian, $H$, transforms the state, $|\psi_i\rangle$, to $|\psi_j\rangle$ when its structure depends on both states as $ H =|\psi_j\rangle\langle\psi_j| + |\psi_i\rangle\langle\psi_i| $ with a proper time duration.
Considering a search problem with a unique solution, we should be able to find the solution with the form of the Hamiltonian, $ H = |x\rangle\langle x| + |\psi\rangle\langle\psi|, $ when all possible items are encoded in a superposition state $|\psi\rangle$ and given as the initial state, same as in Grover's algorithm, while $|x\rangle$ represents the unknown solution.
Applying the unitary operator, $U = e^{-iH\Delta t}$ on the initial state, $|\psi\rangle$, right number of times with the properly chosen $\Delta t$, should evolve the state $|\psi\rangle$ into the solution $|x\rangle$ or close enough to it. The following code constructs the oracle gate for the search problem. Execute the cell below.
```
n = 5
qc = QuantumCircuit(n+1, name='Oracle')
qc.mct(list(range(n)), n)
Oracle = qc.to_gate()
```
The following circuit encodes the phase $\pi$ on the solution state and zero on the other items through phase kickback with the 5th qubit as an auxiliary. Therefore, the output state of the circuit is $(|\psi\rangle - |x\rangle) + e^{i\pi}|x\rangle$, which can be confirmed visually using a qsphere plot where the color indicates the phase of each basis state. Run the following two cells.
```
test = QuantumCircuit(n+1)
test.x(n)
test.h(range(n+1))
test.append(Oracle, range(n+1))
test.h(n)
test.draw()
st = Statevector.from_instruction(test)
st_red = partial_trace(st, [5])
plot_state_qsphere(st_red)
```
<h3 style="font-size: 20px">1. Construct a circuit to approximate the Hamiltonian, $H = |x\rangle\langle x| + |\psi\rangle\langle\psi|$, when all possible items are encoded in a superposition state $|\psi\rangle$ and given as the initial state while $|x\rangle$ represents the unique unknown solution.</h3>
As we did in Part 1, we build the circuit for the simulation with the Hamiltonian, but with more qubits to examine all the items in the question. Regard the search problem as having one solution out of 32 items.
<h4 style="font-size: 17px">📓Step A. Construct the gate H1 performing the operation $e^{-i\Delta t|\psi\rangle\langle\psi|}$ by completing the following code.</h4>
```
def H1(delt, n=5):
h1 = QuantumCircuit(n+1, name='H1')
#### Your code goes here ######
###############################
return h1.to_gate()
```
<h4 style="font-size: 17px">📓Step B. Construct the gate H2 performing the operation $e^{-i\Delta t|x\rangle\langle x|}$ by completing the following code.</h4>
```
def H2(delt, n=5):
    """Return a gate implementing exp(-i*delt*|x><x|) on n+1 qubits,
    where |x> is the unknown solution state marked by the Oracle.

    Args:
        delt: evolution time step (Delta t).
        n: number of search qubits (default 5; search space of 2**n items).

    Exercise stub: the body is to be completed by the student.
    NOTE: the notebook export had lost the function-body indentation; it is
    restored here so the cell is valid Python.
    """
    h2 = QuantumCircuit(n+1, name='H2')
    #### Your code goes here ######
    ###############################
    return h2.to_gate()
```
<h4 style="font-size: 17px">📓Step C. Create the circuit, 'sim_h', to compute $e^{-i \pi H_{app}}|\psi\rangle = (~e^{-i\pi~|x\rangle\langle x|}e^{-i\pi~|\psi\rangle\langle\psi|}~)|\psi\rangle $ which evolves the state $|\psi\rangle$ under the Hamiltonian $H = |x\rangle\langle x| + |\psi\rangle\langle\psi|$ approximately over the time duration $\Delta t = \pi$.</h4>
The state $|\psi\rangle$ represents the superposition state of all possible items.
Utilize the gates `H1` and `H2`.
```
#### Your code goes here ####
############
# Build `sim_h` from the H1 and H2 gates with delt = pi, then visualize it.
sim_h.draw()
```
<h3 style="font-size: 20px">2. Show that the search problem can be solved through quantum simulation with $H_{appr}$ by verifying the two operations, Grover's algorithm and $U = e^{-i\Delta t~H_{appr}}$ with $\Delta t = \pi$, are equivalent. </h3>
<h4 style="font-size: 17px">Step A. The following circuit, `grover`, runs the Grover's algorithm for the problem to find a solution for the oracle that we built above. Run the cell below. </h4>
```
# Diffusion (amplitude-amplification) operator: H^n X^n (mct) X^n H^n,
# which reflects the search register about the uniform superposition.
qc = QuantumCircuit(n+1, name='Amp')
qc.h(range(n))
qc.x(range(n))
qc.mct(list(range(n)), n)  # phase flip of |00...0> via kickback on the auxiliary
qc.x(range(n))
qc.h(range(n))
Amp = qc.to_gate()
# One Grover iteration: Oracle (phase-mark the solution) followed by Amp.
grover = QuantumCircuit(n+1)
grover.x(n)  # auxiliary to |1>
grover.h(range(n+1))  # uniform superposition; auxiliary -> |->
grover.append(Oracle, range(n+1))
grover.append(Amp, range(n+1))
grover.h(n)  # undo the |-> preparation on the auxiliary
grover.x(n)
grover.draw()
```
<h4 style="font-size: 17px">Step B. Upon executing the cells below, the result shows that the circuits, 'grover' and 'sim_h' are identical up to a global phase. </h4>
```
# Compare the final states of the Hamiltonian-simulation and Grover circuits.
st_simh = Statevector.from_instruction(sim_h)
st_grover = Statevector.from_instruction(grover)
# Statevector `==` tests exact equality and is False when the states differ by
# a global phase; `equiv()` compares up to a global phase, which is exactly the
# claim made in the text ("identical up to a global phase").
print('grover circuit and sim_h circuit generate the same output state: ', st_simh.equiv(st_grover))
plot_state_qsphere(st_simh)
plot_state_qsphere(st_grover)
```
<h4 style="font-size: 17px">📓Step C. Find the number of Grover iterations, R, needed to find the solution of the Oracle that we built.</h4>
```
#### your code goes here ####
######
# R is the optimal Grover iteration count — approximately
# floor(pi/4 * sqrt(2**n / M)) for M solutions among 2**n items.
print(R)
```
<h4 style="font-size: 17px">Step D. Find the solution to the search problem, for the Oracle that we built, through Grover's algorithm and the simulation computing $e^{-i R\pi H_{app}}|\psi\rangle = (~e^{-i\pi~|x\rangle\langle x|}e^{-i\pi~|\psi\rangle\langle\psi|}~)^R|\psi\rangle $ where R is the number of iterations.</h4>
```
## The circuit to solve the search problem through Grover's algorithm.
## The circuit to solve the search problem through Grover's algorithm.
n = 5
qc_grover = QuantumCircuit(n+1, n)  # n search qubits + 1 auxiliary; n classical bits
# Prepare the auxiliary (qubit n) in |-> and the search register in a uniform superposition.
qc_grover.x(n)
qc_grover.h(range(n+1))
# One Grover iteration = Oracle (phase-mark the solution) + Amp (inversion about the mean).
# NOTE: the loop body was de-indented in the notebook export; both appends
# belong inside the loop, restored here so the cell is valid Python.
for _ in range(int(R)):
    qc_grover.append(Oracle, range(n+1))
    qc_grover.append(Amp, range(n+1))
# Uncompute the auxiliary preparation.
qc_grover.h(n)
qc_grover.x(n)
qc_grover.barrier()
qc_grover.measure(range(n), range(n))  # measure only the search qubits
qc_grover.draw()
```
📓 Complete the code to build the circuit, `qc_sim`, to solve the search problem through the simulation.
```
qc_sim = QuantumCircuit(n+1, n)  # n search qubits + 1 auxiliary; n classical bits
qc_sim.h(range(n))  # start from the uniform superposition |psi>
#### Your code goes here ####
```
Run the following cell to simulate both circuits, `qc_grover` and `qc_sim` and compare their solutions.
```
# Run both circuits on the simulator and collect measurement counts for comparison.
counts = execute([qc_grover, qc_sim], sim).result().get_counts()
plot_histogram(counts, legend=['Grover', 'Hamiltonian'])
```
<h3 style="font-size: 20px">3. The following result shows an example where the solution can be found with probability exactly equal to one through quantum simulation by choosing the proper time duration $\Delta t$.</h3>
```
n = 5
qc = QuantumCircuit(n+1, n)  # n search qubits + 1 auxiliary; n classical bits
qc.h(range(n))  # uniform superposition over the 2**n items
# Time step chosen so that R = 6 repetitions rotate the state (essentially)
# exactly onto the solution.
delt, R = np.pi/2.1, 6
# Repeated application of the two Hamiltonian factors (Trotter-style).
# NOTE: the loop body was de-indented in the notebook export; both appends
# belong inside the loop, restored here so the cell is valid Python.
for _ in range(int(R)):
    qc.append(H1(delt), range(n+1))
    qc.append(H2(delt), range(n+1))
qc.measure(range(n), range(n))
qc.draw()
count = execute(qc, sim).result().get_counts()
plot_histogram(count)
```
| github_jupyter |
# Orders of Magnitude
The simulation examples in the previous chapters are conceptual. As we begin to build simulation models of realistic biological processes, we need to obtain information such as the numerical values of the parameters that appear in the dynamic mass balances. We thus go through a process of estimating the approximate numerical values of various quantities and parameters. Size, mass, chemical composition, metabolic complexity, and genetic makeup represent characteristics for which we now have extensive data available. Based on typical values for these quantities we show how one can make useful estimates of concentrations and the dynamic features of the intracellular environment.
## Cellular Composition and Ultra-structure
It is often stated that all biologists have two favorite organisms, _E. coli_ and another one. Fortunately, much data exists for _E. coli,_ and we can go through parameter and variable estimation procedures using it as an example. These estimation procedures can be performed for other target organisms, cell types, and cellular processes in an analogous manner if the appropriate data is available. We organize the discussion around key questions.
### The interior of a cell
The typical bacterial cell, like _E. coli,_ is on the order of microns in size (Figure 7.1a). The _E. coli_ cell is a short cylinder, about 2-4 micron in length with a 0.5 to 1.5 micron diameter, Figure 7.1b. The size of the _E. coli_ cell is growth rate dependent; the faster the cell grows, the larger it is.
It has a complex intra-cellular environment. One can isolate and crystallize macromolecules and obtain their individual structure. However, this approach gives limited information about the configuration and location of a protein in a living functional cell. The intracellular milieu can be reconstructed from available data to yield an indirect picture of the interior of a cell. Such a reconstruction has been carried out by Goodsell (Goodsell, 1993). Based on well-known chemical composition data, this image provides us with about a million-fold magnification of the interior of an _E. coli_ cell, see Figure 7.1c. Examination of this picture of the interior of the _E. coli_ cell is instructive:
* The intracellular environment is very crowded and represents a dense solution. Protein density in some sub-cellular structures can approach that found in protein crystals, and the intracellular environment is sometimes referred to as 'soft glass,' suggesting that it is close to a crystalline state.
* The chemical composition of this dense mixture is very complex. The majority of cellular mass are macromolecules with metabolites, the small molecular weight molecules interspersed among the macromolecules.
* In this crowded solution, the motion of the macromolecules is estimated to be one hundred to even one thousand-fold slower than in a dilute solution. The time it takes a 160 kDa protein to move 10 nm - a distance that corresponds approximately to the size of the protein molecule - is estimated to be 0.2 to 2 milliseconds. Moving one cellular diameter of approximately 0.64 $\mu m$, or 640 nm, would then require 1-10 min. The motion of metabolites is expected to be significantly faster due to their smaller size.

**Figure 7.1:** (a) An electron micrograph of the _E. coli_ cell, from Ingraham83. (b) Characteristic features of an _E. coli_ cell. (c) The interior of an _E. coli_ cell, © David S. Goodsell 1999.
### The overall chemical composition of a cell
With these initial observations, let's take a closer look at the chemical composition of the cell. Most cells are about 70% water. It is likely that cellular functions and evolutionary design is constrained by the solvent capacity of water, and the fact that most cells are approximately 70% water suggests that all cells are close to these constraints.
The 'biomass' is about 30% of cell weight. It is sometimes referred to as the dry weight of a cell, and is denoted by gDW. The 30% of the weight that is biomass is comprised of 26% macromolecules, 1% inorganic ions, and 3% low molecular weight metabolites. The basic chemical makeup of prokaryotic cells is shown in Table 7.1, and contrasted to that of a typical animal cell. The gross chemical composition is similar except that animal cells, and eukaryotic cells in general, have a higher lipid content because of the membrane requirement for cellular compartmentalization. Approximate cellular composition is available or relatively easy to get for other cell types.
**Table 7.1:** Approximate composition of cells, from (Alberts, 1983). The numbers given are weight percent.

### The detailed composition of _E. coli_
The total weight of a bacterial cell is about 1 picogram. The density of cells barely exceeds that of water, and cellular density is typically around 1.04 to 1.08 $gm/cm^3$. Since the density of cells is close to unity, a cellular concentration of about $10^{12}$ cells per milliliter represents the packing density of _E. coli_ cells. Detailed and recent data is found later in Figure 7.2.
This information provides the basis for estimating the numerical values for a number of important quantities that relate to dynamic network modeling. Having such order of magnitude information provides a frame of reference, allows one to develop a conceptual model of cells, evaluate the numerical outputs from models, and perform any approximation or simplification that is useful and justified based on the numbers. "Numbers count," even in biology.
### Order of magnitude estimates
It is relatively easy to estimate the approximate order of magnitude of the numerical values of key quantities. Enrico Fermi, the famous physicist, was well-known for his skills with such calculations, and they are thus known as Fermi problems. We give a couple of examples to illustrate the order of magnitude estimation process.
#### 1. How many piano tuners are in Chicago?
This question represents a classical Fermi problem. First we state assumptions or key numbers:
**1.** There are approximately 5,000,000 people living in Chicago.
**2.** On average, there are two persons in each household in Chicago.
**3.** Roughly one household in twenty has a piano that is tuned regularly.
**4.** Pianos that are tuned regularly are tuned on average about once per year.
**5.** It takes a piano tuner about two hours to tune a piano, including travel time.
**6.** Each piano tuner works eight hours in a day, five days in a week, and 50 weeks in a year.
From these assumptions we can compute:
* that the number of piano tunings in a single year in Chicago is:
(5,000,000 persons in Chicago)/(2 persons/household) $*$ (1 piano/20 households) $*$ (1 piano tuning per piano per year) = 125,000 piano tunings per year in Chicago.
* that the average piano tuner performs (50 weeks/year) $*$ (5 days/week) $*$ (8 hours/day)/(1 piano tuning per 2 hours per piano tuner) = 1000 piano tunings per year per piano tuner.
* then dividing gives (125,000 piano tuning per year in Chicago) / (1000 piano tunings per year per piano tuner) = 125 piano tuners in Chicago, which is the answer that we sought.
#### 2. How far can a retrovirus diffuse before it falls apart?
A similar procedure that relies more on scientific principles can be used to answer this question. The half-life of retroviruses, $t_{0.5}$, is measured to be about 5 to 6 hours. The time constant for diffusion is:
$$\begin{equation} t_{diff} = l^2 / D \tag{7.1} \end{equation}$$
where $l$ is the diffusion distance and $D$ is the diffusion constant. Then the distance $l_{0.5}$ that a virus can travel over a half-life is
$$\begin{equation} l_{0.5} = \sqrt{D\ t_{0.5}} \tag{7.2} \end{equation}$$
Using a numerical value for $D$ of $6.5*10^{-8}\ cm^2/sec$ that is computed from the Stokes-Einstein equation for a 100 nm particle (approximately the diameter of the retrovirus) the estimate is about 500 $\mu m$ (Chuck, 1996). A fairly short distance that limits how far a virus can go to infect a target cell.
### A multi-scale view
The cellular composition of cells is complex. More complex yet is the intricate and coordinated web of complex functions that underlie the physiological state of a cell. We can view this as a multi-scale relationship, Figure 7.2. Based on cellular composition and other data we can estimate the overall parameters that are associated with cellular functions. Here we will focus on metabolism, macro-molecular synthesis and overall cellular states.

**Figure 7.2:** A multi-scale view of metabolism, macromolecular synthesis, and cellular functions. Prokaryotic cell (Synechocytis image from W. Vermaas, Arizona State University). Prokaryotic cell structures (purified carboxysomes) image from T. Yates, M. Yeager, and K. Dryden. Macromolecular complexes image © 2000, David S. Goodsell.
## Metabolism
Biomass composition allows the estimation of important overall features of metabolic processes. These quantities are basically concentrations (abundance), rates of change (fluxes), and time constants (response times, sensitivities, etc). For metabolism, we can readily estimate reasonable values for these quantities, and we again organize the discussion around key questions.
### What are typical concentrations?
#### Estimation
The approximate number of different metabolites present in a given cell is on the order of 1000 (Feist, 2007). By assuming that metabolite has a median molecular weight of about 312 gm/mol (Figure 7.3a) and that the fraction of metabolites of the wet weight is 0.01, we can estimate a typical metabolite concentration of:
$$\begin{equation} x_{avg} \approx \frac{1 gm/cm^3 * 0.01}{1000*312\ gm/mol} \approx 32 \mu M \tag{7.3} \end{equation}$$
The volume of a bacterial cell is about one cubic micron, or about one femtoliter $(=10^{-15}$ liter$)$. Since a cubic micron is a logical reference volume, we convert the concentration unit as follows:
$$\begin{align} 1 \mu M &= \frac{10^{-6} mole}{1\ L} * \frac{10^{-15}\ L}{1\ \mu m^3} * \frac{6 * 10^{23}\ molecules}{mol} \tag{7.4} \\ &= 600\ molecules/\ \mu m^3 \tag{7.5} \end{align}$$
This number is remarkably small. A typical metabolite concentration of $32 \mu M$ then translates into a mere $19,000$ molecules per cubic micron. One would expect that such low concentrations would lead to slow reaction rates. However, metabolic reaction rates are fairly rapid. As discussed in Chapter 5, cells have evolved highly efficient enzymes to achieve high reaction rates that occur even in the presence of low metabolite concentrations.

**Figure 7.3:** Details of _E. coli_ K12 MG1655 composition and properties. (a) The average molecular weight of metabolites is 500 gm/mol and the median is 312 gm/mol. Molecular weight distribution. (b) Thermodynamic properties of the reactions in the iAF1260 reconstruction the metabolic network. (c) Size distribution of ORF lengths or protein sizes: protein size distribution. The average protein length is 316 amino acids, the median is 281. Average molecular weight of _E. coli's_ proteins (monomers): 34.7 kDa Median: 30.828 kDa (d) distribution of protein concentrations: relative protein abundance distribution. See (Feist, 2007) and (Riley, 2006) for details. Prepared by Vasiliy Portnoy.
#### Measurement
Experimentally determined ranges of metabolite concentrations fall around the estimated range; an example is provided in Table 7.2. Surprisingly, glutamate is at a concentration that falls within the 100 millimolar range in _E. coli_. Other important metabolites such as ATP tend to fall in the millimolar range. Intermediates of pathways are often in the micromolar range. Several on-line resources are now available for metabolic concentration data, Table 7.3.
**Table 7.2:** Measured and predicted parameters for _E. coli_ growing on minimal media. Taken from (Yuan, 2006).

**Table 7.3:** Publicly available metabolic resources (above the dashed line) and proteomic resources (below the dashed line). Assembled by Vasiliy Portnoy.

### What are typical metabolic fluxes?
#### Rates of diffusion
In estimating reaction rates we first need to know if they are diffusion limited. Typical cellular dimensions are on the order of microns, or less. The diffusion constants for metabolites is on the order of $10^{-5}\ cm^2/sec$ and $10^{-6}\ cm^2/sec$. These figures translate into diffusional response times that are on the order of:
$$\begin{equation} t_{diff} = \frac{l^2}{D} = \frac{(10^{-4} cm)^2}{10^{-5}\ \text{to}\ 10^{-6} cm^2 / sec} \approx 1-10\ msec \tag{7.6} \end{equation}$$
or faster. The metabolic dynamics of interest are much slower than milliseconds. Although more detail about the cell's finer spatial structure is becoming increasingly available, it is unlikely, from a dynamic modeling standpoint, that spatial concentration gradients will be a key concern for dynamic modeling of metabolic states in bacterial cells (Weisz, 1973).
#### Estimating maximal reaction rates
Reaction rates in cells are limited by the achievable kinetics. Few collections of enzyme kinetic parameters are available in the literature, see Table 7.3. One observation from such collections is that the biomolecular association rate constant, $k_1$, for a substrate $(S)$ to an enzyme $(E)$;
$$\begin{equation} S + E \stackrel{k_1}{\longrightarrow} X \tag{7.7} \end{equation}$$
is on the order of $10^8 M^{-1} sec^{-1}$. This numerical value corresponds to the estimated theoretical limit, due to diffusional constraints (Gutfreund, 1972). The corresponding number for macromolecules is about three orders of magnitude lower.
Using the order of magnitude values for concentrations of metabolites given above and for enzymes in the next section, we find the representative association rate of substrate to enzymes to be on the order of
$$\begin{equation} k_1 s e = 10^8 (M*sec)^{-1} * 10^{-4}M * 10^{-6}M = 0.01\ M/sec \tag{7.8} \end{equation}$$
that translates into about
$$\begin{equation} k_1 s e = 10^6\ molecules / \mu m^3 sec \tag{7.9} \end{equation}$$
that is only one million molecules per cubic micron per second. However, the binding of the substrate to the enzyme is typically reversible and a better order of magnitude estimate for _net_ reaction rates is obtained by considering the release rate of the product from the substrate-enzyme complex, $X$. This release step tends to be the slowest step in enzyme catalysis (Albery 1976, Albery 1977, Cleland 1975). Typical values for the release rate constant, $k_2$,
$$\begin{equation} X \stackrel{k_2}{\rightarrow} P + E \tag{7.10} \end{equation}$$
are $100-1000\ sec^{-1}$. If the concentration of the intermediate substrate-enzyme complex, $X$, is on the order of 1 $\mu M$ we get a release rate of about
$$\begin{equation} k_2x = 10^4\ \text{to}\ 10^5 molecules/\mu m^3 sec \tag{7.11} \end{equation}$$
We can compare the estimate in Eq. (7.9) to observed metabolic fluxes, see Table 7.4. Uptake and secretion rates of major metabolites during bacterial growth represent high flux pathways.
**Table 7.4:** Typical metabolic fluxes measured in _E. coli_ K12 MG1655 grown under oxic and anoxic conditions.

#### Measured kinetic constants
There are now several accessible sources of information that contain kinetic data for enzymes and the chemical transformation that they catalyze. For kinetic information, both BRENDA and SABIO-RK (Wittig, 2006) are resources of literature curated constants, including rates and saturation levels, Table 7.2.2. Unlike stoichiometric information which is universal, kinetic parameters are highly condition-dependent. _In vitro_ kinetic assays typically do not represent in vivo conditions. Factors such as cofactor binding, pH, and unknown interactions with metabolites and proteins are likely causes.
#### Thermodynamics
While computational prediction of enzyme kinetic rates is difficult, obtaining thermodynamic values is more feasible. Estimates of metabolite standard transformed Gibbs energy of formation can be derived using an approach called _group contribution method_ (Mavrovouniotis 1991). This method considers a single compound as being made up of smaller structural subgroups. The metabolite standard Gibbs energy of formation associated with structural subgroups commonly found in metabolites are available in the literature and in the NIST database urlnist. To estimate the metabolite standard Gibbs energy of formation of the entire compound, the contributions from each of the subgroups are summed along with an origin term. The group contribution approach has been used to estimate standard transformed Gibbs energy of formation for 84% of the metabolites in the genome scale model of _E. coli_ (Feist 2007, Henry 2006, Henry 2007).
Thermodynamic values can also be obtained by integrating experimentally measured parameters and algorithms which implement sophisticated theory from biophysical chemistry (Alberty 2003, Alberty 2006). Combining this information with the results from group contribution method provides standard transformed Gibbs energy of formation for 96% of the reactions in the genome-scale _E. coli_ model (Feist 2007), see Figure 7.3b.
### What are typical turnover times?
As outlined in Chapter 2, turnover times can be estimated by taking that ratio of the concentration relative to the flux of degradation. Both concentrations and fluxes have been estimated above. Some specific examples of estimated turnover times are now provided.
#### Glucose turnover in rapidly growing E. coli cells
With an intracellular concentration of glucose of 1 to 5 mM, the estimate of the internal glucose turnover time is
$$\begin{equation} \tau_{glu} = \frac{6-30*10^5\ molecules/cell}{4.2 * 10^5\ \text{to}\ 8.4*10^5\ molecules/ \mu m^3 /sec} = 1\ \text{to} \ 8\ sec \tag{7.12} \end{equation}$$
#### Response of red cell glycolytic intermediates
A typical glycolytic flux in the red cell is about 1.25 mM/hr. By using this number and measured concentrations, we can estimate the turnover times for the intermediates of glycolysis by simply using:
$$\begin{equation} t_R = \frac{x_{avg}}{1.25 \ mM/hr} \tag{7.13} \end{equation}$$
The results are shown in Table 7.5. We see the sharp distribution of turnover times that appears. Note that the turnover times are set by the relative concentrations since the flux through the pathway is the same. Thus the least abundant metabolites will have the fastest turnover. At a constant flux, the relative concentrations are set by the kinetic constants.
**Table 7.5:** Turnover times for the glycolytic intermediates in the red blood cell. The glycolytic flux is assumed to be 1.25 mM/hr = 0.35 $\mu M/sec$ and the Rapoport-Luebering shunt flux is about 0.5 mM/hr. Table adapted from (Joshi, 1990).

#### Response of the energy charge
Exchange of high energy bonds between the various carriers is on the order of minutes. The dynamics of this energy pool occur on the middle time scale of minutes as described earlier, see Figure 7.4.

**Figure 7.4:** Responses in energy transduction processes in cells. (a) Effect of addition of glucose on the energy charge of Ehrlich ascites tumor cells. Redrawn based on (Atkinson, 1977). (b) Transient response of the transmembrane gradient, from (Konings, 1983). Generation of a proton motive force in energy starved _S. cremoris_ upon addition of lactose (indicated by arrows) at different times after the start of starvation (t=0).
#### Response of transmembrane charge gradients
Cells store energy by extruding protons across membranes. The consequence is the formation of an osmotic and charge gradient that results in the so-called _proton motive force,_ denoted as $\Delta \mu_{H^+}$. It is defined by:
$$\begin{equation} \Delta \mu_{H^+} = \Delta \Psi - Z\Delta pH \tag{7.14} \end{equation}$$
where $\Delta \Psi$ is the charge gradient and $\Delta pH$ is the hydrogen ion gradient. The parameter $Z$ takes a value of about 60 mV under physiological conditions. The transient response of gradient establishment is very rapid, Figure 7.4.
(Konings, 1983)
#### Conversion between different forms of energy
If energy is to be readily exchanged between transmembrane gradients and the high energy phosphate bond system, their displacement from equilibrium should be about the same. Based on typical ATP, ADP, and $P_i$ concentrations, one can calculate the transmembrane gradient to be about -180mV. Table 7.6 shows that observed values for the transmembrane gradient, $\Delta \widetilde{\mu}$, are on the order of -180 to -220 mV. It is interesting to note that the maximum gradient that a bi-lipid layer can withstand is on the order of -280mV (Konings, 1983), based on electrostatic considerations.
**Table 7.6:** Typical values (mV) for the transmembrane electrochemical potential gradient, reproduced from (Konings, 1983).

#### What are typical power densities?
The metabolic rates estimated above come with energy transmission through key cofactors. The highest energy production and dissipation rates are associated with energy transducing membranes. The ATP molecule is considered to be an energy currency of the cell, allowing one to estimate the power density in the cell/organelle based on the ATP production rate.
#### Power density in mitochondria
The rate of ATP production in mitochondria can be measured. Since we know the energy in each phosphate bond and the volume of the mitochondria, we can estimate the volumetric rate of energy production in the mitochondria.
Reported rates of ATP production in rat mitochondria from succinate are on the order of $6*10^{-19}$ mol ATP/mitochondria/sec Schwerzmann86, taking place in a volume of about 0.27 $\mu m^3$. The energy in the phosphate bond about is -52kJ/mol ATP at physiological conditions. These numbers lead to the computation of a per unit volume energy production rate of $2.2*10^{-18}$ mol ATP/$\mu m^3/sec$, or $10^{-13}W/\mu m^3\ (0.1 pW/\mu m^3)$.
#### Power density in chloroplast of green algae
In _Chlamydomonas reinhardtii_, the rate of ATP production of chloroplast varies between $9.0*10^{-17}\ \text{to}\ 1.4*10^{-16}$ mol ATP/chloroplast/sec depending on the light intensity (Baroli 2003, Burns 1990, Melis 2000, Ross 1995) in the volume of 17.4 $\mu m^3$ (Harris 1989a). Thus, the volumetric energy production rate of chloroplast is on the order of $5*10^{-18}$ mol ATP/$\mu m^3/sec$, or $3*10^{-13}W/\mu m^3\ (0.3 pW/\mu m^3)$.
#### Power density in rapidly growing E. coli cells:
A similar estimate of energy production rates can be performed for microorganisms. The aerobic glucose consumption of _E. coli_ is about 10 mmol/gDW/hr. The weight of a cell is about $2.8*10^{-13}$ gDW/cell. The ATP yield on glucose is about 17.5 ATP/glucose. These numbers allow us to compute the energy generation density from the ATP production rate of $1.4*10^{-17}$ mol ATP/cell/sec, giving a power density of $7.3*10^{-13}W/\mu m^3\ (0.7 pW/\mu m^3)$, which is similar to the numbers computed for the mitochondria and chloroplast above.
## Macromolecules
We now look at the abundance, concentration and turnover rates of macromolecules in the bacterial cell. We are interested in the genome, RNA and protein molecules.
### What are typical characteristics of a genome?
Sizes of genomes vary significantly amongst different organisms, see Table 7.7. For bacteria, they vary from about 0.5 to 9 million base pairs. The key features of the _E. coli_ K-12 MG1655 genome are summarized in Table 7.8. There are about 4500 ORFs on the genome of an average length of about 1kb. This means that the average protein size is 316 amino acids, see Figure 7.3c.
**Table 7.7:** Genome sizes. A selection of representative genome sizes from the rapidly growing list of organisms whose genomes have been sequenced. Adapted from Kimball's Biology Pages.

**Table 7.8:** Some features of the _E. coli_ genome. From (Blattner 1997).

The number of RNA polymerase binding sites is estimated to be about 2800, leading to an estimate of about 1.6 ORFs $(\approx 4500/2800)$ per transcription unit. There are roughly 3000 copies of the RNA polymerase present in an _E. coli_ cell (Wagner 2000). Thus if 1000 of the transcription units are active at any given time, there are only 2-3 RNA polymerase molecules available for each transcription unit. The promoters have different binding strength and thus recruit a different number of RNA polymerase molecules each. ChIP-chip data can be used to estimate this distribution.
### What are typical protein concentrations?
Cells represent a fairly dense solution of protein. One can estimate the concentration ranges for individual enzymes in cells. If we assume that the cell has about 1000 proteins with an average molecular weight of 34.7kD, as is typical for an _E. coli_ cell, see Figure 7.3c, and given the fact that the cellular biomass is about 15% protein, we get:
$$\begin{equation} e_{tot} \approx \frac{1\ gm/cm^3 * 0.15}{1000 * 34700 gm/mole} = 4.32 \mu M \tag{7.15} \end{equation}$$
This estimate is, indeed, the range into which the _in vivo_ concentration of most proteins fall. It corresponds to about 2500 molecules of a particular protein molecule per cubic micron. As with metabolites there is a significant distribution around the estimate of Eq. (7.15). Important proteins such as the enzymes catalyzing major catabolic reactions tend to be present in higher concentrations, and pathways with smaller fluxes have their enzymes in lower concentrations. It should be noted that we are not assuming that all these proteins are in solution; the above number should be viewed more as a molar density.
The distribution about this mean is significant, Figure 7.3d. Many of the proteins in _E. coli_ are in concentrations as low as a few dozen per cell. The _E. coli_ cell is believed to have about 200 major proteins which brings our estimate for the abundant ones to about 12,000 copies per cell. The _E. coli_ cell has a capacity to carry about 2.0-2.3 million protein molecules.
### What are typical fluxes?
The rates of synthesis of the major classes of macromolecules in _E. coli_ are summarized in Table 7.9. The genome can be replicated in 40 min with two replication forks. This means that the speed of DNA polymerase is estimated to be
$$\begin{equation} \text{rate of DNA polymerase} = \frac{4.4*10^6\ \text{bp}}{2*40*60} \approx 900\ \text{bp/sec/fork} \tag{7.16} \end{equation}$$
RNA polymerase is much slower at 40-50 bp/sec and the ribosomes operate at about 12-21 peptide bonds per ribosome per second.
#### Protein synthesis capacity in _E. coli_
We can estimate the number of peptide bonds (pb) produced by _E. coli_ per second. To do so we will need the rate of peptide bond formation by the ribosome (12 to 21 bp/ribosome/sec (Bremer 1996, Dennis 1974, Young 1976)); and number of ribosomes present in the _E. coli_ cell $(7*10^3\ \text{to} \ 7*10^4$ ribosomes/cell depending on the growth rate (Bremer 1996)). So the total number of the peptide bonds that _E. coli_ can make per second is on the order of: $8*10^4\ \text{to}\ 1.5*10^6$ pb/cell/sec
The average size of a protein in _E. coli_ is about 320 amino acids. At about 45 to 60 min doubling time, the total amount of protein produced by _E. coli_ per second is ~300 to 900 protein molecules/cell/sec. This is equivalent to $1\ \text{to}\ 3*10^6$ molecules/cell/h as a function of growth rate, about the total number of protein per cell given above.
#### Maximum protein production rate from a single gene in murine cells
The total amount of the protein formed from a single gene in the mammalian cell can be estimated based on the total amount of mRNA present in the cytoplasm from a single gene, the rate of translation of the mRNA molecule by ribosomes, and the ribosomal spacing (Savinell 1989). Additional factors needed include gene dosage (here taken as 1), rate of mRNA degradation, velocity of the RNA polymerase II molecule, and the growth rate (Savinell 1989).
Murine hybridoma cell lines are commonly used for antibody production. For this cell type, the total amount of mRNA from a single antibody encoding gene in the cytoplasm is on the order of 40,000 mRNA molecules/cell (Gilmore 1979, Schibler 1978) the ribosomal synthesis rate is on the order of 20 nucleotides/sec [Potter72], and the ribosomal spacing on the mRNA is between 90-100 nucleotides (Christensen 1987, Potter 1972). Multiplying these numbers, we can estimate the protein production rate in a hybridoma cell line to be approximately 3000 - 6000 protein molecules/cell/sec.
### What are the typical turnover times?
The assembly of new macromolecules such as RNA and proteins requires the source of nucleotides and amino acids. These building blocks are generated by the degradation of existing RNA molecules and proteins. Many cellular components are constantly degraded and synthesized. This process is commonly characterized by the turnover rates and half-lives. Intracellular protein turnover is experimentally assessed by an addition of an isotope-labeled amino acid mixture to the normally growing or non-growing cells (Levine 1965, Pratt 2002). It has been shown that the rate of breakdown of an individual proteins is on the order of 2-20% per hour in _E. coli_ culture (Levine 1965, Neidhardt 1996).
## Cell Growth and Phenotypic Functions
### What are typical cell-specific production rates?
The estimated rates of metabolism and macromolecular synthesis can be used to compute various cellular functions and their limitations. Such computations can in turn be used for bioengineering design purposes, environmental roles and impacts of microorganisms, and for other purposes. We provide a couple of simple examples.
#### Limits on volumetric productivity
_E. coli_ is one of the most commonly used host organisms for metabolic engineering and overproduction of metabolites. In many cases, the glycolytic flux acts as a carbon entry point to the pathway for metabolite overproduction (Eiteman 2008, Jantama 2008, Yomano 2008, Zhu 2008). Thus, the substrate uptake rate (SUR) is one of the critical characteristics of the productive capabilities of the engineered cell.
Let us examine the wild-type _E. coli_ grown on glucose under anoxic conditions. As shown in Table 7.4, the (SUR) is on the order of 15-20 mmol glucose/gDW/h which translates into 1.5 gm glucose/L/h at cell densities of (VP has number). Theoretically, if all the carbon source (glucose) is converted to the desired metabolite the volumetric productivity will be approximately 3 g/L/h.
The amount of cells present in the culture, play a significant role in production potential. In the industrial settings, the cell density is usually higher which increases the volumetric productivity. Some metabolic engineered strain designs demonstrate higher SUR (Portnoy 2008) that also leads to the increase in volumetric productivity.
#### Photoautotrophic growth
_Chlorella vulgaris_ is a single-celled green algae that uses light to generate energy necessary for growth. At the top rate of photosynthesis, the specific oxygen production rate (SOPR) can be estimated to be between 20-400 fmol $O_2$/cell/h (Lee 1994a). Algae biotechnology is drawing increasing interest due to its potential for production of biofuels and fine chemicals (Lee 1994a). However, a lack of suitable photobioreactors (PBR) makes the cost of algally-derived compounds high. One of the key limiting factors for PBR is the light source; however, light-emitting diodes (LED) can be employed for these purposes.
Let us now use order of magnitude calculations to estimate the light requirement for an algae photobioreactor using _C. vulgaris_ as a model organism. Given the fact that maximum photosynthetic efficiency of _C. vulgaris_ is below 50% (Kok 1960, Myers 1980, Pirt 1980) and one mole of photons (680 nm) is equivalent to 50W, we can estimate that in order to sustain the SOPR of 100 fmol $\text{O}_2$/cell/h each cell must receive 40pW equivalent of photons (Lee1994a). A conventional LED can provide $0.3\ \text{mW/cm}^2$ or $0.1\ \text{mW}$ per LED. With a cell density close to $10^9\ \text{cells/ml}$ and $80\ \text{cm}^3$ volume of the reactor (Lee 1994a), the photobioreactor must include close to a 100 LED to sustain the growth of algae and oxygen production.
### Balancing the fluxes and composition in an entire cell
The approximate calculation procedures presented can be used to estimate the overall flows of mass and energy in a bacterial cell. Proteins are 55% of the dry weight of cells and their most energetically costly component, so let's begin such computation with the assumption that there are about $10^9$ amino acids found in the proteins of a single cell. With this starting point and the various data given in this chapter, we can roughly estimate all the major flows using a 60 min doubling time:
* With approximately 316 amino acids found in a protein, we have to make about 3 million protein molecules.
* If we take the ribosome to make 20 pb/sec = 72,000 pb/hr (pb = peptide bond), then we require
$$\begin{equation} \frac{1,000,000,000}{72,000} = 14,000\ \text{ribosomes} \tag{7.17} \end{equation}$$
to carry out this protein synthesis.
* To make 14,000 ribosomes with each having 4,500 nt length RNA molecules (nt = nucleotide), we need
$$\begin{equation} 14,000*4,500 = 63,000,000\ \text{nt}\ \tag{7.18} \end{equation}$$
assembled. In addition, there are 10 tRNAs of 80 nt in length per ribosome leading to an additional nucleotide requirement of
$$\begin{equation} 10*80*14,000 = 11,200,000\ \text{nt}\ \tag{7.19} \end{equation}$$
for a grand total of approximately 75,000,000 for stable RNA molecule synthesis.
* The total nucleotide synthesis for RNA will be 3000 RNA polymerase molecules synthesizing at the rate of 50 nt/sec or
$$\begin{equation} 3000*50*3600 = 540,000,000\ \text{nt/hour}\ \tag{7.20} \end{equation}$$
* The fraction of RNA that is mRNA is 0.03 to 0.05 (Rosenow 2001), or
$$\begin{equation} 540,000,000*(0.03\ \text{to}\ 0.05) \approx (16\ \text{to}\ 25.0)*10^6\ \text{nt/cell/hr}\ \tag{7.21} \end{equation}$$
If the average mRNA length is 1100 nt then the cell needs to make on average 20,000 transcripts in one hour.
* We have to make 3,000,000 proteins from 20,000 transcripts, or about 150 protein molecules per transcript.
* The transcripts have a finite half live. On average, each transcript has a 5 min lifetime, or 300 sec. Due to structural constraints a ribosome can only bind every 50 nt to the mRNA, producing a maximum ribosomal loading of about 20 ribosomes per transcript. The rate of translation is 20 pb/sec. With the average length of the peptide being 316 amino acids, we can produce 1.25 protein/sec. This calculation estimated the maximum protein production from one transcript on the order of 375 protein molecules per transcript.
* To synthesize the genome, we need $2*4,500,000 = 9,000,000\ \text{nt}$ to make the double stranded DNA.
* Thus, the total metabolic requirement of amino acids and nucleotides in _E. coli_ per doubling is $1*10^9$ amino acids/cell/h and $5*10^8$ nt/cell/h.
These are the approximate overall material requirements. We also need energy to drive the process. Using Table 7.4 we can estimate energy requirements for _E. coli_ under oxic and anoxic conditions.
* Aerobically, at a doubling time of 1 hour, the glucose uptake rate is about 10 mmol/gDW/h, which is equivalent to $1.5*10^9$ molecules of glucose per cell per doubling. At 17.5 ATP produced per glucose, the corresponding energy production is: $3*10^{10}$ ATP per cell per doubling.
* Anaerobically, at a doubling time of 1.5 hours, the glucose uptake rate is about 18 mmol/gDW/h, which is equivalent to $4.5*10^9$ molecules of glucose/ cell/doubling. At 3 ATP per glucose the corresponding energy production is $1.4*10^{10}$ molecules ATP/cell/doubling.
## Summary
* Data on cellular composition and overall rates are available.
* Order of magnitude estimation procedures exist through which one can obtain the approximate values for key quantities.
* In this fashion, typical concentrations, fluxes and turnover times can be estimated.
* An approximate quantitative overall multi-scale framework can be obtained for the function of complex biological processes.
$\tiny{\text{© B. Ø. Palsson 2011;}\ \text{This publication is in copyright.}\\ \text{Subject to statutory exception and to the provisions of relevant collective licensing agreements,}\\ \text{no reproduction of any part may take place without the written permission of Cambridge University Press.}}$
| github_jupyter |
## Programming Exercise 3: Multi-class Classification and Neural Networks
#### Author - Rishabh Jain
```
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
%matplotlib inline
from scipy.io import loadmat
```
### 1 Multi-class Classification
##### Problem Statement
For this exercise, we will use **logistic regression** and **neural networks** to recognize handwritten digits (from 0 to 9). In this part of the exercise we will extend our previous implementation of logistic regression and apply it to one-vs-all classification.
#### 1.1 Dataset
We are given a dataset in ex3data1.mat that contains 5000 training examples of handwritten digits, where each training example is a 20 X 20 pixel grayscale image of the digit. Each pixel is represented by a floating point number indicating the grayscale intensity at that location.
This 20 by 20 pixel grid is "unrolled" into a 400-dimensional vector. Each of these training examples becomes a single row in our dataset. This gives us a 5000 X 400 design matrix X where every row is a training example for a handwritten digit image. The second part of the training set is a 5000-dimensional vector y that contains labels for the training set. Labels range from 1 to 10, where 10 represents digit '0'.
```
# Loading Mat file
# `mat` is a dict of MATLAB variables; 'X' is the design matrix, 'y' the labels.
mat=loadmat('./ex3data1.mat')
print(*mat.keys(),sep='\n')
# Loading data from mat to Dataframe
# One column x1..xn per pixel; mat['X'].T[i] is the i-th pixel across all examples.
m,n=mat['X'].shape
data=pd.DataFrame()
for i in range(n):
    data[f'x{i+1}']=mat['X'].T[i]
# Converting label 10 to label 0
# (the MATLAB dataset encodes digit '0' as label 10)
data['y']=mat['y']
data.loc[data['y']==10,'y']=0
print('TRAINING DATASET SHAPE : {0} X {1}'.format(*data.shape))
data.sample(5)
```
#### 1.2 Visualizing the data
We will begin by visualizing a subset of the training set. We will randomly select 100 rows from X. This function maps each row to a 20 X 20 pixel image and displays together.
```
def displayData(X):
    """Render the rows of X as a tiled grid of grayscale digit images.

    X : (m, n) array; each row is one flattened square image
        (n must be a perfect square, e.g. 400 -> 20x20).
    Side effect only: draws with plt.imshow, returns None.
    """
    m,n=X.shape
    # each tile is width x height pixels
    width=int(np.sqrt(n))
    height=int(n/width)
    # roughly square grid large enough for all m examples
    rows=int(np.floor(np.sqrt(m)))
    cols=int(np.ceil(m/rows))
    # total pixel width of one grid row: cols tiles + separator columns
    totalWidth=cols+cols*width
    displayArray=np.zeros((1,totalWidth))
    # padding strips drawn between tiles (value 1 renders white in 'gray')
    rowPadding=np.ones((1,totalWidth))
    colPadding=np.ones((height,1))
    index=0
    for i in range(rows):
        # start each grid row with a zero column so widths line up
        row=colPadding*0
        for j in range(cols):
            if index<m:
                # .T because the source data is stored column-major (MATLAB style)
                x=X[index].reshape((width,height)).T
                index=index+1
            else:
                # blank tile when we run out of examples
                x=np.zeros((width,height)).T
            row=np.column_stack((row,x))
            if j<cols-1:
                row=np.column_stack((row,colPadding))
        displayArray=np.row_stack((displayArray,row))
        if i<rows-1:
            displayArray=np.row_stack((displayArray,rowPadding))
    # closing black strip to balance the initial zero row
    displayArray=np.row_stack((displayArray,rowPadding*0))
    plt.imshow(displayArray,cmap='gray')
    plt.axis('off')
displayData(data.sample(100).iloc[:,:-1].values)
```
#### 1.3 Vectorizing Logistic Regression
We will be using multiple one-vs-all logistic regression models to build a multi-class classifier. Since there are 10 classes, we will need to train 10 separate logistic regression classifiers. To make this training efficient, it is important to ensure that our code is well vectorized. In this section, we will implement a vectorized version of logistic regression that does not employ any for loops.
**Formulae :**
$$ h_\theta(x)=g(\theta^Tx) $$
$$ g(z)=\frac{1}{1+e^{-z}} $$
$$ J(\theta)= \frac{-1}{m}\sum_{i=1}^m[y^{(i)}\log(h_\theta(x^{(i)}))+(1-y^{(i)})\log(1-h_\theta(x^{(i)}))]+\frac{\lambda}{2m}\sum_{j=1}^n\theta_j^2$$
$$ \theta_j:=\theta_j-\frac{\alpha}{m}(\sum_{i=1}^m(h_\theta(x^{(i)})-y^{(i)})x_j^{(i)}+\lambda\sum_{j=1}^n\theta_j) $$
**From the previous notebook [Exercise-2](https://nbviewer.jupyter.org/github/rj425/ML-Coursera/blob/master/Exercise-2/ex2.ipynb), all these functions are already vectorized.**
```
def sigmoid(z):
    """Element-wise logistic function, 1 / (1 + e^-z)."""
    return 1/(1+np.exp(-z))
def predict(X,theta):
    """Hypothesis of the logistic model: sigmoid of the linear score X . theta^T."""
    score=np.dot(X,theta.T)
    return sigmoid(score)
def computeCost(h,y):
    """Mean cross-entropy between predictions h and binary targets y."""
    m=y.shape[0]
    # per-example log-likelihood, summed over all examples
    log_likelihood=y*np.log(h)+(1-y)*np.log(1-h)
    return -np.sum(log_likelihood)/m
def regularizedGradientDescent(X,y,theta,alpha,lmbda,iterations):
    """Fit theta by batch gradient descent with L2 regularization.

    Returns (theta, jHistory) where jHistory is the regularized cost
    recorded at every iteration. The bias term theta[:,0] is never
    regularized.
    """
    m=X.shape[0]
    costHistory=[]
    for _ in range(iterations):
        h=predict(X,theta)
        # regularized cost: cross-entropy plus L2 penalty on non-bias weights
        cost=computeCost(h,y)
        cost=cost+(lmbda/(2*m))*np.sum(np.power(theta[:,1:],2))
        costHistory.append(cost)
        # gradient step; a zero is stacked in front so the bias gets no penalty
        step=(alpha/m)*(np.dot((h-y).T,X))
        step=step+(alpha/m)*lmbda*np.column_stack((np.zeros((1,1)),theta[:,1:]))
        theta=theta-step
    return (theta,costHistory)
```
#### 1.4 One-vs-all Classification
In this part of the exercise, **we will implement One-vs-all classification by training multiple logistic regression classifiers, one for each of K classes in our dataset.**
```
def oneVsAll(X,Y,alpha,lmbda,iterations):
    '''Returns all the classifier parameters in a matrix with shape of classes X features'''
    m,n=X.shape
    labels=np.unique(Y)
    thetas=np.zeros(shape=(len(labels),n))
    # every classifier starts from the same zero parameter vector
    intialTheta=np.zeros(shape=(1,n))
    print('Training classifiers...\n')
    for i,label in enumerate(labels):
        # one-vs-rest target: 1.0 where Y equals the current label, else 0.0
        y=np.where(Y==label,1.0,0.0)
        theta,jHistory=regularizedGradientDescent(X,y,intialTheta,alpha,lmbda,iterations)
        thetas[i,:]=theta
        print(f'For classifier{label} , J = {jHistory[iterations-1]:.3f}')
    return thetas
# Build the design matrix from the DataFrame (all columns except the label).
m=data.shape[0]
X=data.values[:,:-1]
# Adding intercept term to the design matrix
intercept=np.ones(shape=(m,1))
X=np.column_stack((intercept,X))
y=data['y'].values.reshape((m,1))
# hyperparameters: learning rate, L2 strength, gradient-descent iterations
alpha=1
lmbda=0.3
iterations=3000
thetas=oneVsAll(X,y,alpha,lmbda,iterations)
```
#### 1.4.1 One-vs-all Prediction
After training One-vs-all classifier, we can use it to predict the digit contained in a given image. For each input, we should compute the 'probability' that it belongs to each class using the trained logistic regression classifiers.
```
def predictOneVsAll(thetas,X):
    '''Predict the label for a trained One-vs-all classifier. The labels are in the range 0 to 9'''
    # one probability column per classifier; the winning column index is the label
    probabilities=predict(X,thetas)
    winners=probabilities.argmax(axis=1)
    return winners.reshape((X.shape[0],1))
def calculateAccuracy(h,y):
    '''Calculates the accuracy between the target and prediction.

    h, y : equally shaped arrays of predicted and true labels.
    Returns the percentage (0.0-100.0) of positions where they agree.
    '''
    m=y.shape[0]
    # Count matches directly. The previous dict-based tally
    # (np.unique + dict(zip(...)) + dic[True]) raised KeyError whenever
    # there were zero matches (no True key); count_nonzero handles
    # 0% and 100% uniformly.
    matches=np.count_nonzero(h==y)
    accuracy=(matches/m)*100
    return accuracy
# Evaluate the one-vs-all classifier on the training set.
h=predictOneVsAll(thetas,X)
accuracy=calculateAccuracy(h,y)
print(f'TRAINING ACCURACY : {accuracy:.2f}')
```
### 2 Neural Networks
In previous part of the exercise, we implemented multi-class logistic regression to recognize the handwritten digits. **However, logistic regression cannot form more complex hypotheses, as it is only a Linear Classifier.**
>**One may ask why is logistic regression a linear model if it can fit curves to our data??**
That's because the linearity of the model is concerned by the linearity of the parameters $\theta$. Here the target variable is a function of parameter ($\theta$) multiplied by the independent variable ($x_i$). And we can fit the curves to our data using the non linear transformation of independent variables or features but the parameters are still linear.
**Non Linear Hypotheses**
If the number of features is large, which will usually be the case, we will move towards non-linear hypotheses for a better fit to the data. So if $n=100$:
- For quadratic hypotheses, $n\approx5000$ features
- For cubic hypotheses, $n\approx170000$ features
Solving such problems with logistic regression can cause two problems:
- Computationally expensive
- Time consuming
This is not a good way to learn complex non-linear hypotheses when the feature space is large. Hence, **Neural Networks**.
#### 2.1 Model Representation
<table>
<tr>
<td>
<img src="images/neuron.png" width="300">
</td>
<td>
<img src="images/nn.png" width="300">
</td>
</tr>
</table>
$a_i^{(j)}=$ "activation" of unit $i$ in layer $j$
$\theta^{(j)}=$ matrix of weights controlling function mapping from layer $j$ to layer $j+1$
$g(x)=$ sigmoid activation function
**If network has $s_j$ units in layer $j$, $s_{j+1}$ units in layer $j+1$, then $\theta^{(j)}$ will be of dimension $s_{j+1}$ X $(s_j+1)$**
$$ a^{(2)}_1=g(\theta_{10}^{(1)}x_0+\theta_{11}^{(1)}x_1+\theta_{12}^{(1)}x_2+\theta_{13}^{(1)}x_3) $$
$$ a^{(2)}_2=g(\theta_{20}^{(1)}x_0+\theta_{21}^{(1)}x_1+\theta_{22}^{(1)}x_2+\theta_{23}^{(1)}x_3) $$
$$ a^{(2)}_3=g(\theta_{30}^{(1)}x_0+\theta_{31}^{(1)}x_1+\theta_{32}^{(1)}x_2+\theta_{33}^{(1)}x_3) $$
In this exercise, we will be using the parameters from a neural network that is already trained. Our goal is to implement the **Feed-Forward Progration** algorithm for prediction.
```
# Loading weights
# Pre-trained network parameters ('Theta1', 'Theta2') supplied with the exercise.
weights=loadmat('./ex3weights.mat')
print(*weights.keys(),sep='\n')
```
The neural network we are about to use has 3 layers - an input layer ($L_1$), a hidden layer ($L_2$) and an output layer ($L_3$).
* L1 layer has 400 Neuron units (20 X 20 pixel image) excluding the extra bias unit that always output +1
* L2 layer has 25 Neuron units
* L3 layer has 10 Neuron Units
```
# Layer-1 and layer-2 weight matrices of the pre-trained network.
theta1=weights['Theta1']
theta2=weights['Theta2']
print(f'X : {X.shape}')
print(f'Theta1 : {theta1.shape}')
print(f'Theta2 : {theta2.shape}')
```
#### 2.2 Feedforward Propagation and Prediction
**Vectorized Implementation** for three layered Neural Network:
Step 0 : $a^{(1)}=x$
Step 1 : $z^{(2)}=\theta^{(1)}a^{(1)}$
Step 2 : $a^{(2)}=g(z^{(2)})$
Step 3 : Add $a^{(2)}_0=1$
Step 4 : $z^{(3)}=\theta^{(2)}a^{(2)}$
Step 5 : $a^{(3)}=g(z^{(3)})$
Step 6 : $h_\theta(x)=a^{(3)}$
```
def predictNN(theta1,theta2,X):
    '''Predict the label of an input given a trained neural network'''
    m,n=X.shape
    # Feed-forward pass; X is assumed to already carry the bias column.
    a1=X
    a2=sigmoid(np.dot(a1,theta1.T))
    # prepend the hidden layer's bias unit
    bias=np.ones(shape=(a2.shape[0],1))
    a2=np.column_stack((bias,a2))
    h=sigmoid(np.dot(a2,theta2.T))
    # the output unit with the highest activation is the predicted class
    winners=h.argmax(axis=1)
    return winners.reshape((m,1))
# Using label 10 for digit 0
# (the pre-trained weights follow the original MATLAB label encoding)
y=mat['y']
h=predictNN(theta1,theta2,X)
# Adding one to h because matlab indexing starts from 1 whereas python indexing starts from 0
h+=1
accuracy=calculateAccuracy(h,y)
print(f'TRAINING ACCURACY : {accuracy:.2f}')
```
<table>
<tr>
<td>
<img src="images/meme1.jpg" width="500">
</td>
<td>
<img src="images/meme2.jpg" width="300">
</td>
</tr>
</table>
| github_jupyter |
```
import pandas as pd
import pyspark.sql.functions as F
from pyspark.sql.types import *
pd.set_option("display.max_rows", 101)
pd.set_option("display.max_columns", 101)
```
<hr />
### reading preprocessed dataframes
```
# Load the preprocessed inputs from the GCS data lake.
# NOTE(review): `spark` is assumed to be the notebook's SparkSession — defined outside this view.
srag_2021 = spark.read.parquet('gs://ai-covid19-datalake/standard/srag/pp_interm_srag_2021_v3_new_attr-UPDATE-28-11-2021')
gmr = spark.read.csv('gs://ai-covid19-datalake/standard/google-mobility/pp_google-mobility_report-28-11-2021.csv', header=True, inferSchema=True)
ibge = spark.read.csv('gs://ai-covid19-datalake/standard/ibge-data/pp_ibge-municipality-to-code-28-11-2021.csv', header=True, inferSchema=True)
inmet = spark.read.csv('gs://ai-covid19-datalake/standard/inmet-data/pp_inmet_meteorological_data-25052021-a-28112021', header=True, inferSchema=True)
epi_weeks = spark.read.csv('gs://ai-covid19-datalake/raw/epidemiological-data/epi_weeks.csv', header=True, inferSchema=True)
```
<hr />
# Preparing srag to receive the new information from UF's and Epidemiological Weeks
```
# NOTE(review): the dropDupeDfCols helper below is commented-out dead code kept
# for reference; it also mentions srag_2019/srag_2020 frames not loaded here.
# # dropping duplicate columns
# def dropDupeDfCols(df):
# newcols = []
# dupcols = []
# for i in range(len(df.columns)):
# if df.columns[i] not in newcols:
# newcols.append(df.columns[i])
# else:
# dupcols.append(i)
# df = df.toDF(*[str(i) for i in range(len(df.columns))])
# for dupcol in dupcols:
# df = df.drop(str(dupcol))
# return df.toDF(*newcols)
# srag_2019 = dropDupeDfCols(srag_2019)
# srag_2020 = dropDupeDfCols(srag_2020)
# srag_2021 = dropDupeDfCols(srag_2021)
# Tag every 2021 record with its year (string literal column).
srag_2021 = srag_2021.withColumn('ANO', F.lit('2021'))
def get_epi_week_year(epi_week, year):
    """Build the 'week-year' tag used as a join key, e.g. (23, 2021) -> '23-2021'."""
    return '{}-{}'.format(epi_week, year)
# Register the tag builder as a Spark UDF returning a string.
udf_get_epi_week_year = F.udf(get_epi_week_year, StringType())
# adding 'epi_week_year' tag
srag_2021 = srag_2021.withColumn('epi_week_year', udf_get_epi_week_year(F.col('SEM_PRI'), F.col('ANO')))
# padding
# left-pad to 7 chars ('5-2021' -> '05-2021') so tags sort/join consistently
srag_2021 = srag_2021.withColumn('epi_week_year', F.lpad(F.col('epi_week_year'), 7, '0'))
def get_uf(cod_mun_res):
    """Return the state (UF) code: the first two characters of a municipality code."""
    code_text = str(cod_mun_res)
    return code_text[:2]
# Derive the state code from the notification municipality code (CO_MUN_NOT).
udf_get_uf = F.udf(get_uf, StringType())
srag_2021 = srag_2021.withColumn('UF', udf_get_uf(F.col('CO_MUN_NOT')))
```
# Google Mobility Report with state codes by epi_weeks
### adding epidemiological weeks
```
# converting date type variables
gmr = gmr.withColumn('date', F.to_date(F.col('date'), 'dd/MM/yyyy'))
epi_weeks = epi_weeks.withColumn('date', F.to_date(F.col('date'), 'dd/MM/yyyy'))
# find the date from 7 or 14 days before
epi_weeks = epi_weeks.withColumn('date_7_days_early', F.date_sub(F.col('date'), 7))
epi_weeks = epi_weeks.withColumn('date_14_days_early', F.date_sub(F.col('date'), 14))
# finding the epi_week number from 1 or 2 weeks before
def find_one_or_two_epi_weeks(n_weeks, col):
    """Shift an epidemiological week number back by n_weeks weeks."""
    shifted_week = col - n_weeks
    return shifted_week
# Register the shift as a Spark UDF and compute week numbers 1 and 2 weeks back.
udf_find_one_or_two_epi_weeks = F.udf(find_one_or_two_epi_weeks, IntegerType())
epi_weeks = epi_weeks.withColumn('epi_week_7_days_early', udf_find_one_or_two_epi_weeks(F.lit(1), F.col('epi_week')))
epi_weeks = epi_weeks.withColumn('epi_week_14_days_early', udf_find_one_or_two_epi_weeks(F.lit(2), F.col('epi_week')))
# creating the epi_week tag for the new found early epi_weeks
def set_epi_week_year(col1, col2):
    """Concatenate two values into the '<week>-<year>' tag format."""
    return '{}-{}'.format(col1, col2)
udf_set_epi_week_year = F.udf(set_epi_week_year, StringType())
# Tag each epi_weeks row with its lagged week-year keys.
epi_weeks = epi_weeks.withColumn('epi_week_year_7_days_early', udf_set_epi_week_year(F.col('epi_week_7_days_early'), F.col('epi_year')))
epi_weeks = epi_weeks.withColumn('epi_week_year_14_early', udf_set_epi_week_year(F.col('epi_week_14_days_early'), F.col('epi_year')))
# make sure to erase inexistent epi_week
# (a lagged week < 1 would point before the start of the year)
epi_weeks = epi_weeks.withColumn('epi_week_year_7_days_early', F.when((F.col('epi_week_7_days_early') < 1), None).otherwise(F.col('epi_week_year_7_days_early')))
epi_weeks = epi_weeks.withColumn('epi_week_7_days_early', F.when((F.col('epi_week_7_days_early') < 1), None).otherwise(F.col('epi_week_7_days_early')))
epi_weeks = epi_weeks.withColumn('epi_week_year_14_early', F.when((F.col('epi_week_14_days_early') < 1), None).otherwise(F.col('epi_week_year_14_early')))
epi_weeks = epi_weeks.withColumn('epi_week_14_days_early', F.when((F.col('epi_week_14_days_early') < 1), None).otherwise(F.col('epi_week_14_days_early')))
# joining using the actual date
epi_weeks_actual = epi_weeks.select(['date', 'epi_week', 'epi_year', 'epi_week_year'])
gmr = gmr.join(epi_weeks_actual, 'date', 'left')
# joining using date from 7 days before
# NOTE(review): the join condition references epi_weeks.date_7_days_early (the
# parent frame) rather than epi_weeks_7's column — Spark resolves this by column
# lineage here, but it is fragile; verify the intended semantics.
epi_weeks_7 = epi_weeks.select(['date_7_days_early', 'epi_week_7_days_early', 'epi_week_year_7_days_early'])
gmr = gmr.join(epi_weeks_7, F.date_add(gmr.date, 7) == epi_weeks.date_7_days_early, 'left')
# joining using date from 14 days before
epi_weeks_14 = epi_weeks.select(['date_14_days_early', 'epi_week_14_days_early', 'epi_week_year_14_early'])
gmr = gmr.join(epi_weeks_14, F.date_add(gmr.date, 14) == epi_weeks.date_14_days_early, 'left')
# Quick visual checks of the joined frames.
epi_weeks.limit(10).toPandas()
gmr.limit(5).toPandas()
```
### adding state code
```
# Attach the state (UF) code to mobility rows via the IBGE municipality lookup.
gmr = gmr.join(ibge.select('UF', 'sub_region_1'), 'sub_region_1', 'left')
```
### aggregating
```
# Weekly per-state averages of the six mobility metrics, keyed on the
# current week (gmr1), the week 7 days earlier (gmr7) and 14 days earlier (gmr14).
# NOTE(review): each chain renames 'avg(retail_and_recreation_...)' twice with
# identical arguments; the second call is a no-op and can be removed.
gmr1 = gmr.groupBy(['UF', 'epi_week_year']).agg({"residential_percent_change_from_baseline":"avg",
                                                 "workplaces_percent_change_from_baseline":"avg",
                                                 "transit_stations_percent_change_from_baseline":"avg",
                                                 "parks_percent_change_from_baseline":"avg",
                                                 "grocery_and_pharmacy_percent_change_from_baseline":"avg",
                                                 "retail_and_recreation_percent_change_from_baseline":"avg"})\
    .withColumnRenamed('avg(transit_stations_percent_change_from_baseline)', 'gmr_transit_stations_avg')\
    .withColumnRenamed('avg(grocery_and_pharmacy_percent_change_from_baseline)', 'gmr_grocery_and_pharmacy_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_avg')\
    .withColumnRenamed('avg(workplaces_percent_change_from_baseline)', 'gmr_workplaces_percent_avg')\
    .withColumnRenamed('avg(residential_percent_change_from_baseline)', 'gmr_residential_percent_avg')\
    .withColumnRenamed('avg(parks_percent_change_from_baseline)', 'gmr_parks_percent_avg')\
    .orderBy('UF')
gmr7 = gmr.groupBy(['UF', 'epi_week_year_7_days_early']).agg({"residential_percent_change_from_baseline":"avg",
                                                              "workplaces_percent_change_from_baseline":"avg",
                                                              "transit_stations_percent_change_from_baseline":"avg",
                                                              "parks_percent_change_from_baseline":"avg",
                                                              "grocery_and_pharmacy_percent_change_from_baseline":"avg",
                                                              "retail_and_recreation_percent_change_from_baseline":"avg"})\
    .withColumnRenamed('avg(transit_stations_percent_change_from_baseline)', 'gmr_transit_stations_1week_before_avg')\
    .withColumnRenamed('avg(grocery_and_pharmacy_percent_change_from_baseline)', 'gmr_grocery_and_pharmacy_1week_before_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_1week_before_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_1week_before_avg')\
    .withColumnRenamed('avg(workplaces_percent_change_from_baseline)', 'gmr_workplaces_percent_1week_before_avg')\
    .withColumnRenamed('avg(residential_percent_change_from_baseline)', 'gmr_residential_percent_1week_before_avg')\
    .withColumnRenamed('avg(parks_percent_change_from_baseline)', 'gmr_parks_percent_1week_before_avg')\
    .orderBy('UF')
gmr14 = gmr.groupBy(['UF', 'epi_week_year_14_early']).agg({"residential_percent_change_from_baseline":"avg",
                                                           "workplaces_percent_change_from_baseline":"avg",
                                                           "transit_stations_percent_change_from_baseline":"avg",
                                                           "parks_percent_change_from_baseline":"avg",
                                                           "grocery_and_pharmacy_percent_change_from_baseline":"avg",
                                                           "retail_and_recreation_percent_change_from_baseline":"avg"})\
    .withColumnRenamed('avg(transit_stations_percent_change_from_baseline)', 'gmr_transit_stations_2weeks_avg')\
    .withColumnRenamed('avg(grocery_and_pharmacy_percent_change_from_baseline)', 'gmr_grocery_and_pharmacy_2weeks_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_2weeks_avg')\
    .withColumnRenamed('avg(retail_and_recreation_percent_change_from_baseline)', 'gmr_retail_and_recreation_2weeks_avg')\
    .withColumnRenamed('avg(workplaces_percent_change_from_baseline)', 'gmr_workplaces_percent_2weeks_avg')\
    .withColumnRenamed('avg(residential_percent_change_from_baseline)', 'gmr_residential_percent_2weeks_avg')\
    .withColumnRenamed('avg(parks_percent_change_from_baseline)', 'gmr_parks_percent_2weeks_avg')\
    .orderBy('UF')
```
##### making some validations
```
# Spot-check a few weeks for state '11' in each aggregated frame.
gmr1.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
    .orderBy('epi_week_year').limit(5).toPandas()
gmr7.filter((F.col('UF')=='11') & ((F.col('epi_week_year_7_days_early') == '22-2020') | (F.col('epi_week_year_7_days_early') == '23-2020') | (F.col('epi_week_year_7_days_early') == '24-2020') | (F.col('epi_week_year_7_days_early') == '25-2020')))\
    .orderBy('epi_week_year_7_days_early').limit(5).toPandas()
gmr14.filter((F.col('UF')=='11') & ((F.col('epi_week_year_14_early') == '22-2020') | (F.col('epi_week_year_14_early') == '23-2020') | (F.col('epi_week_year_14_early') == '24-2020') | (F.col('epi_week_year_14_early') == '25-2020')))\
    .orderBy('epi_week_year_14_early').limit(5).toPandas()
# joining all for 1 week early
# (rename UF to avoid an ambiguous column name in the join condition)
gmr7 = gmr7.withColumnRenamed('UF', 'UF7')
cond = [gmr1.UF == gmr7.UF7, gmr1.epi_week_year == gmr7.epi_week_year_7_days_early]
gmr_agg = gmr1.join(gmr7, cond, 'left')
gmr_agg.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
    .orderBy('epi_week_year').limit(5).toPandas()
# joining all for 2 weeks early
gmr14 = gmr14.withColumnRenamed('UF', 'UF14')
cond = [gmr1.UF == gmr14.UF14, gmr1.epi_week_year == gmr14.epi_week_year_14_early]
gmr_agg = gmr_agg.join(gmr14, cond, 'left')
gmr_agg.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
    .orderBy('epi_week_year').limit(5).toPandas()
# casting to string
gmr_agg = gmr_agg.withColumn('UF', F.col('UF').cast('string'))
# selecting variables of interest
gmr_agg = gmr_agg.select(['UF', 'epi_week_year',
                          'gmr_transit_stations_avg', 'gmr_grocery_and_pharmacy_avg', 'gmr_retail_and_recreation_avg', 'gmr_workplaces_percent_avg', 'gmr_residential_percent_avg', 'gmr_parks_percent_avg',
                          'gmr_transit_stations_1week_before_avg', 'gmr_grocery_and_pharmacy_1week_before_avg', 'gmr_retail_and_recreation_1week_before_avg', 'gmr_workplaces_percent_1week_before_avg', 'gmr_residential_percent_1week_before_avg', 'gmr_parks_percent_1week_before_avg',
                          'gmr_transit_stations_2weeks_avg', 'gmr_grocery_and_pharmacy_2weeks_avg', 'gmr_retail_and_recreation_2weeks_avg', 'gmr_workplaces_percent_2weeks_avg', 'gmr_residential_percent_2weeks_avg', 'gmr_parks_percent_2weeks_avg'])
# padding 'epi_week_year' col
gmr_agg = gmr_agg.withColumn('epi_week_year', F.lpad(F.col('epi_week_year'), 7, '0'))
gmr_agg.limit(10).toPandas()
```
### Joining mobility data for srags
#### 2021
```
# Left-join mobility averages onto the SRAG records; the before/after counts
# check that the join did not duplicate or drop notification rows.
print("How much distincts before? ", srag_2021.select('NU_NOTIFIC').distinct().count())
print("How much distincts epi_weeks before? ", srag_2021.select('epi_week_year').distinct().count())
srag_2021 = srag_2021.join(gmr_agg, ['UF', 'epi_week_year'], 'left')
print("How much distincts after? ", srag_2021.select('NU_NOTIFIC').distinct().count())
print("How much distincts epi_weeks after? ", srag_2021.select('epi_week_year').distinct().count())
```
# Meteorological data with state codes by epi_weeks
```
# Same pipeline as the mobility data, applied to the INMET weather data:
# attach state codes, tag epidemiological weeks (current / -1 / -2 weeks),
# then aggregate weekly per-state averages.
inmet.limit(5).toPandas()
inmet = inmet.join(ibge.select('UF', 'sub_region_1'), 'sub_region_1', 'left')
inmet = inmet.withColumn('UF', F.col('UF').cast('string'))
inmet = inmet.na.drop(subset=['sub_region_1', 'UF'])
inmet = inmet.withColumnRenamed('measurement_date', 'date')
# joining using the actual date
epi_weeks_actual = epi_weeks.select(['date', 'epi_week', 'epi_year', 'epi_week_year'])
inmet = inmet.join(epi_weeks_actual, 'date', 'left')
# joining using date from 7 days before
epi_weeks_7 = epi_weeks.select(['date_7_days_early', 'epi_week_7_days_early', 'epi_week_year_7_days_early'])
inmet = inmet.join(epi_weeks_7, F.date_add(inmet.date, 7) == epi_weeks.date_7_days_early, 'left')
# joining using date from 14 days before
epi_weeks_14 = epi_weeks.select(['date_14_days_early', 'epi_week_14_days_early', 'epi_week_year_14_early'])
inmet = inmet.join(epi_weeks_14, F.date_add(inmet.date, 14) == epi_weeks.date_14_days_early, 'left')
inmet1 = inmet.groupBy(['UF', 'epi_week_year']).agg({"total_daily_precipitation_mm":"avg",
                                                     "daily_avg_temp_c":"avg",
                                                     "daily_avg_relative_air_humidity_percent":"avg"})\
    .withColumnRenamed('avg(total_daily_precipitation_mm)', 'inmet_daily_precipt_avg')\
    .withColumnRenamed('avg(daily_avg_temp_c)', 'inmet_temp_c_avg')\
    .withColumnRenamed('avg(daily_avg_relative_air_humidity_percent)', 'inmet_relative_air_humidity_avg')\
    .orderBy('UF')
inmet7 = inmet.groupBy(['UF', 'epi_week_year_7_days_early']).agg({"total_daily_precipitation_mm":"avg",
                                                                  "daily_avg_temp_c":"avg",
                                                                  "daily_avg_relative_air_humidity_percent":"avg"})\
    .withColumnRenamed('avg(total_daily_precipitation_mm)', 'inmet_daily_precipt_1week_before_avg')\
    .withColumnRenamed('avg(daily_avg_temp_c)', 'inmet_temp_c_1week_before_avg')\
    .withColumnRenamed('avg(daily_avg_relative_air_humidity_percent)', 'inmet_relative_air_humidity_1week_before_avg')\
    .orderBy('UF')
inmet14 = inmet.groupBy(['UF', 'epi_week_year_14_early']).agg({"total_daily_precipitation_mm":"avg",
                                                               "daily_avg_temp_c":"avg",
                                                               "daily_avg_relative_air_humidity_percent":"avg"})\
    .withColumnRenamed('avg(total_daily_precipitation_mm)', 'inmet_daily_precipt_2weeks_before_avg')\
    .withColumnRenamed('avg(daily_avg_temp_c)', 'inmet_temp_c_2weeks_before_avg')\
    .withColumnRenamed('avg(daily_avg_relative_air_humidity_percent)', 'inmet_relative_air_humidity_2weeks_before_avg')\
    .orderBy('UF')
```
##### some validations
```
# Sanity check: inspect four consecutive 2020 epi weeks for UF '11' in the
# same-week aggregate, to eyeball the values against the lagged aggregates below.
inmet1.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
      .orderBy('epi_week_year').limit(5).toPandas()
# Same window for the 1-week-early aggregate.
inmet7.filter((F.col('UF')=='11') & ((F.col('epi_week_year_7_days_early') == '22-2020') | (F.col('epi_week_year_7_days_early') == '23-2020') | (F.col('epi_week_year_7_days_early') == '24-2020') | (F.col('epi_week_year_7_days_early') == '25-2020')))\
      .orderBy('epi_week_year_7_days_early').limit(5).toPandas()
# Same window for the 2-weeks-early aggregate.
inmet14.filter((F.col('UF')=='11') & ((F.col('epi_week_year_14_early') == '22-2020') | (F.col('epi_week_year_14_early') == '23-2020') | (F.col('epi_week_year_14_early') == '24-2020') | (F.col('epi_week_year_14_early') == '25-2020')))\
       .orderBy('epi_week_year_14_early').limit(5).toPandas()
```
<hr />
```
# joining all for 1 week early
# Rename UF on the right side first so the joined frame has no duplicate 'UF' column.
inmet7 = inmet7.withColumnRenamed('UF', 'UF7')
cond = [inmet1.UF == inmet7.UF7, inmet1.epi_week_year == inmet7.epi_week_year_7_days_early]
inmet_agg = inmet1.join(inmet7, cond, 'left')
# Spot-check the same UF '11' / 2020 weeks used in the validation cells above.
inmet_agg.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
         .orderBy('epi_week_year').limit(5).toPandas()
# joining all for 2 weeks early
inmet14 = inmet14.withColumnRenamed('UF', 'UF14')
cond = [inmet1.UF == inmet14.UF14, inmet1.epi_week_year == inmet14.epi_week_year_14_early]
inmet_agg = inmet_agg.join(inmet14, cond, 'left')
# Spot-check again after the second join.
inmet_agg.filter((F.col('UF')=='11') & ((F.col('epi_week_year') == '22-2020') | (F.col('epi_week_year') == '23-2020') | (F.col('epi_week_year') == '24-2020') | (F.col('epi_week_year') == '25-2020')))\
         .orderBy('epi_week_year').limit(5).toPandas()
```
<hr />
```
# selecting variables of interest
inmet_agg = inmet_agg.select(['UF', 'epi_week_year',
                              'inmet_temp_c_avg', 'inmet_relative_air_humidity_avg', 'inmet_daily_precipt_avg',
                              'inmet_temp_c_1week_before_avg', 'inmet_relative_air_humidity_1week_before_avg', 'inmet_daily_precipt_1week_before_avg',
                              'inmet_temp_c_2weeks_before_avg', 'inmet_relative_air_humidity_2weeks_before_avg', 'inmet_daily_precipt_2weeks_before_avg'])
# Left-pad 'epi_week_year' to 7 chars (e.g. '2-2020' -> '02-2020') so it matches
# the zero-padded key used by the srag frames in the join below.
inmet_agg = inmet_agg.withColumn('epi_week_year', F.lpad(F.col('epi_week_year'), 7, '0'))
```
<hr />
### Joining meteorological data for srags
#### 2021
```
# Count distinct notification ids and epi weeks before the join so we can verify
# the left join did not duplicate srag rows.
print("How much distincts before? ", srag_2021.select('NU_NOTIFIC').distinct().count())
print("How much distincts epi_weeks before? ", srag_2021.select('epi_week_year').distinct().count())
srag_2021 = srag_2021.join(inmet_agg, ['UF', 'epi_week_year'], 'left')
# The counts must be unchanged after the join (one weather row per UF/epi-week key).
print("How much distincts after? ", srag_2021.select('NU_NOTIFIC').distinct().count())
print("How much distincts epi_weeks after? ", srag_2021.select('epi_week_year').distinct().count())
```
<hr />
#### writing the last temporary version of srags
```
srag_2021.write.parquet('gs://ai-covid19-datalake/standard/srag/pp_interm_srag_2021_v4_super-srag', mode='overwrite')
```
<hr />
```
# # reading temporary files
# srag_2019 = spark.read.parquet('gs://ai-covid19-datalake/standard/srag/pp_interm_srag_2019_v4_super-srag/')
# srag_2020 = spark.read.parquet('gs://ai-covid19-datalake/standard/srag/pp_interm_srag_2020_v4_super-srag')
# srag_2021 = spark.read.parquet('gs://ai-covid19-datalake/standard/srag/pp_interm_srag_2021_v4_super-srag')
# Bucket age at notification into ordinal groups: 1 = <1y, 2 = 1-5, 3 = 6-19,
# then one group per decade up to 11 = 90+; 12 catches null/invalid ages.
srag_2021 = srag_2021.withColumn('AGE_GROUP', F.when(F.col('AGE_AT_NOTIF') < 1, 1)\
                                 .when((F.col('AGE_AT_NOTIF') >= 1) & (F.col('AGE_AT_NOTIF') <= 5), 2)\
                                 .when((F.col('AGE_AT_NOTIF') >= 6) & (F.col('AGE_AT_NOTIF') <= 19), 3)\
                                 .when((F.col('AGE_AT_NOTIF') >= 20) & (F.col('AGE_AT_NOTIF') <= 29), 4)\
                                 .when((F.col('AGE_AT_NOTIF') >= 30) & (F.col('AGE_AT_NOTIF') <= 39), 5)\
                                 .when((F.col('AGE_AT_NOTIF') >= 40) & (F.col('AGE_AT_NOTIF') <= 49), 6)\
                                 .when((F.col('AGE_AT_NOTIF') >= 50) & (F.col('AGE_AT_NOTIF') <= 59), 7)\
                                 .when((F.col('AGE_AT_NOTIF') >= 60) & (F.col('AGE_AT_NOTIF') <= 69), 8)\
                                 .when((F.col('AGE_AT_NOTIF') >= 70) & (F.col('AGE_AT_NOTIF') <= 79), 9)\
                                 .when((F.col('AGE_AT_NOTIF') >= 80) & (F.col('AGE_AT_NOTIF') <= 89), 10)\
                                 .when((F.col('AGE_AT_NOTIF') >= 90), 11)\
                                 .otherwise(12))
# just for check the header later
srag_2021.limit(1).toPandas()
```
# The SUPER SRAG at last
```
# Now that the frames are united, create the last derived attributes.
# The bucketed columns below read from '<prefix>avg' and write to '<prefix>q'.
suffix = 'avg'
# Pre-computed cutoffs (4 inner break points -> 5 bins) for each mobility (gmr_*)
# and weather (inmet_*) average column. Each value is wrapped in an extra list,
# so cutoffs are accessed as gmr_inmet_cols[col][0][k] in the loop below.
gmr_inmet_cols = {
    'gmr_transit_stations_': [[-43.09183673469388, -32.61737331954498, -25.20892494929006, -17.24561403508772]],
    'gmr_grocery_and_pharmacy_': [[0.36936936936936937, 8.107558139534884, 13.838709677419354, 19.86090775988287]],
    'gmr_retail_and_recreation_': [[-42.607894736842105, -31.163636363636364, -22.735064935064933, -15.647230320699709]],
    'gmr_workplaces_percent_': [[-15.347786811201445, -7.407114624505929, -4.023725391216558, 0.5605338417540515]],
    'gmr_residential_percent_': [[5.825, 7.780269058295964, 9.963333333333333, 12.902788844621513]],
    'gmr_parks_percent_': [[-45.52965235173824, -36.97651663405088, -30.29383886255924, -21.115384615384617]],
    'gmr_transit_stations_1week_before_': [[-44.09894736842105, -32.61737331954498, -25.263959390862944, -17.85185185185185]],
    'gmr_grocery_and_pharmacy_1week_before_': [[-0.16783216783216784, 7.644859813084112, 13.094644167278062, 18.64406779661017]],
    'gmr_retail_and_recreation_1week_before_': [[-42.81818181818182, -31.208955223880597, -22.889212827988338, -15.928205128205128]],
    'gmr_workplaces_percent_1week_before_': [[-15.347786811201445, -7.351599852887091, -3.9318181818181817, 0.33611111111111114]],
    'gmr_residential_percent_1week_before_': [[5.746001279590531, 7.714285714285714, 9.980842911877394, 13.173153296266879]],
    'gmr_parks_percent_1week_before_': [[-45.42248062015504, -36.97651663405088, -30.672727272727272, -21.639327024185068]],
    'gmr_transit_stations_2weeks_': [[-44.16945606694561, -32.86206896551724, -25.244274809160306, -17.40625]],
    'gmr_grocery_and_pharmacy_2weeks_': [[-0.5689655172413793, 7.173590504451038, 11.925925925925926, 18.068483063328426]],
    'gmr_retail_and_recreation_2weeks_': [[-43.526233359436176, -30.847341337907377, -22.798561151079138, -16.060301507537687]],
    'gmr_workplaces_percent_2weeks_': [[-17.4364406779661, -7.7555555555555555, -4.397341211225997, -0.5694656488549619]],
    'gmr_residential_percent_2weeks_': [[5.692477876106195, 7.639135959339263, 10.018041237113403, 13.193877551020408]],
    'gmr_parks_percent_2weeks_': [[-45.38961038961039, -36.54710144927536, -29.9047131147541, -20.95306859205776]],
    'inmet_temp_c_': [[19.99995412088011, 22.3166005555556, 23.721603579832205, 25.526168289473578]],
    'inmet_relative_air_humidity_': [[65.59553142063525, 69.93693714285736, 73.8520828571426, 78.24404857142858]],
    'inmet_daily_precipt_': [[0.3383720930232559, 1.4355555555555408, 3.35503875968993, 5.962886597938155]],
    'inmet_temp_c_1week_before_': [[20.05500949193533, 22.322388241935233, 23.761091968254064, 25.53099282105264]],
    'inmet_relative_air_humidity_1week_before_': [[65.65009503030261, 70.04159629710138, 73.88706227868933, 78.36411010769187]],
    'inmet_daily_precipt_1week_before_': [[0.3585585585585601, 1.5249999999999975, 3.567741935483844, 6.042424242424034]],
    'inmet_temp_c_2weeks_before_': [[20.174016811110565, 22.320047571428823, 23.821744027322836, 25.50684535398227]],
    'inmet_relative_air_humidity_2weeks_before_': [[65.91730179032191, 70.25798429936523, 73.88706227868934, 78.47414770987979]],
    'inmet_daily_precipt_2weeks_before_': [[0.3714285714285674, 1.6878787878788217, 3.640816326530609, 6.274725274725252]]
}
# Showing the quintile cutoffs
for col in pd.DataFrame(gmr_inmet_cols).columns:
    print(col, list(pd.DataFrame(gmr_inmet_cols)[col]))
# Showing the quintile cutoffs as a DataFrame
pd.DataFrame(gmr_inmet_cols)
n_suffix = 'q'
# Bucket each '<prefix>avg' value into labels '1'-'5' using the cutoffs;
# '6' is the fallback for nulls / unmatched rows.
for col in list(gmr_inmet_cols.keys()):
    srag_2021 = srag_2021.withColumn(col+n_suffix, F.when(F.col(col+suffix) <= gmr_inmet_cols[col][0][0], '1')\
                                     .when((F.col(col+suffix) > gmr_inmet_cols[col][0][0]) & (F.col(col+suffix) <= gmr_inmet_cols[col][0][1]), '2')\
                                     .when((F.col(col+suffix) > gmr_inmet_cols[col][0][1]) & (F.col(col+suffix) <= gmr_inmet_cols[col][0][2]), '3')\
                                     .when((F.col(col+suffix) > gmr_inmet_cols[col][0][2]) & (F.col(col+suffix) <= gmr_inmet_cols[col][0][3]), '4')\
                                     .when(F.col(col+suffix) > gmr_inmet_cols[col][0][3], '5')\
                                     .otherwise('6'))
# making all column names uppercase
for col in srag_2021.columns:
    srag_2021 = srag_2021.withColumnRenamed(col, col.upper())
```
<hr />
<hr />
<hr />
```
super_srag = spark.read.parquet('gs://ai-covid19-datalake/standard/super-srag/super_srag_v1.parquet')
# Spark's union is positional, so project srag_2021 onto super_srag's column
# order before unioning to keep columns aligned.
super_srag_cols = super_srag.columns
srag_2021 = srag_2021.select(super_srag_cols)
super_srag = super_srag.union(srag_2021)
super_srag = super_srag.withColumn('ANO', F.year('DT_SIN_PRI'))
# srag 2019 has 48554 records
# srag 2020 has 1193735 records
# srag 2021 has 868367 records
# super srag must contain (48554 + 1193735 + 868367 =) 2110656 records
print('super srag has', super_srag.count(), 'records')
```
#### writing super srag
```
# Persist the final dataset as parquet, and as a single CSV file (coalesce(1)
# forces one output part, which funnels all data through a single worker).
super_srag.write.parquet('gs://ai-covid19-datalake/standard/super-srag/super_srag_v2.parquet')
super_srag.coalesce(1).write.csv('gs://ai-covid19-datalake/standard/super-srag/super_srag_v2.csv', header=True)
```
| github_jupyter |
# Naive Sentence to Emoji Translation
## Purpose
To workshop a naive version of a sentence to emoji translation algorithm. The general idea is that sentences can be "chunked" out into n-grams that are each closely related to a single emoji. The relatedness of an n-gram to an emoji is directly related to the cosine similarity of the sent2vec representation of the sentence and the sent2vec representation of one of the emoji's definitions. The emoji definitions are gathered from the [emoji2vec](https://github.com/uclmr/emoji2vec) github repo and the sent2vec model is from the [sent2vec](https://github.com/epfml/sent2vec) github repo.
For a better explanation of the algorithm please see our [paper](https://www.authorea.com/users/269084/articles/396929-confet-an-english-to-emojis-translation-algorithm) or the slides from our presentation.
```
# NOTE: If this is the first time
# !pip install spacy
# !pip install tabulate
# !pip install ../../sent2vec/
# Standard Library
from typing import List, Tuple, Callable # Datatypes for the function typing
from functools import lru_cache # Function annotation for storing results
from dataclasses import dataclass, field # C-like struct functions and class annotation
from string import punctuation
# Scipy suite
import numpy as np # For function annotation
from scipy.spatial.distance import cosine # Distance between sentence and emoji in sent2vec vector space
# NLTK
from nltk import word_tokenize, pos_tag # Tokenizing a sentence into words and tagging POS
from nltk.stem import PorterStemmer, WordNetLemmatizer, SnowballStemmer # Different stemming algorithms
from nltk.corpus import stopwords # Define the set of stopwords in english
from nltk import Tree
stopwords = " " #stop-word removal effectively disabled; originally set(stopwords.words('english'))
# Import spacy (NLP)
import spacy
# Import sentence vectorizer
import sent2vec
# IPython output formatting
from tabulate import tabulate # Tabulation from 2-d array into html table
from IPython.display import display, HTML, clear_output # Nice displaying in the output cell
import warnings; warnings.simplefilter('ignore') # cosine distance gives warnings when div by 0 so
                                                 # ignore all of these
# Timing functions
from time import time, localtime, strftime
# Paramatize the file locations
emoji_file = "../data/emoji_joined.txt" # https://github.com/uclnlp/emoji2vec/blob/master/data/raw_training_data/emoji_joined.txt
wikipedia_file = "../data/wikipedia_utf8_filtered_20pageviews.csv" # https://blog.lateral.io/2015/06/the-unknown-perils-of-mining-wikipedia/
# Initialize the sent2vec model
s2v = sent2vec.Sent2vecModel()
s2v.load_model('../../models/wiki_unigrams.bin') # https://drive.google.com/open?id=0B6VhzidiLvjSa19uYWlLUEkzX3c
# Intitialize the lemmatizers / stemmers and the spaCy English pipeline
# !python -m spacy download en
ps = PorterStemmer()
sb = SnowballStemmer("english")
lemmatizerNLTK = WordNetLemmatizer()
nlp = spacy.load("en")
```
## Sentence Cleaning
The general idea with sentence cleaning is that the sentences need to be put into the same "format" for better analysis. There are two main aspects of cleaning: 1) removal, and 2) modification. Removal is primarily for tokens that do not contribute to the sentence at all. These include ".", "and", "but". Normally this is a standard step in sentence cleaning, but it actually has zero effect on the output that I can see. However, token modification changes the spelling of tokens to unify all tokens that use the same root. For example "rocked", "rock", "rocking" should all be reduced to their lemma of "rock". There are two different ways to do this: [stemming and lemmatization](https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html).
```
def clean_sentence(sent: str, lemma_func: Callable[[str], str]=lemmatizerNLTK.lemmatize, keep_stop_words: bool=True) -> str:
    """
    Normalize a sentence for embedding.

    Lowercases and tokenizes the input, drops punctuation tokens, optionally
    drops stop words, lemmatizes every surviving token, and joins the result
    back into a single space-separated string.

    Args:
        sent(str): Sentence to clean
        lemma_func(Callable[[str], str]): Word-level lemmatization function
                                          applied to every kept token
        keep_stop_words(bool): When True, stop words are kept in the output

    Rets:
        (str): Cleaned sentence
    """
    kept_tokens = []
    for token in word_tokenize(sent.lower()):
        # Punctuation never survives cleaning
        if token in punctuation:
            continue
        # Stop words survive only when the caller asks for them
        if not keep_stop_words and token in stopwords:
            continue
        kept_tokens.append(lemma_func(token))
    return " ".join(kept_tokens)
```
#### Emoji Vectorization and Related
```
# Build the list of (emoji, embedding, cleaned description) 3-tuples
def generate_emoji_embeddings(lemma_func: Callable[[str], str]=lemmatizerNLTK.lemmatize, keep_stop_words: bool=True) -> List[Tuple[str, np.ndarray, str]]:
    """
    Generate the sent2vec emoji embeddings from the input file

    Run each emoji description within the emoji_joined data file from the
    emoji2vec paper through the sent2vec sentence embedder. This is naive in
    that one emoji may have multiple entries in the data file, so the same
    emoji can appear several times in the returned list (once per description).

    Args:
        lemma_func(Callable[[str], str]): Lemmatization function for cleaning. A function that takes in a word
                                          and outputs a word, mapped over every word of each description
        keep_stop_words(bool): Keep the stop words in the cleaned descriptions

    Rets:
        (List[Tuple[str, np.ndarray, str]]): 3-tuples of
            (emoji, embedded description vector, cleaned description text).
            Fix: the previous annotation/docstring claimed 2-tuples, but the
            code has always appended 3-tuples.
    """
    # Initialize the list that will hold all of the embeddings
    emoji_embeddings = []
    # The data file is tab-delimited: "<description>\t<emoji>"
    with open(emoji_file) as emojis:
        for defn in emojis:
            split = defn.split("\t")
            # Emoji is the last field; strip the trailing newline
            emoji = split[-1].replace("\n", "")
            # Clean the description with the same pipeline used for input sentences
            desc = clean_sentence(split[0], lemma_func, keep_stop_words)
            emoji_embeddings.append((emoji, s2v.embed_sentence(desc), desc))
    return emoji_embeddings
emoji_embeddings = generate_emoji_embeddings()
@lru_cache(maxsize=1000)
def closest_emoji(sent: str, return_all: bool=False) -> Tuple[str, float, str]:
    """
    Get the closest emoji to the given sentence

    Loop through the list of emoji embeddings and keep track of which one has the
    lowest cosine distance from the input sentence's embedding. This is the "closest"
    emoji. The lru_cache designation means that python will store the last [maxsize]
    calls to this function with their return value to reduce computation. This is
    cleared after every call to the summary function.

    Args:
        sent(str): Sentence to check
        return_all(bool, Optional): Unused placeholder (kept for interface
                                    compatibility); only the first minimum is returned

    Ret:
        (Tuple[str, float, str]): (closest emoji, cosine distance, emoji description)
    """
    # Embed the sentence using sent2vec
    emb = s2v.embed_sentence(sent)
    # Check the similarity between each emoji and the sentence. The tuple
    # is in the format (emoji, cosine diff, emoji desc)
    emoji_sent_diffs = [(emoji[0], cosine(emoji[1], emb), emoji[2])
                        for emoji in emoji_embeddings]
    min_val = min(emoji_sent_diffs, key=lambda e: e[1])[1]
    # Return the first entry with the lowest cosine diff.
    # (Fix: the previous version had unreachable code after this return.)
    good_emojis = [emoji for emoji in emoji_sent_diffs
                   if emoji[1] == min_val]
    return good_emojis[0]
closest_emoji("her royal highness")
@lru_cache(maxsize=1000)
def closest_emoji(sent: str, return_all: bool=False) -> Tuple[str, float, str]:
    """
    Get the closest emoji to the given sentence

    Loop through the list of emoji embeddings and keep track of which one has the
    lowest cosine distance from the input sentence's embedding. This is the "closest"
    emoji. The lru_cache designation means that python will store the last [maxsize]
    calls to this function with their return value to reduce computation. This is
    cleared after every call to the summary function.

    Args:
        sent(str): Sentence to check
        return_all(bool, Optional): Unused placeholder (kept for interface
                                    compatibility); only the first minimum is returned

    Ret:
        (Tuple[str, float, str]): (closest emoji, cosine distance, emoji description)
    """
    # Embed the sentence using sent2vec
    emb = s2v.embed_sentence(sent)
    # Check the similarity between each emoji and the sentence. The tuple
    # is in the format (emoji, cosine diff, emoji desc)
    emoji_sent_diffs = [(emoji[0], cosine(emoji[1], emb), emoji[2])
                        for emoji in emoji_embeddings]
    # min() already yields the first entry with the lowest cosine diff.
    # (Fix: the previous version had unreachable code after this return.)
    return min(emoji_sent_diffs, key=lambda e: e[1])
closest_emoji("her royal highness")
```
#### N-Gram Generation and Related
```
def pos_n_gram(sentence: str, keep_stop_words: bool=True) -> List[str]:
    """
    Generate an n-gram based on the POS tagged dependency tree of the sentence that is "simplified" down according
    to a few assumptions that dictate a good sentence split. These assumptions are as follows:
    1. If two words are leafs and on the same level with the same parent they can be grouped as an n-gram
    2. If there is a sequence of parent-child relationships with only 1 child they can be grouped as one
       n-gram

    Args:
        sentence(str): Sentence to split into n-grams
        keep_stop_words(bool): When False, the local stop-word list is removed
                               both before parsing and from the final n-grams

    Rets:
        (List[str]): n-grams in original sentence order
    """
    # Local stop-word list used here (distinct from the module-level `stopwords`)
    stopword = "the in has be".split()
    # Collected as a side effect of walking the spaCy dependency tree below
    pos_tagged_n_grams = []
    def to_nltk_tree(node):
        # Walk the spaCy dependency tree, collapsing chains of single-child
        # nodes into one n-gram; each collected token is an (orth, index) pair
        # so sentence order can be restored later.
        current_node = node
        backlog = []
        while current_node.n_lefts + current_node.n_rights == 1:
            backlog.append((current_node.orth_, current_node.i))
            current_node = list(current_node.children)[0]
        backlog.append((current_node.orth_, current_node.i))
        if current_node.n_lefts + current_node.n_rights > 1:
            # Branching node: children with their own subtrees recurse;
            # leaf children are grouped together into one n-gram.
            good_children = [child for child in current_node.children if len(list(child.children)) > 0]
            bad_children = [(child.orth_, child.i) for child in current_node.children if child not in good_children]
            pos_tagged_n_grams.append(backlog)
            pos_tagged_n_grams.append(bad_children)
            return Tree(backlog, [Tree(bad_children, [])] + [to_nltk_tree(child) for child in good_children])
        else:
            pos_tagged_n_grams.append(backlog)
            return Tree(backlog, [])
    # NOTE(review): defined but never called anywhere in this function.
    def strip_nothing_unigrams(n_grams):
        return [n_gram for n_gram in n_grams if not (len(n_gram.split(" ")) == 1 and n_gram.split(" ")[0] in stopword)]
    # Optionally strip stop words before parsing, then parse with spaCy
    query = " ".join([word for word in sentence.split() if word not in stopword or keep_stop_words])
    doc = nlp(query)
    # Called for its side effect of filling pos_tagged_n_grams (only the first
    # sentence's root is used)
    to_nltk_tree(list(doc.sents)[0].root);
    # print(nltk_tree)
    # Sort tokens inside each n-gram by sentence position
    sort_inner = [sorted(nltk_child, key=lambda x: x[1]) for nltk_child in pos_tagged_n_grams]
    nltk_averages = []
    for nltk_child in sort_inner:
        if nltk_child == []:
            continue
        # Despite the name, groups are keyed by their max token index, not an average
        nltk_averages.append((nltk_child, max(x[1] for x in nltk_child)))
    # Order the n-grams themselves by sentence position
    sorted_outer = list(sorted(nltk_averages, key=lambda x: x[1]))
    n_grams = []
    for nltk_average in sorted_outer:
        n_grams.append(" ".join(word[0] for word in nltk_average[0]))
    if not keep_stop_words:
        # Second stop-word pass: also remove stop words inside each n-gram
        new_n_grams = []
        for n_gram in n_grams:
            new_n_gram = " ".join([word for word in word_tokenize(n_gram) if word not in stopword])
            # print(new_n_gram)
            new_n_grams.append(new_n_gram)
        return new_n_grams
    else:
        return n_grams
def clean_n_gram(n_grams: List[str]) -> List[str]:
    """
    Filter a series of n-grams, dropping entries that are exactly a stop word.

    Note: this does not modify individual n-grams; it only removes stand-alone
    stop-word entries ("the", "and", "but"). Multi-word n-grams that merely
    contain a stop word are kept.
    (Fix: the return annotation previously claimed `bool`, but the function
    has always returned a list.)

    Args:
        n_grams(List[str]): n-grams to filter

    Rets:
        (List[str]): n-grams with stop-word-only entries removed
    """
    # Set membership is O(1) per n-gram
    stop = {"the", "and", "but"}
    return [n_gram for n_gram in n_grams if n_gram not in stop]
def combinations_of_sent(sent: str) -> List[List[str]]:
    """
    Return all possible n-gram combinations of a sentence

    Args:
        sent(str): Sentence to n-gram-ify

    Rets:
        (List[List[str]]): List of all possible n-gram combinations
    """
    def combinations_of_sum(sum_to: int, combo: List[int]=None) -> List[List[int]]:
        """
        Return all possible combinations of ints that sum to some int

        Args:
            sum_to(int): The number that all sub-arrays should sum to
            combo(List[int]): The current combination of number that the recursive
                              algo should subdivide, not needed for first run but used
                              in every consequent recursive run of the function
        """
        # Initialize the list for combinations
        combos = []
        # If the current combo list is none (first run through)
        # then generate it with all 1s and length = sum_to
        if combo is None:
            combo = [1 for x in range(sum_to)]
            combos.append(combo)
        # Base case: If the length of the combination is 0 then
        # end the recursion because we are at the top of the "tree"
        # (only reachable when sum_to == 0)
        if len(combo) == 0:
            return None
        # Merge each adjacent pair of entries and recurse on the shorter list;
        # duplicates are produced here and removed by np.unique in the caller
        for i in range(1, len(combo)):
            combo_to_query = combo[:i-1] + [sum(combo[i - 1:i + 1])] + combo[i+1:]
            combos.append(combo_to_query)
            # NOTE: the loop variable `combo` here shadows the outer `combo`
            [combos.append(combo) for combo in combinations_of_sum(sum_to, combo_to_query) if combo is not None]
        return combos
    def combinations_of_sent_helper(sent):
        sent = word_tokenize(sent)
        # Deduplicate the integer partitions.
        # NOTE(review): np.unique over a list of unequal-length lists relies on
        # object-array handling; newer NumPy versions may reject ragged input — confirm.
        combos = np.unique(combinations_of_sum(len(sent)))
        sent_combos = []
        for combo in combos:
            # Slice the token list into chunks whose sizes follow this partition
            sent_combo = []
            curr_i = 0
            for combo_len in combo:
                space_joined = " ".join(sent[curr_i:combo_len + curr_i])
                if space_joined not in sent_combo:
                    sent_combo.append(space_joined)
                curr_i += combo_len
            if sent_combo not in sent_combos:
                sent_combos.append(sent_combo)
        return sent_combos
    return combinations_of_sent_helper(sent)
```
### Summarization Algorithm and Related
```
@dataclass
class EmojiSummarizationResult:
    """
    "Struct" for keeping track of an Emoji Summarization result

    Data Members:
        emojis(str): String of emojis that represent the summarization
        emojis_n_grams(List[str]): Cleaned description text of each chosen emoji
        n_grams(List[str]): List of variable length n-grams that each emoji represents
        uncertainty_scores(List[float]): List of the cosine distance between each n_gram and emoji
        elapsed_time(float): How long it took to complete the summary, in seconds
            (the docstring previously called this `time_elapsed`)
    """
    emojis: str = ""
    # Fix: was annotated `str`, but it has always held a list of description strings
    emojis_n_grams: List[str] = field(default_factory=list)
    n_grams: List[str] = field(default_factory=list)
    uncertainty_scores: List[float] = field(default_factory=list)
    elapsed_time: float = 0.0
# Weighted on the real estate (word count) an n-gram occupies
def score_summarization_result_weighted_average(summarization: EmojiSummarizationResult) -> float:
    """
    Score a summarization by the length-weighted arithmetic mean of its
    uncertainty scores: each cosine distance is weighted by how many words
    its n-gram spans, so longer chunks influence the score proportionally more.
    """
    word_counts = [len(n_gram.split(" ")) for n_gram in summarization.n_grams]
    weighted_total = sum(score * count
                         for score, count in zip(summarization.uncertainty_scores, word_counts))
    return weighted_total / sum(word_counts)
def score_summarization_result_geometric_average(summarization: EmojiSummarizationResult) -> float:
    """Score a summarization by the geometric mean of its uncertainty scores."""
    scores = summarization.uncertainty_scores
    # nth root of the product of the n cosine distances
    return np.prod(scores) ** (1 / len(scores))
# Could also be computed with a sum of logs — numerically better?
def score_summarization_result_weighted_geometric_average(summarization: "EmojiSummarizationResult") -> float:
    """
    Score a summarization by the length-weighted geometric mean of its
    uncertainty scores: (prod(score_i ** words_i)) ** (1 / total_words),
    where words_i is the word count of the i-th n-gram.

    Fix: the accumulator was previously combined with `+=`, which computed
    1 + sum(score_i ** words_i) instead of a product — a geometric mean
    multiplies its terms.

    Args:
        summarization(EmojiSummarizationResult): Summarization to score

    Rets:
        (float): Numerical summarization score (lower is better)
    """
    weighted_prod = 1.0
    sentence_length = 0
    for n_gram, score in zip(summarization.n_grams, summarization.uncertainty_scores):
        words = len(n_gram.split(" "))
        sentence_length += words
        weighted_prod *= score ** words
    return weighted_prod ** (1 / sentence_length)
def score_summarization_result_harmonic_average(summarization: EmojiSummarizationResult) -> float:
    """Score a summarization by the harmonic mean of its uncertainty scores."""
    reciprocal_total = sum(1 / score for score in summarization.uncertainty_scores)
    return len(summarization.n_grams) / reciprocal_total
def score_summarization_result_weighted_harmonic_average(summarization: "EmojiSummarizationResult") -> float:
    """
    Score a summarization by the length-weighted harmonic mean of its
    uncertainty scores: sum(w_i) / sum(w_i / score_i), where w_i is the
    word count of the i-th n-gram.

    Fix: the previous version returned sum(1 / (w_i * score_i)), which is not
    a harmonic mean and *shrinks* as cosine distances grow — so minimizing it
    (as summarize() does) would have preferred the worst summarization.

    Args:
        summarization(EmojiSummarizationResult): Summarization to score

    Rets:
        (float): Numerical summarization score (lower is better)
    """
    total_weight = 0
    weighted_reciprocal_sum = 0.0
    for n_gram, score in zip(summarization.n_grams, summarization.uncertainty_scores):
        weight = len(n_gram.split(" "))
        total_weight += weight
        weighted_reciprocal_sum += weight / score
    return total_weight / weighted_reciprocal_sum
def score_summarization_result_average(summarization: EmojiSummarizationResult) -> float:
    """
    Score a EmojiSummarizationResult

    The score is the unweighted arithmetic mean of the per-n-gram uncertainty
    (cosine distance) values; lower means a better summarization.

    Args:
        summarization(EmojiSummarizationResult): Summarization to score

    Rets:
        (float): Numerical summarization score
    """
    scores = summarization.uncertainty_scores
    return sum(scores) / len(scores)
def summarize(sent:str, lemma_func: Callable[[str], str]=lemmatizerNLTK.lemmatize,
              keep_stop_words: bool=True, scoring_func: Callable[[EmojiSummarizationResult], float]=score_summarization_result_average) -> EmojiSummarizationResult:
    """
    Summarize the given sentence into emojis

    Split the sentence into every possible combination of n-grams and see which returns the
    lowest (best) score when each n-gram is translated to the closest emoji in the dataset.

    Args:
        sent(str): Sentence to summarize
        lemma_func(Callable[[str], str]): Lemmatization function for cleaning. A function that takes in a word and outputs a word,
                                          normally used to pass in the lemmatization function to be mapped
                                          on every word the sentence
        keep_stop_words(bool): Keep the stop words in the cleaned sentence
        scoring_func(Callable[[EmojiSummarizationResult], float]): Scoring function to minimize over
                                                                   all candidate n-gram splits

    Rets:
        (EmojiSummarizationResult): Best-scoring summarization (emoji string,
            matched emoji descriptions, n-grams used, per-n-gram cosine
            distances, and elapsed wall-clock time)
    """
    # Start the timer
    time_now = time()
    # Clean the sentence
    sent = clean_sentence(sent, lemma_func=lemma_func, keep_stop_words=keep_stop_words)
    # Generate all combinations of sentences
    sent_combos = combinations_of_sent(sent)
    # Init "best" datamembers as empty or exceedingly high
    best_summarization = EmojiSummarizationResult()
    best_summarization_score = 100_000_000
    # Iterate through every combination of sentence combos
    for sent_combo in sent_combos:
        # Start the local data members as empty
        local_summarization = EmojiSummarizationResult()
        # Iterate through each n_gram adding the uncertainty and emoji to the lists
        for n_gram in sent_combo:
            close_emoji, cos_diff, close_ngram = closest_emoji(n_gram)
            local_summarization.emojis += close_emoji
            local_summarization.uncertainty_scores.append(cos_diff)
            local_summarization.emojis_n_grams.append(close_ngram)
        local_summarization.n_grams = sent_combo
        # Check if the score is less (better) than the best so far
        # TODO: Maybe a median check would be helpful as well?
        if scoring_func(local_summarization) < best_summarization_score:
            # Update the best emojis (the score is recomputed here; caching the
            # value from the comparison above would save one scoring call)
            best_summarization = local_summarization
            best_summarization_score = scoring_func(best_summarization)
    # Clear the function cache on closest_emoji because it is unlikely the next run will make use of them
    closest_emoji.cache_clear()
    # Stop the timer
    best_summarization.elapsed_time = time() - time_now
    # Return the best-scoring summarization
    return best_summarization
def summarize_pos(sent:str, keep_stop_words:bool=True, lemma_func: Callable[[str], str]=lambda x: x) -> EmojiSummarizationResult:
    """
    Summarize a sentence using POS n-gram chunking

    Unlike summarize(), this evaluates a single n-gram split derived from the
    sentence's dependency tree instead of exhaustively scoring every split,
    so it runs one closest_emoji lookup per chunk.

    Args:
        sent(str): Sentence to summarize
        keep_stop_words(bool, Optional): Flag to keep the stop words when cleaning the sentence and n-grams
        lemma_func(Callable[[str], str], Optional): Function to use to lemmatize the sentence
                                                    (identity by default — no lemmatization)

    Rets:
        EmojiSummarizationResult: Result of the emoji summarization
    """
    time_now = time()
    # Clean the sentence (stop words are always kept here; they are removed
    # later, inside pos_n_gram, when keep_stop_words is False)
    sent = clean_sentence(sent, keep_stop_words=True, lemma_func=lemma_func)
    # Get the n-grams using the part of speech tagging
    pos_n_grams = pos_n_gram(sent, keep_stop_words=keep_stop_words)
    # Drop n-grams that are nothing but a stand-alone stop word
    n_grams = clean_n_gram(pos_n_grams)
    # Create an Emoji Summarization Result
    esr = EmojiSummarizationResult()
    # Translate every n_gram in that n-gram sequence
    for n_gram in n_grams:
        # Get the closest emoji to the current n-gram
        emoji, similarity, desc = closest_emoji(n_gram)
        # Add the closest emoji to the summary
        esr.emojis += emoji
        esr.emojis_n_grams.append(desc)
        esr.n_grams.append(n_gram)
        esr.uncertainty_scores.append(similarity)
    # Stop the timer
    esr.elapsed_time = time() - time_now
    # Return the summary
    return esr
```
### Verification and Related
```
def format_summary(sents: List[str], lemma_func: Callable[[str], str]=lemmatizerNLTK.lemmatize, keep_stop_words: bool=True, generate_embeddings: bool=True,
                   scoring_func: Callable[[EmojiSummarizationResult], float]=score_summarization_result_average) -> str:
    """
    Summarize a collection of sentences and display it nicely with IPython

    Args:
        sents(List[str]): List of sentences to translate
        lemma_func(Callable[[str], str]), optional: Lemmatization function for cleaning. A function that takes in a word and outputs a word,
                                                    normally used to pass in the lemmatization function to be mapped
                                                    on every word the sentence
        keep_stop_words(bool), optional: Keep the stop words in the cleaned sentence
        generate_embeddings(bool), optional: Regenerate the emoji embeddings for the case that the lemmatazation/stop_word params have changed
        scoring_func(Callable[[EmojiSummarizationResult], float]), optional: Scoring function passed through to summarize()

    Rets:
        str: pipe-format (markdown) table produced by tabulate
             (previously mis-annotated as IPython.HTML)
    """
    # Generate emoji embeddings in case the cleaning parameters have changed;
    # rebinds the module-level cache used by closest_emoji
    if generate_embeddings:
        time_now = time()
        global emoji_embeddings
        emoji_embeddings = generate_emoji_embeddings(lemma_func, keep_stop_words)
        print("Completed emoji embeddings, time elapsed: {}\n".format(time() - time_now))
    # Create the 2d array for the table
    table = []
    # Iterate through each sentence to be summarized
    for sent in sents:
        # Start timer
        time_now = time()
        # Summarize it
        summarization_res = summarize(sent, lemma_func, keep_stop_words, scoring_func)
        # Get elapsed time
        elapsed_time = time() - time_now
        # Update elapsed time
        summarization_res.elapsed_time = elapsed_time
        # Print status update
        # print("Completed sentence: {}, time elapsed: {}".format(sents.index(sent), elapsed_time))
        # Append pertinent data to the table
        table.append([sent, round(scoring_func(summarization_res), 3),
                      [round(x, 3) for x in summarization_res.uncertainty_scores],
                      summarization_res.n_grams,
                      summarization_res.elapsed_time,
                      summarization_res.emojis])
    # Return the table with the headers
    return tabulate(table, tablefmt='pipe',
                    headers=["Input Sentence", "Summary Score", "Individual N-Gram Scores",
                             "N-Grams", "Elapsed Time", "Emoji Results"])
def pp(esr):
    """Pretty-print an EmojiSummarizationResult to stdout, one field per line."""
    rounded_scores = [str(round(u, 3)) for u in esr.uncertainty_scores]
    # Report 1 - mean(uncertainty) so that higher printed scores read as better
    overall_score = round(1 - score_summarization_result_average(esr), 3)
    output_lines = [
        "Emojis: " + esr.emojis,
        "Emoji n-grams: " + ", ".join(esr.emojis_n_grams),
        "Sentence n-grams: " + ", ".join(esr.n_grams),
        "Uncertainty Scores: " + ", ".join(rounded_scores),
        "Time: " + str(round(esr.elapsed_time, 3)) + "s",
        "Score: " + str(overall_score),
    ]
    for output_line in output_lines:
        print(output_line)
    # Trailing blank line separates consecutive summaries
    print()
# Demo: compare the POS-chunked and exhaustive summarizers on a sample sentence
sentences = ["The happy dog shares a treat with another dog"]
for sentence in sentences:
    pp(summarize_pos(sentence, keep_stop_words=True))
    pp(summarize(sentence, keep_stop_words=False))
```
### Timing and Graphing
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set(style="whitegrid", context="paper")
# Time both summarizers on growing prefixes of this sentence.
outer_sent = "the quick brown fox jumped over the lazy dog sphinx of".split(" ") # Issues with 1 word phrases. Also changed stopwords to empty list.
sent_lens = []
exh_time = []
pos_time = []
# Starts at 2 because 1-word phrases are problematic (see note above)
for x in range(2, len(outer_sent) + 1):
    sent = " ".join(outer_sent[:x])
    sent_lens.append(x)
    pos = summarize_pos(sent)
    exh = summarize(sent)
    pos_time.append(pos.elapsed_time)
    exh_time.append(exh.elapsed_time)
# Collect the timings into a frame, one column per algorithm
df = pd.DataFrame()
df["Sentence Length (words)"] = sent_lens
df["Exhaustive"] = exh_time
df["Dependency Tree"] = pos_time
df.head()
# Melt to long form so seaborn can draw one line per algorithm
df_melted = pd.melt(df, ["Sentence Length (words)"])
df_melted = df_melted.rename(columns={"variable": "Sentence Composition Algorithm", "value": "Time To Translation (seconds)"})
p = sns.lineplot(data=df_melted, x="Sentence Length (words)", y="Time To Translation (seconds)", hue="Sentence Composition Algorithm", color="coral")
p.set_title("Timing of Dependency Tree and Exhastive Translation")
p.get_figure().savefig("../plots/AlgorithmTiming.png")
```
| github_jupyter |
## Problem Statement
Previously, we considered the following problem:
>Given a positive integer `n`, write a function, `print_integers`, that uses recursion to print all numbers from `n` to `1`.
>
>For example, if `n` is `4`, the function should print `4 3 2 1`.
Our solution was:
```
def print_integers(n):
    """Recursively print the integers from n down to 1, one per line."""
    if n > 0:
        print(n)
        print_integers(n - 1)
# Demo call: prints 5 4 3 2 1
print_integers(5)
```
We have already discussed that every time a function is called, a new *frame* is created in memory, which is then pushed onto the *call stack*. For the current function, `print_integers`, the call stack with all the frames would look like this:
<img src='./recurrence-relation-resources/01.png'>
Note that in Python, the stack is displayed in an "upside down" manner. This can be seen in the illustration above—the last frame (i.e. the frame with `n = 0`) lies at the top of the stack (but is displayed last here) and the first frame (i.e., the frame with `n = 5`) lies at the bottom of the stack (but is displayed first).
But don't let this confuse you. The frame with `n = 0` is indeed the top of the stack, so it will be discarded first. And the frame with `n = 5` is indeed at the bottom of the stack, so it will be discarded last.
We define time complexity as a measure of the amount of time it takes to run an algorithm. Similarly, the time complexity of our function `print_integers(5)` would indicate the amount of time taken to execute our function `print_integers`. But notice how when we call `print_integers()` with a particular value of `n`, it recursively calls itself multiple times.
In other words, when we call `print_integers(n)`, it does operations (like checking for base case, printing number) and then calls `print_integers(n - 1)`.
Therefore, the overall time taken by `print_integers(n)` to execute would be equal to the time taken to execute its own simple operations and the time taken to execute `print_integers(n - 1)`.
Let the time taken to execute the function `print_integers(n)` be $T(n)$. And let the time taken to execute the function's own simple operations be represented by some constant, $k$.
In that case, we can say that
$$T(n) = T(n - 1) + k$$
where $T(n - 1)$ represents the time taken to execute the function `print_integers(n - 1)`.
Similarly, we can represent $T(n - 1)$ as
$$T(n - 1) = T(n - 2) + k$$
We can see that a pattern is being formed here:
1. $T(n)\ \ \ \ \ \ \ = T(n - 1) + k$
2. $T(n - 1) = T(n - 2) + k$
3. $T(n - 2) = T(n - 3) + k$
4. $T(n - 3) = T(n - 4) + k$
.<br>
.<br>
.<br>
.<br>
.<br>
.<br>
5. $T(2) = T(1) + k$
6. $T(1) = T(0) + k$
7. $T(0) = k1$
Notice that when `n = 0` we are only checking the base case and then returning. This time can be represented by some other constant, $k1$.
If we add the respective left-hand sides and right-hand sides of all these equations, we get:
$$T(n) = nk + k1$$
We know that while calculating time complexity, we tend to ignore these added constants because for large input sizes on the order of $10^5$, these constants become irrelevant.
Thus, we can simplify the above to:
$$T(n) = nk $$
We can see that the time complexity of our function `print_integers(n)` is a linear function of $n$. Hence, we can say that the time complexity of the function is $O(n)$.
## Binary Search
#### Overview
Given a **sorted** list (say `arr`), and a key (say `target`). The binary search algorithm returns the index of the `target` element if it is present in the given `arr` list, else returns -1. Here is an overview of how the recursive version of the binary search algorithm works:
1. Given a list with the lower bound (`start_index`) and the upper bound (`end_index`).
1. Find the center (say `mid_index`) of the list.
1. Check if the element at the center is your `target`? If yes, return the `mid_index`.<br><br>
1. Check if the `target` is greater than that element at `mid_index`? If yes, call the same function with right sub-array w.r.t center i.e., updated indexes as `mid_index + 1` to `end_index` <br><br>
1. Check if the `target` is less than that element at `mid_index`? If yes, call the same function with left sub-array w.r.t center i.e., updated indexes as `start_index` to `mid_index - 1` <br><br>
1. Repeat the step above until you find the target or until the bounds are the same or cross (the upper bound is less than the lower bound).
#### Complexity Analysis
Let's look at the time complexity of the recursive version of binary search algorithm.
>Note: The binary search function can also be written iteratively. But for the sake of understanding recurrence relations, we will have a look at the recursive algorithm.
Here's the binary search algorithm, coded using recursion:
```
def binary_search(arr, target):
    """Return the index of target in the sorted list arr, or -1 if not found."""
    # Delegate to the recursive helper over the full index range
    return binary_search_func(arr, 0, len(arr) - 1, target)
def binary_search_func(arr, start_index, end_index, target):
    """Recursively binary-search sorted arr within [start_index, end_index].

    Returns the index of target, or -1 once the bounds cross.
    """
    if start_index > end_index:
        return -1
    mid_index = (start_index + end_index) // 2
    pivot = arr[mid_index]
    if pivot == target:
        return mid_index
    if target < pivot:
        # Target lies in the left half
        return binary_search_func(arr, start_index, mid_index - 1, target)
    # Target lies in the right half
    return binary_search_func(arr, mid_index + 1, end_index, target)
# Demo: 5 is at index 5 of this sorted list
arr = [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(binary_search(arr, 5))
```
Let's try to analyze the time complexity of the recursive algorithm for binary search by finding out the recurrence relation.
Our `binary_search()` function calls the `binary_search_func()` function. So the time complexity of the function is entirely dependent on the time complexity of the `binary_search_func()`.
The input here is an array, so our time complexity will be determined in terms of the size of the array.
Like we did earlier, let's say the time complexity of `binary_search_func()` is a function of the input size, `n`. In other words, the time complexity is $T(n)$.
Also keep in mind that we are usually concerned with the worst-case time complexity, and that is what we will calculate here. In the worst case, the `target` value will not be present in the array.
In the `binary_search_func()` function, we first check for the base case. If the base case does not return `True`, we calculate the `mid_index` and then compare the element at this `mid_index` with the `target` values. All the operations are independent of the size of the array. Therefore, we can consider all these independent operations as taking a combined time, $k$.
Apart from these constant time operations, we do just one other task. We either make a call on the left-half of the array, or on the right half of the array. By doing so, we are reducing the input size by $n/2$.
>Note: Remember that we usually consider large input sizes while calculating time complexity; there is no significant difference between $10^5$ and ($10^5 + 1$).
Thus, our new function call is only called with half the input size.
We said that $T(n)$ was the time complexity of our original function. The time complexity of the function when called with half the input size will be $T(n/2)$.
Therefore:
$$T(n) = T(n/2) + k$$
Similarly, in the next step, the time complexity of the function called with half the input size would be:
$$T(n/2) = T(n/4) + k$$
We can now form similar equations as we did for the last problem:
1. $T(n)\ \ \ = T(n/2) + k$
2. $T(n/2) = T(n/4) + k$
3. $T(n/4) = T(n/8) + k$
4. $T(n/8) = T(n/16) + k$
.<br>
.<br>
.<br>
.<br>
.<br>
.<br>
5. $T(4) = T(2) + k$
6. $T(2) = T(1) + k$
7. $T(1) = T(0) + k1$ $^{(1)}$
8. $T(0) = k1$
$^{(1)}$ If we have only one element, we go to 0 elements next
From our binary search section, we know that it takes $log(n)$ steps to go from $T(n)$ to $1$. Therefore, when we add the corresponding left-hand sides and right-hand sides, we can safely say that:
$$T(n) = log(n) * k + k1$$
As always, we can ignore the constant. Therefore:
$$T(n) = log(n) * k $$
Thus we see that the time complexity of the function is a logarithmic function of the input, $n$. Hence, the time complexity of the recursive algorithm for binary search is $log(n)$.
| github_jupyter |
# Caso de estudio - Supervivencia en el Titanic
# Extracción de características
Ahora trataremos parte muy importante del aprendizaje automático: la extracción de características cuantitativas a partir de los datos. Con este fin:
- Aprenderemos como las características pueden extraerse a partir de datos del mundo real.
- Veremos como extraer características numéricas a partir de datos textuales.
Además, repasaremos algunas herramientas básicas en scikit-learn que pueden utilizarse para realizar estas tareas.
## ¿Qué son características?
### Características numéricas
Recuerda que los datos en scikit-learn vienen en arrays de dos dimensiones con tamaño **n_samples** $\times$ **n_features**.
Anteriormente, vimos el dataset iris, que tienen 150 ejemplos y 4 características.
```
# Load the bundled iris dataset and show its (n_samples, n_features) shape
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data.shape)
```
Las características son:
- Longitud de sépalo en cm
- Ancho de sépalo en cm
- Longitud de pétalo en cm
- Ancho de pétalo en cm
Las características numéricas como estas son directas: cada ejemplo contiene una lista de números con precisión real que se corresponden con las características.
### Características categóricas
¿Qué pasa si tenemos características categóricas?. Por ejemplo, imagina que disponemos del color de cada flor de iris: $color \in \{red, blue, purple\}$
Podrías estar tentado de usar algo así como i.e. *red=1, blue=2, purple=3*, pero, en general, **esto es una mala idea**. Los estimadores tienden a trabajar con la suposición de que las características numéricas se sitúan en una escala continua por lo que, en este ejemplo, 1 y 2 serían más parecidos que 1 y 3 y esto no tiene porque ser generalmente verdad.
De hecho, el ejemplo anterior es una subcategoría de las variables categóricas, en concreto, una variable nominal. Las variables nominales no tienen asociado un orden, mientras que las variables ordinales si que implican un orden. Por ejemplo, las tallas de las camisetas formarían una variable ordinal "XL > L > M > S".
Una forma de transformar variables nominales en un formato que prevenga al estimador de asumir un orden es la llamada representación $1$-$de$-$J$ (*one-hot encoding*). Cada categoría genera su propia variable por separado.
El conjunto de características aumentado sería:
- Longitud de sépalo en cm
- Ancho de sépalo en cm
- Longitud de pétalo en cm
- Ancho de pétalo en cm
- color=purple (1.0 o 0.0)
- color=blue (1.0 o 0.0)
- color=red (1.0 o 0.0)
Observa que al usar este conjunto de características puede que los datos se representen mejor usando **matrices dispersas**, como veremos en el ejemplo de clasificación de texto que analizaremos después.
#### Utilizando DictVectorizer para codificar variables categóricas
Cuando los datos de entrada están codificados con un diccionario de tal forma que los valores son o cadenas o valores numéricos, se puede usar la clase `DictVectorizer` para obtener la expansión booleana sin tocar las características numéricas:
```
# Example records mixing a categorical field (city) with a numeric one (temperature)
measurements = [
    {'city': 'Dubai', 'temperature': 33.},
    {'city': 'London', 'temperature': 12.},
    {'city': 'San Francisco', 'temperature': 18.}
]
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec
# One-hot encode the string field; numeric fields pass through unchanged
vec.fit_transform(measurements).toarray()
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2; on modern
# versions call get_feature_names_out() instead
vec.get_feature_names()
```
### Características derivadas
Otro tipo bastante común de características son las **características derivadas**, que son características obtenidas a partir de algún paso previo de preprocesamiento y que se supone que son más informativas que las originales. Este tipo de características pueden estar basadas en **extracción de características** y en **reducción de la dimensionalidad** (tales como PCA o aprendizaje de variedades) y pueden ser combinaciones lineales o no lineales de las características originales (como en regresión polinómica) o transformaciones más sofisticadas de las características.
### Combinando características numéricas y categóricas
Como un ejemplo de la forma en que se trabaja con datos numéricos y categóricos, vamos a realizar un ejercicio en el que predeciremos la supervivencia de los pasajeros del HMS Titanic.
Utilizaremos una versión del dataset Titatic que puede descargarse de [titanic3.xls](http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.xls). Previamente, ya hemos convertido el `.xls` a `.csv` para que sea más fácil su manipulación (como texto), de manera que los datos no fueron modificados.
Necesitamos leer todas las líneas del fichero `titanic3.csv`, ignorar la cabecera y encontrar las etiquetas (sobrevivió o murió) y los datos de entrada (características de la persona). Vamos a ver la cabecera y algunas líneas de ejemplo:
```
import os
import pandas as pd
# Load the Titanic passenger list (CSV version of titanic3.xls)
titanic = pd.read_csv(os.path.join('datasets', 'titanic3.csv'))
print(titanic.columns)
```
Aquí tenemos una descripción de lo que significan cada una de las variables:
```
pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival
(0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare
cabin Cabin
embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
```
Parece que las variables `name`, `sex`, `cabin`, `embarked`, `boat`, `body` y `homedest` son candidatas a ser variables categóricas, mientras que el resto parecen variables numéricas. Vamos a ver las primeras filas para tener un mejor conocimiento de la base de datos:
```
titanic.head()
```
Podemos descartar directamente las columnas "boat" y "body" ya que está directamente relacionadas con que el pasajero sobreviviese. El nombre es (probablemente) único para cada persona y por tanto no es informativo. Vamos a intentar en primer lugar usar "pclass", "sibsp", "parch", "fare" y "embarked" como características:
```
# Target: survived column (1 = survived, 0 = died)
labels = titanic.survived.values
# Candidate predictor columns (mix of numeric and categorical)
features = titanic[['pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'embarked']]
features.head()
```
En principio, los datos ahora solo contienen características útiles, pero no están en un formato que los algoritmos de aprendizaje automático puedan entender. Necesitamos transformar las cadenas "male" y "female" en variables binarias que indiquen el género y lo mismo para `embarked`. Podemos hacer esto usando la función ``get_dummies`` de pandas:
```
pd.get_dummies(features).head()
```
Esta transformación ha codificado bien las columnas de cadenas. Sin embargo, parece que la variable ``pclass`` también es una variable categórica. Podemos listar de forma explícita las variables que queremos codificar utilizando el parámetro ``columns`` para incluir ``pclass``:
```
# One-hot encode the categorical columns, including pclass
features_dummies = pd.get_dummies(features, columns=['pclass', 'sex', 'embarked'])
features_dummies.head(n=16)
# We could also do this with DictVectorizer
from sklearn.feature_extraction import DictVectorizer
diccionario = features.to_dict('records')
vec = DictVectorizer()
dataset = vec.fit_transform(diccionario)
print(dataset.todense())
data = features_dummies.values
# Check for missing values; we will need to apply an imputer
import numpy as np
np.isnan(data).any()
```
Una vez hemos hecho el trabajo duro de cargar los datos, evaluar un clasificador con estos datos es directo. Vamos a ver qué rendimiento obtenemos con el clasificador más simple, `DummyClassifier('most_frequent')`, que es equivalente al `ZeroR`.
```
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
# Split before imputing so the imputer is fitted on training data only
train_data, test_data, train_labels, test_labels = train_test_split(
    data, labels, random_state=0)
# Fill missing values (SimpleImputer's default strategy is the column mean)
imp = SimpleImputer()
imp.fit(train_data)
train_data_finite = imp.transform(train_data)
test_data_finite = imp.transform(test_data)
# Verify no NaNs remain after imputation
np.isnan(train_data_finite).any()
# Baseline: always predict the most frequent class (equivalent to ZeroR)
from sklearn.dummy import DummyClassifier
clf = DummyClassifier(strategy='most_frequent')
clf.fit(train_data_finite, train_labels)
print("Accuracy: %f"
      % clf.score(test_data_finite, test_labels))
```
<div class="alert alert-success">
<b>EJERCICIO</b>:
<ul>
<li>
Intenta ejecutar el problema de clasificación anterior pero usando ``LogisticRegression`` y ``RandomForestClassifier`` en lugar de ``DummyClassifier``
</li>
<li>
Prueba a cambiar el conjunto de características considerado. ¿Consigues mejorar los resultados?
</li>
</ul>
</div>
| github_jupyter |
# Estimating an AR Model
## Introduction to Autoregression Model
An autoregression model is a regression with a time series and itself, shifted by a time step or steps. These are called lags. I will demonstrate with five examples with the non-stationarized datasets so that you can see the results in the original dataset along with the forecasted dataset.
```
import pandas as pd
from pandas import read_csv
from matplotlib import pyplot
from pandas.plotting import lag_plot
from statsmodels.graphics.tsaplots import plot_acf
```
### Example 1: Vacation dataset
```
# Read in vacation dataset (date index parsed from the first column)
vacation = read_csv('~/Desktop/section_3/df_vacation.csv', index_col=0, parse_dates=True)
vacation.head()
# Plot the time series against its lag
lag_plot(vacation)
pyplot.show()
from pandas import concat
# Build a two-column frame of the series and its one-step lag...
values = pd.DataFrame(vacation.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
# ...and print their correlation matrix (the off-diagonal entry is the
# lag-1 autocorrelation)
result = dataframe.corr()
print(result)
# Plot the autocorrelation of the dataset
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(vacation)
pyplot.show()
# Plot the Autocorrelation Function for the first 50 lags
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(vacation, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
# NOTE(review): statsmodels.tsa.arima_model.ARMA was removed in statsmodels
# 0.13; on modern versions use statsmodels.tsa.arima.model.ARIMA instead
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model: order=(1,0) means one autoregressive lag, no MA terms
mod = ARMA(vacation, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
```
The best model chosen is the one with the lowest Information Criterion. The AIC shows the lowest.
```
# Forecasting
# Import the ARMA module from statsmodels
# NOTE(review): removed in statsmodels 0.13; use statsmodels.tsa.arima.model.ARIMA
from statsmodels.tsa.arima_model import ARMA
# Refit the AR(1) model so we can forecast from it
mod = ARMA(vacation, order=(1,0))
res = mod.fit()
# Plot predictions over 2015-2025; dates past the end of the observed
# series are out-of-sample forecasts
res.plot_predict(start='2015', end='2025')
pyplot.show()
```
### Example 2: Furniture dataset
```
furn = read_csv('~/Desktop/section_3/df_furniture.csv', index_col=0, parse_dates=True)
furn.head()
# Plot the time series against its lag
lag_plot(furn)
pyplot.show()
from pandas import concat
values = pd.DataFrame(furn.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(furn)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(furn, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(furn, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
# NOTE(review): removed in statsmodels 0.13; use statsmodels.tsa.arima.model.ARIMA
from statsmodels.tsa.arima_model import ARMA
# Refit the AR(1) model to the furniture series for forecasting
mod = ARMA(furn, order=(1,0))
res = mod.fit()
# Plot predictions over 2015-2025; dates past the end of the observed
# series are out-of-sample forecasts
res.plot_predict(start='2015', end='2025')
pyplot.show()
```
### Example 3: Bank of America dataset
```
# Read in BOA dataset, this is original with resampling to monthly data
bac= read_csv('~/Desktop/section_3/df_bankofamerica.csv', index_col=0, parse_dates=True)
# convert daily data to monthly
bac= bac.resample(rule='M').last()
bac.head()
# Plot the time series against its lag
lag_plot(bac)
pyplot.show()
from pandas import concat
values = pd.DataFrame(bac.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(bac)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(bac, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(bac, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
# NOTE(review): removed in statsmodels 0.13; use statsmodels.tsa.arima.model.ARIMA
from statsmodels.tsa.arima_model import ARMA
# Refit the AR(1) model to the Bank of America series for forecasting
mod = ARMA(bac, order=(1,0))
res = mod.fit()
# Plot predictions over 2015-2025; dates past the end of the observed
# series are out-of-sample forecasts
res.plot_predict(start='2015', end='2025')
pyplot.show()
```
### Example 4: J.P. Morgan dataset
```
# Read in JPM dataset
jpm = read_csv('~/Desktop/section_3/df_jpmorgan.csv', index_col=0, parse_dates=True)
# Convert the daily data to quarterly
jpm= jpm.resample(rule='Q').last() # resample to quarterly data
jpm.head()
# Plot the time series against its lag
lag_plot(jpm)
pyplot.show()
from pandas import concat
values = pd.DataFrame(jpm.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(jpm)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(jpm, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(jpm, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
# NOTE(review): removed in statsmodels 0.13; use statsmodels.tsa.arima.model.ARIMA
from statsmodels.tsa.arima_model import ARMA
# Refit the AR(1) model to the J.P. Morgan series for forecasting
mod = ARMA(jpm, order=(1,0))
res = mod.fit()
# Plot predictions over 2015-2025; dates past the end of the observed
# series are out-of-sample forecasts
res.plot_predict(start='2015', end='2025')
pyplot.show()
```
### Example 5: Average Temperature of St. Louis dataset
```
# Read in temp dataset
temp = read_csv('~/Desktop/section_3/df_temp.csv', index_col=0, parse_dates=True)
temp.head()
# Plot the time series against its lag
lag_plot(temp)
pyplot.show()
from pandas import concat
values = pd.DataFrame(temp.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t-1', 't+1']
result = dataframe.corr()
print(result)
# Plot the autocorrelation
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(temp)
pyplot.show()
# Plot the Autocorrelation Function, using candle sticks
from pandas import read_csv
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(temp, lags=50)
pyplot.show()
# Estimating an AR Model
# Import the ARMA module from statsmodels
from statsmodels.tsa.arima_model import ARMA
# Fit an AR(1) model to the first simulated data
mod = ARMA(temp, order=(1,0)) # fit data to an AR1 model
res = mod.fit() # use fit() to estimate model
# Print out summary information on the fit
print(res.summary())
print(res.params)
# Estimated parameters are close to true parameters
# S.D. of innovations is standard deviation of errors
# L1 is lag1
# fitted model parameters
# Import the ARMA module from statsmodels
# NOTE(review): removed in statsmodels 0.13; use statsmodels.tsa.arima.model.ARIMA
from statsmodels.tsa.arima_model import ARMA
# Refit the AR(1) model to the temperature series for forecasting
mod = ARMA(temp, order=(1,0))
res = mod.fit()
# Plot predictions over 2015-2025; dates past the end of the observed
# series are out-of-sample forecasts
res.plot_predict(start='2015', end='2025')
pyplot.show()
# end
```
| github_jupyter |
# Prediksi Predikat Lulus
## Drive - Colab
```
# Mount Google Drive inside Colab and move into the project folder
from google.colab import drive
from google.colab import files
drive.mount('/content/drive')
%cd /content/drive/MyDrive/ai_contest/Kelulusan/olah
```
## Import Modules
```
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
```
## D3
```
# Load the diploma (D3) graduation grades dataset and keep an untouched copy
df = pd.read_csv('new_lulus_diploma.csv')
df_ori = df.copy()
df.head()
# Class balance of the Predikat (graduation predicate) target column
df.Predikat.value_counts()
df.info()
df['Predikat'].value_counts().plot(kind='bar')
```
## Semester 5
```
# Semester-5 scenario: drop the semester-6 GPA column (IP6)
df_new = df.drop(['IP6'], axis=1)
# Back up the reduced dataset so it can be reloaded below
df_new.to_csv('nilai_diploma_5_semester.csv', index=False)
```
### Label Encoding
```
df = pd.read_csv('nilai_diploma_5_semester.csv', index_col=None)
df.head()
df.Predikat.unique()
# Encode the Predikat string labels as integers
# (Cumlaude=0, Memuaskan=1, Sangat Memuaskan=2, Tidak Lulus=3)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Predikat'] = le.fit_transform(df['Predikat'])
df.head()
df.Predikat.unique()
```
Keterangan:
* Cumlaude: 0
* Memuaskan: 1
* Sangat Memuaskan: 2
* Tidak Lulus: 3
```
df.Predikat.value_counts()
```
### Predict
```
from sklearn.model_selection import train_test_split
# Features: the per-semester GPAs; drop the derived Average and the target
X = df.drop(['Average', 'Predikat'], axis=1)
y = df['Predikat']
X
y
# Hold out 20% of the rows for testing (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)
```
### SVM
```
from sklearn.svm import SVC
# Train a support-vector classifier and predict on the held-out test set
svm = SVC(gamma="auto")
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

def test_acc(model):
    """Print a classification report, confusion matrix and accuracy score.

    Evaluates `model` on the notebook-level X_test / y_test split.
    Previously this function ignored its `model` argument and reused the
    module-level `y_pred`, so it could report scores for the wrong model
    when called after retraining; it now predicts with the model passed in.
    """
    # Predict with the model that was actually passed in
    y_pred = model.predict(X_test)
    print(f"{model} score")
    print('\n')
    print("Classification Report:")
    print(classification_report(y_test, y_pred))
    print('\n')
    print("Confusion Matrix:")
    print(confusion_matrix(y_test, y_pred))
    print('\n')
    print("Accuracy Score:")
    print(accuracy_score(y_test, y_pred))
test_acc(svm)
```
### Export Semester 5
```
import pickle
# Persist the trained semester-5 model; use context managers so the file
# handles are closed deterministically (the originals were never closed)
with open("Semester5.pkl", 'wb') as f:
    pickle.dump(svm, f)
del svm
with open("Semester5.pkl", "rb") as f:
    model = pickle.load(f)
# Sanity check: classify one example record of five semester GPAs
y_example = model.predict([[3.9, 3.6, 3.7, 3.6, 3.5]])
y_example
```
## Semester 4
```
df = df_ori.copy()
df_new = df.drop(['IP5', 'IP6'], axis=1)
# Backup
df_new.to_csv('nilai_diploma_4_semester.csv', index=False)
```
### Label Encoding
```
df = pd.read_csv('nilai_diploma_4_semester.csv', index_col=None)
df.head()
df.Predikat.unique()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Predikat'] = le.fit_transform(df['Predikat'])
df.head()
df.Predikat.unique()
```
Keterangan:
* Cumlaude: 0
* Memuaskan: 1
* Sangat Memuaskan: 2
* Tidak Lulus: 3
```
df.Predikat.value_counts()
```
### Predict
```
from sklearn.model_selection import train_test_split
X = df.drop(['Average', 'Predikat'], axis=1)
y = df['Predikat']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
```
### SVM
```
from sklearn.svm import SVC
svm = SVC(gamma="auto")
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
test_acc(svm)
```
### Export Semester 4
```
# Persist the trained semester-4 model; use context managers so the file
# handles are closed deterministically (the originals were never closed)
with open("Semester4.pkl", 'wb') as f:
    pickle.dump(svm, f)
del svm
with open("Semester4.pkl", "rb") as f:
    model = pickle.load(f)
# Sanity check: classify one example record of four semester GPAs
y_example = model.predict([[3.9, 3.6, 3.5, 3.6]])
y_example
```
## Semester 3
```
df = df_ori.copy()
df_new = df.drop(['IP4', 'IP5', 'IP6'], axis=1)
# Backup
df_new.to_csv('nilai_diploma_3_semester.csv', index=False)
```
### Label Encoding
```
df = pd.read_csv('nilai_diploma_3_semester.csv', index_col=None)
df.head()
df.Predikat.unique()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Predikat'] = le.fit_transform(df['Predikat'])
df.head()
df.Predikat.unique()
```
Keterangan:
* Cumlaude: 0
* Memuaskan: 1
* Sangat Memuaskan: 2
* Tidak Lulus: 3
```
df.Predikat.value_counts()
```
### Predict
```
from sklearn.model_selection import train_test_split
X = df.drop(['Average', 'Predikat'], axis=1)
y = df['Predikat']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
```
### SVM
```
from sklearn.svm import SVC
svm = SVC(gamma="auto")
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
test_acc(svm)
```
### Export Semester 3
```
# Persist the trained semester-3 model; use context managers so the file
# handles are closed deterministically (the originals were never closed)
with open("Semester3.pkl", 'wb') as f:
    pickle.dump(svm, f)
del svm
with open("Semester3.pkl", "rb") as f:
    model = pickle.load(f)
# Sanity check: classify one example record of three semester GPAs
y_example = model.predict([[3.9, 3.6, 3.6]])
y_example
```
## Catatan
Bagian ini merupakan bagian untuk membuat prediksi pada program studi diploma (D3)
| github_jupyter |
```
# Archive stale Monaco patient directories: locate every clinical patient
# folder, work out how many weeks ago its most recently modified entry was
# touched, and move folders older than the retention cutoff into a holding
# directory for archiving. Runs as a notebook cell sequence: two dry-run
# loops that only validate and print, then the loop that actually moves.
import os
import numpy as np
from glob import glob
import re
import time
import shutil
from tqdm import tqdm
# Per-centre UNC paths: 'search_string' globs for each patient's
# demographic.* file, 'holding' is the staging area the mover targets,
# 'archived' is the final archive location (used here only for dry-run #1).
directory_map = {
    'rccc': {
        'search_string': r'\\monacoda\FocalData\RCCC\1~Clinical\*~*\demographic.*',
        'holding': r'\\monacoda\FocalData\ToBeArchived',
        'archived': r'\\UTILSVR\PhysBack\MONACO_ARCHIVE_1'
    },
    'nbcc': {
        'search_string': r'\\nbccc-monaco\Users\Public\Documents\CMS\FocalData\NBCCC\1~Clinical\*~*\demographic.*',
        'holding': r'\\nbccc-monaco\Users\Public\Documents\HoldingDirectory',
        'archived': r'\\nbccc-monaco\Archive\Patients'
    }
}
centre = 'rccc'
# Folders whose newest top-level entry is older than this many weeks are archived.
number_of_weeks_to_keep = 13
# Each patient folder is identified by the demographic.* file inside it.
patient_demographic_files = np.array(glob(directory_map[centre]['search_string']))
patient_demographic_files
# The patient directory itself is the parent of the demographic file.
patient_directories = np.array([
    os.path.dirname(item)
    for item in patient_demographic_files])
patient_directories
# For every patient directory, find the age (in weeks) of its most recently
# modified top-level entry.
corresponding_number_of_weeks_ago = []
for current_patient_directory in patient_directories:
    # Only one level deep is inspected — assumes top-level entries get a new
    # mtime whenever anything inside the folder changes (TODO confirm).
    files_and_folders_one_level = glob(os.path.join(current_patient_directory, '*'))
    all_date_modified = np.array([
        os.path.getmtime(item) for item in files_and_folders_one_level])
    # Seconds since modification, converted to weeks.
    number_of_weeks_ago = (time.time() - all_date_modified) / (60 * 60 * 24 * 7)
    # min() picks the NEWEST entry, i.e. the folder's most recent activity.
    minimum_number_of_weeks_ago = np.min(number_of_weeks_ago)
    corresponding_number_of_weeks_ago.append(minimum_number_of_weeks_ago)
corresponding_number_of_weeks_ago = np.array(corresponding_number_of_weeks_ago)
corresponding_number_of_weeks_ago
# Boolean mask of the directories past the retention cutoff.
archive_reference = corresponding_number_of_weeks_ago > number_of_weeks_to_keep
directories_to_be_archived = patient_directories[archive_reference]
directories_to_be_archived
corresponding_number_of_weeks_ago[archive_reference]
len(corresponding_number_of_weeks_ago[archive_reference])
patient_folder_name = [
    os.path.basename(item) for item in directories_to_be_archived]
patient_folder_name
# Dry run #1: compute destinations inside the FINAL archive and validate
# them — nothing is moved here; the loop only asserts and prints the plan.
test_archive_directory = directory_map[centre]['archived']
test_location_to_move_to = [
    os.path.join(test_archive_directory, item) for item in patient_folder_name]
test_location_to_move_to
for i in tqdm(range(len(directories_to_be_archived))):
    # NOTE(review): `assert` is stripped under `python -O`; these are sanity
    # checks against races/duplicates, not hard validation.
    assert os.path.exists(directories_to_be_archived[i]), "Error {} doesnt exist anymore!".format(directories_to_be_archived[i])
    assert not(os.path.exists(test_location_to_move_to[i])), "Error {} exists already!".format(test_location_to_move_to[i])
    print("{} => {}".format(directories_to_be_archived[i], test_location_to_move_to[i]))
# Dry run #2: same validation, but against the HOLDING (staging) directory
# that the actual move below targets.
archive_directory = directory_map[centre]['holding']
location_to_move_to = [
    os.path.join(archive_directory, item) for item in patient_folder_name]
location_to_move_to
for i in tqdm(range(len(directories_to_be_archived))):
    assert os.path.exists(directories_to_be_archived[i]), "Error {} doesnt exist anymore!".format(directories_to_be_archived[i])
    assert not(os.path.exists(location_to_move_to[i])), "Error {} exists already!".format(location_to_move_to[i])
    print("{} => {}".format(directories_to_be_archived[i], location_to_move_to[i]))
location_to_move_to
# The actual move: re-check both ends, move each folder into holding, then
# verify the source is gone and the destination exists before continuing.
for i in tqdm(range(len(directories_to_be_archived))):
    assert os.path.exists(directories_to_be_archived[i]), "Error {} doesnt exist anymore!".format(directories_to_be_archived[i])
    assert not(os.path.exists(location_to_move_to[i])), "Error {} exists already!".format(location_to_move_to[i])
    print("{} => {}".format(directories_to_be_archived[i], location_to_move_to[i]))
    shutil.move(directories_to_be_archived[i], location_to_move_to[i])
    assert not(os.path.exists(directories_to_be_archived[i])), "The move failed to delete the clinical file!"
    assert os.path.exists(location_to_move_to[i]), "Failed to archive!"
print(" Done\n")
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.