code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis for PV Panels in North of Norway (latitude: 70)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Energy consumption
# "In Norway, average household electricity consumption is 16 000 kWh per year, and the average price was NOK 1.1 per kWh in 2013." <br>
# [Source: energifaktanorge.no](https://energifaktanorge.no/en/norsk-energibruk/energibruken-i-ulike-sektorer/)
#
# The average household electricity consumption in Northern Norway was 19719 kWh in 2012 (last data available). <br>
# [Source: ssb.no](https://www.ssb.no/en/statbank/list/husenergi)
# Regional household energy-consumption table (semicolon-separated CSV).
cons = pd.read_csv("energy_consumption_region.csv", delimiter= ';')
cons.tail()
# # PV Cost
#
# ## Subsidies
#
# ### ENOVA
#
# https://www.enova.no/privat/alle-energitiltak/solenergi/el-produksjon-/
#
# From 1 July 2020, the fixed subsidy rate for electricity generation will change from NOK 10,000 to NOK 7,500. The rate of NOK 1,250 per installed power up to 15 kW is maintained.
#
# To receive the current support rate, the measure must be completed and registered with an invoice in Enova's application portal by 30 June 2020.
#
# Due to the corona situation, Enova has chosen to postpone these changes from April 1 to July 1.
#
#
# Norway's subsidies at 1000€ + 125€/kW (usually amount to +/- 15% of system cost)
# Source: https://www.otovo.no/blog/2019/02/21/the-otovo-solar-insight-solar-payback-trends-2019/
#
#
# * Type of scheme: Feed-in-tariff (Plusskundeordningen)
# * Value: 30 øre/kWh electricity sold to the grid (0.03-0.04 €/kWh)
# * Start/end: 2015 - current
# * Eligibility: <100kW capacity Other information: NOK 15 000 registration fee for prosumers (<100 kW). These schemes differ depending on the energy provider
#
# * Type of scheme: Subsidy (Enova)
# * Value: Support is limited to 10,000 NOK plus 1,250 NOK/kWp, up to a maximal capacity of 15 kWp. This is equivalent to 10-30% of the system cost.
# * Start/end: 2015- current
#
# Source: https://ec.europa.eu/commission/sites/beta-political/files/study-residential-prosumers-energy-union_en.pdf
# ## System elements and prices
# +
#Solar panels
# Solar-panel catalogue. Columns: watts = rated power, A = current,
# X/Y/Z = physical dimensions, Price in NOK.
# NOTE(review): units of A and X/Y/Z are not stated in SOURCE -- confirm.
category = ['Entry', 'Entry', 'Entry', 'Entry', 'Entry', 'Max_Power', 'Max_Power',
           'Max_Power', 'Max_Power', 'Grid', 'Grid']
item = ['1', '2', '3', '4', '5','1', '2', '3', '4', '1', '2']
hier_index = list(zip(category, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'item'])
# All-numeric array, so the DataFrame columns stay numeric (needed for NOK/W below).
data = np.array([[80, 4.6, 105.5, 54.5, 3.5, 1290],
                [90, 5.4, 100.5, 52, 3.5, 1290],
                [140, 8.88, 147, 66, 5, 2290],
                [160, 9.6, 130.6, 66.6, 3.5, 2290],
                [200, 11.3, 158, 80.8, 3.5, 3499],
                [50, 3, 63, 54, 3.5, 1090],
                [75, 4.17, 110.8, 50.2, 3.5, 2790],
                [100, 6, 119.5, 54, 4, 1999],
                [185, 11.1, 148.2, 66.6, 4, 2999],
                [300, 10.8, 165, 99.2, 3.5, 2999],
                [315, 9.31, 165, 99.2, 3, 3290]])
df_panel = pd.DataFrame(data, index=hier_index, columns= ['watts','A', 'X', 'Y', 'Z', 'Price'])
# +
#Panels structure
# Mounting-structure catalogue (brackets and tray racks).
# NOTE(review): the mixed string/number np.array forces every column to a
# string dtype, so Quant/Price are strings here -- convert before arithmetic.
category = ['Bracket', 'Bracket', 'Bracket', 'Bracket', 'Tray rack', 'Tray rack', 'Tray rack']
item = ['1','2','3','4','1','2','3']
hier_index = list(zip(category, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'item'])
data = np.array([['50/75/80/90/100', 1, 'adjus', 775],
                ['140/160/185', 1, 'adjus', 775],
                ['270/280/300', 1, 'adjus', 1049],
                ['185', 4, 'not-adjus', 12900],
                ['300', 4, 'adjus', 15990],
                ['300', 6, 'adjus', 18490],
                ['90/160/200/50/75/100/185', 1, 'adjus',1195]])
df_struc = pd.DataFrame(data, index=hier_index, columns= ['panels','Quant', 'Movement', 'Price'])
# +
#Charge regulator
# Charge-regulator catalogue.
# NOTE(review): the 'NA' placeholder strings make the whole np.array a string
# array, so every column (including Price) is a string dtype; use np.nan /
# pd.to_numeric if numeric operations are needed later.
brand = ['Basic', 'Basic', 'MorningStar', 'SunWind', 'SunWind','PeakPower', 'PeakPower', 'PeakPower',
        'Victron', 'Victron', 'Victron SmartSolar', 'Victron SmartSolar', 'Victron BlueSolar']
item = ['1', '2', '1', '1', '2', '1', '2', '3', '1', '2', '1', '2', '1']
hier_index = list(zip(brand, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['brand', 'item'])
data = np.array([[10, 'NA', 'NA', 'NA', 495],
                [20, 'NA', 'NA', 'NA', 595],
                [30, 'NA', 'NA', 'NA', 3490],
                [16, 180, 180, 'NA', 1890],
                [20, 240, 240, 'NA', 2990],
                [10, 130,260, 'NA', 1290],
                [20, 260,520, 'NA', 1790],
                [30, 390, 780, 'NA', 1995],
                [30, 'NA', 'NA', 100, 2990],
                [50, 'NA', 'NA', 100, 3990],
                [70, 1000, 2000, 'NA', 7990],
                [15, 220, 440, 75, 1499],
                [10, 'NA', 'NA', 'NA', 890]])
df_reg = pd.DataFrame(data, index=hier_index, columns= ['A', 'max_12V', 'max_24', 'V', 'Price'])
# +
#Batteries
# Battery catalogue, indexed by (brand, technology, item).
# NOTE(review): as with df_reg, the 'NA' strings force string dtype columns.
category = ['SunWind', 'SunWind', 'SunWind', 'SunWind', 'SunWind', 'SunWind', 'SunWind', 'SunWind',
           'Rolls', 'Rolls', 'Rolls', 'Rolls', 'Rolls', 'Rolls', 'MG', 'MG']
technology = ['AGM', 'AGM', 'AGM', 'AGM', 'Lithium', 'Lithium', 'Lithium', 'Lithium',
             'Lead / acid', 'Lead / acid', 'Lead / acid', 'Lead / acid', 'Lead / acid',
             'Lead / acid', 'Lithium', 'Lithium', ]
item = ['1', '2', '3', '4', '5', '6', '7', '8', '1',
       '2', '3', '4', '5', '6', '1', '2']
hier_index = list(zip(category, technology, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'tech', 'item'])
data = np.array([[136, 'NA', 32.9,17.3, 20.9, 32.5, 3390],
                [260, 'NA', 52.2, 24, 22, 64, 5990],
                [292, 'NA', 52.1, 27, 20.3, 73.5, 6990],
                [305, 'NA', 52.6, 27.8, 26, 72.1, 11995],
                [50, 'NA', 25, 16, 18, 7.5, 4499],
                [100,'NA', 31, 17.3, 21.7, 13.5, 6999],
                [125, 'NA', 33.7, 17.2, 27.9, 15, 12995],
                [300, 'NA', 52, 26.8, 22.8, 35.3, 25195],
                [120, 12, 34.3, 17.1, 24.1, 34, 3490],
                [504, 6, 31.8, 18.1, 42.5, 60, 6995],
                [503, 12, 55.7, 28.6, 46.4, 123, 17990],
                [605, 6, 31.8, 18.1, 42.5, 57, 6790],
                [2490, 2, 39.2, 22.4, 63, 94, 12595],
                [3426, 2, 39.4, 22.9, 80.3, 129, 15990],
                [200, 'NA', 'NA', 'NA', 'NA', 'NA', 49990],
                [300, 'NA', 'NA', 'NA', 'NA', 'NA', 62990]])
df_bat = pd.DataFrame(data, index=hier_index, columns= ['cap', 'V', 'X', 'Y', 'Z','weight', 'Price'])
# +
#Inverter
# Inverter catalogue: voltage (V), power (W), connect = grid-connect option.
category = ['SunWind', 'SunWind', 'SunWind', 'Phoenix', 'Phoenix', 'Phoenix', 'Phoenix',
           'Phoenix', 'Phoenix', 'Phoenix', 'Phoenix', 'Phoenix', 'Phoenix' ]
item = ['1', '2', '3', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
hier_index = list(zip(category, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'item'])
data = np.array([[12, 300, 'No', 1390],
                [12, 600, 'No', 1990],
                [12, 1500, 'No', 3695],
                [12, 250, 'No', 1390],
                [12, 375, 'No', 1790],
                [12, 500, 'Yes', 2195],
                [12, 800, 'No', 3495],
                [12, 1200, 'Yes', 4999],
                [24, 250, 'Yes', 1390],
                [24, 375, 'Yes', 1790],
                [24, 500, 'Yes', 2195],
                [24, 800, 'Yes', 3495],
                [24, 1200, 'Yes', 4999]])
df_inv = pd.DataFrame(data, index=hier_index, columns= ['voltage', 'power', 'connect', 'Price'])
# +
#Exchangers
# Exchanger (inverter/charger combo) catalogue.
category = ['Multiplus', 'Multiplus', 'Multiplus', 'Quattro', 'Quattro', 'Quattro',
           'EasySolar', 'EasySolar', 'Multi']
item = ['1', '2', '3', '1', '2', '3', '1', '2', '1']
hier_index = list(zip(category, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'item'])
data = np.array([[12, 1200, 2100, 300, 11490],
                [12, 2000, 4000, 500, 14995],
                [12, 3000, 6000, 800, 20495],
                [24, 5000, 5000, 800, 32995],
                [12, 3000, 6000, 800, 25995],
                [48, 8000, 8000, 600, 34990],
                [24, 3000, 3000, 200, 16999],
                [24, 3000, 6000, 400, 24999],
                [12, 500, 900, 250, 5990]])
df_exc = pd.DataFrame(data, index=hier_index, columns= ['voltage', 'VA', 'power', 'battery', 'Price'])
# +
#Accessories: mounting hardware, cabling and monitoring (Price in NOK).
category = ['Structure', 'Structure', 'Structure', 'Structure', 'Structure', 'Security',
           'Cable', 'Cable', 'Cable', 'Cable', 'Cable', 'Cable', 'Aditional']
item = ['1', '2', '3', '4', '5', '1', '1', '2', '3', '4', '5', '6', '1']
hier_index = list(zip(category, item))
hier_index = pd.MultiIndex.from_tuples(hier_index, names=['cat', 'item'])
data = np.array([['Aluminium rail', 360],
                ['Shot for alloy rail', 99],
                ['Mounting clip double', 45],
                ['Mounting clip simple', 40],
                ['Mounting bracket', 87],
                ['Switch, 2 poles, 25A', 695],
                ['Cable(4mm2, 50m)', 1690],
                ['Cable clips(100)', 139],
                ['Cable MC4 for solar panel (1m)', 69],
                ['Splitter MC4 cable', 164],
                ['Battery-regulator cable', 225],
                ['Battery parallelcable (6mm, 1m)', 115],
                ['Battery monitor', 2595]])
df_acc = pd.DataFrame(data, index=hier_index, columns= ['description', 'Price'])
# -
# Show every catalogue; `display` is the IPython/Jupyter rich-output helper
# (available as a builtin inside notebooks).
print('panels')
display(df_panel)
print('structure')
display(df_struc)
print('regulator')
display(df_reg)
print('exchanger')
display(df_exc)
print('inverter')
display(df_inv)
print('batteries')
display(df_bat)
print('accesories')
display(df_acc)
# ## Analysis and design
# ### Solar panels
# Price per watt makes the three panel categories directly comparable.
df_panel['NOK/W'] = round((df_panel['Price'] / df_panel['watts']),2)
df_panel.sort_values(by='NOK/W', ascending=True)
# +
# Create Figure (empty canvas)
fig1 = plt.figure()
axes = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
#Entry-level panels (comments previously said AGM/Lithium/Lead acid --
#copy/paste from the battery plot; the slices below are panel categories)
eprice = df_panel.loc['Entry'][['Price']]
ewatt = df_panel.loc['Entry'][['watts']]
#Max_Power panels
mpprice = df_panel.loc['Max_Power'][['Price']]
mpwatt = df_panel.loc['Max_Power'][['watts']]
#Grid panels
gprice = df_panel.loc['Grid'][['Price']]
gwatt = df_panel.loc['Grid'][['watts']]
# Plot on that set of axes
axes.plot(eprice, ewatt,'b', lw=0.5, ls='-', marker='+', ms = 8, label="Entry")
axes.plot(mpprice, mpwatt, 'r', lw=0.5, ls='-', marker='+', ms = 8, label="Max Power")
axes.plot(gprice, gwatt, 'g', lw=0.5, ls='-', marker='+', ms = 8, label="Grid")
axes.set_xlabel('NOK')
axes.set_ylabel('watts')
axes.set_title('Panels subcategories per price')
axes.legend(loc=0)
# -
# The best relation between power and price is given by the "Grid" panels, with the 300 watt panel having the cheapest price in NOK/watt. <br>
# ### Energy produced
# The system proposed is of 3kWp installed, to try to match the energy consumption of a household.
pwsyst = 3000 #target system size: 3000 wp = 3 kWp
pwpanel = df_panel.loc['Grid'].loc['1'][['watts']] #panel selected (Grid, 300w) -- NOTE: a 1-element Series, not a scalar
quantity = pwsyst / pwpanel #10 -- also a Series; use .iloc[0] if a scalar panel count is needed downstream
# ## Climatic conditions
#
# NASA/POWER Climatologies <br>
# Monthly & Annual Climatologies (July 1983 - June 2005)<br>
# Location: Latitude 70.43 Longitude 24.5 <br>
# Source: https://power.larc.nasa.gov/data-access-viewer/
# NASA/POWER monthly climatology table; 'NA' cells become NaN.
df_clim = pd.read_csv('Climatology.csv', delimiter= ';', na_values= 'NA')
df_clim
# +
# Create Figure (empty canvas)
fig2 = plt.figure()
axes = fig2.add_axes([0.1, 0.1, 0.8, 0.8])
#Temperature
months = df_clim.columns[2:14]
values = df_clim.iloc[0][2:14].astype(float)
# Plot on that set of axes
axes.plot(months, values,'b', lw=0.5, ls='-', marker='+', ms = 8)
axes.plot(months, np.zeros(12),'black', lw=0.5, ls='-')
axes.set_xlabel('month')
axes.set_ylabel('ºC')
axes.set_title('Monthly average temperature at 2 meters')
# +
# Create Figure (empty canvas)
fig3 = plt.figure()
axes = fig3.add_axes([0.1, 0.1, 0.8, 0.8])
#hours
months = df_clim.columns[2:14]
values = df_clim.iloc[1][2:14].astype(float)
# Plot on that set of axes
axes.plot(months, values,'r', lw=0.5, ls='-', marker='+', ms = 8)
axes.plot(months, np.zeros(12),'black', lw=0.5, ls='-')
axes.set_xlabel('month')
axes.set_ylabel('hours')
axes.set_title('Daily hours')
# +
# Create Figure (empty canvas)
fig4 = plt.figure()
axes = fig4.add_axes([0.1, 0.1, 0.8, 0.8])
#Irradiation
months = df_clim.columns[2:14]
direct = df_clim.iloc[4][2:14].astype(float)
diff = df_clim.iloc[5][2:14].astype(float)
# Plot on that set of axes
axes.plot(months, direct,'r', lw=0.5, ls='-', marker='+', ms = 8, label="Direct")
axes.plot(months, (direct + diff),'b', lw=0.5, ls='-', marker='+', ms = 8, label="Direct + Diffuse")
axes.set_xlabel('month')
axes.set_ylabel('kW-hr/m^2/day')
axes.set_title('Daily normal radiation')
axes.legend(loc=0)
# +
# Create Figure (empty canvas)
fig5 = plt.figure()
axes = fig5.add_axes([0.1, 0.1, 0.8, 0.8])
#Irradiation
months = df_clim.columns[2:14]
deg0 = df_clim.iloc[6][2:14].astype(float)
deg55 = df_clim.iloc[7][2:14].astype(float)
deg70 = df_clim.iloc[8][2:14].astype(float)
deg85 = df_clim.iloc[9][2:14].astype(float)
deg90 = df_clim.iloc[10][2:14].astype(float)
# Plot on that set of axes
axes.plot(months, deg0,'LightSalmon', lw=2, ls='-', marker='+', ms = 8, label="Tilted 0 degrees")
axes.plot(months, deg55,'DarkSalmon', lw=1, ls='-', marker='+', ms = 8, label="Tilted 55 degrees")
axes.plot(months, deg70,'IndianRed', lw=1, ls='-', marker='+', ms = 8, label="Tilted 70 degrees")
axes.plot(months, deg85,'Crimson', lw=1, ls='-', marker='+', ms = 8, label="Tilted 85 degrees")
axes.plot(months, deg90,'FIreBrick', lw=2, ls='-', marker='+', ms = 8, label="Tilted 90 degrees")
axes.set_xlabel('month')
axes.set_ylabel('kW-hr/m^2/day')
axes.set_title('Solar irradiance in tilted surfaces')
axes.legend(loc=0)
# +
# Create Figure (empty canvas)
fig6 = plt.figure()
axes = fig6.add_axes([0.1, 0.1, 0.8, 0.8])
#Irradiation
months = df_clim.columns[2:14]
opt = df_clim.iloc[12][2:14].astype(float)
avg_opt = (55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, )
# Plot on that set of axes
axes.plot(months, opt,'b', lw=0.5, ls='-', marker='+', ms = 8)
axes.plot(months, avg_opt,'grey', lw=0.5, ls='-', )
axes.set_ylim([0, 90])
axes.set_xlabel('month')
axes.set_ylabel('degrees')
axes.set_title('Solar irradiance optimal angle')
# -
# ## Electricity Cost
#
#
| projects/PV/PV_Finnmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modelling
#importing the dependencies
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Folder with full CAPTCHA images and folder with pre-extracted single-letter images.
CAPTCHA_IMAGE_FOLDER = "generated_captcha_images"
OUTPUT_FOLDER = "extracted_letter_images"
# ## Extracting and Forming dataset of single letters by creating lists
#Extracting the letters and appending them in lists
def generate_data():
    """Load every extracted letter image together with its character label.

    Each sub-folder of OUTPUT_FOLDER is named after the character it holds;
    every image inside is read as grayscale, resized to 20x20 and reshaped
    to (20, 20, 1) for the CNN.  Returns the pair (images, labels).
    """
    images, labels = [], []
    for label in os.listdir(OUTPUT_FOLDER):
        # Skip hidden entries such as ".DS_Store"
        if label.startswith('.'):
            continue
        folder_path = OUTPUT_FOLDER + '/' + label
        for file_name in os.listdir(folder_path):
            letter = cv2.imread(folder_path + '/' + file_name, 0)  # 0 = grayscale
            letter = cv2.resize(letter, (20, 20))
            images.append(letter.reshape((20, 20, 1)))
            labels.append(label)
    print(len(images))
    print(len(labels))
    return images, labels
xtrain,ytrain=generate_data()
ytrain[0]
# ## Converting to one hot encoding
#Converting into one-hot
from sklearn.preprocessing import LabelBinarizer
lb=LabelBinarizer()
lb.fit(ytrain)  # learns the set of character classes
print(ytrain[0])
print(lb.classes_)
ytrain=lb.transform(ytrain)  # string labels -> one-hot rows
print(ytrain[0])
print(ytrain.shape)
# labels=lb.inverse_transform(labels)
#viewing the image in train; pixel values scaled to [0, 1] for training
xtrain_np=np.array(xtrain,dtype=float)/255.0
# NOTE(review): xtrain images are single-channel (20,20,1); COLOR_BGR2RGB
# expects 3 channels -- COLOR_GRAY2RGB (or plt.imshow(..., cmap='gray'))
# is likely intended here; confirm this cell actually runs.
plt.imshow(cv2.cvtColor(xtrain[1933].reshape(20,20), cv2.COLOR_BGR2RGB))
# ## Modelling
#importing the dependencies
from sklearn.model_selection import train_test_split
Xtrain,Xval,Ytrain,Yval=train_test_split(xtrain_np,ytrain,test_size=0.1)
from keras.models import Sequential
from keras.layers import Dense,Conv2D,MaxPooling2D,Flatten
# ## Model
# Two-conv CNN classifying a 20x20 grayscale letter image into one of the
# 32 character classes produced by the LabelBinarizer.
model=Sequential()
model.add(Conv2D(128,kernel_size=5,strides=1,padding="Same",activation="relu",input_shape=(20,20,1)))
model.add(MaxPooling2D(pool_size=2,padding='Same'))
model.add(Conv2D(64,kernel_size=4,strides=1,padding='Same',activation='relu'))
model.add(MaxPooling2D(pool_size=2,padding='Same'))
model.add(Flatten())
# softmax, not sigmoid: categorical_crossentropy expects the output layer to
# be a probability distribution over mutually exclusive classes.
model.add(Dense(32,activation='softmax'))
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
model.summary()
# ## Training
model.fit(Xtrain,Ytrain,validation_data=(Xval,Yval),batch_size=64,epochs=5)
model.evaluate(Xtrain,Ytrain)
# ## Breaking CAPTCHAS
# Holds annotated output images for CAPTCHAs the model solved correctly.
out=[]
def test(image_names):
    """Segment each CAPTCHA into letter boxes, classify them with the global
    `model`, and append the annotated image to the global `out` list when the
    predicted 4-letter string matches the image's file name (minus extension).
    """
    CAPTCHA_IMG_FOLDER=CAPTCHA_IMAGE_FOLDER+'/'+'train_images'
    for image in image_names:
        if(image.startswith('.')):
            continue
        im_path=CAPTCHA_IMG_FOLDER +'/'+ image
        gray=cv2.imread(im_path) #reading the image
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY) #converting to gray scale
        gray = cv2.copyMakeBorder(gray, 20, 20, 20, 20, cv2.BORDER_REPLICATE) # Add some extra padding around the image
        thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] # threshold the image (convert it to pure black and white)
        contours= cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # find the contours (continuous blobs of pixels) in the image
        # NOTE(review): [0] assumes the OpenCV 4.x return signature
        # (contours, hierarchy); under OpenCV 3.x findContours returns a
        # 3-tuple and this would grab the wrong element -- confirm version.
        contours=contours[0]
        im_letters=[]
        for contour in contours: # loop through the contours and extract the letter inside each one
            (x,y,w,h)=cv2.boundingRect(contour)
            if w/h>1.5: #if the contour is too wide, split it into 2 letter boxes
                im_letters.append((x,y,w//2,h))
                im_letters.append((x+w//2,y,w//2,h))
            else:
                im_letters.append((x,y,w,h))
        if(len(im_letters)!=4):
            continue # skip images where segmentation did not yield exactly 4 letters
        im_letters=sorted(im_letters,key=lambda x : x[0]) #Sort the detected letter images based on the x coordinate to make sure we are processing them from left-to-right
        output = cv2.merge([gray] * 3) # Create an output image (gray replicated to 3 channels so boxes can be drawn in color)
        predictions = [] # A list to hold our predicted letters
        for letter_box in im_letters: # loop over the letters
            (x,y,w,h)=letter_box
            letter_img=gray[y-2:y+h+2,x-2:x+w+2] # Extract the letter from the original image with a 2-pixel margin around the edge
            img=cv2.resize(letter_img,(20,20))
            img = img.reshape((1,20,20,1))
            pred=model.predict(img) #Predict on this letter
            p_letter=lb.inverse_transform(pred)[0] # Convert the one-hot-encoded prediction back to a normal letter
            predictions.append(p_letter) #append the predictions
            cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1) # draw the prediction on the output image (draws the box)
            cv2.putText(output, p_letter, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2) # draw the prediction on the output image (inserts the text)
        capcha_test="".join(predictions)
        if(capcha_test==image.split('.')[0]):
            out.append(output) # keep only the correctly solved CAPTCHAs
test_images=os.listdir(CAPTCHA_IMAGE_FOLDER+'/'+'train_images')[0:100]
test(test_images)
# ## Results
# Show solved CAPTCHAs with their predicted boxes/labels.
# NOTE(review): consecutive imshow calls in one cell draw on the same axes,
# so only the last image is rendered unless each call is in its own cell.
plt.imshow(out[0])
plt.imshow(out[1])
plt.imshow(out[2])
| Capcha_recognition_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Practice Exercise
# In this notebook, you have to use the knowledge that you have gathered from the `intro-to-python` notebook and solve three Questions and one Project question to pass the first module.
from grader import grader_1,grader_2,grader_3
# #### Question 1
# You are driving a little too fast, and a police officer stops you. Write a function to return one of 3 possible results: "No ticket", "Small ticket", or "Big Ticket". If your speed is 60 or less, the result is "No Ticket". If speed is between 61 and 80 inclusive, the result is "Small Ticket". If speed is more than 80, the result is "Big Ticket". Unless it is your birthday (encoded as a boolean value in the parameters of the function) -- on your birthday, your speed can be 5 higher in all cases.
# +
import random
# Don't delete this function
# Helper function
def test_case():
    """Produce one random (speed, birthday) pair for exercising `speeding`.

    The draw order (choice first, uniform second) is kept fixed so seeded
    runs in the grader remain reproducible.
    """
    is_birthday = random.choice([True, False])
    return random.uniform(0, 120), is_birthday
# -
# Create a function called 'speeding'
# Create a function called 'speeding'
def speeding(speed, birthday):
    """Classify a speeding ticket.

    :param speed: measured speed (number)
    :param birthday: True if it is the driver's birthday, which grants an
        extra 5 units of allowance in every bracket.
    :return: "No Ticket" (speed <= 60), "Small Ticket" (61-80) or
        "Big Ticket" (> 80); all thresholds shift up by 5 on a birthday.
    """
    allowance = 5 if birthday else 0
    if speed <= 60 + allowance:
        return "No Ticket"
    if speed <= 80 + allowance:
        return "Small Ticket"
    return "Big Ticket"
# Don't delete this function
# Driver code to test your 'speeding' function
def check_test_case():
    """Run `speeding` on three seeded random cases and return the results."""
    random.seed(9020)  # fixed seed so the grader sees reproducible cases
    ans=[]
    for _ in range(0, 3):
        speed, birthday = test_case()
        print(speed, birthday)
        ans.append(speeding(speed, birthday))
    print(ans)
    return ans
## Run this to grade your code
## Do not edit
grader_1(check_test_case())
# #### Questions 2
# Write a Python program to find a triplet such that the sum of two elements equals the third element.
#
# Return type : tuple
#
# Example:
#
# Given : [22, 5, 32, 0, 10, 786, 23, 8, 19, 46]
# Expected : (10, 22, 32)
#
#
# Given : [22, 5, 32, 0, 12, 2, 23, 8, 19, 46]
# Expected : "No such triplet found"
def find_triplets(input_array, length):
    """
    Reads and returns triplets.
    :param input_array: given_array.
    length: length of the given_array
    :return: triplet as a sorted tuple, or "No such triplet found"
    """
    # Scan ordered pairs (i, j) and look for a third, distinct index k with
    # input_array[i] + input_array[j] == input_array[k].  Distinct indices
    # matter: with a 0 in the array, 0 + x == x must not reuse x itself.
    for i in range(length):
        for j in range(i + 1, length):
            target = input_array[i] + input_array[j]
            for k in range(length):
                if k != i and k != j and input_array[k] == target:
                    # Ascending order matches the expected output, e.g. (10, 22, 32)
                    return tuple(sorted((input_array[i], input_array[j], input_array[k])))
    return "No such triplet found"
# +
# Don't delete this function
# Driver code-block for your functions
def check_test_case_2():
    """Exercise `find_triplets` on the two specification examples."""
    ans_2=[]
    given_1 = [22, 5, 32, 0, 10, 786, 23, 8, 19, 46]
    # str() here because the grader compares the first answer as text
    ans_2.append(str(find_triplets(given_1, len(given_1))))
    given_2 = [22, 5, 32, 0, 12, 2, 23, 8, 19, 46]
    ans_2.append(find_triplets(given_2, len(given_2)))
    return ans_2
# -
## Run this to grade your code
## Do not edit
grader_2(check_test_case_2())
# #### Project 1
# You are given a file with different data points, you have to gather inference from those data points. For example,
#
# This file content:
#
# something
# 1
# 7
# somEThing
# N/A
# 2
# wassup
# woop
#
# woop
# something
# WoOP
#
# Should yield this report:
#
# missing values: 1
# highest number: 7.0
# most common words: something, woop
# occurrences of most common: 3
# numbers: [1.0, 7.0, 2.0]
# words: ['something', 'something', 'n/a', 'wassup', 'woop', 'woop', 'something', 'woop']
# +
def read_file(path):
    """
    Reads and returns the data from the file specified as argument.
    :param path: path to the file to be read.
    :return: a tuple containing
        1. the number of empty lines (int)
        2. numeric values (list of floats)
        3. non-numeric values (list of strings)
    """
    missing_values = 0
    numbers = []
    words = []
    with open(path) as file:
        for line in file:
            stripped = line.strip()
            if not stripped:
                # A blank (or whitespace-only) line counts as a missing value
                missing_values += 1
                continue
            try:
                numbers.append(float(stripped))
            except ValueError:
                # Not parseable as a number -> keep as a word, original case
                # (make_report lowercases later)
                words.append(stripped)
    return missing_values, numbers, words
def make_report(missing_values, numbers, words):
    """
    Creates a report based on data given as arguments.
    :param missing_values: number of empty lines (int)
    :param numbers: numeric values (list of floats)
    :param words: non numeric values (list of strings)
    :return: the generated report (string)
    """
    max_value = get_max_value(numbers)
    lower_case_words = words_to_lowercase(words)
    most_common_words, most_common_count = get_most_common_words(lower_case_words)
    # str.join inserts the separators directly -- no need for the manual
    # loop plus trailing-comma trimming.
    most_common_str = ', '.join(most_common_words)
    report = ('missing values: {}\n'
              'highest number: {}\n'
              'most common words: {}\n'
              'occurrences of most common: {}\n'
              'numbers: {}\n'
              'words: {}').format(missing_values, max_value, most_common_str,
                                  most_common_count, numbers, lower_case_words)
    return report
def get_max_value(numbers):
    """
    Returns the greatest value of the list given as argument.
    :param numbers: numbers (list of numeric values)
    :return: greatest value of numbers, None if numbers is an empty list
    """
    # The original returned an undefined name (NameError); implement per docstring.
    return max(numbers) if numbers else None
def words_to_lowercase(words):
    """
    :param words: words to be converted (list of strings)
    :return: lowercased words (list of strings)
    """
    # The original returned an undefined name (NameError); implement per docstring.
    return [word.lower() for word in words]
def get_most_common_words(words):
    """
    Finds the most common words in a list of words.
    If there are multiple different words with the same amount of occurrences,
    they are all included in the return value sorted alphabetically.
    In addition to returning the most common words, the return value
    includes also the count of occurrences of the most common words.
    :param words: list of words (list of strings)
    :return: a tuple containing:
        1. most common words (list of strings)
        2. the count of occurrences of the most common words (int)
    """
    if not words:
        # No words at all: nothing is "most common"
        return [], 0
    counts = {}
    for word in words:
        counts[word] = counts.get(word, 0) + 1
    max_count = max(counts.values())
    most_common_words = sorted(word for word, count in counts.items() if count == max_count)
    return most_common_words, max_count
# +
# Don't delete this function
# Driver function for your functions
def get_report(path):
    """
    Creates a report of the file specified as argument.
    :param path: path to file from which the report should be created (string)
    :return: the report (string)
    """
    data = read_file(path)
    missing_count = data[0]
    numbers = data[1]
    words = data[2]
    report = make_report(missing_count, numbers, words)
    return report
# -
def check_project():
    """Build, print and return the report for the project input file."""
    path = 'project_1_input_file.txt'
    out=get_report(path)
    print(out)
    return out
grader_3(check_project())
| Homework/Homework 1/python-exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# language: python
# name: python38264bit9aed414d9b2c4aafacf2c3038ae0f19f
# ---
# +
# module 20
# this is going to be the last module of matplotlib series.
# plotting mathematical expession in matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# -
plt.plot([0,3,2,4,1], label="plain text")
plt.title("alpha > beta",size=20)  # plain string: rendered literally, no math
plt.legend()
plt.plot([0,3,2,4,1], label="math text")
plt.title(r"$\alpha > \beta$",size=20)
# here we need to use the r (raw string) option before the text
# and in the text we need to use the $ sign. it indicates where we are starting our math expression and where we are ending it.
# and we need to use a backslash before every math function name
plt.legend()
plt.title(r"$\alpha_i > \beta_i$",size=20)  # subscripts with _
plt.title(r"$\alpha^{ic} > \beta^{ic}$",size=20)  # superscripts with ^, braces for groups
plt.title(r"$\sum_{i=0}^\infty x_i$",size=20)  # big operators take limits via _ and ^
plt.title(r"$\frac{3}{4} \binom{3}{4} \genfrac{}{}{0}{}{3}{4}$",size=20)  # three fraction-like layouts
plt.title(r"$\frac{5 - \frac{1}{x}}{4}$",size=20)  # fractions nest
plt.title(r"$(\frac{5 - \frac{1}{x}}{4})$",size=20)
# if we want to bracket the whole expression.
plt.title(r"$\left(\frac{5 - \frac{1}{x}}{4}\right)$",size=20)
# note:
# here \left is situated outside of the bracket but \right is inside.
plt.title(r"$\sqrt{2}$",size=20)
plt.title(r"$\sqrt[5]{2}$",size=20)  # nth root via the optional [n] argument
# +
plt.title(r"$s(t) = \mathcal{A}\mathrm{sin}(2 \omega t)$",size=20)
# here mathcal means calligraphy font
# mathrm means roman font
# we also have-
# mathit -- italic
# mathtt -- typewriter
# mathbb -- blackboard
# mathfrak -- fraktur
# mathsf -- sansserif
# we can also combine multiple fonts
#\mathrm{\mathsf{"roman sansserif"}}
# +
# we have accent commands to add an accent above any symbol.
# Command
#\acute
#\bar
#\breve
#\ddot
#\dot
#\grave
#\hat
#\tilde
#\vec
#\overline{abc}
# In addition, there are two special accents that automatically adjust to the width of the symbols below:
#\widehat{xyz}
#\widetilde{xyz}
# Care should be taken when putting accents on lower-case i's and j's in linear algebra.
# Note that in the following \imath is used to avoid the extra dot over the i:
plt.title(r"$ \hat\imath, \hat\jmath, \hat k$",size=25)
# +
# we can use many symbols, like:
# upper case greek
# lower case greek
# Hebrew
# delimiters
# big symbols
# standard function names
# binary operations and relation operations
# arrow symbols
# miscellaneous symbols
# to learn more we can visit this link below:
"""https://matplotlib.org/tutorials/text/mathtext.html#sphx-glr-tutorials-text-mathtext-py"""
# -
| 20.Mathematical Expression.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- # Try Haskell!
--
-- [Haskell](https://www.haskell.org/) is a modern purely functional programming language, backed by a large open-source community, a large package database, suitable for tackling difficult problems on a commercial scale.
--
-- Welcome to IHaskell, Haskell running on a Jupyter temporary notebook! This notebook provides you a way to quickly experiment with Haskell and the Jupyter notebook interface. (This server will be deleted after ten minutes of inactivity.)
-- ## Run some Haskell code!
--
-- The Jupyter interface is divided into cells. Some cells, like this one, have markdown and other media content. Other cells, like the one below, contain *code*.
--
-- 1. Select the cell below this one by click in on it.
-- 2. Run the cell by pressing `SHIFT-ENTER` (or the play button in the toolbar at the top).
-- +
-- List of all natural numbers.
-- List of all natural numbers: an infinite, lazily evaluated list.
naturals :: [Integer]
naturals = [1..]
-- List of all even naturals; laziness makes filtering an infinite list fine
-- as long as consumers only demand a finite prefix.
evens :: [Integer]
evens = filter isEven naturals
  where isEven x = x `mod` 2 == 0
-- Print the first ten even integers.
-- If we wrote `print evens`, our code would run forever.
print (take 10 evens)
-- -
-- ## More Info
--
-- You can find more information on using the Jupyter notebook interface [here](http://ipython.org/ipython-doc/3/notebook/notebook.html#notebook-user-interface). If you'd like to install IHaskell locally, follow [these instructions](http://www.github.com/gibiansky/IHaskell), and [file issues](http://www.github.com/gibiansky/IHaskell/issues) if you run into trouble.
--
-- IHaskell can be customized for rich displays and interactive media for any library; some demos are available in the [full IHaskell demo notebook](http://nbviewer.ipython.org/github/gibiansky/IHaskell/blob/master/notebooks/IHaskell.ipynb).
-- ## Another Code Snippet
--
-- If you want a few more code snippets to play with, here's the code from [haskell.org](https://www.haskell.org/) for (very slowly) finding primes. (This isn't a prime seive, and is very slow, so don't use it anywhere important! It's suitable more or less only for getting a feel for Haskell syntax.)
-- +
-- Lazy trial-division "sieve": keep the head p, then recursively drop every
-- multiple of p from the tail.  Very slow -- for syntax demonstration only.
primes :: [Int]
primes = filterPrime [2..]
  where filterPrime (p:xs) =
          p : filterPrime [x | x <- xs, x `mod` p /= 0]
putStrLn $ concat ["The 100th prime is ", show (primes !! 99), "!"]
-- -
-- ## Parsing
--
-- Haskell's monadic parser combinator libraries make writing parsers incredibly easy, making prototyping custom textual formats and small parsers very quick. For example, where in another language you may turn to a regular expression as a quick-and-dirty solution, `parsec` makes writing parsers so easy that it is your go-to tool instead.
--
-- Below is a small code snippet for parsing US phone numbers.
-- +
import Text.Parsec
import Text.Parsec.String
-- Parse a single digit.
-- NOTE(review): this top-level `digit` would clash with Text.Parsec's own
-- `digit` in a compiled module (ambiguous occurrence); it works here because
-- GHCi/IHaskell lets later bindings shadow imports -- confirm if extracting.
digit :: Parser Char
digit = oneOf ['0'..'9']
-- Parse a multi-digit number.
number :: Parser Integer
number = do
    digits <- many1 digit -- At least one digit
    return (read digits) -- Convert [Char] to Integer
-- Parse a country code, starting with a +.
countryCode :: Parser Integer
countryCode = do
    char '+'
    number
-- Parse an area code, optionally with parentheses.
areaCode :: Parser Integer
areaCode = choice [withParens, withoutParens]
  where
    withParens = between (char '(') (char ')') withoutParens
    withoutParens = number
-- Simple data type representing a phone number.
-- Real phone numbers are much more complex!
data PhoneNumber = PhoneNumber {
  phoneCountryCode :: Maybe Integer,
  phoneNumbers :: [Integer]
  } deriving (Eq, Show)
-- Full parser: optional country code, area code, then two number groups,
-- with " " or "-" accepted as separators.
phoneNumber :: Parser PhoneNumber
phoneNumber = do
  -- Try to parse a country code. If it doesn't work, it's Nothing.
  c <- optionMaybe countryCode
  optional separator
  a1 <- areaCode
  separator -- Separator required after area code
  a2 <- number
  separator -- Separator required before last group of digits
  a3 <- number
  return (PhoneNumber c [a1, a2, a3])
  where
    separator :: Parser Char
    separator = oneOf " -"
-- -
-- We can use this to parse phone numbers from strings:
-- +
-- Wrap the parser: collapse parse errors to Nothing, successes to Just.
parsePhoneNumber :: String -> Maybe PhoneNumber
parsePhoneNumber str = case parse phoneNumber "<interactive>" str of
    Left _ -> Nothing
    Right x -> Just x
print (parsePhoneNumber "+1 (327)-525 3029")
-- -
-- This parser is of course much longer than an equivalent regular expression, but much easier to get sane error messages out of, and much more maintainable as a result:
-- The "x" separators are invalid, so this prints a descriptive parse error.
parse phoneNumber "example" "+1 (327)x525x3029"
| notebooks/Welcome to Haskell.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment2 Day8
# Create the file (capital F -- file names are case sensitive on Linux)
# and write a greeting into it.
file=open("File1.txt","w")
file.write("Hello guys how are you?")
file.close()
import io
try:
    # Must match the case of the file created above: the original opened
    # "file1.txt", which raises FileNotFoundError on case-sensitive
    # filesystems, so the message below was printed for the wrong reason.
    file=open("File1.txt","r")
    try:
        file.write("Hello...!")  # writing to a read-mode handle raises io.UnsupportedOperation
    finally:
        file.close()  # close the handle even when the write fails
except io.UnsupportedOperation:
    # Narrow except: a bare `except:` would also have hidden real errors.
    print("The file is open in read mode so can't perform write operation on this file")
| Assignment2D8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path, PurePath

# BUG FIX: "~" is not expanded automatically by pathlib, and the
# directory name was wrapped in literal quotes ("'more applications'"),
# so the glob below silently matched nothing.  Expand the user's home
# directory and use the plain folder name.
input_dir = PurePath(Path("~/Documents/more applications/ElementsofProgrammingInterviews/ML/Covid").expanduser())
list(Path(input_dir).glob('*'))
# -
import os

# BUG FIX: os.listdir() does not expand "~", and the path contained
# literal quotes around "more applications"; without expanduser() this
# raised FileNotFoundError.
biorxiv_dir = os.path.expanduser("~/Documents/more applications/ElementsofProgrammingInterviews/ML/Covid/biorxiv_medrxiv/biorxiv_medrxiv/")
filenames = os.listdir(biorxiv_dir)
print("Number of articles retrieved from biorxiv:", len(filenames))
# +
# BUG FIX: open() does not expand "~", and the path contained literal
# quotes around "more applications"; expand the home directory instead.
# The context manager also guarantees the handle is closed.
with open(os.path.expanduser("~/Documents/more applications/ElementsofProgrammingInterviews/ML/Covid/metadata.readme"), 'r') as f:
    data = f.read()
print(data)
# -
import os

# Absolute dataset root.  Note the trailing slash: os.path.dirname()
# then strips only the empty last component, so dirname(path) is the
# Covid directory itself.
path="/home/cristobal/Documents/more applications/ElementsofProgrammingInterviews/ML/Covid/"
# Read and show the dataset's metadata description file.
train_csv = os.path.join(os.path.dirname(path), "metadata.readme")
with open(train_csv) as f:
    data=f.read()
print(data)
# List the biorxiv/medrxiv article JSON files in the dataset.
biorxiv_dir =os.path.join(os.path.dirname(path), "biorxiv_medrxiv/biorxiv_medrxiv/")
filenames = os.listdir(biorxiv_dir)
print("Number of articles retrieved from biorxiv:", len(filenames))
# Peek at one arbitrary entry (the third filename).
f=filenames[2]
print(f)
# +
#import pandas as pd
#df = pd.read_json (r'Path where the JSON file is saved\File Name.json')
#df.to_csv (r'Path where the new CSV file will be stored\New File Name.csv', index = None)
# +
import json

# Parse every article JSON file listed in `filenames` (from the cell
# above) into Python dicts.
all_files = []

# BUG FIX: the directory string contained literal quotes around
# "more applications", which is not the real folder name.  The unused
# alternative spellings (biorxiv_dir3/4/5) were removed.
biorxiv_dir2 = "/home/cristobal/Documents/more applications/ElementsofProgrammingInterviews/ML/Covid/biorxiv_medrxiv/biorxiv_medrxiv/"

for filename in filenames:
    # Use a context manager so each handle is closed promptly instead of
    # being leaked (the original called open() without ever closing).
    with open(os.path.join(biorxiv_dir2, filename), 'rb') as fh:
        all_files.append(json.load(fh))
# +
import json
import csv

# Convert the employee records stored in data.json to a CSV file.

# Opening JSON file and loading the data into the variable data
with open('data.json') as json_file:
    data = json.load(json_file)
employee_data = data['emp_details']

# newline='' prevents blank lines between rows on Windows, and the
# context manager guarantees the file is closed even if a row fails
# (the original opened the file without ever using `with`).
with open('data_file.csv', 'w', newline='') as data_file:
    csv_writer = csv.writer(data_file)

    # Write the header once, taken from the first record's keys.
    header_written = False
    for emp in employee_data:
        if not header_written:
            csv_writer.writerow(emp.keys())
            header_written = True
        # Writing data of CSV file
        csv_writer.writerow(emp.values())
# +
import json
import glob

# Merge every JSON file in the current directory into one list.
result = []
for f in glob.glob("*.json"):
    with open(f, "rb") as infile:
        result.append(json.load(infile))

# BUG FIX: json.dump() writes `str`, so the output file must be opened
# in text mode ("w"); the original "wb" mode raised
# "TypeError: a bytes-like object is required".
with open("merged_file.json", "w") as outfile:
    json.dump(result, outfile)
| ChallengeV0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
import numpy as np
import numpy
import os
class TopRelated(object):
    """Find the tracks most similar to a given track.

    The factor matrix is L2-normalized once at construction so that a
    plain dot product between rows equals their cosine similarity.
    """

    def __init__(self, track_factors):
        # Normalize each row to unit length.  Uses the `np` alias
        # consistently (the original mixed `np` and `numpy`).
        norms = np.linalg.norm(track_factors, axis=-1)
        self.factors = track_factors / norms[:, np.newaxis]

    def get_related(self, matrix_index, N=10):
        """Return the N (index, score) pairs most similar to
        `matrix_index`, sorted by descending cosine similarity."""
        scores = self.factors.dot(self.factors[matrix_index])
        # argpartition selects the top-N indices in O(n) without a full sort.
        best = np.argpartition(scores, -N)[-N:]
        return sorted(zip(best, scores[best]), key=lambda x: -x[1])
# + deletable=true editable=true
msd_artists_tracks_fname = '../../matchings/msd_lfm-1b/matched_artists_tracks.txt'
msd_track_ids_fname = '../../matchings/msd_lfm-1b/artist_trackname_to_msd_track_ids.txt'

# Read the matched artist/track names and their MSD track-id lists,
# closing the file handles (the originals were leaked).
with open(msd_artists_tracks_fname) as fh:
    msd_artist_tracks = [line.strip() for line in fh]
with open(msd_track_ids_fname) as fh:
    msd_track_ids = [line.strip().split('\t') for line in fh]

# Map every MSD track id to its "artist<TAB>trackname" string.
msd_track_id_to_artists_trackname = {
    msd_track_id: artist_trackname
    for msd_track_ids_list, artist_trackname in zip(msd_track_ids, msd_artist_tracks)
    for msd_track_id in msd_track_ids_list
}

# BUG FIX: the original path began with '/../../', i.e. an absolute path
# from the filesystem root; it should be relative like the paths above.
matrix_artist_tracknames_fname = '../../matchings/both/matched_artists_tracks.txt'
with open(matrix_artist_tracknames_fname) as fh:
    matrix_artist_tracknames = [line.strip() for line in fh]

# Bidirectional mapping between track name and its row in the factor matrix.
artist_trackname_to_matrix_index = {
    artist_trackname: index
    for index, artist_trackname in enumerate(matrix_artist_tracknames)
}
matrix_index_to_artist_trackname = {
    index: artist_trackname
    for index, artist_trackname in enumerate(matrix_artist_tracknames)
}
# MSD track id -> row in the factor matrix, via the shared track name.
msd_track_id_to_matrix_index = {
    msd_track_id: artist_trackname_to_matrix_index[artist_trackname]
    for msd_track_ids_list, artist_trackname in zip(msd_track_ids, msd_artist_tracks)
    for msd_track_id in msd_track_ids_list
}

song_factors_fname = '/home/devin/git/ms-thesis/latent_factors/output/factors_merged_v.npy'
song_factors = np.load(song_factors_fname)
song_factors.shape
# + deletable=true editable=true
# Build the similarity index over the song factor matrix.
tr = TopRelated(song_factors)
# + deletable=true editable=true
# Look up an example MSD track and find its row in the factor matrix.
msd_track_id = 'TRCOOYB128E078ED95'
artist_trackname = msd_track_id_to_artists_trackname[msd_track_id]
print(artist_trackname)
matrix_index = msd_track_id_to_matrix_index[msd_track_id]
print(matrix_index)
# + deletable=true editable=true
# Show the 20 most similar tracks with their cosine-similarity scores.
rel = tr.get_related(matrix_index, N=20)
for r in rel:
    print('{0:.2f}\t'.format(r[1]) + matrix_index_to_artist_trackname[r[0]])
# + deletable=true editable=true
| latent_factors/eval/qualitative_evaluation_of_factors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment: Climate change in the zero-dimensional EBM
# ## Learning goals
#
# Students completing this assignment will gain the following skills and concepts:
#
# - Familiarity with the Jupyter notebook
# - Familiarity with the zero-dimensional Energy Balance Model
# - Understanding of the adjustment toward equilibrium temperature
# - Introduction to the concept of albedo feedback
# - Use of numerical timestepping to find the equilibrium temperature
# - Python programming skills: arrays, loops, and simple graphs
# ## Instructions
#
# - In a local copy of this notebook (on the JupyterHub or your own device) **add your answers in additional cells**.
# - **Complete the required problems** below.
# - Some assignments have **optional bonus problems**. These are meant to be interesting and thought-provoking, but are not required. Extra credit will be given for interesting answers to the bonus problems.
# - Remember to set your cell types to `Markdown` for text, and `Code` for Python code!
# - **Include comments** in your code to explain your method as necessary.
# - Remember to actually answer the questions. **Written answers are required** (not just code and figures!)
# - Submit your solutions in **a single Jupyter notebook** that contains your text, your code, and your figures.
# - *Make sure that your notebook* ***runs cleanly without errors:***
# - Save your notebook
# - From the `Kernel` menu, select `Restart & Run All`
# - Did the notebook run from start to finish without error and produce the expected output?
# - If yes, save again and submit your notebook file
# - If no, fix the errors and try again.
# ## Problem 1: Time-dependent warming in the zero-dimensional Energy Balance Model
# In lecture we defined a zero-dimensional energy balance model for the global mean surface temperature $T_s$ as follows
#
# $$ C \frac{dT_s}{dt} = \text{ASR} - \text{OLR}$$
#
# $$ \text{ASR} = (1-\alpha) Q $$
#
# $$ \text{OLR} = \tau \sigma T_s^4$$
#
# where we defined these terms:
#
# - $C$ is a heat capacity for the atmosphere-ocean column
# - $\alpha$ is the global mean planetary albedo
# - $\sigma = 5.67 \times 10^{-8}$ W m$^{-2}$ K$^{-4}$ is the Stefan-Boltzmann constant
# - $\tau$ is our transmissivity parameter for the atmosphere.
# - $Q$ is the global-mean incoming solar radiation, or *insolation*.
# Refer back to our class notes for parameter values.
#
# 1. If the heat penetrated to twice as deep into the ocean, the value of $C$ would be twice as large. Would this affect the **equilibrium temperature**? Why or why not?
#
# 2. In class we used numerical timestepping to investigate a *hypothetical climate change scenario* in which $\tau$ decreases to 0.57 and $\alpha$ increases to 0.32. We produced a graph of $T_s(t)$ over a twenty year period, starting from an initial temperature of 288 K. Here you will repeat this calculation with a larger value of $C$ and compare the warming rates. Specifically:
# - Repeat our in-class time-stepping calculation with the same parameters we used before (including a heat capacity of $C = 4\times10^8$ J m$^{-2}$ K$^{-1}$), but extend it to 50 years. **You should create an array of temperatures with 51 elements, beginning from 288 K**.
# - Now do it again, but use $C = 8\times10^8$ J m$^{-2}$ K$^{-1}$ (representing 200 meters of water). You should **create another 51-element array** of temperatures also beginning from 288 K.
# - **Make a well-labeled graph** that compares the two temperatures over the 50-year period.
# 3. What do your results show about the role of heat capacity on climate change? **Give a short written answer.**
#
# ## Problem 2: Albedo feedback in the Energy Balance Model
# For this exercise, we will introduce a new physical process into our model by **letting the planetary albedo depend on temperature**. The idea is that a warmer planet has less ice and snow at the surface, and thus a lower planetary albedo.
#
# Represent the ice-albedo feedback through the following formula:
#
# $$ \alpha(T) = \left\{ \begin{array}{ccc}
# \alpha_i & & T \le T_i \\
# \alpha_o + (\alpha_i-\alpha_o) \frac{(T-T_o)^2}{(T_i-T_o)^2} & & T_i < T < T_o \\
# \alpha_o & & T \ge T_o \end{array} \right\}$$
#
# with the following parameter values:
#
# - $\alpha_o = 0.289$ is the albedo of a warm, ice-free planet
# - $\alpha_i = 0.7$ is the albedo of a very cold, completely ice-covered planet
# - $T_o = 293$ K is the threshold temperature above which our model assumes the planet is ice-free
# - $T_i = 260$ K is the threshold temperature below which our model assumes the planet is completely ice covered.
#
# For intermediate temperature, this formula gives a smooth variation in albedo with global mean temperature. It is tuned to reproduce the observed albedo $\alpha = 0.299$ for $T = 288$ K.
# 1.
# - Define a Python function that implements the above albedo formula. *There is definitely more than one way to do it. It doesn't matter how you do it as long as it works!*
# - Use your function to calculate albedos for a wide range on planetary temperature (e.g. from $T=250$ K to $T=300$ K.)
# - Present your results (albedo as a function of global mean temperature, or $\alpha(T)$) in a nicely labeled graph.
# 2. Now investigate a climate change scenario with this new model:
# - Suppose that the transmissivity decreases from 0.611 to 0.57 (same as before)
# - Your task is to **calculate the new equilibrium temperature**. First, explain very briefly why you can't just solve for it analytically as we did when albedo was a fixed number.
# - Instead, you will use numerical time-stepping to find the equilibrium temperature
# - Repeat the procedure from Question 3 *(time-step forward for 50 years from an initial temperature of 288 K and make a graph of the results)*, but this time **use the function you defined above to compute the albedo for the current temperature**.
# - Is the **new equilibrium temperature larger or smaller** than it was in the model with fixed albedo? **Explain why in your own words.**
# ## Bonus problem
#
# *Open-ended investigation for extra credit, not required*
#
# Something very different occurs in this model if you introduce a strong negative radiative forcing, either by substantially reducing greenhouse gases (which we would represent as an increase in the transmissivity $\tau$), or by decreasing the incoming solar radiation $Q$.
#
# Investigate, using your numerical model code, and report your results along with your thoughts.
| content/courseware/assignment-zero-dim-ebm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import os
# %pylab
# Locate the trained GAIL discriminator checkpoint for this
# environment / discriminator type / seed combination.
env = 'MiniGrid-DoorKey-6x6-v0'
disctype = 'DoorKeyGAILpositive'
seed=1
disc_dir = '/serverdata/rohit/reward_bias/imitation/output/train_adversarial/{}/{}/{}/checkpoints/final/discrim'.format(env, disctype, seed)
list(os.walk(disc_dir))
from imitation.rewards import discrim_net
# Load the discriminator network inside its own TF1 variable scope.
with tf.variable_scope('test'):
    model = discrim_net.DiscrimNetGAIL.load(disc_dir)
# +
# model.reward_test?
# -
import gym
import gym_minigrid
from gym_minigrid import wrappers as mgwr
# Build a fully-observable, one-hot-encoded MiniGrid environment.
# NOTE(review): `env` (previously the environment *name* string used to
# build disc_dir) is reassigned here to the environment object itself.
env = gym.make('MiniGrid-Empty-Random-6x6-v0')
env = mgwr.FullyObsWrapper(env)
env = mgwr.ImgObsWrapper(env)
env = mgwr.FullyObsOneHotWrapper(env, drop_color=1, keep_classes=['goal', 'agent', 'wall', 'empty', 'door', 'key'], flatten=False)
import pickle as pkl
# Load expert rollouts and split them into (obs, act, next_obs) triples.
with open('/serverdata/rohit/reward_bias/imitation/output/expert_demos/MiniGrid-DoorKey-6x6-v0/DoorKeyPPO/rollouts/final.pkl', 'rb') as fi:
    rollouts = pkl.load(fi)
obs = rollouts[0].obs[:-1]
next_obs = rollouts[0].obs[1:]
act = rollouts[0].acts
# Evaluate the discriminator's test-time reward on the expert transitions.
# NOTE(review): `_sess` is a private attribute, and running
# global_variables_initializer() after load() may re-initialise the
# checkpoint weights that were just loaded — confirm this is intended.
with tf.variable_scope('test'):
    model._sess = tf.Session()
    model._sess.run(tf.global_variables_initializer())
print(model.reward_test(obs, act, next_obs, None))
| Discriminator rewards to expert .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from IPython.display import YouTubeVideo, Image
# -
# # Decision surface
# A popular diagnostic for understanding the decisions made by a classification algorithm is the **decision surface**. This is a plot that shows how a **fit** machine learning algorithm predicts a coarse grid across the input feature space.
#
# A decision surface plot is a powerful tool for understanding how a given model “sees” the prediction task and how it has decided to divide the input feature space by class label.
YouTubeVideo(id="UiB-wSvV-So", width=900, height=450)
# # Overfitting and Underfitting
#
# - Overfitting: Good performance on the training data, poor **generalization** to other data.
# - Underfitting: Poor performance on the training data and poor **generalization** to other data
YouTubeVideo(id="awSLYt1Jso8", width=900, height=450)
# # Need for Cross validation
#
# There is always a need to validate the stability of your machine learning model. You just can't fit the model to your training data and hope it will work accurately on real data it has never seen before. You need some assurance that your model has captured most of the patterns in the data correctly and is not picking up too much of the noise — in other words, that it is low on bias and variance.
YouTubeVideo(id="gkEtTTnKvj8", width=900, height=450)
# # K-fold Cross Validation
#
# One of the most common technique for model evaluation and model selection in machine learning practice is K-fold cross validation. The main idea behind cross-validation is that each observation in our dataset has the opportunity of being tested. K-fold cross-validation is a special case of cross-validation where we iterate over a dataset set k times. In each round, we split the dataset into k parts: one part is used for validation, and the remaining k−1 parts are merged into a training subset for model evaluation.
# #### The figure below illustrates the process of 5-fold cross-validation:
Image("kfolds.png")
YouTubeVideo(id="SNZAAzTMKvc", width=900, height=450)
# # Visualizing Train, Validation and Test datasets
#
# Data is usually split into 3 sets after preprocessing: the test set, validation set and training set.
Image("train-validate-test.png")
YouTubeVideo(id="GxN2qzAuT1o", width=900, height=450)
# # How to determine Overfitting and Underfitting?
#
# We can determine the difference between an underfitting and overfitting experimentally by comparing fitted models to training-data and test-data.
Image("under.png")
Image("train.png")
YouTubeVideo(id="8k8qlIENMiQ", width=900, height=450)
# + language="html"
# <script src="https://cdn.rawgit.com/parente/4c3e6936d0d7a46fd071/raw/65b816fb9bdd3c28b4ddf3af602bfd6015486383/code_toggle.js"></script>
#
| Lecture 52 Decision surface overfitting cross validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pulp import *
# Staff-scheduling linear program (PuLP): choose how many hours each
# employee works on each weekday so that every day is covered, while
# minimising the total wage cost.
prob = LpProblem("The Problem",LpMinimize)

# NOTE(review): `Employees` and `monday` are defined but never referenced
# below — availability appears to be encoded in the variable bounds
# instead.  Confirm whether they can be removed.
Employees = ['KC', 'DH', 'HB', 'SC', 'KS', 'NK']
monday = {'KC' : 6,
          'DH' : 0,
          'HB' : 4,
          'SC' : 5,
          'KS' : 4,
          'NK' : 6,
          }

# Decision variables: integer hours worked per employee per day, with the
# upper bound acting as that employee's availability.  The suffixes are
# presumably Polish weekday abbreviations (pon=Mon, wt=Tue, sr=Wed,
# czw=Thu, pt=Fri) — confirm.
KCpon = LpVariable("KC pn", 0, 6, cat = 'Integer')
KCsr = LpVariable("KC sr", 0, 6, cat = 'Integer')
KCpt = LpVariable("KC pt", 0, 6, cat = 'Integer')
DHwt = LpVariable("DH wt", 0, 6, cat = 'Integer')
DHczw = LpVariable("DH czw", 0, 6, cat = 'Integer')
HBpon = LpVariable("HB pn", 0, 4, cat = 'Integer')
HBwt = LpVariable("HB wt", 0, 8, cat = 'Integer')
HBsr = LpVariable("HB sr", 0, 4, cat = 'Integer')
HBpt = LpVariable("HB pt", 0, 4, cat = 'Integer')
SCpon = LpVariable("SC pon", 0, 5, cat = 'Integer')
SCwt = LpVariable("SC wt", 0, 5, cat = 'Integer')
SCpt = LpVariable("SC pt", 0, 5, cat = 'Integer')
KSpon = LpVariable("KS pon", 0, 3, cat = 'Integer')
KSczw = LpVariable("KS czw", 0, 8, cat = 'Integer')
NKczw = LpVariable("NK czw", 0, 6, cat = 'Integer')
NKpt = LpVariable("NK pt", 0, 2, cat = 'Integer')

# Objective: total wage cost — each employee's hourly rate times the sum
# of their hours (added first, so PuLP treats it as the objective).
prob += 25*(KCpon+KCsr+KCpt) + 26*(DHwt+DHczw) + 24*(HBpon + HBwt + HBsr + HBpt) + 23*(SCpon + SCwt + SCpt) +28*(KSpon+KSczw) +20*(NKczw + NKpt)

# Constraints: every weekday needs at least 10 staffed hours in total,
# and KS must be scheduled for at least 2 hours over the week.
prob += KCpon + HBpon + SCpon + KSpon >= 10
prob += DHwt + HBwt + SCwt >= 10
prob += KCsr + HBsr >= 10
prob += DHczw + KSczw + NKczw >= 10
prob += KCpt + HBpt + SCpt + NKpt >= 10
prob += KSczw + KSpon >= 2

# The problem data is written to an .lp file
prob.writeLP("TheProblem.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
    print(v.name, "=", v.varValue)
# The optimised objective function value is printed to the screen
print("Calkowity koszt pracownikow = ", value(prob.objective))
# Per-employee totals of assigned hours.
print("Godziny pracy NK =", value(NKpt+NKczw))
print("Godziny pracy DH =", value(DHwt+DHczw))
print("Godziny pracy HB =", value(HBpt+HBsr+HBwt+HBpon))
print("Godziny pracy KC =", value(KCpon+KCsr+KCpt))
print("Godziny pracy KS =", value(KSpon+KSczw))
print("Godziny pracy SC =", value(SCpon+SCwt+SCpt))
# -
| Firma.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1. Extracting 50 frames from Pain vid 1-2 each in RGB form
# +
# Extract every 12th frame from each .mp4 video and save it as a
# sequentially numbered .jpg.
# Importing necessary packages
import cv2 # OpenCV for image and video processing
import glob # glob to retrieve files/pathnames matching a specified pattern
import os # provides functions to create/remove a directory/folder, fetch contents etc

pain=glob.glob(r'C:\Users\Harshita\Desktop\aa') # path where the videos are
p_video_list=[] # empty list to store the videos

# iterating over each video directory and collecting all .mp4 files
for video in pain:
    for v in glob.glob(video+'/*.mp4'): # matching file patterns with .mp4 extension for extraction of frames
        p_video_list.append(v) # adding all mp4 files to p_video_list list

i=1 # global frame counter across ALL videos (drives the every-12th sampling)
n=1 # running number used to name the saved images
# NOTE(review): because `i` is never reset between videos, the sampling
# phase carries over from one video to the next — confirm this is intended.
# iterating over each video
for j in range(0,len(p_video_list)):
    video_data=p_video_list[j]
    cap=cv2.VideoCapture(video_data) # opening the video file
    while cap.isOpened():
        # ret: boolean for whether a frame is returned or not
        # frame: next frame in the video (here- cap)
        ret,frame=cap.read() # reading frames from the video
        if ret: # if ret==True
            i+=1
            if i%12==0: # since a video gives 600 frames and we need just 50
                name =str(n) + '.jpg' # naming the extracted frame (in sequence like 1.jpg, 2.jpg and so on..)
                path = r'C:\Users\Harshita\Desktop\aa' # path to save the extracted frames
                cv2.imwrite(os.path.join(path ,name), frame) # writing/saving the image frames
                n+=1
                print('Creating ' + name)
        else:
            break
    cap.release() # Release all space and windows once done
# -
# 2. After extracting the frames, sort them into 3 categories: No-Mild Pain, Moderate Pain and Severe Pain
# 3. Convert the RGB images into Grayscale for each folder to avoid sorting the gray images separately
#
# Here, we take one folder at a time (i.e., No-Mild Pain, Moderate Pain and Severe Pain) and then convert those RGB images into Grayscale images. For this, we manually change the folder name in the img_path variable
# +
# Convert every RGB image in the chosen pain-category folder to grayscale,
# saving the results alongside the originals with a "gray" prefix.
img_path = glob.glob(r"C:\Users\Harshita\Desktop\aa\Image Dataset\Severe Pain/*.jpg") # path for the images matching the given pattern i.e., ending with .jpg extension

i = 0 # counter for number of images converted
for image in img_path: # iterating through each image in the folder
    # BUG FIX: the grayscale copies are written into the same folder and
    # also match "*.jpg", so without this guard re-running the cell would
    # re-convert its own output.
    if os.path.basename(image).startswith('gray'):
        continue
    img = cv2.imread(image) # read the image from the above path
    gray_images = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert the RGB image into grayscale
    # write/save the converted image
    name = 'gray' + str(i) + '.jpg'
    path = r'C:\Users\Harshita\Desktop\aa\Image Dataset\Severe Pain' # path to save the images
    cv2.imwrite(os.path.join(path ,name), gray_images) # saving them in their respective folders
    i += 1
cv2.destroyAllWindows()
| Harshita Das Pain[vid 3-4]/Model/frame_extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:udacity]
# language: python
# name: conda-env-udacity-py
# ---
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
# +
# Training data: 5 observations of 3 features each, with a single target
# value per observation.
X_train = np.array([[1,5,23],[2,53,21],[3,89,13], [4,21,10],[6,53,10]])
y_train = np.array([100,200,300,400, 600]).reshape(-1,1)

# BUG FIX: the original hard-coded m = 4, but X_train has 5 rows;
# derive both counts from the data so they cannot drift out of sync.
m = X_train.shape[0] # observations
n = X_train.shape[1] # variables (features)

#scale
# scaler = StandardScaler()
# X_train_norm = scaler.fit_transform(X_train)

X_train, y_train
# +
# TF1 graph definition for a linear model y = X @ W (note: no bias term,
# see below), trained with plain gradient descent on the sum of squares.
with tf.name_scope("input"):
    # define variables: placeholders fed with the training data each step
    X = tf.placeholder(tf.float32, shape=[None, n], name='x')
    y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
with tf.name_scope("regression"):
    # define variables: trainable weights and biases, initialised to zero
    W = tf.Variable(tf.zeros([n,1], dtype=tf.float32), name='weights')
    b = tf.Variable(tf.zeros([1], dtype=tf.float32), name='biases')
with tf.name_scope("operations"):
    # define LR expressions
    #linear_model = tf.add(tf.matmul(X ,W), b)
    # NOTE: the bias-including model above is commented out, so `b` does
    # not appear in the loss and stays at its zero initialisation.
    linear_model = tf.matmul(X,W)
    squared_error = tf.square(tf.subtract(linear_model, y))
    loss = tf.reduce_sum(squared_error)
    # define optimization: learning rate is fed at run time
    learning_rate = tf.placeholder(tf.float32)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss)
# +
# tensorboard
# creat a summary for the loss so training can be inspected in TensorBoard
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()
# no need to specify graph
writer = tf.summary.FileWriter('./example', graph=tf.get_default_graph())
# +
# run it: repeatedly feed the full training set and take one GD step
epochs=20000
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(1, epochs):
    # loss, summary = sess.run([train_op, summary_op], feed_dict)
    feed_dict = {
        X: X_train,
        y: y_train,
        learning_rate: 1e-5
    }
    # run one optimisation step and fetch the current loss/parameters
    curr_loss, curr_W, curr_b, _, summary = sess.run([loss, W, b, train_op, summary_op], feed_dict)
    # log results
    writer.add_summary(summary)
    # log to console every 10% of the run
    # NOTE(review): epochs/10 is a float in Python 3, so this is a float
    # modulo comparison — it works here, but epochs//10 would be cleaner.
    if epoch%(epochs/10) == 0:
        print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
# -
# Predict a single query point with the trained weights.
feed_dict = {
    X: np.array([5,23,56]).reshape(1,-1)
}
print(sess.run(linear_model, feed_dict))
# # In scikit-learn
# +
from sklearn.linear_model import LinearRegression

# Fit the same training data with scikit-learn's least-squares solver,
# for comparison with the TensorFlow gradient-descent result above.
reg = LinearRegression()
reg.fit(X_train, y_train)
# -
# NOTE(review): this query point [5, 23, 23] differs from the one fed to
# the TF model above ([5, 23, 56]) — confirm which was intended.
reg.predict(np.array([5, 23,23]).reshape(1,-1))
# Learned coefficients and intercept (sklearn fits an intercept, unlike
# the TF graph above, which omits the bias term).
reg.coef_, reg.intercept_
| ENIAC-experiments/.ipynb_checkpoints/TFtutorial3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimization Methods
#
# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you'll gain skills with some more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
#
# By the end of this notebook, you'll be able to:
#
# * Apply optimization methods such as (Stochastic) Gradient Descent, Momentum, RMSProp and Adam
# * Use random minibatches to accelerate convergence and improve optimization
#
# Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
# <img src="images/cost.jpg" style="width:650px;height:300px;">
# <caption><center> <u> <b>Figure 1</b> </u>: <b>Minimizing the cost is like finding the lowest point in a hilly landscape</b><br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
#
# **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
#
# Let's get started!
# ## Table of Contents
# - [1- Packages](#1)
# - [2 - Gradient Descent](#2)
# - [Exercise 1 - update_parameters_with_gd](#ex-1)
# - [3 - Mini-Batch Gradient Descent](#3)
# - [Exercise 2 - random_mini_batches](#ex-2)
# - [4 - Momentum](#4)
# - [Exercise 3 - initialize_velocity](#ex-3)
# - [Exercise 4 - update_parameters_with_momentum](#ex-4)
# - [5 - Adam](#5)
# - [Exercise 5 - initialize_adam](#ex-5)
# - [Exercise 6 - update_parameters_with_adam](#ex-6)
# - [6 - Model with different Optimization algorithms](#6)
# - [6.1 - Mini-Batch Gradient Descent](#6-1)
# - [6.2 - Mini-Batch Gradient Descent with Momentum](#6-2)
# - [6.3 - Mini-Batch with Adam](#6-3)
# - [6.4 - Summary](#6-4)
# - [7 - Learning Rate Decay and Scheduling](#7)
# - [7.1 - Decay on every iteration](#7-1)
# - [Exercise 7 - update_lr](#ex-7)
# - [7.2 - Fixed Interval Scheduling](#7-2)
# - [Exercise 8 - schedule_lr_decay](#ex-8)
# - [7.3 - Using Learning Rate Decay for each Optimization Method](#7-3)
# - [7.3.1 - Gradient Descent with Learning Rate Decay](#7-3-1)
# - [7.3.2 - Gradient Descent with Momentum and Learning Rate Decay](#7-3-2)
# - [7.3.3 - Adam with Learning Rate Decay](#7-3-3)
# - [7.4 - Achieving similar performance with different methods](#7-4)
# <a name='1'></a>
# ## 1- Packages
# +
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from copy import deepcopy
from testCases import *
from public_tests import *
# %matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
# -
# <a name='2'></a>
# ## 2 - Gradient Descent
#
# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
#
# <a name='ex-1'></a>
# ### Exercise 1 - update_parameters_with_gd
#
# Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "3e464eca4306181b7b2d7908c2543cb4", "grade": false, "grade_id": "cell-7ed1efcf9ec96292", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: update_parameters_with_gd

def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    # Each layer contributes one weight matrix and one bias vector.
    num_layers = len(parameters) // 2

    # Vanilla gradient-descent step, theta <- theta - alpha * d(theta),
    # applied to W[l] and b[l] for every layer l = 1..L.
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]

    return parameters
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "98daeabded9dffaa6da2915192be14c3", "grade": true, "grade_id": "cell-01dafb2c412914df", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell: run one gradient-descent step on the provided test case
# and print the updated parameters, then run the official check.
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
learning_rate = 0.01
parameters = update_parameters_with_gd(parameters, grads, learning_rate)

print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))

update_parameters_with_gd_test(update_parameters_with_gd)
# -
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent, where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost += compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost += compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here's what that looks like:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> <b>Figure 1</b> </u><font color='purple'> : <b>SGD vs GD</b><br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence, but each step is a lot faster to compute for SGD than it is for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you don't use the entire training set, or just one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> <b>Figure 2</b> </u>: <font color='purple'> <b>SGD vs Mini-Batch GD</b><br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
# <a name='3'></a>
# ## 3 - Mini-Batch Gradient Descent
#
# Now you'll build some mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# <a name='ex-2'></a>
# ### Exercise 2 - random_mini_batches
#
# Implement `random_mini_batches`. The shuffling part has already been coded for you! To help with the partitioning step, you've been provided the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\left\lfloor \frac{m}{mini\_batch\_size}\right\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $\left(m - mini\_batch\_size \times \left\lfloor \frac{m}{mini\_batch\_size}\right\rfloor\right)$.
#
# **Hint:**
#
# $$mini\_batch\_X = shuffled\_X[:, i : j]$$
#
# Think of a way in which you can use the for loop variable `k` to help you increment `i` and `j` in multiples of mini_batch_size.
#
# As an example, if you want to increment in multiples of 3, you could do the following:
#
# ```python
# n = 3
# for k in range(0, 5):
#     print(k * n)
# ```
# + deletable=false nbgrader={"cell_type": "code", "checksum": "e276742d3477f18007b3d340b0039271", "grade": false, "grade_id": "cell-a693afffedab4203", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Build a list of random mini-batches from (X, Y).

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)  # fixed seed so the "random" batches match the grader's
    m = X.shape[1]        # number of training examples

    # Step 1: shuffle the columns of X and Y with ONE shared permutation so
    # that example i stays paired with label i after the shuffle.
    order = list(np.random.permutation(m))
    shuffled_X = X[:, order]
    shuffled_Y = Y[:, order].reshape((1, m))

    # Step 2: carve the shuffled set into consecutive column slices.
    n_full = m // mini_batch_size  # number of complete batches
    batches = [
        (shuffled_X[:, k * mini_batch_size:(k + 1) * mini_batch_size],
         shuffled_Y[:, k * mini_batch_size:(k + 1) * mini_batch_size])
        for k in range(n_full)
    ]

    # Trailing partial batch when m is not a multiple of mini_batch_size.
    if m % mini_batch_size != 0:
        start = n_full * mini_batch_size
        batches.append((shuffled_X[:, start:], shuffled_Y[:, start:]))

    return batches
# +
# Unit tests for random_mini_batches: build a synthetic (nx, m) dataset whose
# values encode their own index, so shuffling mistakes are detectable.
np.random.seed(1)
mini_batch_size = 64
nx = 12288
m = 148
X = np.array([x for x in range(nx * m)]).reshape((m, nx)).T
Y = np.random.randn(1, m) < 0.5
mini_batches = random_mini_batches(X, Y, mini_batch_size)
n_batches = len(mini_batches)
# 148 examples / 64 per batch -> 2 full batches + 1 partial = ceil(148/64) = 3.
assert n_batches == math.ceil(m / mini_batch_size), f"Wrong number of mini batches. {n_batches} != {math.ceil(m / mini_batch_size)}"
for k in range(n_batches - 1):
    assert mini_batches[k][0].shape == (nx, mini_batch_size), f"Wrong shape in {k} mini batch for X"
    assert mini_batches[k][1].shape == (1, mini_batch_size), f"Wrong shape in {k} mini batch for Y"
    # Column-wise differences are invariant under column permutation, so this
    # catches any reordering of the rows (features) of X.
    assert np.sum(np.sum(mini_batches[k][0] - mini_batches[k][0][0], axis=0)) == ((nx * (nx - 1) / 2 ) * mini_batch_size), "Wrong values. It happens if the order of X rows(features) changes"
if ( m % mini_batch_size > 0):
    # The last batch holds the 148 % 64 = 20 leftover examples.
    assert mini_batches[n_batches - 1][0].shape == (nx, m % mini_batch_size), f"Wrong shape in the last minibatch. {mini_batches[n_batches - 1][0].shape} != {(nx, m % mini_batch_size)}"
# Spot-check exact values produced by the seeded permutation.
assert np.allclose(mini_batches[0][0][0][0:3], [294912, 86016, 454656]), "Wrong values. Check the indexes used to form the mini batches"
assert np.allclose(mini_batches[-1][0][-1][0:3], [1425407, 1769471, 897023]), "Wrong values. Check the indexes used to form the mini batches"
print("\033[92mAll test passed!")
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "c9a50964c5ab5622435c64a4f7d9e44a", "grade": true, "grade_id": "cell-9bd796497095573b", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell (nbgrader, locked): shape/sanity checks on random_mini_batches
# using the course-provided test case.
t_X, t_Y, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(t_X, t_Y, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
random_mini_batches_test(random_mini_batches)
# -
# <font color='blue'>
#
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
#
# <a name='4'></a>
# ## 4 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. The 'direction' of the previous gradients is stored in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'><b>Figure 3</b> </u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, the gradient is allowed to influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
# <a name='ex-3'></a>
# ### Exercise 3 - initialize_velocity
# Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
# v["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
# ```
# **Note** that the iterator l starts at 1 in the for loop as the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "73f98ff4232b1eb6ddd045f4a052d495", "grade": false, "grade_id": "cell-667cf6695880506a", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Create the momentum "velocity" dictionary, zero-filled.

    Keys mirror the grads dictionary ("dW1", "db1", ..., "dWL", "dbL"); each
    value is a zero array of the same shape as the matching parameter.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl

    Returns:
    v -- python dictionary containing the current velocity.
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """
    # parameters holds a (W, b) pair per layer, hence the halving.
    num_layers = len(parameters) // 2
    v = {}
    for layer in range(1, num_layers + 1):
        v["dW" + str(layer)] = np.zeros(parameters["W" + str(layer)].shape)
        v["db" + str(layer)] = np.zeros(parameters["b" + str(layer)].shape)
    return v
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "09a811f004e96833a7e6cc47a55de653", "grade": true, "grade_id": "cell-c129a0130218c80f", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell (nbgrader, locked): print the zero-initialized velocities and
# run the course check on initialize_velocity.
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
initialize_velocity_test(initialize_velocity)
# -
# <a name='ex-4'></a>
# ### Exercise 4 - update_parameters_with_momentum
#
# Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "09ff6600367fba5cb96155b80a2b3688", "grade": false, "grade_id": "cell-a5f80aecc1d4e020", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Take one gradient-descent-with-momentum step on every layer.

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer
    for layer in range(1, num_layers + 1):
        dW_key, db_key = "dW" + str(layer), "db" + str(layer)
        W_key, b_key = "W" + str(layer), "b" + str(layer)
        # Velocity: exponentially weighted average of past gradients.
        v[dW_key] = beta * v[dW_key] + (1 - beta) * grads[dW_key]
        v[db_key] = beta * v[db_key] + (1 - beta) * grads[db_key]
        # Step in the direction of the velocity rather than the raw gradient.
        parameters[W_key] = parameters[W_key] - learning_rate * v[dW_key]
        parameters[b_key] = parameters[b_key] - learning_rate * v[db_key]
    return parameters, v
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "01b9bf272f5f4d7ed4e26ca3fb956b9b", "grade": true, "grade_id": "cell-4c7cb001c56beb5d", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell (nbgrader, locked): one momentum step on fixture data.
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
# NOTE(review): the stray "v" in the label below is a typo, but this cell is
# grader-locked, so the string is left untouched.
print("v[\"db2\"] = v" + str(v["db2"]))
update_parameters_with_momentum_test(update_parameters_with_momentum)
# -
# **Note that**:
# - The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
# - If $\beta = 0$, then this just becomes standard gradient descent without momentum.
#
# **How do you choose $\beta$?**
#
# - The larger the momentum $\beta$ is, the smoother the update, because it takes the past gradients into account more. But if $\beta$ is too big, it could also smooth out the updates too much.
# - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
# - Tuning the optimal $\beta$ for your model might require trying several values to see what works best in terms of reducing the value of the cost function $J$.
# <font color='blue'>
#
# **What you should remember**:
# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
# - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
# <a name='5'></a>
# ## 5 - Adam
#
# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
#
# **How does Adam work?**
# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
# 3. It updates parameters in a direction based on combining information from "1" and "2".
#
# The update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
# where:
# - t counts the number of steps taken of Adam
# - L is the number of layers
# - $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
# - $\alpha$ is the learning rate
# - $\varepsilon$ is a very small number to avoid dividing by zero
#
# As usual, all parameters are stored in the `parameters` dictionary
# <a name='ex-5'></a>
# ### Exercise 5 - initialize_adam
#
# Initialize the Adam variables $v, s$ which keep track of the past information.
#
# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
# for $l = 1, ..., L$:
# ```python
# v["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
# v["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
# s["dW" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l)])
# s["db" + str(l)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l)])
#
# ```
# + deletable=false nbgrader={"cell_type": "code", "checksum": "8eb19ce4b30a9c2af428853c24d8b80a", "grade": false, "grade_id": "cell-f985b4ecf2e3b4b1", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters):
    """
    Build the two zero-filled Adam accumulator dictionaries.

    Keys mirror the grads dictionary ("dW1", "db1", ..., "dWL", "dbL"); each
    value is a zero array shaped like the matching parameter.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl

    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient. Initialized with zeros.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient. Initialized with zeros.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer
    v, s = {}, {}
    for layer in range(1, num_layers + 1):
        w_shape = parameters["W" + str(layer)].shape
        b_shape = parameters["b" + str(layer)].shape
        v["dW" + str(layer)] = np.zeros(w_shape)
        v["db" + str(layer)] = np.zeros(b_shape)
        s["dW" + str(layer)] = np.zeros(w_shape)
        s["db" + str(layer)] = np.zeros(b_shape)
    return v, s
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "63c23c13e1cfb6e1c04b62541ea07cae", "grade": true, "grade_id": "cell-66f5f68aa23508d7", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell (nbgrader, locked): print the zero-initialized Adam moments and
# run the course check on initialize_adam.
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
initialize_adam_test(initialize_adam)
# -
# <a name='ex-6'></a>
# ### Exercise 6 - update_parameters_with_adam
#
# Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
#
#
# **Note** that the iterator `l` starts at 1 in the `for` loop as the first parameters are $W^{[1]}$ and $b^{[1]}$.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "db9c45f6bfd6b1395c2662f6f127d12b", "grade": false, "grade_id": "cell-d72b0d5fd3ac5c42", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Take one Adam step on every layer.

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam variable, counts the number of taken steps
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer
    v_corrected = {}  # bias-corrected first moment estimate
    s_corrected = {}  # bias-corrected second moment estimate
    # Bias-correction denominators are the same for every layer at step t.
    bias1 = 1 - beta1 ** t
    bias2 = 1 - beta2 ** t

    for layer in range(1, num_layers + 1):
        for key in ("dW" + str(layer), "db" + str(layer)):
            # First moment: moving average of the gradients, then de-bias.
            v[key] = beta1 * v[key] + (1 - beta1) * grads[key]
            v_corrected[key] = v[key] / bias1
            # Second moment: moving average of squared gradients, then de-bias.
            s[key] = beta2 * s[key] + (1 - beta2) * np.square(grads[key])
            s_corrected[key] = s[key] / bias2
        # Parameter step: first moment scaled by the RMS of the second moment.
        dW_key, db_key = "dW" + str(layer), "db" + str(layer)
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * v_corrected[dW_key] / (np.sqrt(s_corrected[dW_key]) + epsilon)
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * v_corrected[db_key] / (np.sqrt(s_corrected[db_key]) + epsilon)

    return parameters, v, s, v_corrected, s_corrected
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "a3fe7eb303d8942f4e51a1f6afe587bb", "grade": true, "grade_id": "cell-c2a35a4cdbfa242c", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Grader cell (nbgrader, locked): one Adam step at t=2 with non-default
# hyperparameters; printed values are compared against the expected output.
parametersi, grads, vi, si = update_parameters_with_adam_test_case()
t = 2
learning_rate = 0.02
beta1 = 0.8
beta2 = 0.888
epsilon = 1e-2
parameters, v, s, vc, sc  = update_parameters_with_adam(parametersi, grads, vi, si, t, learning_rate, beta1, beta2, epsilon)
print(f"W1 = \n{parameters['W1']}")
print(f"W2 = \n{parameters['W2']}")
print(f"b1 = \n{parameters['b1']}")
print(f"b2 = \n{parameters['b2']}")
update_parameters_with_adam_test(update_parameters_with_adam)
# -
# **Expected values:**
#
# ```
# W1 =
# [[ 1.63942428 -0.6268425 -0.54320974]
# [-1.08782943 0.85036983 -2.2865723 ]]
# W2 =
# [[ 0.33356139 -0.26425199 1.47707772]
# [-2.04538458 -0.30744933 -0.36903141]
# [ 1.14873036 -1.09256871 -0.15734651]]
# b1 =
# [[ 1.75854357]
# [-0.74616067]]
# b2 =
# [[-0.89228024]
# [ 0.02707193]
# [ 0.56782561]]
# ```
# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
# <a name='6'></a>
# ## 6 - Model with different Optimization algorithms
#
# Below, you'll use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
# Load the "moons" training set; X is (2, m) per the model docstring below.
# NOTE(review): load_dataset is presumably a course utility helper -- confirm.
train_X, train_Y = load_dataset()
# A 3-layer neural network has already been implemented for you! You'll train it with:
# - Mini-batch **Gradient Descent**: it will call your function:
# - `update_parameters_with_gd()`
# - Mini-batch **Momentum**: it will call your functions:
# - `initialize_velocity()` and `update_parameters_with_momentum()`
# - Mini-batch **Adam**: it will call your functions:
# - `initialize_adam()` and `update_parameters_with_adam()`
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 5000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters

    Raises:
    ValueError -- if optimizer is not a recognized name. (The previous
        version silently skipped every parameter update in that case, so
        a typo like "adams" trained nothing without any error.)
    """
    # Fail fast on a bad optimizer name instead of silently not training.
    if optimizer not in ("gd", "momentum", "adam"):
        raise ValueError("optimizer must be 'gd', 'momentum' or 'adam', got %r" % (optimizer,))

    costs = []                       # cost history, sampled every 100 epochs
    t = 0                            # Adam step counter; must start at 0
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state ("gd" needs none).
    if optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):
        # Increment the seed to reshuffle the dataset differently each epoch
        # while keeping the run reproducible for grading.
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:
            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch
            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)
            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)
            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Update parameters with the selected optimizer
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1 # Adam counter
                parameters, v, s, _, _ = update_parameters_with_adam(parameters, grads, v, s,
                                                                     t, learning_rate, beta1, beta2, epsilon)
        cost_avg = cost_total / m

        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
# Now, run this 3 layer neural network with each of the 3 optimization methods.
#
# <a name='6-1'></a>
# ### 6.1 - Mini-Batch Gradient Descent
#
# Run the following code to see how the model does with mini-batch gradient descent.
# +
# train 3-layer model with plain mini-batch gradient descent
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")

# Predict on the training set (accuracy is printed by the helper)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary over the moons dataset
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='6-2'></a>
# ### 6.2 - Mini-Batch Gradient Descent with Momentum
#
# Next, run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momemtum are small - but for more complex problems you might see bigger gains.
# +
# train 3-layer model with mini-batch gradient descent plus momentum
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")

# Predict on the training set (accuracy is printed by the helper)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary over the moons dataset
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='6-3'></a>
# ### 6.3 - Mini-Batch with Adam
#
# Finally, run the following code to see how the model does with Adam.
# +
# train 3-layer model with mini-batch Adam
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")

# Predict on the training set (accuracy is printed by the helper)
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary over the moons dataset
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='6-4'></a>
# ### 6.4 - Summary
#
# <table>
# <tr>
# <td>
# <b>optimization method</b>
# </td>
# <td>
# <b>accuracy</b>
# </td>
# <td>
# <b>cost shape</b>
# </td>
# </tr>
#     <tr>
#         <td>
#         Gradient descent
#         </td>
#         <td>
#         >71%
#         </td>
#         <td>
#         smooth
#         </td>
#     </tr>
#     <tr>
# <td>
# Momentum
# </td>
# <td>
# >71%
# </td>
# <td>
# smooth
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# >94%
# </td>
# <td>
# smoother
# </td>
# </tr>
# </table>
#
# Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible.
#
# On the other hand, Adam clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
#
# Some advantages of Adam include:
#
# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
# - Usually works well even with little tuning of hyperparameters (except $\alpha$)
# **References**:
#
# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
# <a name='7'></a>
# ## 7 - Learning Rate Decay and Scheduling
#
# Lastly, the learning rate is another hyperparameter that can help you speed up learning.
#
# During the first part of training, your model can get away with taking large steps, but over time, using a fixed value for the learning rate alpha can cause your model to get stuck in a wide oscillation that never quite converges. But if you were to slowly reduce your learning rate alpha over time, you could then take smaller, slower steps that bring you closer to the minimum. This is the idea behind learning rate decay.
#
# Learning rate decay can be achieved by using either adaptive methods or pre-defined learning rate schedules.
#
# Now, you'll apply scheduled learning rate decay to a 3-layer neural network in three different optimizer modes and see how each one differs, as well as the effect of scheduling at different epochs.
#
# This model is essentially the same as the one you used before, except in this one you'll be able to include learning rate decay. It includes two new parameters, decay and decay_rate.
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 5000, print_cost = True, decay=None, decay_rate=1):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum", "adam"; selects the parameter-update rule
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs
    decay -- optional schedule: callable f(learning_rate0, epoch_num, decay_rate) applied once per epoch
    decay_rate -- decay rate forwarded to `decay`

    Returns:
    parameters -- python dictionary containing your updated parameters
    """

    L = len(layers_dims)             # number of layers in the neural networks
    costs = []                       # to keep track of the cost
    t = 0                            # initializing the counter required for Adam update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours
    m = X.shape[1]                   # number of training examples
    lr_rates = []                    # NOTE(review): never appended to -- appears unused
    learning_rate0 = learning_rate   # the original learning rate, kept so decay never compounds

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state (velocity and/or second-moment estimates)
    if optimizer == "gd":
        pass # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):

        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        cost_total = 0

        for minibatch in minibatches:

            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost and add to the cost total
            cost_total += compute_cost(a3, minibatch_Y)

            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters with the selected rule
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1 # Adam counter (increments per minibatch, used for bias correction)
                parameters, v, s, _, _ = update_parameters_with_adam(parameters, grads, v, s,
                                                                    t, learning_rate, beta1, beta2, epsilon)
        cost_avg = cost_total / m
        # Apply the optional schedule once per epoch, always from the original
        # rate (learning_rate0) so the decay does not compound across epochs.
        if decay:
            learning_rate = decay(learning_rate0, i, decay_rate)

        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost_avg))
            if decay:
                print("learning rate after epoch %i: %f"%(i, learning_rate))
        if print_cost and i % 100 == 0:
            costs.append(cost_avg)

    # plot the cost (one point per 100 epochs)
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
# <a name='7-1'></a>
# ### 7.1 - Decay on every iteration
#
# For this portion of the assignment, you'll try one of the pre-defined schedules for learning rate decay, called exponential learning rate decay. It takes this mathematical form:
#
# $$\alpha = \frac{1}{1 + decayRate \times epochNumber} \alpha_{0}$$
#
# <a name='ex-7'></a>
# ### Exercise 7 - update_lr
#
# Calculate the new learning rate using exponential weight decay.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "68d0f6e5b2a1a462ee981bf6c4ac6414", "grade": false, "grade_id": "cell-1f75dd71cfae785a", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: update_lr
def update_lr(learning_rate0, epoch_num, decay_rate):
    """
    Compute the learning rate for a given epoch under inverse-time decay.

    Implements: alpha = alpha0 / (1 + decay_rate * epoch_num)

    Arguments:
    learning_rate0 -- Original learning rate. Scalar
    epoch_num -- Epoch number. Integer
    decay_rate -- Decay rate. Scalar

    Returns:
    learning_rate -- Updated learning rate. Scalar
    """
    # The denominator grows linearly with the epoch number, so the rate
    # shrinks harmonically; at epoch 0 the original rate is returned unchanged.
    denominator = 1 + decay_rate * epoch_num
    learning_rate = 1 / denominator * learning_rate0

    return learning_rate
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "e94cc1e45ead743ed2c013bea09a2170", "grade": true, "grade_id": "cell-84c8bdb20bc64216", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Sanity-check update_lr: with decay_rate=1, two epochs should divide the rate by 3.
learning_rate = 0.5
print("Original learning rate: ", learning_rate)

epoch_num = 2
decay_rate = 1
learning_rate_2 = update_lr(learning_rate, epoch_num, decay_rate)
print("Updated learning rate: ", learning_rate_2)

update_lr_test(update_lr)
# +
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
# Per-iteration decay (decay=update_lr): the learning rate shrinks every single epoch.
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd", learning_rate = 0.1, num_epochs=5000, decay=update_lr)

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# Notice that if you set the decay to occur at every iteration, the learning rate goes to zero too quickly - even if you start with a higher learning rate.
# <table>
# <tr>
# <td>
# <b>Epoch Number</b>
# </td>
# <td>
# <b>Learning Rate</b>
# </td>
# <td>
# <b>Cost</b>
# </td>
# </tr>
# <tr>
# <td>
# 0
# </td>
# <td>
# 0.100000
# </td>
# <td>
# 0.701091
# </td>
# </tr>
# <tr>
# <td>
# 1000
# </td>
# <td>
# 0.000100
# </td>
# <td>
# 0.661884
# </td>
# </tr>
# <tr>
# <td>
# 2000
# </td>
# <td>
# 0.000050
# </td>
# <td>
# 0.658620
# </td>
# </tr>
# <tr>
# <td>
# 3000
# </td>
# <td>
# 0.000033
# </td>
# <td>
# 0.656765
# </td>
# </tr>
# <tr>
# <td>
# 4000
# </td>
# <td>
# 0.000025
# </td>
# <td>
# 0.655486
# </td>
# </tr>
# <tr>
# <td>
# 5000
# </td>
# <td>
# 0.000020
# </td>
# <td>
# 0.654514
# </td>
# </tr>
# </table>
#
# When you're training for only a few epochs this doesn't cause much trouble, but when the number of epochs is large the optimization algorithm will effectively stop updating. One common fix to this issue is to decay the learning rate every few steps. This is called fixed interval scheduling.
# <a name='7-2'></a>
# ### 7.2 - Fixed Interval Scheduling
#
# You can help prevent the learning rate from decaying to zero too quickly by scheduling the exponential learning rate decay at a fixed time interval, for example 1000 epochs. You can either number the intervals, or divide the epoch by the time interval, which is the size of the window with a constant learning rate.
#
# <img src="images/lr.png" style="width:400px;height:250px;">
# <a name='ex-8'></a>
# ### Exercise 8 - schedule_lr_decay
#
# Calculate the new learning rate using exponential weight decay with fixed interval scheduling.
#
# **Instructions**: Implement the learning rate scheduling such that it only changes when the epochNum is a multiple of the timeInterval.
#
# **Note:** The fraction in the denominator uses the floor operation.
#
# $$\alpha = \frac{1}{1 + decayRate \times \lfloor\frac{epochNum}{timeInterval}\rfloor} \alpha_{0}$$
#
# **Hint:** [numpy.floor](https://numpy.org/doc/stable/reference/generated/numpy.floor.html)
# + deletable=false nbgrader={"cell_type": "code", "checksum": "6684151ebcddc6e4aaad1040b9e3d80a", "grade": false, "grade_id": "cell-e5b733253d9006fc", "locked": false, "schema_version": 3, "solution": true, "task": false}
# GRADED FUNCTION: schedule_lr_decay
def schedule_lr_decay(learning_rate0, epoch_num, decay_rate, time_interval=1000):
    """
    Compute the learning rate under inverse-time decay applied at fixed intervals.

    The rate only changes every `time_interval` epochs:
    alpha = alpha0 / (1 + decay_rate * floor(epoch_num / time_interval))

    Arguments:
    learning_rate0 -- Original learning rate. Scalar
    epoch_num -- Epoch number. Integer.
    decay_rate -- Decay rate. Scalar.
    time_interval -- Number of epochs where you update the learning rate.

    Returns:
    learning_rate -- Updated learning rate. Scalar
    """
    # Number of complete intervals elapsed so far; constant within an interval,
    # so the learning rate stays flat between scheduled updates.
    intervals_elapsed = np.floor(epoch_num / time_interval)
    learning_rate = 1 / (1 + decay_rate * intervals_elapsed) * learning_rate0

    return learning_rate
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "221cccee108f8b2db6ff3c6c76ee3db9", "grade": true, "grade_id": "cell-03cd771ef9f3be85", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false}
# Sanity-check schedule_lr_decay: unchanged inside the first interval,
# reduced only after a full interval has elapsed.
learning_rate = 0.5
print("Original learning rate: ", learning_rate)

epoch_num_1 = 10
epoch_num_2 = 100
decay_rate = 0.3
time_interval = 100
learning_rate_1 = schedule_lr_decay(learning_rate, epoch_num_1, decay_rate, time_interval)
learning_rate_2 = schedule_lr_decay(learning_rate, epoch_num_2, decay_rate, time_interval)
print("Updated learning rate after {} epochs: ".format(epoch_num_1), learning_rate_1)
print("Updated learning rate after {} epochs: ".format(epoch_num_2), learning_rate_2)

schedule_lr_decay_test(schedule_lr_decay)
# -
# **Expected output**
# ```
# Original learning rate: 0.5
# Updated learning rate after 10 epochs: 0.5
# Updated learning rate after 100 epochs: 0.3846153846153846
# ```
# <a name='7-3'></a>
# ### 7.3 - Using Learning Rate Decay for each Optimization Method
#
# Below, you'll use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
# <a name='7-3-1'></a>
# #### 7.3.1 - Gradient Descent with Learning Rate Decay
#
# Run the following code to see how the model does gradient descent and weight decay.
# +
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
# Same GD run as before, but with fixed-interval scheduling instead of per-epoch decay.
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd", learning_rate = 0.1, num_epochs=5000, decay=schedule_lr_decay)

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='7-3-2'></a>
# #### 7.3.2 - Gradient Descent with Momentum and Learning Rate Decay
#
# Run the following code to see how the model does gradient descent with momentum and weight decay.
# +
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
# Momentum optimizer combined with the fixed-interval learning-rate schedule.
parameters = model(train_X, train_Y, layers_dims, optimizer = "momentum", learning_rate = 0.1, num_epochs=5000, decay=schedule_lr_decay)

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent with momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='7-3-3'></a>
# #### 7.3.3 - Adam with Learning Rate Decay
#
# Run the following code to see how the model does Adam and weight decay.
# +
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
# Adam with scheduled decay; note the smaller base rate (0.01) than the GD runs.
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam", learning_rate = 0.01, num_epochs=5000, decay=schedule_lr_decay)

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# <a name='7-4'></a>
# ### 7.4 - Achieving similar performance with different methods
#
# With Mini-batch GD or Mini-batch GD with Momentum, the accuracy is significantly lower than Adam, but when learning rate decay is added on top, either can achieve performance at a speed and accuracy score that's similar to Adam.
#
# In the case of Adam, notice that the learning curve achieves a similar accuracy but faster.
#
# <table>
# <tr>
# <td>
# <b>optimization method</b>
# </td>
# <td>
# <b>accuracy</b>
# </td>
# </tr>
# <tr>
# <td>
# Gradient descent
# </td>
# <td>
# >94.6%
# </td>
# </tr>
# <tr>
# <td>
# Momentum
# </td>
# <td>
# >95.6%
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# 94%
# </td>
# </tr>
# </table>
# **Congratulations**! You've made it to the end of the Optimization methods notebook. Here's a quick recap of everything you're now able to do:
#
# * Apply three different optimization methods to your models
# * Build mini-batches for your training set
# * Use learning rate decay scheduling to speed up your training
#
# Great work!
| Hyperparameter Tuning, Regularization and Optimization/Week 2/Optimization_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CNN study case: train on Cifar10.
#
# On this notebook we will cover the training of a simple model on the CIFAR 10 dataset, and we will cover the next topics:
# - Cifar10 dataset
# - Model architecture:
# - 2D Convolutional layers
# - MaxPooling
# - Relu activation
# - Batch normalization
# - Image generator data augmentation
# - TTA (Test time augmentation)
# ### The dataset
# The dataset is composed of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. These are the image classes:
# - airplane
# - automobile
# - bird
# - cat
# - deer
# - dog
# - frog
# - horse
# - ship
# - truck
# +
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ info/warning logs
import tensorflow as tf
tf.get_logger().setLevel('ERROR')

physical_devices = tf.config.experimental.list_physical_devices('GPU')
# NOTE(review): indexing [0] raises IndexError on a machine with no GPU --
# guard with `if physical_devices:` if CPU-only runs must be supported.
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.keras.backend.clear_session()

from tensorflow.keras.datasets.cifar10 import load_data
from tensorflow.keras.utils import to_categorical

(trainX, trainY), (testX, testY) = load_data()
# normalize pixel values into [0, 1]
trainX = trainX.astype('float32') / 255
testX = testX.astype('float32') / 255
# one hot encode target values
trainY = to_categorical(trainY)
testY = to_categorical(testY)
trainX[0].shape
# -
# <font color=red><b>Plot some examples of the dataset.
# <br>Hint: use the imshow function of the pyplot package</b>
# </font>
from matplotlib import pyplot as plt
# %matplotlib inline

# Exercise stub: display 10 sample images, e.g. plt.imshow(trainX[j]) inside the loop.
for j in range (10):
    ...  # TODO(student): plot the j-th training image
plt.show()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import BatchNormalization
from numpy import mean
from numpy import std
# ## Model Architecture
# Build the CNN model to be trained train on the data, on this config:
# - Conv2d layer, with 32 units and 3x3 filter, with relu activation and padding of "same" type. Use the "he_uniform" initializer.
# - batchnorm
# - max pooling (2x2)
# - Conv2d layer, with 64 units and 3x3 filter, with relu activation and padding of "same" type. Use the "he_uniform" initializer.
# - batchnorm
# - max pooling (2x2)
# - Dense layer, with 128 units
# - Dense softmax layer
# - On compilation, use adam as the optimizer and categorical_crossentropy as the loss function. Add 'accuracy' as a metric
# - Print the summary
#
#
# <font color=red><b>Remember to initialize it propperly and to include input_shape on the first layer. <br> Hint:
# - Use the imported libraries</b></font>
def define_model():
    """
    Build and compile the CNN described in the markdown above.

    Expected architecture (to be completed by the student):
    Conv2D(32) -> BatchNorm -> MaxPool -> Conv2D(64) -> BatchNorm -> MaxPool
    -> (Flatten) -> Dense(128) -> Dense(10, softmax), compiled with Adam and
    categorical cross-entropy, tracking accuracy.

    Returns:
    model -- the compiled Keras Sequential model
    """
    # define model
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer='he_uniform', input_shape=(32, 32, 3)))
    ...  # TODO(student): remaining conv/pool/dense layers go here
    model.add(Dense(10, activation='softmax'))
    # compile model
    ...  # TODO(student): compile with adam / categorical_crossentropy / accuracy, print summary
    return model
# We are going to train our model in small steps in order to evaluate the hyperparameters and the strategy. To do that, we will define a step epoch count and will train and evaluate the model for that number of epochs. After a number of repeats we will reduce the effect of the random initialization of certain parameters.
#
# <font color=red>Evaluate the built model by training 10 times on different initializations<b> Hint: we would like to have some parameters of the score distribution, like the ones imported </b></font>
# +
step_epochs = 3     # epochs trained per evaluation step
batch_size = 128

def evaluate_model(model, trainX, trainY, testX, testY):
    """Fit `model` for `step_epochs` epochs and return its test-set accuracy."""
    # fit model
    model.fit(trainX, trainY, epochs=step_epochs, batch_size=batch_size, verbose=0)
    # evaluate model (first return value is the loss, discarded here)
    _, acc = model.evaluate(testX, testY, verbose=0)
    return acc
def evaluate(trainX, trainY, testX, testY, repeats=10):
    """
    Train `repeats` freshly-initialized models and return their test accuracies,
    reducing the influence of any single random initialization.
    """
    scores = list()
    for _ in range(repeats):
        # define model (new random initialization each repeat)
        model = define_model()
        # fit and evaluate model
        accuracy = evaluate_model(model, trainX, trainY, testX, testY)
        # store score
        scores.append(accuracy)
        print('> %.3f' % accuracy)
    return scores
# evaluate model
scores = ...  # TODO(student): call evaluate(trainX, trainY, testX, testY)
# summarize result
...  # TODO(student): report distribution parameters of `scores`, e.g. mean/std
# -
# ### Keras Image data generator
# In order to perform some data augmentation, Keras includes the Image data generator, which can be used to improve performance and reduce generalization error when training neural network models for computer vision problems.
# A range of techniques is supported, as well as pixel scaling methods. Some of the most common are:
#
# - Image shifts via the width_shift_range and height_shift_range arguments.
# - Image flips via the horizontal_flip and vertical_flip arguments.
# - Image rotations via the rotation_range argument
# - Image brightness via the brightness_range argument.
# - Image zoom via the zoom_range argument.
#
#
# Let's see it with an example:
#
# +
# expand dimension to one sample
from numpy import expand_dims
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Undo the earlier /255 normalization so the augmented output can be viewed as uint8.
data = trainX[0]*255
samples = expand_dims(data, 0)
# create image data augmentation generator
# NOTE(review): featurewise_center / featurewise_std_normalization require a
# prior datagen.fit(...) on representative data; none is performed here, so
# Keras will warn and those statistics are unavailable -- confirm this is intended.
datagen = ImageDataGenerator(horizontal_flip=True,
                             featurewise_center=True,
                             featurewise_std_normalization=True,
                             rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2)
# prepare iterator
it = datagen.flow(samples, batch_size=1)
# generate samples and plot a 3x3 grid of augmented variants
for i in range(9):
    # define subplot
    plt.subplot(330 + 1 + i)
    # generate batch of images (`next(it)` is the idiomatic spelling)
    batch = it.next()
    # convert to unsigned integers for viewing
    image = batch[0].astype('uint8')
    # plot raw pixel data
    plt.imshow(image)
# show the figure
plt.show()
# -
batch.shape  # last augmented batch from the cell above: (1, 32, 32, 3)
# <font color=red>Evaluate the model with data augmentation. <br> Hint: Use the ?model.fit_generator command and please take into account the parameters of model.fit_generator: it needs to include epochs, steps_per_epoch and a generator (i.e. a flow of images). </font>
# +
# fit and evaluate a defined model
def evaluate_model_increased(model, trainX, trainY, testX, testY):
    """Fit `model` on augmented training batches and return its test accuracy (stub)."""
    datagen = ImageDataGenerator(horizontal_flip=True)
    # in case there is mean/std to normalize
    datagen.fit(trainX)
    # Fit the model on the batches generated by datagen.flow().
    generator = datagen.flow(trainX, trainY,
                             batch_size=batch_size)
    # NOTE(review): fit_generator is deprecated in recent Keras; model.fit accepts generators directly.
    model.fit_generator(...  # TODO(student): pass generator, epochs and steps_per_epoch
                        verbose = 0)
    # evaluate model
    ...  # TODO(student): acc from model.evaluate on the test set
    return acc
# repeatedly evaluate model, return distribution of scores
def repeated_evaluation_increased(trainX, trainY, testX, testY, repeats=10):
scores = list()
for _ in range(repeats):
# define model
...
# fit and evaluate model
...
# store score
...
return scores
# evaluate model
scores = repeated_evaluation_increased(trainX, trainY, testX, testY)
# summarize result
...
# -
# ### Test Time augmentation (TTA)
#
# The image data augmentation technique can also be applied when making predictions with a fit model in order to allow the model to make predictions for multiple different versions of each image in the test dataset. Specifically, it involves creating multiple augmented copies of each image in the test set, having the model make a prediction for each, then returning an ensemble of those predictions.(e.g: majority voting in case of classification)
#
# Augmentations are chosen to give the model the best opportunity for correctly classifying a given image, and the number of copies of an image for which a model must make a prediction is often small, such as less than 10 or 20. Often, a single simple test-time augmentation is performed, such as a shift, crop, or image flip.
# <font color=red>Evaluate the model with data augmentation. <b>Please note that in this case we are not going to use the generator for training, but for testing.</b> <br> Hint: Use the model.predict_generator function </font>
# +
# make a prediction using test-time augmentation
def prediction_augmented_on_test(datagen, model, image, n_examples):
    """Predict one image's class by majority vote over `n_examples` augmented copies (stub)."""
    # convert image into dataset
    samples = expand_dims(image, 0)
    # prepare iterator
    it = datagen.flow(samples, batch_size=n_examples)
    # make predictions for each augmented image and return the maj voted class
    ...  # TODO(student)

# evaluate a model on a dataset using test-time augmentation
def evaluate_model_test_time_agumented(model, testX, testY):
    """Score `model` on the test set using TTA for every test image (stub)."""
    # configure image data augmentation
    datagen = ImageDataGenerator(horizontal_flip=True)
    # define the number of augmented images to generate per test set image
    yhats = list()
    for i in range(len(testX)):
        # augment prediction per test image
        # calculate accuracy
        testY_labels = ...  # TODO(student): one-hot targets -> class labels (likely belongs after the loop -- confirm)
    # NOTE(review): accuracy_score is not imported above; this needs
    # `from sklearn.metrics import accuracy_score` (or equivalent) to run.
    acc = accuracy_score(testY_labels, yhats)
    return acc

def evaluate_model_test_augmented(model, trainX, trainY, testX, testY):
    """Fit `model` without augmentation, then evaluate it with test-time augmentation (stub)."""
    # fit model with no generator
    ...  # TODO(student)
    # evaluate model
    acc = ...  # TODO(student): evaluate_model_test_time_agumented(...)
    return acc

def evaluate_test_augmented(trainX, trainY, testX, testY, repeats=10):
    """Repeat the TTA evaluation `repeats` times and return the scores (stub)."""
    scores = list()
    for _ in range(repeats):
        # define model
        model = define_model()
        # fit and evaluate model
        ...  # TODO(student)
        # store score
        ...  # TODO(student)
    return scores

# evaluate model
...  # TODO(student): run evaluate_test_augmented and summarize the result
# -
| training/CIFAR10-Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/amnbg/Linear-Algebra-58020/blob/main/Matrix_Algebra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="3U4K4i3FQaD8"
# ##Python program to inverse
# + colab={"base_uri": "https://localhost:8080/"} id="mvZfuJfpWW2j" outputId="c3b0af12-80fa-4196-941c-61737dd639c9"
import numpy as np

# Build a 2x2 matrix, print it, then print its inverse.
A = np.array([[1, 3],
              [2, 7]])
print(A)
invA = np.linalg.inv(A)
print(invA)
# + colab={"base_uri": "https://localhost:8080/"} id="os_yjSn2W4uD" outputId="8e46d3a4-302f-4cc0-d26e-f90ac81e9182"
# A @ inv(A) should print the (numerical) 2x2 identity matrix.
C = np.dot(A,invA)
print (C)
# + colab={"base_uri": "https://localhost:8080/"} id="s5PxmMBmXf6j" outputId="70441036-29c5-4592-be76-801581fbf985"
# Python program to transpose the 3x3 matrix A = [[7,2,3],[5,-3,2],[3,9,6]]
A = np.array([[7, 2, 3],
              [5, -3, 2],
              [3, 9, 6]])
print(A)
B = np.transpose(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="yPOc8JyOaDrb" outputId="6d37bc49-78bf-4236-8e1e-02f8b4537641"
# Python program to invert the 3x3 matrix A = [[7,2,3],[5,-3,2],[3,9,6]]
A = np.array([[7, 2, 3],
              [5, -3, 2],
              [3, 9, 6]])
print(A)
invA = np.linalg.inv(A)
print(invA)
| Matrix_Algebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# %matplotlib inline
# I had to change the instructor's from pandas.io.data import DataReader to the following:
from pandas_datareader import DataReader
#It still gave me a future warning, but I think I'll still be able to do the exercises
from datetime import datetime
# The tech stocks we'll use for this analysis
tech_list = ['AAPL','GOOG','MSFT','AMZN']

# Set up End and Start times for data grab: a one-year window ending today
end = datetime.now()
start = datetime(end.year - 1,end.month,end.day)
# +
#For loop for grabbing yahoo finance data and setting it as a dataframe
for stock in tech_list:
    # Set DataFrame as the Stock Ticker
    globals()[stock] = DataReader(stock,'yahoo',start,end)
    #Using globals() sets the stock name as the dataframe name in this case - instructor's shortcut
    # NOTE(review): writing into globals() hides these names from linters/IDEs;
    # a dict of {ticker: DataFrame} is the conventional alternative. Also verify
    # the 'yahoo' DataReader backend still works -- it has been unreliable.
# -
# Summary statistics for the downloaded AAPL data.
AAPL.describe()
#Now using the stock name is the dataframe name and we can do the usual things like describe() for math specs

AAPL.info()
#info() can be used for column and row specs

#Using indexing, we can choose what we want to plot with .plot as shown below:
AAPL['Adj Close'].plot(legend=True, figsize=(10,4))
#The legend usually auto populates for me, but the figsize=() was awesome to learn to make my visualizations scale better.
#I applied it to a few other items with good results, but I'm still getting stuck when trying to figure out which
# items should be graphed and how.

AAPL['Volume'].plot(legend=True, figsize=(12,5))
# Here we just keep picking different columns to index with the stock name and the .plot with the figsize to make it readable.
# +
#pandas has a built-in rolling mean calculator
#this allows us to calculate MA (moving averages) which is nothing more than the mean of specific numbers over time.
ma_day = [10,20,50]

for ma in ma_day:
    column_name = "MA for %s days" %(str(ma))
    #The instructor's line of AAPL[column_name]=pd.rolling_mean(AAPL['Adj Close'],ma) had to be changed as shown below:
    #This was very hard to find online and I had to infer it from a solution given to someone on stackoverflow
    # .rolling(ma).mean() is the modern replacement for the removed pd.rolling_mean.
    AAPL[column_name]=AAPL['Adj Close'].rolling(ma).mean()
# -
# Drop the experimental 100-day MA column if it exists. Only 10/20/50-day MAs
# were created above, so a plain drop() raises KeyError here; additionally the
# original call discarded its result (drop is not in-place), so the line was a
# no-op at best. errors='ignore' plus reassignment fixes both problems.
AAPL = AAPL.drop(columns='MA for 100 days', errors='ignore')

AAPL[['Adj Close', 'MA for 10 days', 'MA for 20 days', 'MA for 50 days']].plot(subplots=False,figsize=(10,5) )
# +
#I forgot the () at the end of the "rolling.mean()" solution and it wouldn't show all of the moving averages on the viz
# -
| Stock data exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Causal Impact
#
#
#
# <NAME>
#
# https://github.com/WillianFuks/tfcausalimpact
#
# [LinkedIn](https://www.linkedin.com/in/willian-fuks-62622217/)
#
# + [markdown] slideshow={"slide_type": "slide"}
# https://github.com/WillianFuks/pyDataSP-tfcausalimpact
#
#
# ```sh
# git clone <EMAIL>:WillianFuks/pyDataSP-tfcausalimpact.git
# # # cd pyDataSP-tfcausalimpact/
# python3.9 -m venv .env
# source .env/bin/activate
# pip install -r requirements.txt
#
# .env/bin/jupyter notebook
# ```
# + slideshow={"slide_type": "slide"}
import daft
import os
import collections
os. environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import matplotlib.pyplot as plt
from matplotlib import rc
from IPython.display import HTML
import tensorflow as tf
import tensorflow_probability as tfp
import seaborn as sns
import pandas as pd
import numpy as np
# Attempts to disable TF warnings
tf.get_logger().setLevel('ERROR')
tf.autograph.set_verbosity(tf.compat.v1.logging.ERROR)
import logging
tf.get_logger().setLevel(logging.ERROR)
# TFP namespaces
tfd = tfp.distributions
tfb = tfp.bijectors
# Remove prompt from notebook css. Read the stylesheet via a context manager so
# the file handle is closed instead of leaked by a bare open(); the HTML object
# is still the cell's last expression, so it renders as before.
with open('styles/custom.css') as _css_file:
    _custom_css = _css_file.read()
HTML(_custom_css)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Here's our final destination:
#
# <center><img src="./imgs/tfcausal_plot_original_example.png"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Our Journey:
#
# 1. Causality
#
# 2. Bayesian Time Series
#
# 3. Causal Impact
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Causality Is Simple!
# + slideshow={"slide_type": "fragment"}
rc("font", family="serif", size=12)
rc("text", usetex=False)

# Two-node causal graph: Rain -> Wet.
pgm = daft.PGM(grid_unit=4.0, node_unit=2.5)
rect_params = {"lw": 2}
edge_params = {
    'linewidth': 1,
    'head_width': .8
}
pgm.add_node("rain", r"$Rain$", 0.5, 1.5, scale=1.5, fontsize=24)
pgm.add_node("wet", r"$Wet$", 2.5 + 0.2, 1.5, scale=1.5, fontsize=24)
pgm.add_edge("rain", "wet", plot_params=edge_params)
pgm.render();
# + [markdown] slideshow={"slide_type": "slide"}
# ## Until It's Not...
# + slideshow={"slide_type": "fragment"}
# A seemingly simple single-cause arrow: Fatigue -> Performance.
pgm = daft.PGM(grid_unit=4.0, node_unit=4.5)
pgm.add_node("fatigue", r"$Fatigue Train$", 0.5, 1.5, scale=1.5, fontsize=24)
pgm.add_node("perf", r"$Performance$", 2.5 + 0.2, 1.5, scale=1.5, fontsize=24)
pgm.add_edge("fatigue", "perf", plot_params=edge_params)
pgm.render();
# + slideshow={"slide_type": "slide"}
# Performance with several direct causes; the commented-out edges sketch
# possible dependencies among the parents themselves.
pgm = daft.PGM(grid_unit=4.0, node_unit=2.5)
pgm.add_node("diet", r"$Diet$", 0.5, 3, scale=1.5, fontsize=24)
pgm.add_node("rest", r"$Rest$", 0.5, 1.5, scale=1.5, fontsize=24)
pgm.add_node("volume", r"$Volume$", 0.5, 0, scale=1.5, fontsize=24)
pgm.add_node("fatigue", r"$Fatigue$", 0.5, -1.5, scale=1.5, fontsize=24)
pgm.add_node("perf", r"$Performance$", 2.5 + 0.2, 1.5, scale=2.5, fontsize=24)
pgm.add_edge("diet", "perf", plot_params=edge_params)
pgm.add_edge("volume", "perf", plot_params=edge_params)
pgm.add_edge("rest", "perf", plot_params=edge_params)
pgm.add_edge("fatigue", "perf", plot_params=edge_params)
# pgm.add_edge("diet", "rest", plot_params=edge_params)
# pgm.add_edge("rest", "diet", plot_params=edge_params)
# pgm.add_edge("diet", "volume", plot_params=edge_params)
# pgm.add_edge("volume", "diet", plot_params=edge_params)
# pgm.add_edge("diet", "fatigue", plot_params=edge_params)
# pgm.add_edge("fatigue", "diet", plot_params=edge_params)
pgm.render();
# + [markdown] slideshow={"slide_type": "fragment"}
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## So How To Compute Causality?!
# + [markdown] slideshow={"slide_type": "fragment"}
#
# ### Correlations?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Let's Explore The Idea
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Tensorflow Probability
# + [markdown] slideshow={"slide_type": "fragment"}
# (Random Variables)
# + slideshow={"slide_type": "slide"}
import os
import tensorflow as tf
import tensorflow_probability as tfp
# tf.get_logger().setLevel('INFO')
# os. environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.autograph.set_verbosity(1)
tfd = tfp.distributions
tfb = tfp.bijectors
# + slideshow={"slide_type": "slide"}
# Sample from a Normal(2, 1) random variable and plot its histogram.
X = tfd.Normal(loc=2, scale=1)
sns.displot(X.sample(1000), kde=True);
# + slideshow={"slide_type": "fragment"}
# Pushing the same Normal through an Exp bijector transforms the samples.
b_X = tfd.TransformedDistribution(
    tfd.Normal(loc=2, scale=1),
    bijector=tfb.Exp()
)
sns.displot(b_X.sample(1000), kde=True);
# + slideshow={"slide_type": "fragment"}
# + [markdown] slideshow={"slide_type": "slide"}
# ## Suppose This (Simplified) Structure
# + slideshow={"slide_type": "fragment"}
# Simplified chain: Diet -> Volume -> Performance (no other parents).
pgm = daft.PGM(grid_unit=4.0, node_unit=2.5)
pgm.add_node("diet", r"$Diet$", 0., 0., scale=1.5, fontsize=24)
pgm.add_node("volume", r"$Volume$", 1.25, 0, scale=1.5, fontsize=24)
pgm.add_node("perf", r"$Performance$", 3., 0., scale=2.5, fontsize=24)
pgm.add_edge("diet", "volume", plot_params=edge_params)
pgm.add_edge("volume", "perf", plot_params=edge_params)
pgm.render();
# + [markdown] slideshow={"slide_type": "slide"}
# ## And Respective Data
# + slideshow={"slide_type": "fragment"}
# Generative model matching the chain Diet -> Volume -> Performance:
# volume is centered at 2*diet, performance at 1.3*volume.
dist = tfd.JointDistributionNamed(
    {
        'diet': tfd.Normal(loc=3, scale=1),
        'volume': lambda diet: tfd.Normal(diet * 2, scale=0.5),
        'performance': lambda volume: tfd.Normal(volume * 1.3, scale=0.3)
    }
)

data = dist.sample(3000)
# Build a DataFrame from the named samples; presumably `columns` selects the
# sampled dict's keys rather than relabelling positions -- verify if reordering.
data = pd.DataFrame(data, columns=['performance', 'diet', 'volume'])
# data.set_index(pd.date_range(start='20200101', periods=len(data)), inplace=True)
# data
# + [markdown] slideshow={"slide_type": "slide"}
# ## Linear Regression Keras
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ performance = W\cdot[x_{diet}, x_{volume}]^T$$
# + slideshow={"slide_type": "fragment"}
import tensorflow as tf


def _fit_linear_relationship(feature_columns):
    """Fit a bias-free linear model ``performance ~ W . features`` on ``data``.

    The same model was previously built/compiled/fit three times in a row
    (diet only, volume only, both); this helper removes that triplication.

    Parameters:
        feature_columns: list of column names of the global ``data`` frame
            to use as regressors.

    Returns:
        The learned weight matrix W, shape (n_features, 1).
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(units=1, use_bias=False)
    ])
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.1),
        loss=tf.keras.losses.MeanSquaredError()
    )
    model.fit(
        data[feature_columns],
        data['performance'],
        epochs=100,
        verbose=0,
    )
    # get_weights() returns [W]; W has one row per feature, one output column.
    return model.get_weights()[0]


w = _fit_linear_relationship(['diet'])
print(f"Linear Relationship is: {w[0][0]:.2f}")
# + slideshow={"slide_type": "slide"}
w = _fit_linear_relationship(['volume'])
print(f"Linear Relationship is: {w[0][0]:.2f}")
# + slideshow={"slide_type": "slide"}
w = _fit_linear_relationship(['diet', 'volume'])
print(f"Linear Relationship is: {w}")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bayesian Linear Regression
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Recipe!
# + [markdown] slideshow={"slide_type": "slide"}
# $$\begin{equation} \label{eq1}
# \begin{split}
# P(A|B) & = \frac{P(B|A)P(A)}{P(B)} = \frac{P(A, B)}{P(B)}
# \end{split}
# \end{equation}
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\begin{equation} \label{eq1}
# \begin{split}
# P(\theta|D) & = \frac{P(\theta, D)}{P(D)}
# \end{split}
# \end{equation}
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Step 1: Priors
# + slideshow={"slide_type": "fragment"}
# Bayesian model diagram: weight priors and noise scale feed Performance.
pgm = daft.PGM(grid_unit=4.0, node_unit=2.5)
pgm.add_node("diet", r"$w_{diet}$", 0., 0., scale=1.5, fontsize=24)
pgm.add_node("volume", r"$w_{volume}$", 1.25, 0, scale=1.5, fontsize=24)
pgm.add_node("sigma", r"$\sigma^2$", 2.5, 0, scale=1.5, fontsize=24)
pgm.add_node("perf", r"$Performance$", 1.25, -2, scale=2.25, fontsize=24)
pgm.add_edge("diet", "perf", plot_params=edge_params)
pgm.add_edge("volume", "perf", plot_params=edge_params)
pgm.add_edge("sigma", "perf", plot_params=edge_params)
pgm.render();
# + [markdown] slideshow={"slide_type": "fragment"}
# \begin{equation} \label{eq1}
# \begin{split}
# w_{diet} & \sim N(3, 5) \\
# w_{volume} & \sim N(3, 5) \\
# \sigma^2 & \sim Exp(2) \\
# performance & \sim N(w_{diet} \cdot x_{diet} + w_{volume} \cdot x_{volume}, \sigma^2)
# \end{split}
# \end{equation}
# + slideshow={"slide_type": "fragment"}
# Joint prior + likelihood; AutoBatched lets scalar parameter samples broadcast
# against the full data vectors inside the performance likelihood.
joint_dist = tfd.JointDistributionNamedAutoBatched(dict(
    sigma=tfd.Exponential(2),
    w_diet=tfd.Normal(loc=3, scale=5),
    w_volume=tfd.Normal(loc=3, scale=5),
    performance=lambda sigma, w_diet, w_volume: tfd.Normal(loc=data['diet'].values * w_diet + data['volume'].values * w_volume, scale=sigma)
))
# + slideshow={"slide_type": "fragment"}
# Visual sanity check of the priors: one histogram per parameter.
prior_samples = joint_dist.sample(500)
nrows = 3
labels = ['sigma', 'w_diet', 'w_volume']
fig, axes = plt.subplots(nrows=nrows, ncols=1, figsize=(10, 8))
for i in range(nrows):
    sns.histplot(prior_samples[labels[i]], kde=True, ax=axes[i], label=labels[i]);
    axes[i].legend();
# + [markdown] slideshow={"slide_type": "slide"}
# # Step 2: Joint Distribution
#
# $$P(\theta, X)$$
# + slideshow={"slide_type": "slide"}
def target_log_prob_fn(sigma, w_diet, w_volume):
    """Unnormalized log-posterior: the joint log-prob with `performance` pinned to the observed data."""
    observed = data['performance'].values
    return joint_dist.log_prob(
        sigma=sigma,
        w_diet=w_diet,
        w_volume=w_volume,
        performance=observed,
    )
# + [markdown] slideshow={"slide_type": "slide"}
# # Step 3: MCMC
# + slideshow={"slide_type": "slide"}
num_results = int(1e4)
num_burnin_steps = int(1e3)
# Base sampler: Hamiltonian Monte Carlo on the unnormalized posterior.
kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn,
    step_size=0.3,
    num_leapfrog_steps=3
)
# Sample sigma in unconstrained space: Exp maps R -> (0, inf). The bijector
# list is positional and must match the state order [sigma, w_diet, w_volume].
kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=kernel,
    bijector=[tfb.Exp(), tfb.Identity(), tfb.Identity()]
)
# Adapt the step size during the first 80% of burn-in.
kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
    inner_kernel=kernel,
    num_adaptation_steps=int(num_burnin_steps * 0.8)
)
# + slideshow={"slide_type": "slide"}
@tf.function(autograph=False)
def sample_chain():
    """Run the MCMC chain (graph-compiled for speed).

    Returns the posterior samples for [sigma, w_diet, w_volume] plus a trace
    of (step_size, log_accept_ratio) per step.
    """
    return tfp.mcmc.sample_chain(
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        kernel=kernel,
        # Initial state, same order as the bijector list: sigma, w_diet, w_volume.
        current_state=[
            tf.constant(0.5, dtype=tf.float32),
            tf.constant(0.3, dtype=tf.float32),
            tf.constant(0.2, dtype=tf.float32)
        ],
        # pkr nests as StepSizeAdaptation -> TransformedTransitionKernel -> HMC,
        # hence the double .inner_results to reach the HMC results.
        trace_fn=lambda _, pkr: [pkr.inner_results.inner_results.accepted_results.step_size,
                                 pkr.inner_results.inner_results.log_accept_ratio]
    )
# + slideshow={"slide_type": "slide"}
samples, [step_size, log_accept_ratio] = sample_chain()
# + slideshow={"slide_type": "slide"}
# Mean acceptance probability of the chain (exp of the capped log ratio).
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
p_accept
# + slideshow={"slide_type": "slide"}
# Prior vs posterior, one row per parameter. samples[i] follows the
# current_state order: sigma, w_diet, w_volume.
nrows = 3
labels = ['sigma', 'w_diet', 'w_volume']
fig, axes = plt.subplots(nrows=nrows, ncols=2, figsize=(10, 8))
axes[0][0].set_title('Prior')
axes[0][1].set_title('Posterior')
for i in range(nrows):
    sns.histplot(prior_samples[labels[i]], kde=True, ax=axes[i][0], label=labels[i]);
    sns.histplot(samples[i], kde=True, ax=axes[i][1], label=labels[i]);
    axes[i][0].legend();
    axes[i][1].legend();
plt.tight_layout()
# + slideshow={"slide_type": "fragment"}
sigmas, w_diets, w_volumes = samples
# + slideshow={"slide_type": "fragment"}
# Predicted performance for every posterior draw: outer product of the
# (n_obs, 1) feature columns with the (n_draws, 1) weight samples.
performance_estimated = (
    tf.linalg.matmul(data['diet'].values[..., tf.newaxis], w_diets[..., tf.newaxis], transpose_b=True) +
    tf.linalg.matmul(data['volume'].values[..., tf.newaxis], w_volumes[..., tf.newaxis], transpose_b=True)
)
# + slideshow={"slide_type": "fragment"}
# 95% credible interval per observation (variable name typo is historical).
quanties_performance = tf.transpose(tfp.stats.percentile(
    performance_estimated, [2.5, 97.5], axis=1, interpolation=None, keepdims=False,
))
# + slideshow={"slide_type": "fragment"}
# Posterior predictive mean/std per observation (reduce over draws, axis=1).
mean_y = tf.math.reduce_mean(performance_estimated, axis=1)
mean_y
# + slideshow={"slide_type": "fragment"}
std_y = tf.math.reduce_std(performance_estimated, axis=1)
std_y
# + slideshow={"slide_type": "fragment"}
# Observed vs predicted performance, with +/- 2 std error bars.
fig, ax = plt.subplots(figsize=(9, 8))
ax.errorbar(
    x=data['performance'],
    y=mean_y,
    yerr=2*std_y,
    fmt='o',
    capsize=2,
    label='predictions +/- CI'
)
sns.regplot(
    x=data['performance'],
    y=mean_y,
    scatter=False,
    line_kws=dict(alpha=0.5),
    label='performance / predicted performance',
    truncate=False,
    ax=ax
);
ax.set(ylabel='predicted performance');
plt.legend();
# + [markdown] slideshow={"slide_type": "slide"}
# When we fit `diet` and `volume` together
# + [markdown] slideshow={"slide_type": "fragment"}
# `diet` seems to lose causality!!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Interesting Problem
# + slideshow={"slide_type": "fragment"}
# Candidate causal structure: several drivers feed an unknown mediator "?",
# which in turn drives Performance.
pgm = daft.PGM(grid_unit=4.0, node_unit=2.5)
for node_name, node_label, node_x, node_y, node_scale in (
    ("diet", r"$Diet$", 0.5, 3, 1.5),
    ("rest", r"$Rest$", 0.5, 1.5, 1.5),
    ("volume", r"$Volume$", 0.5, 0, 1.5),
    ("fatigue", r"$Fatigue$", 0.5, -1.5, 1.5),
    ("question", r"$?$", 2.5 + 0.2, 1.5, 1.5),
    ("perf", r"$Performance$", 5, 1.5, 2.5),
):
    pgm.add_node(node_name, node_label, node_x, node_y, scale=node_scale, fontsize=24)
for driver in ("diet", "volume", "rest", "fatigue"):
    pgm.add_edge(driver, "question", plot_params=edge_params)
pgm.add_edge("question", "perf", plot_params=edge_params)
pgm.render();
# + slideshow={"slide_type": "fragment"}
# + [markdown] slideshow={"slide_type": "slide"}
# ## Correlations won't work
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Better Solution?
# + [markdown] slideshow={"slide_type": "fragment"}
#
# ## A/B Test!
# + [markdown] slideshow={"slide_type": "fragment"}
# <center><img src="imgs/ab.png" /></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Not so fast...
# + [markdown] slideshow={"slide_type": "slide"}
# <center><img src='imgs/Store-WP.png'/></center>
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Control Group Fail...
# + [markdown] slideshow={"slide_type": "slide"}
# ## Solution: Quasi Experiments
# + [markdown] slideshow={"slide_type": "slide"}
# <center><img src="imgs/ci.png"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Structural Time Series
# + [markdown] slideshow={"slide_type": "fragment"}
# <center><img src="imgs/stsequation.png" /></center>
# + [markdown] slideshow={"slide_type": "fragment"}
# <center><img src="imgs/stsgraph.png" /></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Important thing is: Structures
# + [markdown] slideshow={"slide_type": "fragment"}
# - [AutoRegressive](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/autoregressive.py#L258)
# - [DynamicRegression](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/dynamic_regression.py#L230)
# - [LocalLevel](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/local_level.py#L254)
# - [Seasonal](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/seasonal.py#L688)
# - [LocalLinearTrend](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/local_linear_trend.py#L222)
# - [SemiLocalLinearTrend](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/semilocal_linear_trend.py#L294)
# - [SmoothSeasonal](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/smooth_seasonal.py#L321)
# - [Regression](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/regression.py#L51)
# - [SparseLinearRegression](https://github.com/tensorflow/probability/blob/v0.11.1/tensorflow_probability/python/sts/regression.py#L264)
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Local Level
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\mu_t = \mu_{t-1} + Normal(0, \sigma^2_{\mu})$$
# + slideshow={"slide_type": "fragment"}
# A 20-step local-level (random-walk) state-space model; sample one path.
local_level_model = tfp.sts.LocalLevelStateSpaceModel(
    num_timesteps=20,
    level_scale=.1,
    initial_state_prior=tfd.MultivariateNormalDiag(scale_diag=[1.])
)
s = local_level_model.sample(1)
plt.plot(tf.squeeze(s));
# + slideshow={"slide_type": "fragment"}
# Log-likelihood of the sampled path under the same model.
local_level_model.log_prob(s)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Local And Regression (Model Fit)
# + slideshow={"slide_type": "fragment"}
# Synthetic ARMA fixture; a +5 level shift is injected at t=70 to simulate an
# intervention. Only the pre-intervention part (first 70 points) is used to fit.
data = pd.read_csv('https://raw.githubusercontent.com/WillianFuks/tfcausalimpact/master/tests/fixtures/arma_data.csv', dtype=np.float32)[['y', 'X']]
data.iloc[70:, 0] += 5
data.plot()
plt.axvline(70, 0, 130, linestyle='--', color='r', linewidth=0.85);
y = tf.cast(data['y'].values[:70], tf.float32)
# + slideshow={"slide_type": "fragment"}
# Structural components: a local level (random-walk trend)...
local_level = tfp.sts.LocalLevel(
    observed_time_series=y
)
# + slideshow={"slide_type": "fragment"}
# ...plus a static linear regression on the covariate X.
regression = tfp.sts.LinearRegression(
    design_matrix=tf.cast(data['X'].values[..., tf.newaxis], tf.float32)
)
# + slideshow={"slide_type": "fragment"}
model = tfp.sts.Sum([local_level, regression], observed_time_series=y)
# + slideshow={"slide_type": "fragment"}
# Posterior over component parameters via HMC.
samples, _ = tfp.sts.fit_with_hmc(model, y)
# + slideshow={"slide_type": "fragment"}
one_step_predictive_dist = tfp.sts.one_step_predictive(model, observed_time_series=y, parameter_samples=samples)
# + slideshow={"slide_type": "fragment"}
predictive_means = one_step_predictive_dist.mean()
predictive_means
# + slideshow={"slide_type": "fragment"}
predictive_scales = one_step_predictive_dist.stddev()
predictive_scales
# + slideshow={"slide_type": "fragment"}
# Observed series with the one-step-ahead predictive +/- 1 std band.
plt.figure(figsize=(10, 9))
color = (1.0, 0.4981, 0.0549)
plt.plot(y, label='y', color='k')
# plt.plot(predictive_means[1:], color=color, label='predictive mean')
plt.fill_between(
    np.arange(1, 70),
    predictive_means[1:70] - predictive_scales[1:70],
    predictive_means[1:70] + predictive_scales[1:70],
    alpha=0.4,
    color=color,
    label='predictive std'
)
plt.legend();
# + slideshow={"slide_type": "fragment"}
# Forecast 30 steps past the fitted window (t=70..99).
forecast_dist = tfp.sts.forecast(model, observed_time_series=y, parameter_samples=samples, num_steps_forecast=30)
# + slideshow={"slide_type": "fragment"}
forecast_means = tf.squeeze(forecast_dist.mean())
forecast_scales = tf.squeeze(forecast_dist.stddev())
# + slideshow={"slide_type": "fragment"}
# Full series plus the in-sample predictive band and out-of-sample forecast;
# the gap between forecast and observed y after t=70 is the intervention effect.
plt.figure(figsize=(10, 9))
plt.plot(data['y'], label='y', color='k')
plt.fill_between(
    np.arange(1, 70),
    predictive_means[1:] - predictive_scales[1:],
    predictive_means[1:] + predictive_scales[1:],
    alpha=0.4,
    color=color,
    label='predictive std'
)
plt.plot(np.arange(70, 100), forecast_means, color='r', label='mean forecast')
plt.fill_between(
    np.arange(70, 100),
    forecast_means - forecast_scales,
    forecast_means + forecast_scales,
    alpha=0.4,
    color='red',
    label='forecast std'
)
plt.legend();
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to obtain causal impact?
# + [markdown] slideshow={"slide_type": "fragment"}
# ### tfcausalimpact
# + slideshow={"slide_type": "fragment"}
# !pip install tfcausalimpact > /dev/null
# + slideshow={"slide_type": "fragment"}
# Same experiment, but letting tfcausalimpact build/fit/compare automatically.
from causalimpact import CausalImpact
data = pd.read_csv('https://raw.githubusercontent.com/WillianFuks/tfcausalimpact/master/tests/fixtures/arma_data.csv')[['y', 'X']]
data.iloc[70:, 0] += 5
# Pre-intervention window [0, 69], post-intervention window [70, 99].
pre_period = [0, 69]
post_period = [70, 99]
ci = CausalImpact(data, pre_period, post_period)
# + slideshow={"slide_type": "fragment"}
print(ci.summary())
# + slideshow={"slide_type": "fragment"}
print(ci.summary(output='report'))
# + slideshow={"slide_type": "fragment"}
ci.plot(figsize=(15, 15))
# + slideshow={"slide_type": "fragment"}
# Inspect the structural components and posterior samples CausalImpact fit.
ci.model.components_by_name
# + slideshow={"slide_type": "fragment"}
ci.model_samples
# + slideshow={"slide_type": "slide"}
# https://www.tensorflow.org/probability/examples/Structural_Time_Series_Modeling_Case_Studies_Atmospheric_CO2_and_Electricity_Demand
# Decompose the fitted model into per-component posterior distributions.
component_dists = tfp.sts.decompose_by_component(
    ci.model,
    observed_time_series=y,
    parameter_samples=ci.model_samples
)
component_means, component_stddevs = (
    {k.name: c.mean() for k, c in component_dists.items()},
    {k.name: c.stddev() for k, c in component_dists.items()}
)
def plot_components(dates,
                    component_means_dict,
                    component_stddevs_dict):
    """Plot each STS component's posterior mean with a +/- 2 std band, one subplot per component.

    Parameters:
        dates: x-axis values shared by all components.
        component_means_dict: {component name: mean series}.
        component_stddevs_dict: {component name: stddev series}.
    """
    # NOTE(review): x_locator/x_formatter are never reassigned, so the
    # axis-formatting branch below is effectively dead (kept from the TFP example).
    x_locator, x_formatter = None, None
    colors = sns.color_palette()
    c1, c2 = colors[0], colors[1]
    axes_dict = collections.OrderedDict()
    num_components = len(component_means_dict)
    fig = plt.figure(figsize=(12, 2.5 * num_components))
    for i, component_name in enumerate(component_means_dict.keys()):
        component_mean = component_means_dict[component_name]
        component_stddev = component_stddevs_dict[component_name]
        ax = fig.add_subplot(num_components, 1, 1 + i)
        ax.plot(dates, component_mean, lw=2)
        ax.fill_between(dates, component_mean-2*component_stddev,
                        component_mean+2*component_stddev,
                        color=c2, alpha=0.5)
        ax.set_title(component_name)
        if x_locator is not None:
            ax.xaxis.set_major_locator(x_locator)
            ax.xaxis.set_major_formatter(x_formatter)
        axes_dict[component_name] = ax
    # fig.autofmt_xdate()
    fig.tight_layout()
plot_components(np.arange(0, 70), component_means, component_stddevs);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Real Example: Bitcoin
# + slideshow={"slide_type": "fragment"}
# ! pip install pandas-datareader > /dev/null
# + slideshow={"slide_type": "fragment"}
import datetime
import pandas_datareader as pdr
btc_data = pdr.get_data_yahoo(['BTC-USD'],
start=datetime.datetime(2018, 1, 1),
end=datetime.datetime(2020, 12, 3))['Close']
btc_data = btc_data.reset_index().drop_duplicates(subset='Date', keep='last').set_index('Date').sort_index()
btc_data = btc_data.resample('D').fillna('nearest')
X_data = pdr.get_data_yahoo(['TWTR', 'GOOGL', 'AAPL', 'MSFT', 'AMZN', 'FB', 'GOLD'],
start=datetime.datetime(2018, 1, 1),
end=datetime.datetime(2020, 12, 2))['Close']
X_data = X_data.reset_index().drop_duplicates(subset='Date', keep='last').set_index('Date').sort_index()
X_data = X_data.resample('D').fillna('nearest')
data = pd.concat([btc_data, X_data], axis=1)
data.dropna(inplace=True)
data = data.resample('W-Wed').last() # Weekly is easier to process. We select Wednesday so 2020-10-21 is available.
data = data.astype(np.float32)
np.log(data).plot(figsize=(15, 12))
plt.axvline('2020-10-14', 0, np.max(data['BTC-USD']), lw=2, ls='--', c='red', label='PayPal Impact')
plt.legend(loc='upper left');
# + slideshow={"slide_type": "fragment"}
pre_period=['20180103', '20201014']
post_period=['20201021', '20201125']
ci = CausalImpact(data, pre_period, post_period, model_args={'fit_method': 'vi'})
# + slideshow={"slide_type": "fragment"}
print(ci.summary())
# + slideshow={"slide_type": "fragment"}
ci.plot(figsize=(15, 15))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tips
# + [markdown] slideshow={"slide_type": "slide"}
# ## Q - How select covariates?
# + [markdown] slideshow={"slide_type": "fragment"}
# ## A - Yes!
# + [markdown] slideshow={"slide_type": "fragment"}
# tfp.sts.SparseLinearRegression
# + [markdown] slideshow={"slide_type": "slide"}
# ## Decompose by [`statsmodels`](https://github.com/statsmodels/statsmodels)
# + slideshow={"slide_type": "fragment"}
# !pip install statsmodels > /dev/null
# + slideshow={"slide_type": "fragment"}
from statsmodels.tsa.seasonal import seasonal_decompose
# Classical decomposition of the BTC series into trend/seasonal/residual,
# useful for deciding which structural components to include in the STS model.
fig, axes = plt.subplots(4, 1, figsize=(15, 15))
fig.tight_layout()
res = seasonal_decompose(data['BTC-USD'])
axes[0].plot(res.observed)
axes[0].set_title('Observed')
axes[1].plot(res.trend)
axes[1].set_title('Trend')
axes[2].plot(res.seasonal)
axes[2].set_title('Seasonal')
axes[3].plot(res.resid)
axes[3].set_title('Residuals');
# + [markdown] slideshow={"slide_type": "slide"}
# # Fourier FTW
# + slideshow={"slide_type": "fragment"}
# https://colab.research.google.com/drive/10VADEg8F5t_FuryEf_ObFfeIFwX-CxII?usp=sharing#scrollTo=UyA6K6GTyJqF
from numpy.fft import rfft, irfft, rfftfreq
def annot_max(x, y, ax=None):
    """Annotate the point of maximal y with its coordinates and the implied period in weeks.

    Parameters:
        x: frequencies (e.g. from rfftfreq), in cycles per sample.
        y: corresponding spectrum magnitudes.
        ax: matplotlib Axes to annotate; defaults to the current axes.
    """
    xmax = x[np.argmax(y)]
    ymax = y.max()
    # The period (in weeks, given weekly sampling) is the reciprocal of the
    # peak frequency. (A dead duplicate `text = ...` assignment was removed.)
    text = f"x={xmax:.3f}, y={ymax:.3f}, (period: {(1./xmax):.2f} weeks)"
    if not ax:
        ax = plt.gca()
    bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
    arrowprops = dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=60")
    kw = dict(xycoords='data', textcoords="axes fraction",
              arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
    ax.annotate(text, xy=(xmax, ymax), xytext=(0.94, 0.96), **kw)
# Magnitude spectrum of the weekly BTC series; skip the first two bins
# (DC component and lowest frequency) which dominate the plot.
y = data['BTC-USD']
nobs = len(y)
btc_ft = np.abs(rfft(y))
btc_freq = rfftfreq(nobs)
plt.plot(btc_freq[2:], btc_ft[2:])
annot_max(btc_freq[2:], btc_ft[2: ]);
# + [markdown] slideshow={"slide_type": "slide"}
# # Cross Validation
# + slideshow={"slide_type": "fragment"}
plt.figure(figsize=(15, 10))
plt.plot(y)
plt.axvline(pd.to_datetime('2018-09-01'), 0, 19000, c='r')
plt.axvline(pd.to_datetime('2019-09-01'), 0, 19000, c='g')
plt.text(pd.to_datetime('2018-03-01'), 18000, 'train', bbox=dict(fill=False, edgecolor='k', linewidth=0.5), fontdict={'fontsize': 20})
plt.text(pd.to_datetime('2019-01-01'), 18000, 'cross-validate', bbox=dict(fill=False, edgecolor='k', linewidth=0.5), fontdict={'fontsize': 20})
plt.text(pd.to_datetime('2020-02-01'), 18000, 'causal impact', bbox=dict(fill=False, edgecolor='k', linewidth=0.5), fontdict={'fontsize': 20});
# + [markdown] slideshow={"slide_type": "slide"}
# ## And that's pretty much it ;)!
#
# ## Thanks!
| pyDataSP - tfcausalimpact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Plot sulci on the flatmap
#
#
# The sulci are defined in a sub-layer of the sulci layer in
# <filestore>/<subject>/overlays.svg.
#
# The parameter `with_sulci` in `quickflat.make_figure` controls
# displaying the sulci on the surface.
#
# +
import cortex
import numpy as np
np.random.seed(1234)
# Create a random pycortex Volume
volume = cortex.Volume.random(subject='S1', xfmname='fullhead')
# Plot a flatmap with the data projected onto the surface
# Highlight the curvature and display the sulci (with_sulci draws the sulci
# sub-layer defined in the subject's overlays.svg)
_ = cortex.quickflat.make_figure(volume,
                                 with_curvature=True,
                                 with_sulci=True)
| example-notebooks/quickflat/plot_sulci.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="KbOGOZ-CeT41" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="43a353eb-3bf9-4fc0-828e-f092a14a7f08" executionInfo={"status": "ok", "timestamp": 1582921554512, "user_tz": -60, "elapsed": 452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAVVn034VMrT5M_Pk6e7dM4iESfV_v-xtbJWBhz=s64", "userId": "16128401549265485168"}}
# Smoke-test cell: confirms the Colab <-> GitHub round trip works.
print("Hello Github")
# + id="QGQC-POGegbL" colab_type="code" colab={}
| HelloGithub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Algoritmo Truly PPO: Pong
# #### TFG: Aprendizaje por refuerzo: fundamentos teóricos del algoritmo PPO e implementación
# #### Autor: <NAME>
#
# En este Notebook se implementa el algoritmo PPO para el problema Pong, siguiendo las indicaciones de la memoria.
#
# En primer lugar se crea la función `preprocess_image`, que se encargará de preprocesar los fotogramas del Pong para adaptarlos a nuestras redes neuronales.
# +
import time
import cv2
import numpy as np
import random
def preprocess_image(image):
    """Preprocess a raw 210x160x3 Atari frame into a normalized 105x80 grayscale image.

    Steps:
      1. Collapse the RGB channels by taking the per-pixel channel maximum.
      2. Downsample 2x by summing each non-overlapping 2x2 block.
      3. Normalize pixel values to [0, 1] by the image maximum.

    This replaces the original O(H*W) pure-Python pixel loops with vectorized
    numpy operations, and guards the final division so an all-black frame no
    longer raises ZeroDivisionError.

    Parameters:
        image: array-like of shape (210, 160, 3) — the raw RGB frame returned
            by the Gym Atari environment.

    Returns:
        A 105x80 nested list of floats in [0, 1] (same structure as before).
    """
    frame = np.asarray(image, dtype=np.float64)
    # 1. Channel collapse: max over the RGB axis -> (210, 160).
    gray = frame.max(axis=2)
    # 2. 2x2 block-sum pooling: gray[2i+a, 2j+b] maps to [i, a, j, b], so
    #    summing axes (1, 3) yields the 4-pixel block sums -> (105, 80).
    pooled = gray.reshape(105, 2, 80, 2).sum(axis=(1, 3))
    # 3. Normalize by the global maximum (skip for an all-zero frame).
    max_pixel = pooled.max()
    if max_pixel > 0:
        pooled = pooled / max_pixel
    return pooled.tolist()
# -
# A continuación, se crea la función `run_simulation`, que se encarga de ejecutar una simulación sobre el entorno utilizando las estimaciones de las redes neuronales `model_val` (valor) y `model_res` (política).
# +
# This code is an implementation of the PPO algorithm, mixing the ideas shown in the paper and on the OpenAI Spinning Up webpage:
# https://spinningup.openai.com/en/latest/algorithms/ppo.html
import gym
from gym import envs
import time
import numpy as np
import random
import copy
# run_simulation: Runs one full episode of Pong-v0 using the current networks
# Parameters:
#    model_val: value neural network
#    model_policy: policy neural network
# Returns:
#    states: list of stacked 4-frame states the simulation has gone through
#    rewards: list of rewards obtained
#    values: list of values predicted by the neural network for each state
#    actions: list with the action taken at each step
#    probabilities: list of policies predicted by the neural network for each state
# NOTE(review): `threshold` appears unused in this notebook — presumably a
# leftover from the CartPole version; verify before removing.
threshold = 0.05
def run_simulation(model_val, model_policy):
    observations = []
    states = []
    rewards = []
    values = []
    actions = []
    probabilities = []
    #Create a fresh environment
    env = gym.make('Pong-v0')
    raw_observation = env.reset()
    #Raw frames are 210 x 160 x 3 RGB arrays
    it = 0
    #Iterate until the environment reports the episode is done
    while True:
        raw_observation = list(raw_observation)
        observation = preprocess_image(raw_observation)
        observations.append(observation)
        # Stack the last 4 preprocessed frames (repeating the oldest available
        # frame at the start of the episode) to give the network motion context.
        state = np.dstack([observations[max(-4,-len(observations))], observations[max(-3,-len(observations))],
                           observations[max(-2,-len(observations))], observations[max(-1,-len(observations))]])
        states.append(state)
        #Uncomment if you want to render the simulation
        #env.render()
        #Ask the neural network for the current value and policy:
        value = model_val.predict(np.array([state]))[0][0]
        policy = model_policy.predict(np.array([state]))[0]
        #Map the 3 network outputs onto Atari action ids:
        #0 - NOOP
        #2 - UP
        #5 - DOWN
        #Choose one of the available actions following the discrete distribution provided
        action = np.random.choice(a=[i for i in range(0,3)], p=policy)
        if action == 0:
            action_t = 0
        elif action == 1:
            action_t = 2
        else:
            action_t = 5
        #Append the gathered information to the arrays
        values.append(value)
        actions.append(action)
        probabilities.append(policy)
        #Once we have decided what action we are going to take, take the step and save the reward
        raw_observation, reward, done, _ = env.step(action_t)
        rewards.append(reward)
        #Check if the simulation has ended
        if done:
            print('Simulation finished')
            break
        it += 1
        #Progress heartbeat every 100 steps
        if it % 100 == 0:
            print(it)
            print(policy)
    env.close()
    #Return all the information (stacked states, not the raw observations)
    return states, rewards, values, actions, probabilities
# -
# Tras esto, creamos las redes neuronales. La red neuronal del valor (`PPONet_Val`) es bastante sencilla:
# +
import tensorflow as tf
#Neural network to predict the value for each state
class PPONet_Val:
    """Value network: maps a stacked-frame state (105x80x4) to a scalar state value."""

    def build(self):
        """Build and compile a fresh value network (two conv layers + two dense layers).

        Returns:
            model: compiled Keras model with the desired architecture.
        """
        inputs = tf.keras.layers.Input(shape=(105, 80, 4), name="input")
        x = tf.keras.layers.Conv2D(16, (8, 8), input_shape=(105, 80, 4), strides=(4, 4), padding='same', name="conv_1")(inputs)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Conv2D(32, (4, 4), strides=(2, 2), padding='same', name="conv_2")(x)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(256)(x)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Dense(1)(x)
        model = tf.keras.Model(inputs=inputs, outputs=x, name="ppo_nn_val")
        # Use the `learning_rate` kwarg (the `lr` alias is deprecated in Keras);
        # matches the optimizer style used elsewhere in this file.
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002), loss="mean_squared_error")
        return model

    def load_model(self, path):
        """Load previously saved weights into a freshly built network.

        Parameters:
            path: path to the file the weights were saved into.
        Returns:
            model: neural network with the restored weights.
        """
        model = self.build()
        model.load_weights(path)
        return model

    def save_model(self, model, path):
        """Save the weights of a trained network.

        Parameters:
            model: model to be saved.
            path: path to the file where we want to save the weights.
        """
        model.save_weights(path)
# -
# La red neuronal para la política (`PPONet_Policy`) es bastante más compleja, puesto que incluye la función especial de pérdida.
# +
# Clipping parameter epsilon of the PPO surrogate objective (used by custom_loss).
eps = 0.05
#Neural network to predict the policy for each state
class PPONet_Policy:
    """Policy network: maps a stacked-frame state (105x80x4) to a softmax over the 3 actions."""

    def build(self):
        """Build and compile a fresh policy network (same conv trunk as the value net, softmax head).

        Returns:
            model: compiled Keras model using the PPO clipped-surrogate custom loss.
        """
        inputs = tf.keras.layers.Input(shape=(105, 80, 4))
        x = tf.keras.layers.Conv2D(16, (8, 8), input_shape=(105, 80, 4), strides=(4, 4), padding='same')(inputs)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Conv2D(32, (4, 4), strides=(2, 2), padding='same')(x)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(256)(x)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Dense(3)(x)
        x = tf.keras.layers.Activation('softmax', name="policy_output")(x)
        # Fixed copy-pasted model name ("ppo_nn_val" -> "ppo_nn_policy"); use the
        # modern `learning_rate` kwarg instead of the deprecated `lr` alias.
        model = tf.keras.Model(inputs=inputs, outputs=x, name="ppo_nn_policy")
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0002), loss=custom_loss)
        return model

    def load_model(self, path):
        """Load previously saved weights into a freshly built network.

        Parameters:
            path: path to the file the weights were saved into.
        Returns:
            model: neural network with the restored weights.
        """
        model = self.build()
        model.load_weights(path)
        return model

    def save_model(self, model, path):
        """Save the weights of a trained network.

        Parameters:
            model: model to be saved.
            path: path to the file where we want to save the weights.
        """
        model.save_weights(path)
# custom_loss: Custom loss function, as described in the original PPO paper (clipped surrogate objective).
# This is not a standard loss function. Usually, we provide the neural network with 2 things: the predicted labels
# and the original labels. However, here there are no "original" and "predicted" labels, we have just an old policy
# and a new one. To follow the Tensorflow criteria, y_true will be the old policy (the data we provide as input
# during the training) and y_pred will be the new policy (the one we get directly from the neural network as the output of
# the given states).
# In addition, to calculate the loss we don't only need both policies but also the actions taken and the calculated
# advantage estimators. This adds extra complexity, because the loss function can only handle 2 arguments, and encapsulating
# the loss function into another function leads to weird Tensorflow optimizing bugs. In addition, the length of both arrays
# (y_true and y_pred) has to be the same.
# The solution is to include the actions and advantage estimators in y_true. Imagine we have the following data:
#    old_policy: [[0.7, 0.3], [0.65, 0.35], [0.47, 0.53]]
#    new_policy: [[0.62, 0.38], [0.73, 0.27], [0.52, 0.48]]
#    actions: [1,0,1]
#    advantage estimators: [-1.5, 0.75, -0.25]
# Then, our parameters would be:
#    y_true: [[0.7, 0.3, 1, -1.5], [0.65, 0.35, 0, 0.75], [0.47, 0.53, 1, -0,25]]
#    y_pred: [[0.62, 0.38], [0.73, 0.27], [0.52, 0.48]]
# Although the size of the second dimension is different, there is no problem as the length of the first dimension remains the same.
# Returns:
#    loss: calculated (negated) clipped-surrogate loss
def custom_loss(y_true, y_pred):
    # Flatten y_true to a 1-D tensor of stride-5 records:
    # [p0, p1, p2, action, advantage] per sample (shape is lost during the call).
    y_true = tf.reshape(y_true, (len(y_true)*5,))
    # Split the packed records back into actions, advantages, and the old policy.
    actions_data = tf.gather(params=y_true, indices=tf.range(start=3, limit=len(y_true), delta=5))
    actions_data = tf.cast(actions_data, dtype=tf.int32)
    advantage_estimators_data_trim = tf.gather(params=y_true, indices=tf.range(start=4, limit=len(y_true), delta=5))
    advantage_estimators_data_trim = tf.cast(advantage_estimators_data_trim, dtype=tf.float32)
    y_true_0 = tf.gather(params=y_true, indices=tf.range(start=0, limit=len(y_true), delta=5))
    y_true_1 = tf.gather(params=y_true, indices=tf.range(start=1, limit=len(y_true), delta=5))
    y_true_2 = tf.gather(params=y_true, indices=tf.range(start=2, limit=len(y_true), delta=5))
    y_true_trim = tf.stack([y_true_0, y_true_1, y_true_2], axis = 1)
    y_true_trim = tf.cast(y_true_trim, dtype=tf.float32)
    # We have the probability of all 3 actions per state; keep only the
    # probability of the action actually taken (index = 3*row + action).
    # The 1e-8 epsilon avoids log(0) below.
    old_predictions = tf.gather(params=tf.reshape(y_true_trim, [-1]), indices=tf.math.add(tf.range(start=0, limit=3*len(y_true_trim), delta=3), actions_data))
    old_predictions = tf.math.add(old_predictions, tf.fill([len(old_predictions)],1e-8))
    #Same with the new policy
    new_predictions = tf.gather(params=tf.reshape(y_pred, [-1]), indices=tf.math.add(tf.range(start=0, limit=3*len(y_pred), delta=3), actions_data))
    new_predictions = tf.math.add(new_predictions, tf.fill([len(new_predictions)],1e-8))
    # r_theta = new/old probability ratio, computed as exp(log(new) - log(old))
    # instead of a direct division to reduce precision issues.
    r_theta = tf.math.exp(tf.math.subtract(tf.math.log(new_predictions), tf.math.log(old_predictions)))
    # a and b are the two terms inside the minimum of the clipped surrogate:
    # r*A and clip(r, 1-eps, 1+eps)*A.
    a = tf.math.multiply(r_theta, advantage_estimators_data_trim, name = 'a')
    b = tf.math.multiply(tf.clip_by_value(r_theta, 1-eps, 1+eps), advantage_estimators_data_trim, name = 'b')
    #Select the minimum
    min_elems = tf.math.minimum(a, b, name = 'min')
    #Add all the minimums
    clip = tf.math.reduce_sum(min_elems, name='clip')
    # PPO maximizes the surrogate objective, while Keras minimizes the loss, so
    # we negate: minimizing -objective == maximizing the objective.
    clip2 = tf.math.multiply(clip, -1)
    return clip2
# -
# Tras esto, solo queda unir todas las partes del algoritmo. En primer lugar se definen los hiperparámetros, de acuerdo a lo explicado en la memoria.
# +
#HYPERPARAMETERS
#Number of iterations of the PPO algorithm
NUM_ITERATIONS = 1500
#Number of rollouts (simulations) collected per iteration
NUM_ACTORS = 1
#Discount factor applied to future rewards
gamma = 0.99
#Number of steps the advantage estimator will look ahead (n-step bootstrap)
horizon = 128
# -
# Este código es el encargado de realizar las simulaciones, calcular los estimadores de ventaja y las recompensas y entrenar los modelos
# +
import time
import copy
import sys
# Load/Create value network
neural_network = PPONet_Val()
model_val = neural_network.build()
#model_val = neural_network.load_model('pong_weights_3/val_it_1494.h5')
# Load/Create policy network
neural_network2 = PPONet_Policy()
model_pol = neural_network2.build()
#model_pol = neural_network2.load_model('pong_weights_3/pol_it_1494.h5')
# Main PPO training loop: collect rollouts, compute discounted returns and
# advantage estimators, then update the policy and value networks.
# NOTE(review): `iter` shadows the builtin of the same name within this loop.
for iter in range(0, NUM_ITERATIONS):
    observations_data = []
    rewards_data = []
    values_data = []  # NOTE(review): collected but never filled/used below
    probabilities_data = []
    advantage_estimators_data = []
    actions_data = []
    values_obtained_data = []
    # Gather all the rollouts
    for r in range(0, NUM_ACTORS):
        # Run one simulation
        observations, rewards, values, actions, probabilities = run_simulation(model_val, model_pol)
        advantage_estimators = []
        values_obtained = []
        # Discounted return from each timestep onwards (value-network targets).
        # NOTE(review): this is O(T^2); a single backward accumulation pass
        # would compute the same values in O(T).
        for i in range(0, len(rewards)):
            val = 0
            for j in range(i, len(rewards)):
                val = val + gamma**(j-i)*rewards[j]
            values_obtained.append(val)
        # Advantage estimators: n-step (horizon) return minus the value baseline
        for i in range(0,len(rewards)):
            adv = -values[i]
            if i + horizon < len(rewards):
                # Bootstrap with the value prediction `horizon` steps ahead
                adv = adv + gamma**horizon*values[i+horizon]
            for j in range(0, horizon):
                if i + j == len(rewards):
                    break
                adv = adv + gamma**j*rewards[i+j]
            advantage_estimators.append(adv)
        observations_data.extend(copy.deepcopy(observations))
        rewards_data.extend(copy.deepcopy(rewards))
        values_obtained_data.extend(copy.deepcopy(values_obtained))
        actions_data.extend(copy.deepcopy(actions))
        probabilities_data.extend(copy.deepcopy(probabilities))
        advantage_estimators_data.extend(copy.deepcopy(advantage_estimators))
    # Update the policy network
    print('average_reward: ', sum(rewards_data)/NUM_ACTORS)
    advantage_estimators_data = np.reshape(np.array(advantage_estimators_data), (-1))
    # Each y_true row packs [action probabilities, chosen action, advantage],
    # the layout expected by the custom PPO clip loss.
    y_true_np = np.array([np.concatenate([a, [b], [c]]) for a, b, c in zip(probabilities_data, actions_data, advantage_estimators_data)])
    print(y_true_np.shape)
    model_pol.fit(x=np.array(observations_data), y=y_true_np, batch_size = 64, epochs = 3, verbose=1)
    print('it: ', iter)
    # Update the value network
    model_val.fit(x=np.array(observations_data), y=np.array(values_obtained_data), batch_size = 64, epochs = 3, verbose=1)
    # Save the trained neural networks
    neural_network.save_model(model_val, 'pong_weights_3/val_it_' + str(iter) + '.h5')
    neural_network2.save_model(model_pol, 'pong_weights_3/pol_it_' + str(iter) + '.h5')
# -
# Una vez que hemos entrenado el modelo, podemos utilizarlo para pasarnos el entorno modificando la política probabilista por una política determinista, en la que en todo momento se ejecuta la acción con mayor probabilidad. Para ello sustituimos la función `run_simulation` por `run_game`, donde la única diferencia está en cómo se escoge la acción a realizar.
# +
import matplotlib.pyplot as plt
def run_game(model_val, model_policy):
    """Play one Pong episode with a deterministic (argmax) policy.

    Returns the per-step rewards and the value network's predictions,
    so predicted value can be compared against realised return.
    """
    observations = []
    rewards = []
    values = []
    #Create a fresh environment
    env = gym.make('Pong-v0')
    raw_observation = env.reset()
    #210 x 160 x 3
    it = 0
    # Play until the environment reports the episode is done
    while True:
        raw_observation = list(raw_observation)
        #generate the current state preprocessing the image and stacking the last observations
        observation = preprocess_image(raw_observation)
        #print(np.array(observation).shape)
        observations.append(observation)
        #print(observation)
        # Stack the last 4 preprocessed frames (repeating the oldest frame
        # while fewer than 4 exist) to form the network input state.
        state = np.dstack([observations[max(-4,-len(observations))], observations[max(-3,-len(observations))],
                           observations[max(-2,-len(observations))], observations[max(-1,-len(observations))]])
        #Uncomment if you want to render the simulation
        env.render()
        #Ask the neural network for the current value and policy:
        value = model_val.predict(np.array([state]))[0][0]
        values.append(value)
        policy = model_policy.predict(np.array([state]))[0]
        # Deterministic policy: take the action with the highest probability
        # (actions 0/2/5 in the Atari action space — presumably NOOP/up/down;
        # TODO confirm against run_simulation's action mapping)
        if policy[0] > policy[1] and policy[0] > policy[2]:
            action_t = 0
        elif policy[1] > policy[0] and policy[1] > policy[2]:
            action_t = 2
        else:
            action_t = 5
        #Once we have decided what action we are going to take, take the step and save the reward
        raw_observation, reward, done, _ = env.step(action_t)
        rewards.append(reward)
        #Check if the simulation has ended
        if done:
            print('Simulation finished')
            break
        it += 1
        #print(policy)
        #print(value)
    env.close()
    #Return all the information
    return rewards, values
# -
# Además de jugar, el siguiente código nos permite ver la recompensa predicha y obtenida
# +
import matplotlib.pyplot as plt
# Load the trained networks and play one deterministic game
neural_network = PPONet_Val()
model_val = neural_network.load_model('val_it_1500.h5')
neural_network2 = PPONet_Policy()
model_pol = neural_network2.load_model('pol_it_1500.h5')
rewards, values = run_game(model_val,model_pol)
values_obtained = []
# Discounted return actually obtained from each timestep onwards
for i in range(0, len(rewards)):
    val = 0
    for j in range(i, len(rewards)):
        val = val + gamma**(j-i)*rewards[j]
    values_obtained.append(val)
# Plot predicted value vs. realised discounted return at every step
axis = [i for i in range(0,len(rewards))]
plt.subplots(figsize=(10,8))
plt.plot(axis, values, color='lightskyblue')
plt.plot(axis, values_obtained, color='navy')
plt.legend(['predicho','real'], loc='upper left')
plt.ylabel('valor')
plt.xlabel('paso')
plt.savefig('pong_val_vs_reward.png')
plt.show()
# -
| PPO/Pong/Pong PPO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Course set-up
# Notebook authorship metadata
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2021"
# This notebook covers the steps you'll need to take to get set up for [CS224u](http://web.stanford.edu/class/cs224u/).
# ## Contents
#
# 1. [Anaconda](#Anaconda)
# 1. [The course Github repository](#The-course-Github-repository)
# 1. [Main data distribution](#Main-data-distribution)
# 1. [Additional installations](#Additional-installations)
# 1. [Installing the package requirements](#Installing-the-package-requirements)
# 1. [PyTorch](#PyTorch)
# 1. [Hugging Face transformers](#Hugging-Face-transformers)
# 1. [Jupyter notebooks](#Jupyter-notebooks)
# ## Anaconda
#
# We recommend installing [the free Anaconda Python distribution](https://www.anaconda.com/products/individual).
#
# Please be sure that you download the __Python 3__ version, which currently installs Python 3.8.
#
# Once you have Anaconda installed, create a virtual environment for the course. In a terminal, run
#
# ```conda create -n nlu python=3.8 anaconda```
#
# to create an environment called `nlu`.
#
# Then, to enter the environment, run
#
# ```conda activate nlu```
#
# To leave it, you can just close the window, or run
#
# ```conda deactivate```
#
# [This page](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) has more detailed instructions on managing virtual environments with Anaconda.
# ## The course Github repository
#
# https://github.com/cgpotts/cs224u
# ## Main data distribution
#
# The datasets needed to run the course notebooks and complete the assignments are in the following zip archive:
#
# http://web.stanford.edu/class/cs224u/data/data.tgz
#
# We recommend that you download it, unzip it, and place it in the same directory as your local copy of this Github repository. If you decide to put it somewhere else, you'll need to adjust the paths given in the "Set-up" sections of essentially all the notebooks.
#
# We recommend that you check the `md5` checksum of `data.tgz` after the download. For the current version (as of 8/22/2021), the checksum is `a447b2a81835707ad7882f8f881af79a`. If you see a different checksum, please contact the teaching staff.
# ## Additional installations
#
# Be sure to do these additional installations from [inside your virtual environment](#Anaconda) for the course! Before you proceed from here, perhaps run
#
# ```conda activate nlu```
#
# to make sure you are in that environment.
# ### Installing the package requirements
#
# If you are running Anaconda, then most of the requirements are already met – you'll just need to add PyTorch and the Hugging Face `transformers` library, both discussed below.
#
# People who aren't using Anaconda should edit `requirements.txt` so that it installs all the prerequisites that come with Anaconda and then run
#
# ```pip install -r requirements.txt```
#
# from inside the course virtual environment to install the core additional packages. __The above command will not install anything unless you edit it.__
# ### PyTorch
#
# The PyTorch library has special installation instructions depending on your computing environment. For Anaconda users, we recommend
#
# ```conda install pytorch=1.8.0 torchvision -c pytorch```
#
# For non-Anaconda users, or if you have a [CUDA-enabled GPU](https://developer.nvidia.com/cuda-gpus), we recommend following the instructions posted here:
#
# https://pytorch.org/get-started/locally/
#
# For this course, you should be running at least version `1.7.0` and preferably `1.8.0`:
# +
import torch

# Report the installed PyTorch version and whether CUDA is usable.
torch_version = torch.__version__
cuda_available = torch.cuda.is_available()
print(torch_version)
print(cuda_available)
# -
# ### Hugging Face transformers
# We will be making extensive use of the [Hugging Face](https://huggingface.co/) `transformers` library. To install it, run the following from inside your virtual environment:
#
# ```pip install transformers==4.3.3```
# +
import transformers
# Confirm the installed transformers version (the course expects 4.3.3)
print(transformers.__version__)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Jupyter notebooks
#
# The majority of the materials for this course are Jupyter notebooks, which allow you to work in a browser, mixing code and description. It's a powerful form of [literate programming](https://en.wikipedia.org/wiki/Literate_programming), and increasingly a standard for open science.
#
# To start a notebook server, navigate to the directory where you want to work and run
#
# ```jupyter notebook --port 5656```
#
# The port specification is optional.
#
# This should launch a browser that takes you to a view of the directory you're in. You can then open notebooks for working and create new notebooks.
#
# A major advantage of working with Anaconda is that you can switch virtual environments from inside a notebook, via the __Kernel__ menu. If this isn't an option for you, then run this command while inside your virtual environment:
#
# ```python -m ipykernel install --user --name nlu --display-name "nlu"```
#
# (If you named your environment something other than `nlu`, then change the `--name` and `--display-name` values.)
#
# [Additional discussion of Jupyter and kernels.](https://stackoverflow.com/questions/39604271/conda-environments-not-showing-up-in-jupyter-notebook)
#
# For some tips on getting started with notebooks, see [our Jupyter tutorial](tutorial_jupyter_notebooks.ipynb).
| setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit (conda)
# metadata:
# interpreter:
# hash: 006b2a4c59105e151f59d7a783e9669671dc3b315a85df72952575de8c58cde6
# name: python3
# ---
# importing dependencies
import pandas as pd
from sqlalchemy import create_engine  # NOTE(review): create_engine appears unused in this notebook

# +
# reading in csv data

# alec_salaries_data_clean: player salaries joined with team names, 1985-2015
salaries_final_path = "output_csv/final_salaries_team_names_1985_to_2015.csv"
salaries_final_df = pd.read_csv(salaries_final_path)

# collin_team_data_clean: cleaned per-season team records
teams_collin_path = "../collin-work/output_csv/Cleaned_Teams_info.csv"
teams_final_df = pd.read_csv(teams_collin_path)
# -

# previewing data
salaries_final_df.head(5)

# restrict both tables to the 2015 season
salaries_final_df_2015 = salaries_final_df.loc[salaries_final_df["yearID"] == 2015]
teams_final_df_2015 = teams_final_df.loc[teams_final_df["Year"] == 2015]
salaries_df_2015 = salaries_final_df_2015.reset_index(drop=True)
teams_df_2015_1 = teams_final_df_2015.reset_index(drop=True)
teams_df_2015_1
teams_df_2015 = teams_df_2015_1[['Year','Team ID','Team Name','Games','Wins','Loses']]
teams_df_2015

# grouping by teams for 2015 team salary totals
# NOTE(review): on pandas >= 2.0, .sum() over mixed-dtype frames may need
# numeric_only=True — confirm against the pandas version used here.
sal_team_groupby_2015 = salaries_df_2015.groupby(['teamID'])
sal_cap_2015 = sal_team_groupby_2015.sum()
sal_cap_2015_df_1 = pd.DataFrame(sal_cap_2015)
sal_cap_2015_df = sal_cap_2015_df_1["salary"]
sal_cap_2015_df

# merging 2015 salary totals into the teams df (left join on team id)
teams_sal_cap_2015_2 = teams_df_2015.merge(sal_cap_2015_df,how="left",left_on="Team ID",right_index=True)
teams_sal_cap_2015_3 = teams_sal_cap_2015_2.reset_index(drop=True)
teams_sal_cap_2015 = teams_sal_cap_2015_3.rename(columns={"salary" : "Team Salary"})
teams_sal_cap_2015

# sorting by wins
teams_sal_cap_2015_sorted_wins_1 = teams_sal_cap_2015.sort_values(by='Wins', ascending=False)
teams_sal_cap_2015_sorted_wins = teams_sal_cap_2015_sorted_wins_1.reset_index(drop=True)
teams_sal_cap_2015_sorted_wins

# outputting csv (fixed misspelled variable name: "2105" -> "2015")
team_sal_2015_output_path = "output_csv/Teams_Wins_Salary_2015.csv"
teams_sal_cap_2015_sorted_wins.to_csv(team_sal_2015_output_path,index=False)
| alec-work/salaries_groupby_2015.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
#
# Filtering and resampling data
# =============================
#
# Some artifacts are restricted to certain frequencies and can therefore
# be fixed by filtering. An artifact that typically affects only some
# frequencies is due to the power line.
#
# Power-line noise is a noise created by the electrical network.
# It is composed of sharp peaks at 50Hz (or 60Hz depending on your
# geographical location). Some peaks may also be present at the harmonic
# frequencies, i.e. the integer multiples of
# the power-line frequency, e.g. 100Hz, 150Hz, ... (or 120Hz, 180Hz, ...).
#
# This tutorial covers some basics of how to filter data in MNE-Python.
# For more in-depth information about filter design in general and in
# MNE-Python in particular, check out
# `disc-filtering`.
#
# +
import numpy as np
import mne
from mne.datasets import sample
# Paths into the MNE sample dataset (downloaded automatically on first use)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
tmin, tmax = 0, 20  # use the first 20s of data
# Setup for reading the raw data (save memory by cropping the raw data
# before loading it)
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(tmin, tmax).load_data()
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # bads + 2 more
fmin, fmax = 2, 300  # look at frequencies between 2 and 300Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
# Pick a subset of channels (here for speed reasons)
selection = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
                       stim=False, exclude='bads', selection=selection)
# Baseline PSD before any filtering is applied
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# -
# Removing power-line noise with notch filtering
# ----------------------------------------------
#
# Removing power-line noise can be done with a Notch filter, directly on the
# Raw object, specifying an array of frequency to be cut off:
#
#
# Notch out 60 Hz power-line noise and its harmonics up to 240 Hz
raw.notch_filter(np.arange(60, 241, 60), picks=picks, fir_design='firwin')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# Removing power-line noise with low-pass filtering
# -------------------------------------------------
#
# If you're only interested in low frequencies, below the peaks of power-line
# noise you can simply low pass filter the data.
#
#
# low pass filtering below 50 Hz (None = no high-pass edge)
raw.filter(None, 50., fir_design='firwin')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# High-pass filtering to remove slow drifts
# -----------------------------------------
#
# To remove slow drifts, you can high pass.
#
# <div class="alert alert-danger"><h4>Warning</h4><p>In several applications such as event-related potential (ERP)
# and event-related field (ERF) analysis, high-pass filters with
# cutoff frequencies greater than 0.1 Hz are usually considered
# problematic since they significantly change the shape of the
# resulting averaged waveform (see examples in
# `tut_filtering_hp_problems`). In such applications, apply
# high-pass filters with caution.</p></div>
#
#
# High-pass at 1 Hz to remove slow drifts (None = no low-pass edge)
raw.filter(1., None, fir_design='firwin')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# To do the low-pass and high-pass filtering in one step you can do
# a so-called *band-pass* filter by running the following:
#
#
# band-pass filtering in the range 1 Hz - 50 Hz (high-pass and low-pass in one call)
raw.filter(1, 50., fir_design='firwin')
# Downsampling and decimation
# ---------------------------
#
# When performing experiments where timing is critical, a signal with a high
# sampling rate is desired. However, having a signal with a much higher
# sampling rate than necessary needlessly consumes memory and slows down
# computations operating on the data. To avoid that, you can downsample
# your time series. Since downsampling raw data reduces the timing precision
# of events, it is recommended only for use in procedures that do not require
# optimal precision, e.g. computing EOG or ECG projectors on long recordings.
#
# <div class="alert alert-info"><h4>Note</h4><p>A *downsampling* operation performs a low-pass (to prevent
# aliasing) followed by *decimation*, which selects every
# $N^{th}$ sample from the signal. See
# :func:`scipy.signal.resample` and
# :func:`scipy.signal.resample_poly` for examples.</p></div>
#
# Data resampling can be done with *resample* methods.
#
#
# Downsample; an anti-aliasing low-pass is applied as part of resampling
raw.resample(100, npad="auto")  # set sampling frequency to 100Hz
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=True)
# To avoid this reduction in precision, the suggested pipeline for
# processing final data to be analyzed is:
#
# 1. low-pass the data with :meth:`mne.io.Raw.filter`.
# 2. Extract epochs with :class:`mne.Epochs`.
# 3. Decimate the Epochs object using :meth:`mne.Epochs.decimate` or the
# ``decim`` argument to the :class:`mne.Epochs` object.
#
# We also provide the convenience methods :meth:`mne.Epochs.resample` and
# :meth:`mne.Evoked.resample` to downsample or upsample data, but these are
# less optimal because they will introduce edge artifacts into every epoch,
# whereas filtering the raw data will only introduce edge artifacts only at
# the start and end of the recording.
#
#
| stable/_downloads/d15c1ed8d2bf297d3ebea4bf898564e3/plot_artifacts_correction_filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Analyze ferrozine incubation data
# Copyright <NAME>, Neufeld lab, 2019
# -
import pandas as pd
from plotnine import *
# User variables: input TSV paths and output figure paths
sample_concentrations_filepath = 'data/Fe_incubation_plotting_data_191122.tsv'
sample_metadata_filepath = 'sample_metadata.tsv'
timepoint_metadata_filepath = 'timepoint_metadata.tsv'
output_pdf_filepath_1 = 'pdf/Fe_incubation_plot_191122_vs1a.pdf'
output_pdf_filepath_2 = 'pdf/Fe_incubation_plot_191122_vs1b.pdf'
# Load sample metadata
sample_metadata = pd.read_csv(sample_metadata_filepath, sep='\t')
sample_metadata.head()
# Load timepoint metadata, keeping only the columns needed for the later merge
timepoint_metadata = pd.read_csv(timepoint_metadata_filepath, sep='\t')
timepoint_metadata = timepoint_metadata[['Timepoint_code', 'Incubation_duration_days']]
timepoint_metadata.head()
# +
# Load concentration data
sample_concentrations = pd.read_csv(sample_concentrations_filepath, sep='\t')
# Clean up columns: rename to explicit Fe-fraction/concentration names
sample_concentrations.rename(columns={'Treatment': 'Fe_fraction',
                                      'Ave_concentration': 'Fe_concentration_uM',
                                      'StdDev_concentration': 'Fe_concentration_sd_uM'}, inplace=True)
sample_concentrations = sample_concentrations[['Sample_name', 'Fe_fraction',
                                               'Fe_concentration_uM', 'Fe_concentration_sd_uM']]
sample_concentrations.head()
# +
# Calculate Fe(III)
# First, make the table wide format based on the Fe fraction
sample_concentrations_wide = sample_concentrations.pivot_table(values=['Fe_concentration_uM',
                                                                       'Fe_concentration_sd_uM'],
                                                               index=['Sample_name'],
                                                               columns=['Fe_fraction'])
# Calculate Fe3 average and standard deviation: Fe(III) = total Fe - Fe(II)
sample_concentrations_wide['Fe_concentration_uM', 'Fe3'] = sample_concentrations_wide['Fe_concentration_uM', 'FeTot'] - sample_concentrations_wide['Fe_concentration_uM', 'Fe2']
# Error propagated by summing the standard deviations (conservative upper bound).
# See https://www.researchgate.net/post/How_do_I_add_subtract_one_standard_deviation_from_the_other, accessed 191121
sample_concentrations_wide['Fe_concentration_sd_uM', 'Fe3'] = sample_concentrations_wide['Fe_concentration_sd_uM', 'FeTot'] + sample_concentrations_wide['Fe_concentration_sd_uM', 'Fe2']
sample_concentrations_wide.head()
# Get rid of the multiindex that can cause problems during the melt
sample_concentrations_wide.reset_index(inplace=True)
# +
# Convert back to long format
sample_concentrations = sample_concentrations_wide.melt(id_vars=['Sample_name'],
                                                        var_name=['Data_type', 'Fe_fraction'],
                                                        value_name='uM')
# The above code makes the data too long; pivot the ave and stddev back out as separate columns
sample_concentrations = sample_concentrations.pivot_table(values=['uM'],
                                                          index=['Sample_name', 'Fe_fraction'],
                                                          columns=['Data_type'])
# Get rid of the extraneous column level and bring the Sample_name and Fe_fraction back as columns
sample_concentrations.columns = sample_concentrations.columns.droplevel()
sample_concentrations.reset_index(inplace=True)
sample_concentrations.head()
# +
# Split Sample_name into timepoint and sample codes
# (assumes the format "<timepoint>_<sample>" — TODO confirm against the TSV)
sample_naming = sample_concentrations['Sample_name'].str.split(pat='_', expand=True)
sample_naming.rename(columns={0: 'Timepoint_code', 1: 'Sample_code'}, inplace=True)
sample_naming['Sample_code'] = sample_naming['Sample_code'].astype('int64')
sample_concentrations = pd.concat([sample_naming, sample_concentrations], axis=1)
sample_concentrations.drop(columns=['Sample_name'], inplace=True)
sample_concentrations.head()
# +
# Bind on metadata (left joins keep every concentration row)
sample_concentrations = pd.merge(sample_concentrations, timepoint_metadata, how='left', on='Timepoint_code')
sample_concentrations = pd.merge(sample_concentrations, sample_metadata, how='left', on='Sample_code')
sample_concentrations.head()
# +
# Make specific sample groups for drawing lines in plotnine
sample_concentrations['group'] = sample_concentrations['Sample_ID'] + '_' + sample_concentrations['Fe_fraction']

# Change Fe2 to Fe(II) and Fe3 to Fe(III).
# regex=True is passed explicitly: pandas >= 2.0 defaults str.replace to
# literal matching, which would leave the anchored patterns '^Fe2$'/'^Fe3$'
# unmatched and the labels unchanged.
sample_concentrations['Fe_fraction'] = sample_concentrations['Fe_fraction'].str.replace('^Fe2$', 'Fe(II)', regex=True)
sample_concentrations['Fe_fraction'] = sample_concentrations['Fe_fraction'].str.replace('^Fe3$', 'Fe(III)', regex=True)
# +
# Plot Fe concentration over incubation time, faceted by organism x replicate
Fe_plot = (ggplot(sample_concentrations, aes(x='Incubation_duration_days', y='Fe_concentration_uM'))
           + geom_line(aes(group='group', colour='Treatment'), alpha=0.7)
           + geom_errorbar(aes(ymin='Fe_concentration_uM - Fe_concentration_sd_uM',
                               ymax='Fe_concentration_uM + Fe_concentration_sd_uM'),
                           width=0.1, alpha=0.7)
           + geom_point(aes(fill='Treatment', shape='Fe_fraction'), size=3, alpha=0.7)
           + facet_grid('Organism ~ Rep')
           + scale_colour_brewer(type='qual', palette='Dark2')
           + scale_fill_brewer(type='qual', palette='Dark2')
           + theme_bw()
           + theme(panel_grid=element_blank(),
                   line=element_line(colour='black'),
                   panel_border=element_rect(colour='black'),
                   legend_title=element_text(size=10, face='bold'),
                   legend_key=element_blank(),
                   legend_key_height=5,
                   text=element_text(family='Arial', colour='black'))
           + xlab('Incubation time (days)')
           + ylab('Fe concentration (uM)'))
Fe_plot
# -
Fe_plot.save(filename=output_pdf_filepath_1, width=110, height=150, units='mm')
# +
# Same data, re-faceted by treatment x replicate and coloured by organism
Fe_plot_2 = (ggplot(sample_concentrations, aes(x='Incubation_duration_days', y='Fe_concentration_uM'))
             + geom_line(aes(group='group', colour='Organism'), alpha=0.7)
             + geom_errorbar(aes(ymin='Fe_concentration_uM - Fe_concentration_sd_uM',
                                 ymax='Fe_concentration_uM + Fe_concentration_sd_uM'),
                             width=0.1, alpha=0.7)
             + geom_point(aes(fill='Organism', shape='Fe_fraction'), size=3, alpha=0.7)
             + facet_grid('Treatment ~ Rep')
             + scale_colour_brewer(type='qual', palette='Dark2')
             + scale_fill_brewer(type='qual', palette='Dark2')
             + theme_bw()
             + theme(panel_grid=element_blank(),
                     line=element_line(colour='black'),
                     panel_border=element_rect(colour='black'),
                     legend_title=element_text(size=10, face='bold'),
                     legend_key=element_blank(),
                     legend_key_height=5,
                     text=element_text(family='Arial', colour='black'))
             + xlab('Incubation time (days)')
             + ylab('Fe concentration (uM)'))
Fe_plot_2
# -
Fe_plot_2.save(filename=output_pdf_filepath_2, width=110, height=150, units='mm')
| Figure_04_photoferrotrophy_incubation/plot/.ipynb_checkpoints/incubation_plotter_191122-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="JSjG64ra4aFu"
# from google.colab import drive
# drive.mount('/content/drive')
# + id="3uDWznlVHuI6"
# path = "/content/drive/MyDrive/Research/cods_comad_plots/sdc_task/cifar/"
# + id="V8-7SARDZErK"
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
# NOTE(review): transforms and pyplot are imported twice (here and above)
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Make cuDNN deterministic so runs are reproducible (at some speed cost)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# + id="acRFqJNrZErV" colab={"base_uri": "https://localhost:8080/"} outputId="d5d0f842-46c2-4670-effc-a9dcd81e6bd6"
# Normalise CIFAR-10 images to roughly [-1, 1] per channel
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)

trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Split CIFAR-10 classes into "foreground" (the classification targets) and
# "background" (used only as additive clutter in the mosaics below).
foreground_classes = {'plane', 'car', 'bird'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}

dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(5000): #5000*batch_size = 50000 data points
    # Fix: use next(dataiter) instead of dataiter.next() — the .next() method
    # was removed from DataLoader iterators in PyTorch >= 1.13; next() works
    # on all versions.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if(classes[labels[j]] in background_classes):
            img = images[j].tolist()
            background_data.append(img)
            background_label.append(labels[j])
        else:
            img = images[j].tolist()
            foreground_data.append(img)
            foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
# + id="mfVy58xnLkM7"
# Label indices of the three foreground classes (plane, car, bird)
fg1, fg2, fg3 = 0,1,2
# + id="iyyWx5g58erM"
def imshow(img):
    """Display a (C, H, W) image, undoing the (mean 0.5, std 0.5) normalisation.

    Accepts either a torch tensor or a numpy array.
    """
    img = img / 2 + 0.5  # unnormalize back to [0, 1]
    # Explicit conversion (replaces the dead '#.numpy()' remnant); works for
    # both ndarrays and detached torch tensors.
    npimg = np.asarray(img)
    plt.imshow(np.transpose(npimg, (1, 2, 0)))  # CHW -> HWC for matplotlib
    plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="U5w9S6xb6EFl" outputId="5b6a90f2-c06c-49c8-8fa3-a87d34589ee5"
# Shapes of the foreground/background pools
foreground_data.shape, foreground_label.shape, background_data.shape, background_label.shape
# + id="Sly62nHh6VJy"
# Per-pixel mean/std over the background pool, used to standardise both pools
mean_bg = torch.mean(background_data, dim=0, keepdims= True)
std_bg = torch.std(background_data, dim=0, keepdims= True)
# + colab={"base_uri": "https://localhost:8080/"} id="K89Qj57m6axj" outputId="c98a0182-260d-4637-b419-ee0710d2e8fc"
mean_bg.shape, std_bg.shape
# + id="wGVjRbqZ6lzV"
# Standardise both pools with the background statistics
foreground_data = (foreground_data - mean_bg) / std_bg
background_data = (background_data - mean_bg) / std_bg
# + colab={"base_uri": "https://localhost:8080/"} id="yxZVq5OW6o3L" outputId="ff24868f-1002-4ac3-d9f0-9a6580738611"
foreground_data.shape, foreground_label.shape, background_data.shape, background_label.shape
# + colab={"base_uri": "https://localhost:8080/"} id="pTrGcqI7GsBA" outputId="aabd0bb6-c5db-4b6e-94f7-6bf0a87f3e1b"
# Sanity check: standardisation should not have introduced NaNs
torch.sum(torch.isnan(foreground_data)), torch.sum(torch.isnan(background_data))
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="fIqpbILY-BiG" outputId="2f1092b7-7c14-44a8-da18-79156b50e0f1"
imshow(foreground_data[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="tRvTvlVo98xz" outputId="0859579e-d355-4182-ce6e-b44392426a78"
imshow(background_data[2])
# + [markdown] id="NPOxPMEe0P-B"
# ## generating CIN train and test data
# + id="SRhpBoad0LOc"
m = 50  # images averaged into each mosaic (1 foreground + m-1 backgrounds)
desired_num = 30000  # number of training mosaics to generate
# + colab={"base_uri": "https://localhost:8080/"} id="4Vg1hwlP0-gg" outputId="07f7c736-24b4-4374-8bee-4ee8c26fdcee"
# Example draw: m-1 random background indices and one foreground index
np.random.seed(0)
bg_idx = np.random.randint(0,35000,m-1)
fg_idx = np.random.randint(0,15000)
bg_idx, fg_idx
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="IOxbMpSn126g" outputId="601e1f50-bd52-423b-c9ac-d3c345a573c3"
for i in background_data[bg_idx]:
    imshow(i)
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="dHJanbIF1Fon" outputId="6c2ad8ed-f4c9-4958-bbc6-c0a1610d4f91"
imshow(torch.sum(background_data[bg_idx], axis = 0))
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="ZT0h2FmO2CiH" outputId="49ed34f1-d470-489f-e81e-a8e7ffcec7e6"
imshow(foreground_data[fg_idx])
# + colab={"base_uri": "https://localhost:8080/"} id="6NsL6KKD2OoH" outputId="f010c292-e434-4545-cb6b-b91eb2d7fa8e"
# One mosaic = (sum of m-1 backgrounds + 1 foreground) / m
tr_data = ( torch.sum(background_data[bg_idx], axis = 0) + foreground_data[fg_idx] )/m
tr_data.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="xItMJ5dy2iDy" outputId="534d7ae8-dfe5-4494-a46d-982c25b7553e"
imshow(tr_data)
# + colab={"base_uri": "https://localhost:8080/"} id="w-RwePOU3EjA" outputId="89cacadc-2ca0-4ef8-d09e-3e42e7b5b99a"
foreground_label[fg_idx]
# + id="eqk-YE9A0gtB"
train_images =[]  # mosaic images: each averages 1 foreground + (m-1) backgrounds
train_label=[]  # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
    # Seed per sample so the generated dataset is reproducible
    np.random.seed(i)
    bg_idx = np.random.randint(0,35000,m-1)
    fg_idx = np.random.randint(0,15000)
    tr_data = ( torch.sum(background_data[bg_idx], axis = 0) + foreground_data[fg_idx] ) / m
    label = (foreground_label[fg_idx].item())
    train_images.append(tr_data)
    train_label.append(label)
# + colab={"base_uri": "https://localhost:8080/"} id="23hF67Hb3ioz" outputId="9c9b41d3-1ec9-475e-ad44-29dcd5a10581"
train_images = torch.stack(train_images)
train_images.shape, len(train_label)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="eWIah23w5Tf3" outputId="2fee42ec-b595-4fa5-9de4-0d87333acdf3"
imshow(train_images[0])
# + id="y2LgVhv64TbG"
test_images =[]  # test images: foreground only (no background clutter), scaled by 1/m
test_label=[]  # label = foreground class of the image
for i in range(10000):
    np.random.seed(i)
    fg_idx = np.random.randint(0,15000)
    # NOTE(review): test images contain only the (scaled) foreground, so the
    # test distribution differs from the training mosaics by construction.
    tr_data = ( foreground_data[fg_idx] ) / m
    label = (foreground_label[fg_idx].item())
    test_images.append(tr_data)
    test_label.append(label)
# + colab={"base_uri": "https://localhost:8080/"} id="ZeFlUKSw4knK" outputId="8cee20e4-413c-46c1-9848-3bd60299fa78"
test_images = torch.stack(test_images)
test_images.shape, len(test_label)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="ZQbTT2qVMPTE" outputId="d5f7f1d6-218f-46f2-b726-033b679ebde1"
imshow(test_images[0])
# + colab={"base_uri": "https://localhost:8080/"} id="LaUSQrnBFzud" outputId="1ab907f4-b0d1-478f-a26c-e54d52cb88d4"
# Sanity checks: no NaNs, and only the three foreground labels present
torch.sum(torch.isnan(train_images)), torch.sum(torch.isnan(test_images))
# + colab={"base_uri": "https://localhost:8080/"} id="sQHvecSl7bso" outputId="09f168f9-cf28-4352-b1a2-b522b14da036"
np.unique(train_label), np.unique(test_label)
# + [markdown] id="wu4JV_4F6xYO"
# ## creating dataloader
# + id="AJuGak6_zXgx"
class CIN_Dataset(Dataset):
    """In-memory dataset pairing pre-built mosaic images with their labels.

    The ``image`` and ``label`` attributes are read directly elsewhere in
    this file (e.g. ``train_loader.dataset.image.shape``), so those names
    are part of the public interface and must not be renamed.
    """

    def __init__(self, list_of_images, labels):
        """Keep references to the images and labels as given (no copy)."""
        self.image = list_of_images
        self.label = labels

    def __len__(self):
        """Number of (image, label) samples."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the (image, label) pair at position ``idx``."""
        return self.image[idx], self.label[idx]
# + id="ckIH5YcU6lo2"
# Wrap the tensors in DataLoaders: shuffled mini-batches for training,
# fixed order for evaluation.
batch = 250
train_data = CIN_Dataset(train_images, train_label)
train_loader = DataLoader( train_data, batch_size= batch , shuffle=True)
test_data = CIN_Dataset( test_images , test_label)
test_loader = DataLoader( test_data, batch_size= batch , shuffle=False)
# + colab={"base_uri": "https://localhost:8080/"} id="_-mYFWJT66yf" outputId="6781c6b0-111c-4e34-d522-ae1b6a215d51"
train_loader.dataset.image.shape, test_loader.dataset.image.shape
# + [markdown] id="<KEY>"
# ## model
# + id="KoP6hoBqNJxX"
class Classification(nn.Module):
    """LeNet-style CNN: two conv+maxpool stages followed by four linear
    layers ending in 3 output classes (logits, no softmax).

    Expects input of shape (N, 3, 32, 32); after two 5x5 convs and two 2x2
    pools the feature map is 16 x 5 x 5. Weights are Xavier-normal
    initialised, biases zeroed.
    """

    def __init__(self):
        super(Classification, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 3)
        # Initialise in declaration order so RNG consumption matches the
        # original per-layer weight-then-bias sequence exactly.
        for layer in (self.conv1, self.conv2,
                      self.fc1, self.fc2, self.fc3, self.fc4):
            torch.nn.init.xavier_normal_(layer.weight)
            torch.nn.init.zeros_(layer.bias)

    def forward(self, z):
        out = self.pool(F.relu(self.conv1(z)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)      # flatten to (N, 400)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        return self.fc4(out)                # raw logits
# + id="uPYplUGazU9I"
torch.manual_seed(12)           # fixed seed -> reproducible Xavier initialisation
classify = Classification().double()
classify = classify.to("cuda")  # assumes a CUDA device is present -- TODO confirm
# + [markdown] id="aSYvs7FAA76j"
# ## training
# + id="n5g3geNJ5zEu"
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer_classify = optim.Adam(classify.parameters(), lr=0.001 ) #, momentum=0.9)
# + id="4ZZ-kb-aaPv7" colab={"base_uri": "https://localhost:8080/"} outputId="8cc675a5-89a4-4b3c-a7b4-fb7e5e0c3a88"
# Baseline (untrained) accuracy on the training set -- expect roughly chance.
correct = 0
total = 0
count = 0   # NOTE(review): count and flag are never used in this cell
flag = 1
with torch.no_grad():
    for data in train_loader:
        inputs, labels = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        outputs = classify(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % ( desired_num , 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# + id="TNd3Qz_RaPmK" colab={"base_uri": "https://localhost:8080/"} outputId="f37bc5c6-20d0-4cdc-8027-a7f846b31a0d"
# Baseline (untrained) accuracy on the 10000-image TEST set.
correct = 0
total = 0
count = 0   # NOTE(review): count and flag are never used in this cell
flag = 1
with torch.no_grad():
    for data in test_loader:
        inputs, labels = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        outputs = classify(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
# Bug fix: this cell iterates test_loader, but the messages previously said
# "train images" / "train set" -- misleading when reading the output.
print('Accuracy of the network on the %d test images: %d %%' % ( 10000 , 100 * correct / total))
print("total correct", correct)
print("total test set images", total)
# + id="tFfAJZkcZEsY" colab={"base_uri": "https://localhost:8080/"} outputId="d8f23578-5aca-4d93-907a-1e8082266312"
# Train for up to 200 epochs; stop early once the mean epoch loss drops to 0.001.
nos_epochs = 200
tr_loss = []   # mean loss per epoch, plotted below
for epoch in range(nos_epochs):  # loop over the dataset multiple times
    epoch_loss = []
    cnt=0
    iteration = desired_num // batch   # NOTE(review): computed but never used
    running_loss = 0
    #training data set
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        inputs = inputs.double()   # NOTE(review): redundant second .double()
        # zero the parameter gradients
        optimizer_classify.zero_grad()
        outputs = classify(inputs)
        _, predicted = torch.max(outputs.data, 1)
        # print(outputs)
        # print(outputs.shape,labels.shape , torch.argmax(outputs, dim=1))
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer_classify.step()
        running_loss += loss.item()
        mini = 20
        if cnt % mini == mini-1:   # record the averaged running loss every 20 mini-batches
            # print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
            epoch_loss.append(running_loss/mini)
            running_loss = 0.0
        cnt=cnt+1
    tr_loss.append(np.mean(epoch_loss))
    if(np.mean(epoch_loss) <= 0.001):   # early-stopping criterion
        break;
    else:
        print('[Epoch : %d] loss: %.3f' %(epoch + 1, np.mean(epoch_loss) ))
print('Finished Training')
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="6xlUH59C_Ee_" outputId="4b0dad9d-bd7f-4bc9-9ea5-d2a44ca07301"
plt.plot(tr_loss)   # training-loss curve over epochs
# + colab={"base_uri": "https://localhost:8080/"} id="INq6r_ld9vX1" outputId="8b0426e6-eb3e-4b39-c4e7-e08dbed63f53"
# Final accuracy on the training set after training.
correct = 0
total = 0
count = 0   # NOTE(review): count and flag are never used in this cell
flag = 1
with torch.no_grad():
    for data in train_loader:
        inputs, labels = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        outputs = classify(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %f %%' % ( desired_num , 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
# + colab={"base_uri": "https://localhost:8080/"} id="-HNFmO9Z9vX7" outputId="58040af6-f9a2-4d38-af9c-7cedb41f8268"
# Final accuracy on the 10000-image TEST set after training.
correct = 0
total = 0
count = 0   # NOTE(review): count and flag are never used in this cell
flag = 1
with torch.no_grad():
    for data in test_loader:
        inputs, labels = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        outputs = classify(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
# Bug fix: this cell iterates test_loader, but the messages previously said
# "train images" / "train set".
print('Accuracy of the network on the %d test images: %f %%' % ( 10000 , 100 * correct / total))
print("total correct", correct)
print("total test set images", total)
# + id="dSsZLhV39xS9"
| AAAI/Learnability/CIN/Linear/CIFAR/CIFAR_CIN_30k_cnn2_m_50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
#some basic imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
from IPython.display import display
#TODO: add your imports here
from elasticsearch import Elasticsearch
from matplotlib.lines import Line2D
#seting up some styles
import seaborn as sns
sns.set_context("paper")
sns.set_style("whitegrid")
#avoid Font-problem in plots for ACM/IEEE papers (42 = embed TrueType fonts)
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
experiment_name = "DITAS-Performance-Benchmark"
#getting data
from load import load_elastic,load_experiment,load_vmstat,load_rmstats,_load_vmstat
use_cache=False
load_from_disk = False
# NOTE(review): credentials are hard-coded in plain text; move them to an
# environment variable or config file before sharing this notebook.
es = Elasticsearch(host="172.16.17.32",
    http_auth=('admin', 'ditasmetrics'),
    scheme="http",
    port=50035)
# -
to_download = [pd.Timestamp("2019-12-18")]
mon__cache_file="rmstats/rmstats_{}.csv".format(datetime.now().strftime("%d-%m-%Y"))
esData = load_elastic(False,True,mon__cache_file,es,to_download)
# +
data_cache_file="{}_{}.data.csv".format(experiment_name,datetime.now().strftime("%d-%m-%Y"))
data = load_experiment(use_cache,True,data_cache_file)
experiment_dates =list(map(lambda x:pd.Timestamp(x),data['runDate'].unique()))
esData = load_rmstats()   # NOTE(review): overwrites the load_elastic() result above -- confirm this is intended
vstat__cache_file="{}_{}.vmstat.csv".format(experiment_name,datetime.now().strftime("%d-%m-%Y"))
vsData = load_vmstat(use_cache,True,vstat__cache_file)
# normalise timestamps: parse ISO strings, round up to whole seconds, drop tz
esData["timestamp"] = pd.to_datetime(esData["@timestamp"],format='%Y-%m-%dT%H:%M:%S.%f',utc=True)
esData["timestamp"] = esData["timestamp"].dt.ceil(freq='s')
esData["timestamp"] = esData["timestamp"].dt.tz_convert(None)
data["timestamp"] = data["startTime"]+data["offset"].astype(int).apply(lambda x:pd.Timedelta(seconds=x))
vsData["timestamp"] = vsData["timestamp"] -pd.Timedelta(seconds=3600)  # shift vmstat clock by 1 h (presumably a timezone offset -- TODO confirm)
# +
def load(subjects, offset):
    """Slice the global ``data``/``vsData``/``esData`` frames down to the
    experiments named in ``subjects``.

    Returns ``(exp, expVMStats, expRMStats)``, each with an elapsed-time
    column ``ETime`` in seconds from the start of the experiment window.
    The RM stats are additionally shifted by ``offset`` seconds to correct
    for clock skew. Client requests that hit the 120 s timeout are dropped.
    """
    picked = data[data["experiment"].isin(subjects)]
    # experiment window is taken BEFORE the timeout filter below
    window_start = picked["timestamp"].min()
    window_end = picked["timestamp"].max()
    vm = vsData[vsData["timestamp"].between(window_start, window_end)]
    rm = esData[esData["timestamp"].between(window_start, window_end)]
    # remove timeout errors (client timeout is 120 s)
    picked = picked[picked["response-time"] < 120]
    # work on copies and normalise the time axis of each view
    picked = picked.copy()
    vm = vm.copy()
    rm = rm.copy()
    picked["ETime"] = (picked["timestamp"] - window_start).apply(
        lambda t: t.total_seconds())
    vm["ETime"] = (vm["timestamp"] - window_start).apply(
        lambda t: t.total_seconds())
    rm["ETime"] = (rm["timestamp"] - window_start
                   + pd.Timedelta(seconds=offset)).apply(
        lambda t: t.total_seconds())
    return picked, vm, rm
def plot(subjects,name,offset,statistics,show_vrrl=True):
    """Render the two-panel benchmark figure for one experiment group and
    append a summary row to ``statistics``.

    Left panel (client view): mean and min-max band of the client
    request-response latency (CRRL), optionally the VDC-side latency (VRRL),
    and throughput, aggregated into ``slice_size``-second slices.
    Right panel (VDC view): CPU and memory usage from vmstat.
    The figure is written to images/<name>.pdf and images/<name>.png.

    Args:
        subjects: experiment labels, forwarded to load().
        name: figure title and output-file stem.
        offset: RM-stats clock offset in seconds, forwarded to load().
        statistics: running list of summary rows, or None to start a new one.
        show_vrrl: whether to draw the VDC-side latency curve.

    Returns:
        The updated ``statistics`` list.
    """
    exp,expVMStats,expRMStats = load(subjects,offset)
    # aggregate client measurements into slice_size-second buckets
    exp["sliceID"] = np.round(exp["ETime"]/slice_size)
    client_view = exp.groupby("sliceID")["response-time","size"].agg(["min","max","mean","sum"])
    if len(expRMStats) > 0:
        expRMStats["sliceID"] = np.round(expRMStats["ETime"]/slice_size)
        # request.requestTime is in nanoseconds -> convert to seconds
        expRMStats["requestTime_sec"] = np.round(expRMStats["request.requestTime"]/1000000000,2)
        vdc_view = expRMStats.groupby("sliceID")["requestTime_sec","response.length"].agg(["min","max","mean","sum"])
    else:
        # empty frame with the same MultiIndex columns so the len() checks below still work
        vdc_view = pd.DataFrame([],columns=pd.MultiIndex(levels=[['requestTime_sec', 'response.length'], ['min', 'max', 'mean', 'sum']],
            codes=[[0, 0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 3, 0, 1, 2, 3]]))
    expVMStats["sliceID"] = np.round(expVMStats["ETime"]/slice_size)
    mashine_view = expVMStats.groupby("sliceID")[["cpu_user","free"]].mean()
    fig = plt.figure(figsize=(15,6))
    gs = fig.add_gridspec(ncols=3, nrows=1)
    plt.suptitle("{} Results".format(name), fontsize=16,y=0.99)
    # --- left panel: client view (latency + throughput on twin axes) ---
    ax = fig.add_subplot(gs[0, 0:2])
    ax2 = ax.twinx()
    ax.plot(client_view["response-time"]["mean"],label="CRRL",color=colors["CRRL"],zorder=2)
    ax.fill_between(client_view.index,client_view["response-time"]["min"],client_view["response-time"]["max"],color=colors["CRRL"],alpha=0.4,zorder=1)
    if len(vdc_view) > 0 and show_vrrl:
        ax.plot(vdc_view["requestTime_sec"]["mean"],label="VRRL",color=colors["VRRL"],zorder=2)
        ax.fill_between(vdc_view.index,vdc_view["requestTime_sec"]["min"],vdc_view["requestTime_sec"]["max"],color=colors["VRRL"],alpha=0.4,zorder=1)
    ax.set_title("Client-View")
    if "size" in client_view:
        # bytes per slice divided by slice length -> bytes/s
        ax2.plot(client_view["size"]["mean"]/slice_size,label="Throughput",color=colors["Thr"],zorder=2)
    ax.set_ylabel("Request-Response-Time [s]")
    ax2.set_ylabel("Throughput [bytes/s]")
    ax.set_xlabel("Time [s]")
    ax.set_xticklabels([""])
    # manual legend so curves from both y-axes appear together
    legendNames = ["CRRL","VRRL","Throughput"]
    legendShapes = [
        Line2D([0], [0], color=colors["CRRL"], lw=2),
        Line2D([0], [0], color=colors["VRRL"], lw=2),
        Line2D([0], [0], color=colors["Thr"], lw=2)
    ]
    ax.legend(legendShapes, legendNames, fancybox=True, shadow=True)
    # --- right panel: VDC resource usage ---
    ax = fig.add_subplot(gs[0, 2])
    ax2 = ax.twinx()
    ax2.fill_between(mashine_view.index,mashine_view["free"],np.zeros(len(mashine_view["free"])),alpha=0.5,zorder=10,color=colors["MEM"])
    sns.lineplot(mashine_view.index,mashine_view["free"],zorder=10,color=colors["MEM"],ax=ax2)
    sns.lineplot(x=mashine_view.index,y=mashine_view["cpu_user"],ax=ax,color=colors["CPU"],zorder=1,linewidth=2)
    ax.set_ylabel("CPU Usage [%]")
    ax2.set_ylabel("Memory Usage [MB]")
    ax.set_title("VDC-View")
    ax.set_xlabel("Time [s]")
    ax.set_xticklabels([""])
    plt.tight_layout()
    fig.savefig("images/{}.pdf".format(name), bbox_inches="tight")
    fig.savefig("images/{}.png".format(name), bbox_inches="tight")
    # summary row: transport latency = mean CRRL - mean VRRL
    if len(vdc_view) > 0:
        transport_latency = exp["response-time"].mean()-expRMStats["requestTime_sec"].mean()
        rmMean = expRMStats["requestTime_sec"].mean()
        p90 = expRMStats["requestTime_sec"].quantile(0.9)
    else:
        transport_latency = float("nan")
        rmMean = float("nan")
        p90 = float("nan")
    df = [name,np.round(transport_latency,2),exp["response-time"].mean(),exp["response-time"].quantile(0.9),rmMean,p90]
    if (statistics is None):
        statistics = [df]
    else:
        statistics.append(df)
    return statistics
# +
# consistent colours per curve type across all figures
colors = {
    "CRRL":"#448ee4",   # client request-response latency
    "VRRL":"#25a36f",   # VDC request-response latency
    "MEM":"#89a203",
    "CPU":"#000000",
    "Thr":"#555555"
}
# figure name -> (raw experiment labels to merge, RM-stats clock offset [s])
experiments = {
    "B1":(["B1"],325),
    "B2":(["B3A","B3B"],325),
    "B3":(["B4A","B4B"],325),
    "B5":(["B7"],325),
    "B6":(["B8"],325),
    "B7":(["B6"],325),
    "B8":(["B10"],325),
    "B9":(["B11"],325),
}
slice_size = 60   # aggregation window in seconds
# -
# Render figures for B1-B3 and collect their summary rows into one table.
statistics = None
for s in ["B1","B2","B3"]:
    statistics = plot(experiments[s][0],s,experiments[s][1],statistics)
statistics = pd.DataFrame(statistics,columns=["name","Mean TL","Mean CRRL","p90 CRRL","Mean VRRL","p90 VRRL"])
statistics
# +
import math   # NOTE(review): imported but not used in this cell
# Compare the time to complete the first 100 successful (HTTP 200) requests
# between the "with" and "without" experiment groups.
four_two = {"with":["B2","B7"],"without":["B5","B6"]}
statistics = []
for l in four_two:
    for e in four_two[l]:
        exp,expVMStats,expRMStats = load(*experiments[e])
        a = exp[["status-code","timestamp","ETime","response-time"]]
        r_100 = a["status-code"].count()   # NOTE(review): counts ALL requests, before the 200-only filter below -- confirm intended
        a = a[a["status-code"] == 200][:100]   # first 100 successful requests
        start = a["ETime"].min()
        end = a["ETime"].max()
        total_response_time = end-start
        if len(a) == 0:   # no successful requests at all
            total_response_time = 0
        statistics.append([l,e,total_response_time,r_100,a["response-time"].mean()])
statistics = pd.DataFrame(statistics,columns=["type","name","TRT","R100","mean R"])
statistics.groupby("type")["TRT","R100","mean R"].mean()
# -
# B9: run WITHOUT monitoring. vmstat is re-read from the raw CSV and trimmed
# to the experiment window with a -3 min / +2 min margin.
exp,expVMStats,_ = load(*experiments["B9"])
expVMStats = _load_vmstat(pd.read_csv("vmstats/load_2019_12_17.csv"))
B9 = expVMStats[expVMStats.timestamp.between(exp.timestamp.min()-pd.Timedelta(minutes=3),exp.timestamp.max()+pd.Timedelta(minutes=2))]
B9 = B9.copy()
B9['EStart'] = B9['timestamp']-B9['timestamp'].min()
B9['EStart'] = B9['EStart'].apply(lambda x:x.total_seconds())
#B9[["cpu_user","free"]].agg(["mean","max"])
# +
# B8: run WITH monitoring, same windowing as B9.
exp,expVMStats,_ = load(*experiments["B8"])
expVMStats = _load_vmstat(pd.read_csv("vmstats/load_2019_12_17_withMon.csv"))
B8 = expVMStats[expVMStats.timestamp.between(exp.timestamp.min()-pd.Timedelta(minutes=3),exp.timestamp.max()+pd.Timedelta(minutes=2))]
B8 = B8.copy()
B8["cpu_user"] *= 0.25   # NOTE(review): scaling factor presumably normalises per-core CPU -- confirm
B8['EStart'] = B8['timestamp']-B8['timestamp'].min()
B8['EStart'] = B8['EStart'].apply(lambda x:x.total_seconds())
# -
# Bucket both runs into 10-second slices and keep only the first 9 slices.
B8["slice_id"] = np.round(B8["EStart"]/10)
B8 = B8[B8["slice_id"] < 9]
# Bug fix: the original computed B9's slice_id from B8's EStart column
# (np.round(B8["EStart"]/10)), silently misaligning the two frames' indices;
# B9 must be bucketed by its own EStart values.
B9["slice_id"] = np.round(B9["EStart"]/10)
B9 = B9[B9["slice_id"] < 9]
# +
# Compare mean CPU usage per 10 s slice: with vs. without monitoring.
fig, ax = plt.subplots(figsize=(12,8))
X = B8.groupby("slice_id")["cpu_user"].mean()
sns.lineplot(data=X,ax=ax,label="with Monitoring")
Y = B9.groupby("slice_id")["cpu_user"].mean()
sns.lineplot(data=Y,ax=ax,label="without Monitoring")
#sns.lineplot(x="cpu_user",y=range(0,len(B9)),data=B9,ax=ax,label="without Monitoring")
ax.hlines(10,xmin=B9["slice_id"].min(),xmax=B9["slice_id"].max(),label="10% cutoff",color="r")
ax.set_xlabel("Time")
ax.set_xticks([])
ax.set_ylabel("CPU Usage [%]")
# overall mean CPU / free memory for the textual summary
X = B8[["cpu_user","free"]].mean()
Y = B9[["cpu_user","free"]].mean()
display(X)
display(Y)
| Analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:halomod]
# language: python
# name: conda-env-halomod-py
# ---
# # Going beyond galaxies as tracers with `halomod`
# `halomod` is written in a way that is most native to applications of halo models of galaxies. Therefore, modifications and extensions in the context of galaxy clustering (as well as HI assuming HI is trivially related to galaxies) are very straightforward. However, it may not be as straightforward when dealing with other tracers. In this tutorial, we use the flux density power spectrum of [arxiv:0906.3020](https://arxiv.org/abs/0906.3020) to demonstrate how to fully utilise the flexibility of `halomod`.
# The flux density power spectrum can be modelled as (see Sec 2.3 of [arxiv:0906.3020](https://arxiv.org/abs/0906.3020)):
# $$
# P_{1h}(k) = |u_J(k)|^2 \int_{M_{\rm min}}^{\infty} {\rm d}m\, n(m) \bigg(\frac{m}{\bar{\rho}_{\rm gal}}\bigg)^2
# $$
#
# $$
# P_{2h}(k)=|u_J(k)|^2\bigg[\int_{M_{\rm min}}^{\infty}{\rm d}m\,n(m)b(m)\Big(\frac{m}{\bar{\rho}_{\rm gal}}\Big)\bigg]^2 P_{\rm lin}(k)
# $$
#
# where $u_J(k)={\rm arctan}(k\lambda_{\rm mfp})/(k\lambda_{\rm mfp})$
# ## HOD
# Once we have the expression of the power spectrum we want, we should try to identify the halo model components. Comparing it to the standard halo model formalism, it's easy to see that it effectively means:
# $$
# \langle M_{\rm cen}\rangle \equiv 0
# $$
#
# $$
# \langle M_{\rm sat}\rangle \equiv A_{\rm sat}
# $$
#
# where $A_{\rm sat}$ is a constant so that the total satellite occupation is equal to the mean mass density of galaxies:
#
# $$
# \int_{M_{\rm min}} {\rm d}m\, n(m)A = \bar{\rho}_{\rm gal}
# $$
#
# This HOD has already been defined within `halomod` by the `Constant` HOD class:
from halomod import TracerHaloModel
import numpy as np
from matplotlib import pyplot as plt
# "Constant" HOD: zero central occupation and a constant satellite occupation,
# matching the effective HOD derived in the markdown above.
hm = TracerHaloModel(hod_model="Constant", transfer_model='EH')
hm.central_occupation   # identically zero for this HOD
plt.plot(np.log10(hm.m),hm.satellite_occupation)   # flat in halo mass
# ## Density Profile
#
# The density profile from [arxiv:0906.3020](https://arxiv.org/abs/0906.3020) is already included as `PowerLawWithExpCut`:
#
# $$
# \rho(r) = \rho_s \big(r/r_s \big)^{-b}{\rm exp}\big[-a r/r_s\big]
# $$
#
# and in this specific case we have $b=2$.
#
# However, the native way of defining density profile in `halomod` is to relate it to the characteristic scale $r_s$, which is related to the concentration parameter. Therefore, for each halo of different mass the shape of the density profile is different. But in this case we want to keep the shape of the profile the same for all halos. Although `halomod` does not provide a readily available solution, note:
#
# $$
# m \sim r_s^3c^3(m,z)
# $$
#
# $$
# r_s \sim m^{1/3}c^{-1}(m,z)
# $$
#
# Therefore, we only need to define a special concentration-mass relation to keep $r_s$ constant. Suppose we construct a C-M relation:
from halomod.concentration import CMRelation
from hmf.halos.mass_definitions import SOMean
class CMFlux(CMRelation):
    """Concentration-mass relation c(m) = c_0 * (m / 1e11)**(1/3).

    Since r_s ~ m**(1/3) / c(m,z), this choice makes the scale radius r_s
    identical for every halo mass, so the profile shape is mass-independent
    (as required by the flux-density power spectrum model above).
    """

    _defaults = {'c_0': 4}
    native_mdefs = (SOMean(),)

    def cm(self, m, z):
        c0 = self.params['c_0']
        return c0 * (m * 1e-11) ** (1 / 3)
# Rebuild the model with the constant-r_s C-M relation and the power-law
# profile rho ~ (r/r_s)^-b * exp(-a r/r_s) with b=2 (per arXiv:0906.3020).
hm = TracerHaloModel(
    halo_concentration_model = CMFlux,
    halo_profile_model = "PowerLawWithExpCut",
    halo_profile_params = {"b":2.0,"a":1.0},
    hod_model = "Constant",
    transfer_model='EH',
)
# With CMFlux, r_s is mass-independent, so the three u(k) curves coincide.
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e12), label='$m = 10^{12}$')
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e13), label='$m = 10^{13}$')
plt.plot(np.log10(hm.k_hm),hm.tracer_profile.u(hm.k_hm,m=1e14), label='$m = 10^{14}$')
plt.legend();
# One can see that the density profile is now indeed independent of halo mass.
# ## Tuning parameters
# So far the parameters are randomly set without clear physical meanings. We can easily tune these parameters to desired physical values.
#
# Suppose we want the mass density of galaxies to be $10^{-2}$ of the total critical density:
# critical density today in (Msun/h) / (Mpc/h)^3 units
rhoc = hm.cosmo.critical_density0.to("Msun/Mpc^3").value*hm.cosmo.h**2
hm.mean_tracer_den/rhoc
# That means the parameter `logA` for the HOD should be changed to:
-np.log10(hm.mean_tracer_den/rhoc)
# We can simply set this on the existing model (everything that's dependent on it will be auto-updated):
hm.hod_params = {"logA":-np.log10(hm.mean_tracer_den/rhoc)}
hm.mean_tracer_den/rhoc   # should now equal the desired ratio
# The density profile should satisfy $r_s/a = \lambda_{\rm mfp}$. $r_s$ can be obtained as:
rs = hm.halo_profile.scale_radius(1e11)
print(rs)
# Just to make sure, we calculate $r_s$ for a different halo mass:
# (with CMFlux it should be identical -- r_s is mass-independent by construction)
hm.halo_profile.scale_radius(1e12)
# in the units of Mpc/h. Assume we want $\lambda_{\rm mfp} = 10$Mpc/h:
hm.halo_profile_params = {"a":rs/10}
# Check the density profile to see the cut-off:
plt.plot(hm.k_hm,hm.tracer_profile.u(hm.k_hm,m=1e12))
plt.xlabel("Scale [h/Mpc]")
plt.ylabel("Normalized Fourier Density")
plt.xscale('log')
# You can see it's indeed around 0.1 Mpc$^{-1}$h
# Finally we can see the power spectrum:
plt.plot(hm.k_hm,hm.power_1h_auto_tracer, ls='--', color='C0', label='1halo')
plt.plot(hm.k_hm,hm.power_2h_auto_tracer, ls=':', color='C0', label='2halo')
plt.plot(hm.k_hm,hm.power_auto_tracer, color='C0', label='full')
plt.legend()
plt.xscale('log')
plt.yscale('log')
plt.ylim(1e-5,)
plt.xlabel("Fourier Scale, $k$")
plt.ylabel("Auto Power Spectrum")
| docs/examples/beyond_galaxy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from tika import parser
import camelot
import os
import pickle
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Directory containing the source PDFs to be processed with camelot.
folder = 'F:/Environmental Baseline Data/Version 4 - Final/PDF'
# Bug fix: the original called os.listdir(path) with an undefined name
# `path` (NameError at runtime); the directory is bound to `folder` above.
files_in_folder = os.listdir(folder)
files_in_folder_path = [folder + '/' + str(x) for x in files_in_folder]
# Bug fix: the message said "CSVs" but the folder holds PDFs.
print('number of PDFs in folder - ' + str(len(files_in_folder_path)))
files_in_folder_path
# ## Dump to pickle
# Extract tables from one PDF (slice [1:2] = second file only) and pickle
# the camelot TableList so the expensive extraction can be reused later.
for x in files_in_folder_path[1:2]:
    DataID = x.replace('F:/Environmental Baseline Data/Version 4 - Final/PDF/', '').replace('.pdf', '')
    tables = camelot.read_pdf(x, pages = 'all', flag_size=True, copy_text=['v'], line_scale=40)
    open_string = 'F:/Environmental Baseline Data/Version 4 - Final/Camelot - Pickles/camelot_' + DataID + '.pkl'
    with open(open_string, 'wb') as f:
        pickle.dump(tables, f)
tables
len(tables)
# ## Load from pickle
# NOTE(review): pickle.load executes arbitrary code from the file -- only
# unpickle files produced by this notebook, never untrusted data.
with open('F:/Environmental Baseline Data/Version 4 - Final/Camelot - Pickles/camelot_1059614.pkl', 'rb') as f:
    tables2 = pickle.load(f)
tables2
| Camelot to pickle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %load_ext rpy2.ipython
# ## Growth rate in the following two blocks was calculated using the formula in the manuscript.
# + language="R"
# library(ggplot2)
# theme=theme_classic()+
# theme(plot.title = element_text(size = 28,hjust=0.5,face="bold"),
# axis.title.y = element_text(size = 24),
# axis.title.x = element_text(size = 24),
# axis.text.x = element_text(size = 20),
# axis.text.y = element_text(size = 20))
# + language="R"
# data1<-read.delim("S4B_data1.tab", header=TRUE)
# growRate=rep(-1,27)
# doublingTime=rep(-1,27)
# for (i in seq(1,27)){
# tmp=data1[,2*i][!is.na(data1[,2*i])]
# len=length(tmp)
#
# init_time=data1[1,2*i-1]
# term_time=data1[len,2*i-1]
#
# init_OD=data1[1,2*i]
# term_OD=data1[len,2*i]
#
# growRate[i]=log(term_OD/init_OD)/(term_time-init_time)
# doublingTime[i]=log(2)/growRate[i]
# }
#
# tmp1=data.frame(x=seq(0,312,12),y=growRate)
# p1=ggplot(tmp1,aes(x,y))
# q1 = p1 + geom_point(size=c(5,rep(3,26)),pch=c(17,rep(16,26)),col=c("red",rep("black",26))) +
# labs(x="Time (hour)", y="Growth rate", title="Turbidostat replicate R7 fitness") +
# geom_text(label=c("0 h",rep("",25),"312 h"),vjust = 1.5,size=6) +
# theme +
# theme(plot.title = element_text(hjust=0.8)) +
# theme(axis.title.x = element_text(hjust=0.6)) +
# scale_x_continuous(limits=c(0,324),breaks = seq(0,324,36)) +
# scale_y_continuous(limits = c(0,0.016),breaks = seq(0,0.016,0.004))
# + language="R"
# data2<-read.delim("S4B_data2.tab", header=TRUE)
# growRate2=rep(-1,12)
# doublingTime2=rep(-1,12)
# for (i in seq(1,12)){
# tmp=data2[,2*i][!is.na(data2[,2*i])]
# len=length(tmp)
#
# init_time=data2[1,2*i-1]
# term_time=data2[len,2*i-1]
#
# init_OD=data2[1,2*i]
# term_OD=data2[len,2*i]
#
# growRate2[i]=log(term_OD/init_OD)/(term_time-init_time)
# doublingTime2[i]=log(2)/growRate2[i]
# }
#
# tmp2=data.frame(x=rep(1:2,each=3),y=growRate2[7:12])
# p2=ggplot(tmp2,aes(x,y))
# q2=p2 + geom_point(size=5,pch=17,col="red") +
# labs(x="", y="", title="") +
# annotate("text", label = c("0 h","318 h"), x=c(1,2), y=c(0.006,0.0097), size=6,vjust=9) +
# theme +
# scale_x_discrete(limits=c("0","318")) +
# scale_y_continuous(limits = c(0,0.016),breaks = seq(0,0.016,0.004))
# + language="R"
# library(grid)
# pdf("Figure_S4B.pdf",height=8.5, width=11)
# vp1<-viewport(width=0.7,height=1,x=0.4,y=0.5)
# vp2<-viewport(width=0.25,height=1,x=0.85,y=0.5)
# print(q2,vp=vp2)
# print(q1,vp=vp1)
# dev.off()
# -
| Figure_S4/Figure_S4B.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get Molecular Libraries
#
# - `obabel` is assumed to be installed: https://anaconda.org/openbabel/openbabel
# - Molecules will be exported into `sdf` and `mol2` formats for loading with `rdkit` and docking with [SMINA](https://sourceforge.net/projects/smina/), respectively.
import wget
import gzip
import shutil
import tarfile
import pandas as pd
from glob import glob
import subprocess as sp
from pathlib import Path
LIB_DIR = './datasets/'
# <h4 style='color: black; background-color: #F9E5AB; padding: 5px;'>
# Important!
# </h4>
#
# - The `prot_name` is used to download the specific protein target's dataset.
# - To proceed with a new protein different to CDK2, FXa, EGFR, and HSP90, please double check the name of the protein as it appears in the respective dataset.
prot_name = 'fxa'
# ## DEKOIS 2.0
#
# Downloaded from: http://www.pharmchem.uni-tuebingen.de/dekois/
# +
# Datasets url
DEKOIS_URL = 'http://www.pharmchem.uni-tuebingen.de/dekois/data/'
dekois_actives_url = f'{DEKOIS_URL}/DEKOIS2_actives/{prot_name.upper()}.sdf.gz'
dekois_decoys_url = f'{DEKOIS_URL}/DEKOIS2_decoys/{prot_name.upper()}_Celling-v1.12_decoyset.sdf.gz'
# Output directory
DEKOIS_DIR = f'{LIB_DIR}/DEKOIS2/'
Path(DEKOIS_DIR).mkdir(parents = True, exist_ok = True)
# Download and extract the actives and decoys archives
for lib_set in [dekois_actives_url, dekois_decoys_url]:
    # Download it (skipped when the .gz is already on disk)
    set_path = Path(DEKOIS_DIR, lib_set.split('/')[-1])
    if not set_path.exists():
        wget.download(lib_set, out = DEKOIS_DIR)
    # Extract the files (gunzip to the same name without .gz)
    with gzip.open(str(set_path)) as f_in, \
        open(str(set_path).split('.gz')[0], 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
# Create the output directories for the per-molecule split done below
Path(f'{DEKOIS_DIR}/mol2').mkdir(exist_ok = True)
Path(f'{DEKOIS_DIR}/sdf').mkdir(exist_ok = True)
# + magic_args="-s $DEKOIS_DIR " language="bash"
# # Split the molecules into individual files
#
# # MOL2 Files
# # Actives
# obabel $1/FXA.sdf -O $1/mol2/ligand_.mol2 -m
# # Decoys
# obabel $1/FXA_Celling-v1.12_decoyset.sdf -O $1/mol2/decoy_.mol2 -m
#
# # SDF Files
# # Actives
# obabel $1/FXA.sdf -O $1/sdf/ligand_.sdf -m
# # Decoys
# obabel $1/FXA_Celling-v1.12_decoyset.sdf -O $1/sdf/decoy_.sdf -m
# -
# ## DUD-2006
#
# Downloaded from: http://dud.docking.org/inhibox.html
# +
# Datasets url
DUD_URL = 'http://dud.docking.org/inhibox/allDUDfiles_Gasteiger.tar.gz'
# Output directory
DUD_DIR = f'{LIB_DIR}/DUD/'
Path(DUD_DIR).mkdir(exist_ok = True)
# Download the archive (skipped when it is already on disk)
set_path = Path(DUD_DIR, DUD_URL.split('/')[-1])
if not set_path.exists():
    wget.download(DUD_URL, out = DUD_DIR)
# Extract only the two .mol2 files belonging to this protein target
with tarfile.open(str(set_path)) as t:
    # ligands
    ligands_file = f'{prot_name}_ligands_Gasteiger.mol2'
    f_in = t.extract(f'allDUDfiles_Gasteiger/{ligands_file}', path = DUD_DIR)   # t.extract returns None; f_in is unused
    # decoys
    decoys_file = f'{prot_name}_decoys_Gasteiger.mol2'
    f_in = t.extract(f'allDUDfiles_Gasteiger/{decoys_file}', path = DUD_DIR)
# Create the output directories for the per-molecule split done below
Path(f'{DUD_DIR}/mol2').mkdir(exist_ok = True)
Path(f'{DUD_DIR}/sdf').mkdir(exist_ok = True)
# + magic_args="-s $DUD_DIR/allDUDfiles_Gasteiger $ligands_file $decoys_file" language="bash"
# # Split the molecules into individual files
#
# # MOL2 Files
# # Actives
# obabel $1/$2 -O $1/../mol2/ligand_.mol2 -m --gen3d
# # Decoys
# obabel $1/$3 -O $1/../mol2/decoy_.mol2 -m --gen3d
#
# # SDF Files
# # Actives
# obabel $1/$2 -O $1/../sdf/ligand_.sdf -m --gen3d
# # Decoys
# obabel $1/$3 -O $1/../sdf/decoy_.sdf -m --gen3d
# -
# ## Cocrystalized molecules
# Molecules obtained with the notebook `../1_Download_and_prepare_protein_ensembles/5_Get_cocrystalized_molecules_from_PDB`.
# +
# Cocrystalized ligands directory
DIR_MAIN = '../1_Download_and_prepare_protein_ensembles/pdb_structures'
DIR_PREP_LIGS = f'{DIR_MAIN}/pocket_ligands'
# Output directory
COCRYS_DIR = f'{LIB_DIR}/COCRYS/'
Path(COCRYS_DIR).mkdir(exist_ok = True)
# List all available files and create a
# dictionary keyed by molecule name, which de-duplicates repeated names
# (later files overwrite earlier ones).
# Duplicates will be removed in further notebooks
list_of_files = sorted(glob(f'{DIR_PREP_LIGS}/*pdb'))
cocrys_mols = {file.split('/')[-1].split('.pdb')[0]: file
    for file in list_of_files
}
print(f'{len(cocrys_mols)} unique compounds of ' +\
    f'{len(list_of_files)} cocrystalized molecules')
# MOL2 Files
Path(f'{COCRYS_DIR}/mol2').mkdir(exist_ok = True)
# Convert the molecules to mol2 using obabel
# NOTE(review): shell=True with interpolated paths -- safe only while the
# input directories contain no spaces or shell metacharacters.
for name, file in cocrys_mols.items():
    sp.run(
        f'''
        obabel -ipdb {file} \
        -omol2 -O {COCRYS_DIR}/mol2/{name}.mol2 \
        -p 7.0 --gen3d --partialcharge gasteiger
        ''',
        shell = True
    )
# SDF Files
Path(f'{COCRYS_DIR}/sdf').mkdir(exist_ok = True)
# Convert the molecules to sdf using obabel
for name, file in cocrys_mols.items():
    sp.run(
        f'''
        obabel -ipdb {file} \
        -osdf -O {COCRYS_DIR}/sdf/{name}.sdf \
        -p 7.0 --gen3d --partialcharge gasteiger
        ''',
        shell = True
    )
# -
# Finished!
| fxa/2_Molecular_libraries/1_Get_Molecular_libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting Started with Qiskit
#
# Here, we provide an overview of working with Qiskit. The fundamental package of Qiskit is Terra that provides the basic building blocks necessary to program quantum computers. The fundamental unit of Qiskit is the [**quantum circuit**](https://en.wikipedia.org/wiki/Quantum_circuit). A basic workflow using Qiskit consists of two stages: **Build** and **Execute**. **Build** allows you to make different quantum circuits that represent the problem you are solving, and **Execute** that allows you to run them on different backends. After the jobs have been run, the data is collected and postprocessed depending on the desired output.
import numpy as np
from qiskit import *
# %matplotlib inline
# ## Circuit Basics <a name='basics'></a>
#
#
# ### Building the circuit
#
# The basic element needed for your first program is the QuantumCircuit. We begin by creating a `QuantumCircuit` comprised of three qubits.
# Create a Quantum Circuit acting on a quantum register of three qubits;
# each qubit is initialized to |0> by default.
circ = QuantumCircuit(3)
# After you create the circuit with its registers, you can add gates ("operations") to manipulate the registers. As you proceed through the tutorials you will find more gates and circuits; below is an example of a quantum circuit that makes a three-qubit GHZ state
#
# $$|\psi\rangle = \left(|000\rangle+|111\rangle\right)/\sqrt{2}.$$
#
# To create such a state, we start with a three-qubit quantum register. By default, each qubit in the register is initialized to $|0\rangle$. To make the GHZ state, we apply the following gates:
# * A Hadamard gate $H$ on qubit 0, which puts it into the superposition state $\left(|0\rangle+|1\rangle\right)/\sqrt{2}$.
# * A controlled-Not operation ($C_{X}$) between qubit 0 and qubit 1.
# * A controlled-Not operation between qubit 0 and qubit 2.
#
# On an ideal quantum computer, the state produced by running this circuit would be the GHZ state above.
#
# In Qiskit, operations can be added to the circuit one by one, as shown below.
# Add a H gate on qubit 0, putting this qubit in superposition.
circ.h(0)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
circ.cx(0, 1)
# Add a CX (CNOT) gate on control qubit 0 and target qubit 2, putting
# the qubits in a GHZ state, (|000> + |111>)/sqrt(2).
circ.cx(0, 2)
# ## Visualize Circuit <a name='visualize'></a>
#
# You can visualize your circuit using Qiskit `QuantumCircuit.draw()`, which plots the circuit in the form found in many textbooks.
# Render the circuit (text drawer by default; mpl/latex via settings.conf)
circ.draw()
# In this circuit, the qubits are put in order, with qubit zero at the top and qubit two at the bottom. The circuit is read left to right (meaning that gates that are applied earlier in the circuit show up further to the left).
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> If you don't have matplotlib set up as your default in '~/.qiskit/settings.conf' it will use a text-based drawer over matplotlib. To set the default to matplotlib, use the following in the settings.conf
#
# [default]
# circuit_drawer = mpl
#
# For those that want the full LaTeX experience, you can also set the circuit_drawer = latex.
#
# </div>
#
#
# ## Simulating circuits using Qiskit Aer <a name='simulation'></a>
#
# Qiskit Aer is our package for simulating quantum circuits. It provides many different backends for doing a simulation. There is also a basic, Python only, implementation called `BasicAer` in Terra that can be used as a drop-in replacement for `Aer` in the examples below.
#
# ### Statevector backend
#
# The most common backend in Qiskit Aer is the `statevector_simulator`. This simulator returns the quantum
# state, which is a complex vector of dimensions $2^n$, where $n$ is the number of qubits
# (so be careful using this as it will quickly get too large to run on your machine).
# <div class="alert alert-block alert-info">
#
#
# When representing the state of a multi-qubit system, the tensor order used in Qiskit is different than that used in most physics textbooks. Suppose there are $n$ qubits, and qubit $j$ is labeled as $Q_{j}$. Qiskit uses an ordering in which the $n^{\mathrm{th}}$ qubit is on the <em><strong>left</strong></em> side of the tensor product, so that the basis vectors are labeled as $Q_n\otimes \cdots \otimes Q_1\otimes Q_0$.
#
# For example, if qubit zero is in state 0, qubit 1 is in state 0, and qubit 2 is in state 1, Qiskit would represent this state as $|100\rangle$, whereas many physics textbooks would represent it as $|001\rangle$.
#
# This difference in labeling affects the way multi-qubit operations are represented as matrices. For example, Qiskit represents a controlled-X ($C_{X}$) operation with qubit 0 being the control and qubit 1 being the target as
#
# $$C_X = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\\end{pmatrix}.$$
#
# </div>
#
# To run the above circuit using the statevector simulator, first you need to import Aer and then set the backend to `statevector_simulator`.
# +
# Import Aer
from qiskit import Aer
# Run the quantum circuit on a statevector simulator backend
# (returns the ideal final state; memory grows as 2**n amplitudes)
backend = Aer.get_backend('statevector_simulator')
# -
# Now that we have chosen the backend, it's time to compile and run the quantum circuit. In Qiskit we provide the `execute` function for this. ``execute`` returns a ``job`` object that encapsulates information about the job submitted to the backend.
#
#
# <div class="alert alert-block alert-info">
# <b>Tip:</b> You can obtain the above parameters in Jupyter. Simply place the text cursor on a function and press Shift+Tab.
# </div>
# Create a Quantum Program for execution
# (`execute` compiles the circuit for the backend and submits it,
# returning a job handle; submission is asynchronous)
job = execute(circ, backend)
# When you run a program, a job object is made that has the following two useful methods:
# `job.status()` and `job.result()`, which return the status of the job and a result object, respectively.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> Jobs run asynchronously, but when the result method is called, it switches to synchronous and waits for it to finish before moving on to another task.
# </div>
# Wait for the job to finish and fetch its Result object
result = job.result()
# The results object contains the data and Qiskit provides the method
# `result.get_statevector(circ)` to return the state vector for the quantum circuit.
# State vector of the 3-qubit circuit, amplitudes rounded to 3 decimals
outputstate = result.get_statevector(circ, decimals=3)
print(outputstate)
# Qiskit also provides a visualization toolbox to allow you to view these results.
#
# Below, we use the visualization function to plot the real and imaginary components of the state density matrix \rho.
from qiskit.visualization import plot_state_city
# City plot of the real and imaginary parts of the state's density matrix
plot_state_city(outputstate)
# ### Unitary backend
# Qiskit Aer also includes a `unitary_simulator` that works _provided all the elements in the circuit are unitary operations_. This backend calculates the $2^n \times 2^n$ matrix representing the gates in the quantum circuit.
# +
# Run the quantum circuit on a unitary simulator backend;
# valid here because the circuit contains only unitary gates (no measurements)
backend = Aer.get_backend('unitary_simulator')
job = execute(circ, backend)
result = job.result()
# Show the results: the 8x8 unitary matrix implemented by the 3-qubit circuit
print(result.get_unitary(circ, decimals=3))
# -
# ### OpenQASM backend
# The simulators above are useful because they provide information about the state output by the ideal circuit and the matrix representation of the circuit. However, a real experiment terminates by _measuring_ each qubit (usually in the computational $|0\rangle, |1\rangle$ basis). Without measurement, we cannot gain information about the state. Measurements cause the quantum system to collapse into classical bits.
#
# For example, suppose we make independent measurements on each qubit of the three-qubit GHZ state
# $$|\psi\rangle = \left(|000\rangle +|111\rangle\right)/\sqrt{2},$$
# and let $xyz$ denote the bitstring that results. Recall that, under the qubit labeling used by Qiskit, $x$ would correspond to the outcome on qubit 2, $y$ to the outcome on qubit 1, and $z$ to the outcome on qubit 0.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> This representation of the bitstring puts the most significant bit (MSB) on the left, and the least significant bit (LSB) on the right. This is the standard ordering of binary bitstrings. We order the qubits in the same way, which is why Qiskit uses a non-standard tensor product order.
# </div>
#
# Recall the probability of obtaining outcome $xyz$ is given by
# $$\mathrm{Pr}(xyz) = |\langle xyz | \psi \rangle |^{2}$$ and as such for the GHZ state probability of obtaining 000 or 111 are both 1/2.
#
# To simulate a circuit that includes measurement, we need to add measurements to the original circuit above, and use a different Aer backend.
# +
# Create a Quantum Circuit with 3 qubits and 3 classical bits
meas = QuantumCircuit(3, 3)
meas.barrier(range(3))
# map the quantum measurement to the classical bits
meas.measure(range(3),range(3))
# Append the measurement circuit to the GHZ circuit.
# The `circ + meas` addition operator is deprecated (and removed in newer
# Qiskit Terra releases); `compose` is the supported composition API and
# returns a new circuit, leaving `circ` and `meas` unchanged.
qc = circ.compose(meas)
#drawing the circuit
qc.draw()
# -
# This circuit adds a classical register, and three measurements that are used to map the outcome of qubits to the classical bits.
#
# To simulate this circuit, we use the ``qasm_simulator`` in Qiskit Aer. Each run of this circuit will yield either the bitstring 000 or 111. To build up statistics about the distribution of the bitstrings (to, e.g., estimate $\mathrm{Pr}(000)$), we need to repeat the circuit many times. The number of times the circuit is repeated can be specified in the ``execute`` function, via the ``shots`` keyword.
# +
# Use Aer's qasm_simulator
backend_sim = Aer.get_backend('qasm_simulator')
# Execute the circuit on the qasm simulator.
# We've set the number of repeats of the circuit
# to be 1024, which is the default.
job_sim = execute(qc, backend_sim, shots=1024)
# Grab the results from the job.
# (result() blocks until the simulation has finished)
result_sim = job_sim.result()
# -
# Once you have a result object, you can access the counts via the function `get_counts(circuit)`. This gives you the _aggregated_ binary outcomes of the circuit you submitted.
# Aggregated bitstring outcomes, e.g. {'000': ~512, '111': ~512}
counts = result_sim.get_counts(qc)
print(counts)
# Approximately 50 percent of the time, the output bitstring is 000. Qiskit also provides a function `plot_histogram`, which allows you to view the outcomes.
from qiskit.visualization import plot_histogram
# Histogram of outcome frequencies over the 1024 shots
plot_histogram(counts)
# The estimated outcome probabilities $\mathrm{Pr}(000)$ and $\mathrm{Pr}(111)$ are computed by taking the aggregate counts and dividing by the number of shots (times the circuit was repeated). Try changing the ``shots`` keyword in the ``execute`` function and see how the estimated probabilities change.
# ## Running circuits from the IBM Q account <a name='ibmq'></a>
#
# To facilitate access to real quantum computing hardware, we have provided a simple API interface.
# To access IBM Q devices, you'll need an API token. You can generate, or view, your API token [here](https://quantum-computing.ibm.com/account) (create an account if you don't already have one).
#
# Your IBM Q account lets you run your circuit on real devices or on our cloud simulator. Basic account usage can be seen in the examples below. For more detailed instructions on using the IBM Q account.
# +
#from qiskit import IBMQ
# -
# After generating your API token, call: `IBMQ.save_account('MY_TOKEN')`.
#
# This will store your IBM Q credentials in a local file. Unless your registration information has changed, you only need to do this once. You may now load your accounts by calling,
# +
#IBMQ.load_account()
# -
# Once your account has been loaded, you have one or more providers available to you
# +
#IBMQ.providers()
# -
# Each provider gives access to a selection of services (e.g. Backends) that is authorized by your account. To see the backends available to a given provider, first select the provider by hub, group, project, or a combination thereof:
# +
#provider = IBMQ.get_provider(group='open')
# -
# then ask the provider to list its backends:
# +
#provider.backends()
# -
# ### Running circuits on real devices
#
# Today's quantum information processors are small and noisy, but are advancing at a fast pace. They provide a great opportunity to explore what [noisy, intermediate-scale quantum (NISQ)](https://arxiv.org/abs/1801.00862) computers can do.
# Let us now grab a backend from the provider on which to run our quantum circuit:
# +
#backend = provider.get_backend('ibmq_essex')
# -
# To run the circuit on the given device we use `execute`. Sometimes the devices are busy with jobs from other users. Your job is added to the list of pending jobs called the queue, and executed in this queue order. To monitor the status of our job through the process, we can use the `job_monitor`
# <div class="alert alert-block alert-warning">
# <b>Info:</b> The <code>execute</code> function does much more than just send your circuit(s) to a backend. This functionality can be explored in <a href=5_using_the_transpiler.ipynb>Part 5: Using the transpiler.</a></div>
# +
#from qiskit.tools.monitor import job_monitor
#job_exp = execute(qc, backend=backend)
#job_monitor(job_exp)
# -
# ``job_exp`` has a ``.result()`` method that lets us get the results from running our circuit.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> When the .result() method is called, the code block will wait until the job has finished before releasing the cell.
# </div>
# +
#result_exp = job_exp.result()
# -
# Like before, the counts from the execution can be obtained using ```get_counts(qc)```
# +
#counts_exp = result_exp.get_counts(qc)
#plot_histogram([counts_exp,counts], legend=['Device', 'Simulator'])
# -
# ### Simulating circuits using the IBM Q cloud simulator
#
# The IBM Q provider also comes with a remote optimized simulator called ``ibmq_qasm_simulator``. This remote simulator is capable of simulating up to 32 qubits. It can be used the same way as the remote real backends.
# +
#simulator_backend = provider.get_backend('ibmq_qasm_simulator')
# +
#job_cloud = execute(qc, backend=simulator_backend)
# +
#result_cloud = job_cloud.result()
# +
#counts_cloud = result_cloud.get_counts(qc)
#plot_histogram(counts_cloud)
# -
# ### Retrieving a previously run job
#
# If your experiment takes longer to run than you have time to wait around, or if you simply want to retrieve old jobs, the IBM Q backends allow you to do that.
# First, you would need to note your job's ID:
# +
#job_id = job_exp.job_id()
#print('JOB ID: {}'.format(job_id))
# -
# Given a job ID, that job object can be later reconstructed from the backend using ```retrieve_job```:
# +
#retrieved_job = backend.retrieve_job(job_id)
# -
# and then the results can be obtained from the new job object.
# +
#retrieved_job.result().get_counts(qc)
# -
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
| docs/tutorials/fundamentals/1_getting_started_with_qiskit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The Black-Scholes model was first introduced by Fischer Black and Myron Scholes in 1973 in the paper "The Pricing of Options and Corporate Liabilities". Since being published, the model has become a widely used tool by investors and is still regarded as one of the best ways to determine fair prices of options.
# The purpose of the model is to determine the price of a vanilla European call and put options (option that can only be exercised at the end of its maturity) based on price variation over time and assuming the asset has a lognormal distribution.
# ## Assumptions
# To determine the price of vanilla European options, several assumptions are made:
# * European options can only be exercised at expiration
# * No dividends are paid during the option's life
# * Market movements cannot be predicted
# * The risk-free rate and volatility are constant
# * Follows a lognormal distribution
# ## Non-Dividend Paying Black-Scholes Formula
# In Black-Scholes formulas, the following parameters are defined.
# * $S$, the spot price of the asset at time $t$
# * $T$, the maturity of the option. Time to maturity is defined as $T - t$
# * $K$, strike price of the option
# * $r$, the risk-free interest rate, assumed to be constant between $t$ and $T$
# * $\sigma$, volatility of underlying asset, the standard deviation of the asset returns
# #### $N(d)$ is the cumulative distribution of the standard normal variable Z
# $$N(d) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^d e^{-\frac{1}{2}x^2} dx$$
# $C(S,t)$ is the value at time $t$ of a call option and $P(S,t)$ is the value at time $t$ of a put option.
# The Black-Scholes call formula is given as:
# $$C(S,t) = SN(d_1) - Ke^{-r(T - t)} N(d_2)$$
# The put formula is given:
# $$P(S,t) = Ke^{-r(T - t)}N(-d_2) - SN(-d_1)$$
# Where:
# $$d_1 = \frac{\ln \left(\frac{S}{K} \right) + \left(r + \frac{\sigma^2}{2} \right)(T - t)}{\sigma \sqrt{T - t}}$$
# $$d_2 = d_1 - \sigma \sqrt{T - t} = \frac{\ln \left(\frac{S}{K} \right) + \left(r - \frac{\sigma^2}{2}\right)(T - t)}{\sigma \sqrt{T - t}}$$
# ## Python Implementation of Black-Scholes formula for non-dividend paying options
import numpy as np
import scipy.stats as si
import sympy as sy
from sympy.stats import Normal, cdf
from sympy import init_printing
init_printing()
def euro_vanilla_call(S, K, T, r, sigma):
    """Black-Scholes price of a European call on a non-dividend-paying asset.

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; sigma: volatility of the underlying asset.
    """
    d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    discounted_strike = K * np.exp(-r * T)
    # C = S*N(d1) - K*exp(-rT)*N(d2)
    return S * si.norm.cdf(d1, 0.0, 1.0) - discounted_strike * si.norm.cdf(d2, 0.0, 1.0)

# Deep out-of-the-money example: spot 50 vs strike 100
euro_vanilla_call(50, 100, 1, 0.05, 0.25)
def euro_vanilla_put(S, K, T, r, sigma):
    """Black-Scholes price of a European put on a non-dividend-paying asset.

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; sigma: volatility of the underlying asset.
    """
    vol_term = sigma * np.sqrt(T)
    d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / vol_term
    d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / vol_term
    # P = K*exp(-rT)*N(-d2) - S*N(-d1)
    put_price = K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * si.norm.cdf(-d1, 0.0, 1.0)
    return put_price

# Deep in-the-money put on the same example inputs
euro_vanilla_put(50, 100, 1, 0.05, 0.25)
# The next function can be called with 'call' or 'put' for the option parameter to calculate the desired option
def euro_vanilla(S, K, T, r, sigma, option = 'call'):
    """Black-Scholes price of a European vanilla option (no dividends).

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; sigma: volatility of the underlying asset;
    option: 'call' or 'put'.

    Raises:
        ValueError: if `option` is neither 'call' nor 'put'. (The original
            silently fell through and crashed with UnboundLocalError.)
    """
    d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    if option == 'call':
        return (S * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
    if option == 'put':
        return (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * si.norm.cdf(-d1, 0.0, 1.0))
    raise ValueError("option must be 'call' or 'put', got %r" % (option,))

euro_vanilla(50, 100, 1, 0.05, 0.25, option = 'put')
# Sympy implementation for Exact Results
def euro_call_sym(S, K, T, r, sigma):
    """Exact (symbolic) Black-Scholes call price via sympy.stats.

    Same contract as euro_vanilla_call, but returns an unevaluated
    sympy expression instead of a numpy float.
    S: spot; K: strike; T: maturity; r: rate; sigma: volatility.
    """
    N = Normal('x', 0.0, 1.0)
    vol_sqrt_T = sigma * sy.sqrt(T)
    d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / vol_sqrt_T
    d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / vol_sqrt_T
    return S * cdf(N)(d1) - K * sy.exp(-r * T) * cdf(N)(d2)

# Symbolic counterpart of the numeric example above
euro_call_sym(50, 100, 1, 0.05, 0.25)
def euro_put_sym(S, K, T, r, sigma):
    """Exact (symbolic) Black-Scholes put price via sympy.stats.

    S: spot price; K: strike price; T: time to maturity;
    r: interest rate; sigma: volatility of underlying asset.

    Bug fix: the original referenced an undefined `systats.Normal`
    (NameError at call time) and a nonexistent `N.cdf` method; this
    version mirrors euro_call_sym using sympy.stats Normal with cdf().
    """
    N = Normal('x', 0.0, 1.0)
    d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    put = (K * sy.exp(-r * T) * cdf(N)(-d2) - S * cdf(N)(-d1))
    return put
# Sympy implementation of the above function that enables one to specify a call or put result.
def sym_euro_vanilla(S, K, T, r, sigma, option = 'call'):
    """Exact (symbolic) Black-Scholes price of a European vanilla option.

    S: spot price; K: strike price; T: time to maturity;
    r: interest rate; sigma: volatility; option: 'call' or 'put'.

    Raises:
        ValueError: if `option` is neither 'call' nor 'put'. (The original
            silently fell through and crashed with UnboundLocalError.)
    """
    N = Normal('x', 0.0, 1.0)
    d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    if option == 'call':
        return (S * cdf(N)(d1) - K * sy.exp(-r * T) * cdf(N)(d2))
    if option == 'put':
        return (K * sy.exp(-r * T) * cdf(N)(-d2) - S * cdf(N)(-d1))
    raise ValueError("option must be 'call' or 'put', got %r" % (option,))

sym_euro_vanilla(50, 100, 1, 0.05, 0.25, option = 'put')
# ## Dividend Paying Black-Scholes Formula
# For assets that pay dividends, the Black-Scholes formula is rather similar to the non-dividend paying asset formula; however, a new parameter $q$, is added.
# * $S$, the spot price of the asset at time $t$
# * $T$, the maturity of the option. Time to maturity is defined as $T - t$
# * $K$, strike price of the option
# * $r$, the risk-free interest rate, assumed to be constant between $t$ and $T$
# * $\sigma$, volatility of underlying asset, the standard deviation of the asset returns
# * $q$, the dividend rate of the asset. This is assumed to pay dividends at a continuous rate
# In this case, the $q$ parameter is now included in $C(S,t)$ and $P(S,t)$.
# $$C(S,t) = Se^{-q(T - t)} N(d_1) - Ke^{-r(T - t)} N(d_2)$$
# $$P(S,t) = Ke^{-r(T - t)} N(-d_2) - Se^{-q(T - t)} N(-d_1)$$
# Then, $d_1$ and $d_2$ are slightly modified to include the continuous dividends
# $$d_1 = \frac{\ln \left(\frac{S}{K} \right) + \left(r - q + \frac{\sigma^2}{2} \right)(T - t)}{\sigma \sqrt{T - t}}$$
# $$d_2 = d_1 - \sigma \sqrt{T - t} = \frac{\ln \left(\frac{S}{K}\right) + \left(r - q - \frac{\sigma^2}{2}\right)(T - t)}{\sigma \sqrt{T - t}}$$
# ## Python Implementation
def black_scholes_call_div(S, K, T, r, q, sigma):
    """Black-Scholes call price for an asset paying continuous dividends.

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; q: continuous dividend yield;
    sigma: volatility of the underlying asset.
    """
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    # Spot is discounted by the dividend yield; strike by the risk-free rate
    forward_leg = S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0)
    strike_leg = K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0)
    return forward_leg - strike_leg
def black_scholes_put_div(S, K, T, r, q, sigma):
    """Black-Scholes put price for an asset paying continuous dividends.

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; q: continuous dividend yield;
    sigma: volatility of the underlying asset.
    """
    vol_term = sigma * np.sqrt(T)
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / vol_term
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / vol_term
    # P = K*exp(-rT)*N(-d2) - S*exp(-qT)*N(-d1)
    strike_leg = K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0)
    forward_leg = S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0)
    return strike_leg - forward_leg
# Implementation that can be used to determine the put or call option price depending on specification
def euro_vanilla_dividend(S, K, T, r, q, sigma, option = 'call'):
    """Black-Scholes price of a European option on a dividend-paying asset.

    S: spot price; K: strike price; T: time to maturity (years);
    r: risk-free interest rate; q: continuous dividend yield;
    sigma: volatility of the underlying asset; option: 'call' or 'put'.

    Raises:
        ValueError: if `option` is neither 'call' nor 'put'. (The original
            silently fell through and crashed with UnboundLocalError.)
    """
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    if option == 'call':
        return (S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
    if option == 'put':
        return (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0))
    raise ValueError("option must be 'call' or 'put', got %r" % (option,))
# Sympy Implementation of Black-Scholes with Dividend-paying asset
def black_scholes_call_div_sym(S, K, T, r, q, sigma):
    """Symbolic (sympy) Black-Scholes call price with continuous dividend yield.

    S: spot; K: strike; T: maturity; r: rate; q: continuous dividend yield;
    sigma: volatility. Returns an unevaluated sympy expression.
    """
    N = Normal('x', 0.0, 1.0)
    vol_sqrt_T = sigma * sy.sqrt(T)
    d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / vol_sqrt_T
    d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / vol_sqrt_T
    return S * sy.exp(-q * T) * cdf(N)(d1) - K * sy.exp(-r * T) * cdf(N)(d2)
def black_scholes_call_put_sym(S, K, T, r, q, sigma):
    """Symbolic (sympy) Black-Scholes *put* price with continuous dividends.

    NOTE(review): despite the 'call_put' name, the formula below is the put
    leg (K*exp(-rT)*N(-d2) - S*exp(-qT)*N(-d1)); the name is kept unchanged
    for backward compatibility with existing callers.
    S: spot; K: strike; T: maturity; r: rate; q: dividend yield; sigma: vol.
    """
    N = Normal('x', 0.0, 1.0)
    vol_sqrt_T = sigma * sy.sqrt(T)
    d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / vol_sqrt_T
    d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / vol_sqrt_T
    return K * sy.exp(-r * T) * cdf(N)(-d2) - S * sy.exp(-q * T) * cdf(N)(-d1)
# Sympy implementation of pricing a European put or call option depending on specification
def sym_euro_vanilla_dividend(S, K, T, r, q, sigma, option = 'call'):
    """Symbolic (sympy) Black-Scholes price with continuous dividend yield.

    S: spot price; K: strike price; T: time to maturity; r: interest rate;
    q: continuous dividend yield; sigma: volatility; option: 'call' or 'put'.

    Raises:
        ValueError: if `option` is neither 'call' nor 'put'. (The original
            silently fell through and crashed with UnboundLocalError.)
    """
    N = Normal('x', 0.0, 1.0)
    d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))
    if option == 'call':
        return S * sy.exp(-q * T) * cdf(N)(d1) - K * sy.exp(-r * T) * cdf(N)(d2)
    if option == 'put':
        return K * sy.exp(-r * T) * cdf(N)(-d2) - S * sy.exp(-q * T) * cdf(N)(-d1)
    raise ValueError("option must be 'call' or 'put', got %r" % (option,))
# ### References
# [<NAME>. (2015). How to price and trade options: identify, analyze, and execute the best trade probabilities.
# Hoboken, NJ: John Wiley & Sons, Inc.](https://amzn.to/37ajBnM)
#
# [<NAME>. (2015). How to calculate options prices and their Greeks: exploring the Black Scholes model from Delta
# to Vega. Chichester: Wiley.](https://amzn.to/2UzXDrD)
| content/posts/Black-Scholes Formula.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # SEC EDGAR database
# ## What is EDGAR?
# [EDGAR](https://www.sec.gov/edgar.shtml) is the Electronic Data Gathering, Analysis, and Retrieval system used at the U.S. Securities and Exchange Commission (SEC). EDGAR is the primary system for submissions by companies and others who are required by law to file information with the SEC.
#
# Containing millions of company and individual filings, EDGAR benefits investors, corporations, and the U.S. economy overall by increasing the efficiency, transparency, and fairness of the securities markets. The system processes about 3,000 filings per day, serves up 3,000 terabytes of data to the public annually, and accommodates 40,000 new filers per year on average.
# ## Who has access to EDGAR’s information?
# Access to EDGAR’s public database is **free**—allowing you to research, for example, a public company’s financial information and operations by reviewing the filings the company makes with the SEC. You can also research information provided by mutual funds (including money market funds), exchange-traded funds (ETFs), variable annuities, and individuals.
# ## What was the goal of this project?
# Even though the database is very comprehensive and has required companies by law to submit their filings electronically since 1996, it can be hard to download information in bulk. For one part of my PhD project, however, it was necessary to do exactly that.
# > Starting from 1996 up to the end of 2019, I was interested in information from the 10-K and 10-Q filings of US corporations. Fortunately there is an R package available that facilitates this task - `edgar`.
# The `edgar` package can be installed and loaded like any other R package:
# +
install.packages('edgar')
library(edgar)
# it is also always a good idea to load the tidyverse
library(tidyverse)
# -
# ## Getting the master indexes from the database
# The filings on the SEC website are organized according to a master index file for each quarter. The code below creates a vector for the sample period and downloads all master indexes to your working directory. It stores them as separate .Rda files for each year.
# +
# Create a vector for the sample period (fiscal years 1996 through 2019)
period <- c(1996:2019)
# Get the EDGAR master indexes: downloads the quarterly index files to the
# working directory, stored as one .Rda file per year (network I/O).
getMasterIndex(period)
## Downloads quarterly master index files for the sample period.
# -
# ## Convert the Master Indexes to a file with all 10-Q and 10-K filings (URLs and Header information)
# The separate .Rda files can be combined into a single dataframe. The code below starts by creating an empty dataframe `out.file`, which is used to store the combined master indexes. In a next step, all the file names for the master indexes get stored in the `file.names` vector. The for loop takes advantage of the objects that were created by iterating over all master indexes and storing them together in the `out.file`.
# +
# Combines the Master Indexes into one file.
# `out.file` starts as a "" placeholder row; it is dropped again below.
out.file<-""
# Every .Rda file in the working directory is assumed to be a master index
file.names <- Sys.glob("*.Rda")
for(i in 1:length(file.names)){
# load() restores a `year.master` data frame into the workspace
load(file.names[i])
out.file <- rbind(out.file, year.master)
}
# Drop the "" placeholder first row added at initialization
out.file <- out.file[-1,]
save(out.file, file = "master.Rda")
# this file contains the index on all EDGAR filings from 1996 to 2019 (around 19 million...)
# -
# The approximately 19 million observations in the resulting file represent all filings made by corporations during the sample period. However, my research interest focused on the 10-K and 10-Q forms. These forms represent the annual and quarterly report, respectively. For more information regarding the different form types visit: [Descriptions of SEC forms](https://www.sec.gov/info/edgar/forms/edgform.pdf)
# New data frame with only the 10-Q and 10-K filings
# (10-K = annual report, 10-Q = quarterly report)
data <- out.file %>% filter(form.type == "10-K" | form.type == "10-Q")
# Furthermore, in my specific research project, I was only interested in the filings of companies that were also part of the Compustat database. The identifier that was available to me in both sets was the companies CIK code. However, the Compustat database is proprietary and thus, I cannot share the file with all CIK codes of the companies. At this point, you should limit your dataset to companies that matter for your specific problem.
# +
# Read the file with the CIK codes from the quarterly Compustat file
# (proprietary data, hence not shipped with this notebook)
CIK <- read_csv("../CIK.txt", col_names = FALSE)
# NOTE(review): CIK[1,] takes the first *row* of the tibble, which assumes the
# codes are laid out across a single row; confirm against ../CIK.txt's shape
# (codes one-per-line would need the first *column* instead).
CIK <- as.integer(CIK[1,])
# Keep only observations from the quarterly Compustat file
data <- data %>% filter(cik %in% CIK)
##### at this point we have a data frame with 10-K and 10-Q filings for the Compustat quarterly universe (incl. URLs) #####
# -
# The next step in my PhD project will involve a text-search through all the filings in my dataframe. Therefore, I have to download the filings as .txt files from the database. In my case the number of distinct companies was around 11'000 over the whole sample period. These companies produced more than 364'000 separate 10-K and 10-Q filings. The final folder of all downloads had an approximate size of 1TB and it took me 4 days to store. If your project is similar in spirit, I highly recommend downloading to an external drive or NAS ;-)
# Downloads all Filings and gets information from the filing header (change directory to an external drive or NAS)
period <- c(1996:2019)
# Heavy network/disk job: per the notes above, ~364'000 filings and roughly
# 1TB on disk for the full Compustat universe -- expect days of runtime.
header.df <- getFilingHeader(cik.no = CIK, form.type = c('10-K', '10-Q'), filing.year = period)
| EDGAR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 1
# Write a python Function for finding whether a given number is prime or not and do Unit Testing on it using 'PyLint' and 'Unittest' Libraries.
# +
# #!pip install pylint
# #!pip install unittest2
# -
#importing libraries
import unittest2
import pylint
# %%writefile prime_testing.py
'''
Code to test whether a given number is prime or not
'''
def prime_test(num):
    '''
    Return True if `num` is prime, False otherwise.

    Bug fixes vs. the original: the function ignored its `num` argument
    and re-read the value from stdin (which made importing it in a unit
    test hang), and it returned None, so assertTrue() could never pass.
    It now uses the argument and returns a boolean; the original print
    messages are preserved.
    '''
    if num <= 1:
        # 0, 1 and negatives are not prime by definition
        print("Not a prime number!")
        return False
    # trial division up to sqrt(num) is sufficient
    for div in range(2, int(num ** 0.5) + 1):
        if num % div == 0:
            print("Not a prime number!")
            return False
    print("Entered number is a prime number")
    return True
prime_test(10)
# Import the module written above by %%writefile and smoke-test it
import prime_testing
prime_testing.prime_test(11)
# ! pylint "prime_testing.py"
# +
# using unittest
import unittest2
import prime_testing
class PrimeTestCase(unittest2.TestCase):
    def test_is_eleven_prime(self):
        """Is eleven correctly identified prime?"""
        # NOTE(review): assertTrue requires prime_test to *return* a truthy
        # value -- the function in prime_testing.py must return, not just print.
        self.assertTrue(prime_testing.prime_test(11))
if __name__ == '__main__':
    # argv/exit tweaks let unittest2 run inside a notebook kernel
    unittest2.main(argv=['first-arg-is-ignored'], exit=False)
# -
# # Question 2
# Make a small generator program for returning armstrong numbers in between 1-1000 in a generator object.
# +
#Generator function uses 'yield' instead of 'return' to give list at the output
def Armstrong(lst):
    '''
    Generator yielding the Armstrong (narcissistic) numbers found in ``lst``.

    A number with d digits is an Armstrong number when it equals the sum of
    its digits each raised to the d-th power (153 = 1**3 + 5**3 + 3**3,
    9 = 9**1).  The original version hard-coded the exponent 3, which is
    only correct for 3-digit numbers and wrongly excluded 2-9 from the
    1-1000 range.

    Parameters
    ----------
    lst : iterable of int
        Non-negative integers to filter.

    Yields
    ------
    int
        The members of ``lst`` that are Armstrong numbers, in order.
    '''
    for n in lst:
        digits = str(n)  # assumes non-negative integers
        power = len(digits)  # exponent = digit count, not a fixed 3
        if n == sum(int(d) ** power for d in digits):
            yield n
# Candidate range requested by the exercise: 1 through 1000 inclusive.
numbers = list(range(1,1001))
# -
print(numbers)
# Calling the generator function alone only creates a generator object;
# nothing is computed until it is iterated.
Armstrong(numbers)
# Materialize the generator to actually compute and display the results.
print(list((Armstrong(numbers))))
| Day-9/Day9_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""Есть в папке картинок но нет в базе"""
import os
def chek(file_):
    '''
    Return True when ``file_`` has NO matching entry in the article database.

    Each line of the database file holds an article id; the image for an
    article is expected to be named "<id>.jpg".  NOTE(review): the test is a
    substring check (``file_ in id + '.jpg'``), not an equality check, so a
    short file name can match inside a longer id — confirm this is intended.
    '''
    # The `with` statement closes the file; the original's extra p.close()
    # was redundant.  Early return replaces the original flag-and-break.
    with open('список из информации об артикулах.txt','r') as p:
        for line in p:
            if file_ in line.rstrip('\n')+'.jpg':
                return False  # found in the database -> not orphaned
    return True  # no line matched -> file has no database entry
# Files present in the image folder that have no entry in the database.
ss = [file for file in os.listdir('/Users/mila1/Desktop/в мусор') if chek(file)]
# Display the orphaned files (notebook cell output).
ss
# +
# Dump the contents of the image folder to a text file, one name per line.
import os
with open('test_katalog.txt','w') as ss:
    for file in os.listdir('/Users/mila1/Desktop/в мусор'):
        ss.write(file+'\n')
# The `with` block closes the file; the original's trailing ss.close()
# was redundant and has been removed.
# +
"""Articles present in the database but with no image file on disk."""
import os
# Build the set of image file names once instead of re-listing the folder
# for every database line (the original also leaked a duplicate handle by
# opening the database file twice).
images = set(os.listdir('/Users/mila1/Desktop/в мусор'))
with open('test_111.txt','w') as f, \
     open('список из информации об артикулах.txt','r') as p:
    for line in p:
        stem = line.rstrip('\n')
        # An article is covered when either a .jpeg or a .jpg file exists.
        if stem + '.jpeg' not in images and stem + '.jpg' not in images:
            # Write the bare id; the original wrote line+'\n', producing
            # blank lines because `line` still carried its newline.
            f.write(stem + '\n')
# -
| .ipynb_checkpoints/сравнение директорий-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/16A0/til/blob/master/Aphantasia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="W69TXNGDfW5w" outputId="e3724f89-f002-4c87-a028-ad041573ee7e"
import subprocess
CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1]
print("CUDA version:", CUDA_version)
if CUDA_version == "10.0":
torch_version_suffix = "+cu100"
elif CUDA_version == "10.1":
torch_version_suffix = "+cu101"
elif CUDA_version == "10.2":
torch_version_suffix = ""
else:
torch_version_suffix = "+cu110"
# !pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex
try:
# !pip3 install googletrans==3.1.0a0
from googletrans import Translator, constants
# from pprint import pprint
translator = Translator()
except: pass
# !pip install ftfy==5.8
import os
import io
import time
from math import exp
import random
import imageio
import numpy as np
import PIL
from skimage import exposure
from base64 import b64encode
import moviepy, moviepy.editor
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
from IPython.display import HTML, Image, display, clear_output
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import ipywidgets as ipy
# import glob
from google.colab import output, files
import warnings
warnings.filterwarnings("ignore")
# !pip install git+https://github.com/openai/CLIP.git
import clip
# !pip install sentence_transformers
from sentence_transformers import SentenceTransformer
# !pip install git+https://github.com/Po-Hsun-Su/pytorch-ssim
import pytorch_ssim as ssim
# %cd /content
# !rm -rf aphantasia
# !git clone https://github.com/eps696/aphantasia
# %cd aphantasia/
from clip_fft import to_valid_rgb, fft_image, slice_imgs, checkout
from utils import pad_up_to, basename, img_list, img_read
from progress_bar import ProgressIPy as ProgressBar
workdir = '_out'
tempdir = os.path.join(workdir, 'ttt')
clear_output()
resume = True #@param {type:"boolean"}
if resume:
resumed = files.upload()
params_pt = list(resumed.values())[0]
params_pt = torch.load(io.BytesIO(params_pt))
def makevid(seq_dir, size=None):
    """Render the frame sequence in *seq_dir* to an mp4 and return an HTML
    <video> tag that embeds it as a base64 data URL (for inline display)."""
    video_path = seq_dir + '.mp4'
    frames = img_list(seq_dir)
    clip = moviepy.editor.ImageSequenceClip(frames, fps=25)
    clip.write_videofile(video_path, verbose=False)
    with open(video_path, 'rb') as handle:
        encoded = b64encode(handle.read()).decode()
    data_url = "data:video/mp4;base64," + encoded
    if size is None:
        wh = ''
    else:
        wh = 'width=%d height=%d' % (size, size)
    return """<video %s controls><source src="%s" type="video/mp4"></video>""" % (wh, data_url)
# !nvidia-smi -L
print('\nDone!')
# + [markdown] id="g_VMCq-ZgfD2"
# Source https://github.com/eps696/aphantasia/blob/master/Aphantasia.ipynb
# + [markdown] id="M7D9YzmQf_bJ"
#
# Type some text and/or upload some image to start.
# fine_details input would make micro details follow that topic.
# Put to subtract the topics, which you would like to avoid in the result.
# NB: more prompts = more memory! (handled by auto-decreasing samples amount, hopefully you don't need to act).
# invert the whole criteria, if you want to see "the totally opposite".
#
# Options for non-English languages (use only one of them!):
# multilang = use multi-language model, trained with ViT
# translate = use Google translate (works with any visual model)
# + cellView="form" id="2xzyYhv5gCHn"
#@title Input
text = "sunrise on town at the edge of ocean cliff" #@param {type:"string"}
fine_details = "scottish highland " #@param {type:"string"}
subtract = "" #@param {type:"string"}
multilang = False #@param {type:"boolean"}
translate = False #@param {type:"boolean"}
invert = False #@param {type:"boolean"}
upload_image = False #@param {type:"boolean"}
if translate:
text = translator.translate(text, dest='en').text
if upload_image:
uploaded = files.upload()
# + [markdown] id="NWX8sgPmgHZP"
# Settings
# Select CLIP visual model (results do vary!). I prefer ViT for consistency (and it's the only native multi-language option).
# overscan option produces semi-seamlessly tileable texture (when off, it's more centered).
# sync value adds SSIM loss between the output and input image (if there's one), allowing to "redraw" it with controlled similarity.
#
# Decrease samples if you face OOM (it's the main RAM eater).
# Setting steps much higher (1000-..) will elaborate details and make tones smoother, but may start throwing texts like graffiti.
#
# Other tricks:
# diverse boosts compositional & contextual variety (difference between simultaneous samples). good start is ~0.2; can be negative.
# expand boosts training steps in general (difference between consequent samples). good start is ~0.2.
# progressive_grow may boost macro forms creation (especially with lower learning_rate), see more [here](https://github.com/eps696/aphantasia/issues/2).
# + id="09q5wYd1gTqH"
#@title Generate
# from google.colab import drive
# drive.mount('/content/GDrive')
# clipsDir = '/content/GDrive/MyDrive/T2I ' + dtNow.strftime("%Y-%m-%d %H%M")
# !rm -rf $tempdir
os.makedirs(tempdir, exist_ok=True)
sideX = 1280 #@param {type:"integer"}
sideY = 720 #@param {type:"integer"}
#@markdown > Config
model = 'ViT-B/32' #@param ['ViT-B/32', 'RN101', 'RN50x4', 'RN50']
overscan = True #@param {type:"boolean"}
sync = 0.5 #@param {type:"number"}
contrast = 1. #@param {type:"number"}
#@markdown > Training
steps = 200 #@param {type:"integer"}
samples = 200 #@param {type:"integer"}
learning_rate = .05 #@param {type:"number"}
save_freq = 1 #@param {type:"integer"}
#@markdown > Tricks
diverse = 0. #@param {type:"number"}
expand = 0. #@param {type:"number"}
progressive_grow = False #@param {type:"boolean"}
if multilang: model = 'ViT-B/32' # sbert model is trained with ViT
if len(fine_details) > 0:
samples = int(samples * 0.75)
if len(subtract) > 0:
samples = int(samples * 0.75)
print(' using %d samples' % samples)
model_clip, _ = clip.load(model)
modsize = 288 if model == 'RN50x4' else 224
xmem = {'RN50':0.5, 'RN50x4':0.16, 'RN101':0.33}
if 'RN' in model:
samples = int(samples * xmem[model])
if multilang is True:
model_lang = SentenceTransformer('clip-ViT-B-32-multilingual-v1').cuda()
def enc_text(txt):
    """Encode prompt text *txt* into an embedding tensor, detached from the graph.

    Uses the multilingual sentence-transformer when `multilang` is set,
    otherwise the CLIP text encoder (tokenized and moved to GPU).
    """
    if multilang is True:
        # sbert model handles non-English prompts; expects a list of sentences.
        emb = model_lang.encode([txt], convert_to_tensor=True, show_progress_bar=False)
    else:
        emb = model_clip.encode_text(clip.tokenize(txt).cuda())
    # Detach + clone so the cached embedding is a constant target during optimization.
    return emb.detach().clone()
if diverse != 0:
samples = int(samples * 0.5)
norm_in = torchvision.transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
sign = 1. if invert is True else -1.
if upload_image:
in_img = list(uploaded.values())[0]
print(' image:', list(uploaded)[0])
img_in = torch.from_numpy(imageio.imread(in_img).astype(np.float32)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_in], samples, modsize, transform=norm_in)[0]
img_enc = model_clip.encode_image(in_sliced).detach().clone()
if sync > 0:
overscan = True
ssim_loss = ssim.SSIM(window_size = 11)
ssim_size = [sideY//8, sideX//8]
img_in = F.interpolate(img_in, ssim_size).float()
# img_in = F.interpolate(img_in, (sideY, sideX)).float()
else:
del img_in
del in_sliced; torch.cuda.empty_cache()
if len(text) > 2:
print(' macro:', text)
if translate:
translator = Translator()
text = translator.translate(text, dest='en').text
print(' translated to:', text)
txt_enc = enc_text(text)
if len(fine_details) > 0:
print(' micro:', fine_details)
if translate:
translator = Translator()
fine_details = translator.translate(fine_details, dest='en').text
print(' translated to:', fine_details)
txt_enc2 = enc_text(fine_details)
if len(subtract) > 0:
print(' without:', subtract)
if translate:
translator = Translator()
subtract = translator.translate(subtract, dest='en').text
print(' translated to:', subtract)
txt_enc0 = enc_text(subtract)
if multilang is True: del model_lang
shape = [1, 3, sideY, sideX]
param_f = fft_image
# param_f = pixel_image
# learning_rate = 1.
init_pt = params_pt if resume is True else None
params, image_f = param_f(shape, resume=init_pt)
image_f = to_valid_rgb(image_f)
if progressive_grow is True:
lr1 = learning_rate * 2
lr0 = lr1 * 0.01
else:
lr0 = learning_rate
optimizer = torch.optim.Adam(params, lr0)
def save_img(img, fname=None):
    """Convert a float CHW image array to uint8 HWC and write it to
    'result.jpg' (always) and to *fname* (when given)."""
    hwc = np.transpose(np.array(img), (1, 2, 0))
    out = np.clip(hwc * 255, 0, 255).astype(np.uint8)
    if fname is not None:
        imageio.imsave(fname, np.array(out))
    imageio.imsave('result.jpg', np.array(out))
def checkout(num):
    """Render the current image, save it as frame *num* in `tempdir`, and
    refresh the inline preview widget.

    NOTE(review): this definition shadows the `checkout` imported from
    clip_fft earlier in the notebook — confirm that is intentional.
    """
    with torch.no_grad():
        img = image_f(contrast=contrast).cpu().numpy()[0]
    # save_img also writes 'result.jpg', which the preview below displays.
    save_img(img, os.path.join(tempdir, '%04d.jpg' % num))
    outpic.clear_output()
    with outpic:
        display(Image('result.jpg'))
prev_enc = 0
def train(i):
    """Run one optimization step *i*: slice the current image, score it
    against the CLIP text/image targets, and update the FFT parameters.

    Relies on notebook globals (image_f, samples, modsize, norm_in, sign,
    optimizer, ...).  `sign` is -1 unless `invert` is set, so adding
    sign * similarity to the loss maximizes similarity under minimization.
    """
    loss = 0
    img_out = image_f()
    # micro=False crops for macro scoring when a fine-details prompt exists;
    # None leaves cropping behavior to slice_imgs' default.
    micro = False if len(fine_details) > 0 else None
    imgs_sliced = slice_imgs([img_out], samples, modsize, norm_in, overscan=overscan, micro=micro)
    out_enc = model_clip.encode_image(imgs_sliced[-1])
    if diverse != 0:
        # Penalize similarity between two independent slicings to boost variety.
        imgs_sliced = slice_imgs([image_f()], samples, modsize, norm_in, overscan=overscan, micro=micro)
        out_enc2 = model_clip.encode_image(imgs_sliced[-1])
        loss += diverse * torch.cosine_similarity(out_enc, out_enc2, dim=-1).mean()
        del out_enc2; torch.cuda.empty_cache()
    if upload_image:
        # Half-weight pull toward the uploaded image's CLIP embedding.
        loss += sign * 0.5 * torch.cosine_similarity(img_enc, out_enc, dim=-1).mean()
    if len(text) > 0: # input text
        loss += sign * torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()
    if len(subtract) > 0: # subtract text
        loss += -sign * torch.cosine_similarity(txt_enc0, out_enc, dim=-1).mean()
    if sync > 0 and upload_image: # image composition sync
        # SSIM against the (downscaled) input image keeps the layout similar.
        loss -= sync * ssim_loss(F.interpolate(img_out, ssim_size).float(), img_in)
    if len(fine_details) > 0: # input text for micro details
        imgs_sliced = slice_imgs([img_out], samples, modsize, norm_in, overscan=overscan, micro=True)
        out_enc2 = model_clip.encode_image(imgs_sliced[-1])
        loss += sign * torch.cosine_similarity(txt_enc2, out_enc2, dim=-1).mean()
        del out_enc2; torch.cuda.empty_cache()
    if expand > 0:
        # Penalize similarity with the previous step's encoding to keep
        # the image evolving between consecutive steps.
        global prev_enc
        if i > 0:
            loss += expand * torch.cosine_similarity(out_enc, prev_enc, dim=-1).mean()
        prev_enc = out_enc.detach()
    # Free intermediate GPU tensors before the backward pass.
    del img_out, imgs_sliced, out_enc; torch.cuda.empty_cache()
    if progressive_grow is True:
        # Linear learning-rate ramp from lr0 to lr1 over the full run.
        lr_cur = lr0 + (i / steps) * (lr1 - lr0)
        for g in optimizer.param_groups:
            g['lr'] = lr_cur
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % save_freq == 0:
        checkout(i // save_freq)
outpic = ipy.Output()
outpic
pbar = ProgressBar(steps)
for i in range(steps):
train(i)
_ = pbar.upd()
HTML(makevid(tempdir))
torch.save(params, tempdir + '.pt')
files.download(tempdir + '.pt')
files.download(tempdir + '.mp4')
| notebooks/Aphantasia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Select MOFs and create dataset
import xlrd
from random import randint
xl_data = xlrd.open_workbook('mofs-loading.xlsx')
mof_names = xl_data.sheets()[0].col_values(0)[1:]
pza = xl_data.sheets()[0].col_values(3)[1:]
pza_err = xl_data.sheets()[0].col_values(4)[1:]
metal = xl_data.sheets()[0].col_values(7)[1:]
vp = xl_data.sheets()[0].col_values(8)[1:]
vf = xl_data.sheets()[0].col_values(9)[1:]
density = xl_data.sheets()[0].col_values(10)[1:]
gsa = xl_data.sheets()[0].col_values(11)[1:]
lcd = xl_data.sheets()[0].col_values(13)[1:]
n_mofs = 50
len(mof_names)
selected_mofs = []
dataset = []
# Randomly sample n_mofs distinct MOFs.  The original drew indices from a
# hard-coded randint(0, 500): rows past index 500 could never be selected,
# and a shorter table would raise IndexError.  Draw from the actual table
# length instead.
while len(selected_mofs) < n_mofs:
    idx = randint(0, len(mof_names) - 1)
    mof = mof_names[idx]
    if mof not in selected_mofs:
        selected_mofs.append(mof)
        dataset.append([mof, metal[idx], pza[idx], pza_err[idx], vp[idx], vf[idx], density[idx], gsa[idx], lcd[idx]])
# NOTE(review): plt only exists after the `%pylab inline` cell below runs;
# execute that first (or import matplotlib.pyplot explicitly).
plt.hist([i[2] for i in dataset])
# %pylab inline
import csv
with open('pza-uptake.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',')
spamwriter.writerow(['MOF', 'Metal', 'Uptake', 'Error', 'Pore Volume', 'Void Fraction', 'Density', 'Surface Area', 'Pore Diameter'])
for row in dataset:
spamwriter.writerow(row)
smofs = []
while len(smofs) < 5:
idx = randint(0, 500)
mof = mof_names[idx]
if mof not in smofs:
smofs.append(mof)
print([mof, metal[idx], pza[idx], pza_err[idx], vp[idx], vf[idx], density[idx], gsa[idx], lcd[idx]])
| sciviscomm/pza-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Exploration
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# -
np.random.seed(1)
# load data
df = pd.read_csv('drug_data2.txt')
df.head()
df2 = df[['Sex','Treatment','Concentration','Day','wakeact']].copy()
df2
df3 = np.array(df2)
df3.shape
# select only day = 5
df = df3[df3[:,3] ==5]
df.shape
# features:
X = df[:,0:3]
X.shape
np.unique(X[:,0],return_counts=True)
np.unique(X[:,1],return_counts=True)
np.unique(X[:,2],return_counts=True)
y = df[:,4].astype(float)
y.min()
bins = np.linspace(min(y), max(y),10, endpoint=False)
bins
plt.hist(y,bins)
from sklearn.preprocessing import KBinsDiscretizer
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='quantile')
est.fit(y.reshape(-1,1))
ynew = est.transform(y.reshape(-1,1))
np.unique(ynew,return_counts=True)
X
# +
l,n = X.shape
x0 = np.ones(l)
x0[X[:,0] == 'Female'] = -1.
# -
np.unique(x0,return_counts=True)
from sklearn.preprocessing import OneHotEncoder
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
x1 = onehot_encoder.fit_transform(X[:,1].reshape(-1,1))
x1.shape
x2 = X[:,2].astype(float)
X = np.hstack([x0[:,np.newaxis],x1,x2[:,np.newaxis]])
X.shape
| drug_set2/drug2_explore_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="eda636ab-c737-4c65-91d4-6625f317fc1e" _uuid="572db849acfdfb7160978a3fb9955a668af20760"
# # Welcome to data visualization
#
# Welcome to the Data Visualization tutorial!
#
# Data visualization is one of the core skills in data science. In order to start building useful models, we need to understand the underlying dataset. You will never be an expert on the data you are working with, and will always need to explore the variables in great depth before you can move on to building a model or doing something else with the data. Effective data visualization is the most important tool in your arsenal for getting this done, and hence a critical skill for you to master.
#
# In this tutorial series, we will cover building effective data visualizations in Python. We will cover the `pandas` and `seaborn` plotting tools in depth. We will also touch upon `matplotlib`, and discuss `plotly` and `plotnine` in later (optional) sections. We still start with simple single-variable plots and work our way up to plots showing two, three, or even more dimensions. Upon completing this tutorial you should be well-equipped to start doing useful exploratory data analysis (EDA) with your own datasets!
#
# ## Prerequisites
#
# There's one thing to do before we get started, however: learn about `pandas`, the linchpin of the Python data science ecosystem. `pandas` contains all of the data reading, writing, and manipulation tools that you will need to probe your data, run your models, and of course, visualize. As a result, a working understanding of `pandas` is critical to being a successful builder of data visualizations.
#
# If you are totally unfamiliar with `pandas`, the following prerequisites will cover just enough `pandas` to get you started. These tutorial sections are targeted at total beginners; if you are already at least mildly familiar with the library, you should skip ahead to the next section.
#
# <table style="width:800px;">
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px; width:33%"><a href="https://www.kaggle.com/sohier/tutorial-accessing-data-with-pandas/">Accessing Data with Pandas</a></td>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px; width:33%"><a href="https://www.kaggle.com/dansbecker/selecting-and-filtering-in-pandas">Selecting and Filtering in Pandas</a></td>
# <!--
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px; width:33%"><a href="https://www.kaggle.com/residentmario/just-enough-pandas-optional/">Prerequisite 3</a></td>
# -->
# </tr>
# </table>
#
# ## Contents
#
# This tutorial consists of the following sections:
#
# <table style="width:800px">
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px;"><a href="https://www.kaggle.com/residentmario/univariate-plotting-with-pandas">Univariate plotting with pandas</a></td>
# </tr>
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px;"><a href="https://www.kaggle.com/residentmario/bivariate-plotting-with-pandas">Bivariate plotting with pandas</a></td>
# </tr>
# <tr>
# <td style="padding:25px; text-align:center; font-size:18px; width:50%"><a href="https://www.kaggle.com/residentmario/styling-your-plots/">Styling your plots</a>
# </td>
# <td style="padding:25px; text-align:center; font-size:18px; width:50%"><a href="https://www.kaggle.com/residentmario/plotting-with-seaborn">Plotting with seaborn</a>
# </td>
# </tr>
# <tr>
# <td style="padding:25px; text-align:center; font-size:18px; width:50%"><a href="https://www.kaggle.com/residentmario/subplots/">Subplots</a></td>
# <td style="padding:25px; text-align:center; font-size:18px;width:50%"><a href="https://www.kaggle.com/residentmario/faceting-with-seaborn/">Faceting with seaborn</a></td>
# </tr>
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px;"><a href="https://www.kaggle.com/residentmario/multivariate-plotting">Multivariate plotting</a></td>
# </tr>
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px;"><a href="https://www.kaggle.com/residentmario/introduction-to-plotly-optional/">Plotting with plotly</a></td>
# </tr>
# <tr>
# <td colspan=2 style="padding:25px; text-align:center; font-size:18px;"><a href="https://www.kaggle.com/residentmario/grammer-of-graphics-with-plotnine-optional/">Grammar of graphics with plotnine</a></td>
# </tr>
# </table>
#
# .
#
# Each section will focus on one particular aspect of plotting in Python, relying on the knowledge you have acquired up until that point to get the job done.
#
# Ready? [To start the tutorial, proceed to the next section, "Univariate plotting with pandas"](https://www.kaggle.com/residentmario/univariate-plotting-with-pandas/).
# + _uuid="351ef9933058c9769211f965bddb5c675a0c9c5e"
| Data_Visualization_Learning/kernel_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from iterdub import iterdub as ib
from iterpop import iterpop as ip
from keyname import keyname as kn
from matplotlib import pyplot as plt
import matplotlib
from nbmetalog import nbmetalog as nbm
import numpy as np
import pandas as pd
import pathlib
from scipy import stats
import seaborn as sns
from slugify import slugify
import statsmodels.api as sm
import statsmodels.formula.api as smf
from teeplot import teeplot as tp
from conduitpylib.utils import consolidate_merge
nbm.print_metadata()
# # Get Data
#
df_inlet = pd.read_csv(
'https://osf.io/jgpnv/download',
compression='gzip',
).dropna(
subset=['Process Instance UUID'],
)
nbm.print_dataframe_summary(*eval(nbm.nvp_expr(
'df_inlet'
)))
df_outlet = pd.read_csv(
'https://osf.io/ncdfq/download',
compression='gzip',
).dropna(
subset=['Process Instance UUID'],
)
nbm.print_dataframe_summary(*eval(nbm.nvp_expr(
'df_outlet'
)))
df = consolidate_merge(
df_inlet,
df_outlet,
on=['Process Instance UUID', 'Update'],
suffixes=(' Inlet', ' Outlet'),
how='outer',
)
if all(df_inlet['Runtime Seconds Elapsed'] == df_outlet['Runtime Seconds Elapsed']):
df['Runtime Seconds Elapsed Inlet'] = df['Runtime Seconds Elapsed']
df['Runtime Seconds Elapsed Outlet'] = df['Runtime Seconds Elapsed']
nbm.print_dataframe_synopsis(*eval(nbm.nvp_expr(
'df'
)))
# # Prep Data
#
df = df.astype({
'Num Inlets' : 'int64',
'Num Outlets' : 'int64',
'Num Puts Attempted' : 'int64',
'Num Try Puts Attempted' : 'int64',
'Num Blocking Puts' : 'int64',
'Num Try Puts That Succeeded' : 'int64',
'Num Puts That Succeeded Eventually' : 'int64',
'Num Blocking Puts That Succeeded Immediately' : 'int64',
'Num Puts That Succeeded Immediately' : 'int64',
'Num Puts That Blocked' : 'int64',
'Num Dropped Puts' : 'int64',
'Num Round Trip Touches Inlet' : 'int64',
'Net Flux Through Duct' : 'int64',
'proc' : 'int64',
'Snapshot' : 'int64',
'Has Execution Blur' : 'bool',
'Replicate' : 'int64',
'Async Mode' : 'int64',
'Num Threads' : 'int64',
'Num Processes' : 'int64',
'SLURM_NNODES' : 'int64',
'SLURM_NTASKS' : 'int64',
'SLURM_CPUS_ON_NODE' : 'int64',
})
# Derive analysis columns from the raw merged frame.
df['Hostname'] = df.apply(
    lambda row: kn.unpack(row['Source File Inlet'])['_hostname'],
    axis=1,
)
df['Num Nodes'] = df['SLURM_NNODES']
df['Num Tasks'] = df['SLURM_NTASKS']
df['Num Simels Per Cpu'] = df['Num Simulation Elements Per Cpu']
df['Num Cpus'] = df['Num Threads'] * df['Num Processes']
df['Allocated Tasks Per Node'] = df['Num Tasks'] // df['Num Nodes']
df['Cpus Per Node'] = df['Allocated Tasks Per Node']
# Mean simsteps per round-trip touch (the -1 discounts the first put/pull).
df['Delivery Time Inlet'] = (df['Num Puts Attempted'] - 1) / df['Num Round Trip Touches Inlet']
df['Delivery Time Outlet'] = (df['Num Pulls Attempted'] - 1) / df['Num Round Trip Touches Outlet']
df['Intermittancy'] = df['Num Pulls That Were Laden Immediately'] / df[['Net Flux Through Duct', 'Num Pulls Attempted']].min(axis=1)
df['Inlet-Seconds Elapsed'] = df['Num Inlets'] * df['Runtime Seconds Elapsed Inlet']
df['Outlet-Seconds Elapsed'] = df['Num Outlets'] * df['Runtime Seconds Elapsed Outlet']
df['Latency Simsteps Inlet'] = df['Delivery Time Inlet']
# BUGFIX: the original assigned 'Delivery Time Inlet' here as well; the
# parallel computations on df_world_sum and df_snapshot_diffs below use
# outlet quantities for outlet latency.
df['Latency Simsteps Outlet'] = df['Delivery Time Outlet']
df['Simstep Period Inlet (s)'] = df['Inlet-Seconds Elapsed'] / df['Num Puts Attempted']
df['Simstep Period Outlet (s)'] = df['Outlet-Seconds Elapsed'] / df['Num Pulls Attempted']
df['Latency Walltime Inlet (s)'] = df['Latency Simsteps Inlet'] * df['Simstep Period Inlet (s)']
df['Latency Walltime Outlet (s)'] = df['Latency Simsteps Outlet'] * df['Simstep Period Outlet (s)']
# Base-4 log, matching the 4x process-count scaling of the experiments.
df['Log Num Processes'] = np.log(df['Num Processes']) / np.log(4)
# +
row_distiller = lambda row: {k : v for k, v in row.items() if k in ('Num Nodes', 'Num Processes')}
allocation_idx_mapper = {
val : idx
for idx, val
in enumerate(df['Allocation'].unique())
}
allocation_idx_mapped_title = ' | '.join(f'{idx} = {val}' for val, idx in allocation_idx_mapper.items())
df[allocation_idx_mapped_title] = df.apply(
lambda row: allocation_idx_mapper[row['Allocation']],
axis=1,
)
# -
# # Prep DataFrame Variants
#
# https://stackoverflow.com/a/40629420
df_finalized_observations = df.sort_values('Update', ascending=False).drop_duplicates(['Process Instance UUID'])
df_blurry_snapshots = df[
df['Has Execution Blur'].astype(bool)
& (df['Snapshot'] <= 5 )
# exclude excess, unintended snapshots from runs that took a while to shut down
# (i.e., from at the 6 minute mark and beyond)
]
# +
df_world_sum = df_finalized_observations.groupby([
'Replicate',
'Async Mode',
'Num Processes',
'Num Nodes',
'Num Simels Per Cpu',
'Allocated Tasks Per Node',
'Cpus Per Node',
'Allocation',
allocation_idx_mapped_title,
], as_index=False).sum()
df_world_sum['Fraction Messages Utilized'] = df_world_sum['Num Reads That Were Fresh'] / df_world_sum['Num Try Puts Attempted']
df_world_sum['Fraction Messages Delivered'] = df_world_sum['Num Try Puts That Succeeded'] / df_world_sum['Num Try Puts Attempted']
df_world_sum['Delivery Failure Rate'] = 1.0 - df_world_sum['Fraction Messages Delivered']
df_world_sum['Fraction Messages Dropped'] = df_world_sum['Delivery Failure Rate']
df_world_sum['Fraction Try Pulls That Were Laden'] = df_world_sum['Num Try Pulls That Were Laden'] / df_world_sum['Num Try Pulls Attempted']
df_world_sum['Round Trip Touches Per Attempted Pull'] = df_world_sum['Num Round Trip Touches Outlet'] / df_world_sum['Num Try Pulls Attempted']
df_world_sum['Round Trip Touches Per Attempted Put'] = df_world_sum['Num Round Trip Touches Inlet'] / df_world_sum['Num Try Puts Attempted']
df_world_sum['Num Inflight Messages'] = 2.0 / df_world_sum['Round Trip Touches Per Attempted Put'] - 1
df_world_sum['Fraction Duct Flux Stepped Through'] = df_world_sum['Num Revisions Pulled'] / df_world_sum['Net Flux Through Duct']
df_world_sum['Fraction Duct Flux Jumped Over'] = 1.0 - df_world_sum['Fraction Duct Flux Stepped Through']
df_world_sum['Round Trip Touches Per Runtime Second'] = df_world_sum['Num Round Trip Touches Inlet'] / df_world_sum['Runtime Seconds Elapsed Inlet']
df_world_sum['Latency Simsteps Inlet'] = (df_world_sum['Num Puts Attempted'] - 1) / df_world_sum['Num Round Trip Touches Inlet']
df_world_sum['Latency Simsteps Outlet'] = (df_world_sum['Num Pulls Attempted'] - 1) / df_world_sum['Num Round Trip Touches Outlet']
df_world_sum['Delivery Clumpiness'] = 1.0 - df_world_sum['Num Pulls That Were Laden Immediately'] / df_world_sum[['Net Flux Through Duct', 'Num Pulls Attempted']].min(axis=1)
df_world_sum['Intermittancy'] = df_world_sum['Delivery Clumpiness']
df_world_sum['Simstep Period Inlet (s)'] = df_world_sum['Inlet-Seconds Elapsed'] / df_world_sum['Num Puts Attempted']
df_world_sum['Simstep Period Outlet (s)'] = df_world_sum['Outlet-Seconds Elapsed'] / df_world_sum['Num Pulls Attempted']
df_world_sum['Latency Walltime Inlet (s)'] = df_world_sum['Latency Simsteps Inlet'] * df_world_sum['Simstep Period Inlet (s)']
df_world_sum['Latency Walltime Outlet (s)'] = df_world_sum['Latency Simsteps Outlet'] * df_world_sum['Simstep Period Outlet (s)']
# -
df_snapshot_diffs = df_blurry_snapshots.groupby(
[
'Process Instance UUID',
'Snapshot',
# subsequent items aren't meaningful to groupby
# but are just included so they pass through untouched
'Async Mode',
'Num Nodes',
'Allocated Tasks Per Node',
'Cpus Per Node',
'Num Processes',
'Log Num Processes',
'Num Simels Per Cpu',
'Replicate',
'proc',
'Hostname',
'Num Inlets',
'Num Outlets',
'Execution Instance UUID',
'Num Threads',
'Allocation',
allocation_idx_mapped_title,
],
as_index=False,
).aggregate({
'Num Puts Attempted' : np.ptp,
'Num Try Puts Attempted' : np.ptp,
'Num Blocking Puts' : np.ptp,
'Num Try Puts That Succeeded' : np.ptp,
'Num Puts That Succeeded Eventually' : np.ptp,
'Num Blocking Puts That Succeeded Immediately' : np.ptp,
'Num Puts That Succeeded Immediately' : np.ptp,
'Num Puts That Blocked' : np.ptp,
'Num Dropped Puts' : np.ptp,
'Num Reads Performed' : np.ptp,
'Num Reads That Were Fresh' : np.ptp,
'Num Reads That Were Stale' : np.ptp,
'Num Revisions Pulled' : np.ptp,
'Num Try Pulls Attempted' : np.ptp,
'Num Blocking Pulls' : np.ptp,
'Num Blocking Pulls That Blocked' : np.ptp,
'Num Revisions From Try Pulls' : np.ptp,
'Num Revisions From Blocking Pulls' : np.ptp,
'Num Pulls Attempted' : np.ptp,
'Num Pulls That Were Laden Eventually' : np.ptp,
'Num Blocking Pulls That Were Laden Immediately' : np.ptp,
'Num Blocking Pulls That Were Laden Eventually' : np.ptp,
'Num Pulls That Were Laden Immediately' : np.ptp,
'Num Try Pulls That Were Laden' : np.ptp,
'Num Try Pulls That Were Unladen' : np.ptp,
'Net Flux Through Duct' : np.ptp,
'Num Round Trip Touches Inlet' : np.ptp,
'Num Round Trip Touches Outlet' : np.ptp,
# why are these missing?
# 'Row Initial Timepoint (ns) Inlet' : np.ptp,
# 'Row Initial Timepoint (ns) Outlet' : np.ptp,
'Row Final Timepoint (ns) Inlet' : np.ptp,
'Row Final Timepoint (ns) Outlet' : np.ptp,
'Runtime Seconds Elapsed Inlet' : np.mean,
'Runtime Seconds Elapsed Outlet' : np.mean,
})
# +
df_snapshot_diffs['Fraction Messages Delivered'] = (
df_snapshot_diffs['Num Try Puts That Succeeded']
/ df_snapshot_diffs['Num Try Puts Attempted']
)
df_snapshot_diffs['Delivery Success Rate'] = (
df_snapshot_diffs['Num Try Puts That Succeeded']
/ df_snapshot_diffs['Num Try Puts Attempted']
)
df_snapshot_diffs['Delivery Failure Rate'] = 1 - df_snapshot_diffs['Delivery Success Rate']
df_snapshot_diffs['Fraction Messages Dropped'] = df_snapshot_diffs['Delivery Failure Rate']
df_snapshot_diffs['Fraction Try Pulls That Were Laden'] = (
df_snapshot_diffs['Num Try Pulls That Were Laden']
/ df_snapshot_diffs['Num Try Pulls Attempted']
)
df_snapshot_diffs['Round Trip Touches Per Attempted Put'] = (
df_snapshot_diffs['Num Round Trip Touches Inlet']
) / df_snapshot_diffs['Num Try Puts Attempted']
df_snapshot_diffs['Round Trip Touches Per Attempted Pull'] = (
df_snapshot_diffs['Num Round Trip Touches Outlet']
) / df_snapshot_diffs['Num Try Pulls Attempted']
df_snapshot_diffs['Round Trip Touches Per Runtime Nanosecond'] = (
df_snapshot_diffs['Num Round Trip Touches Outlet']
) / df_snapshot_diffs['Row Final Timepoint (ns) Outlet']
df_snapshot_diffs['Latency Simsteps Inlet'] = df_snapshot_diffs['Num Puts Attempted'] / df_snapshot_diffs['Num Round Trip Touches Inlet']
df_snapshot_diffs['Latency Simsteps Outlet'] = df_snapshot_diffs['Num Pulls Attempted'] / df_snapshot_diffs['Num Round Trip Touches Outlet']
df_snapshot_diffs['Delivery Clumpiness'] = 1.0 - df_snapshot_diffs['Num Pulls That Were Laden Immediately'] / df_snapshot_diffs[['Net Flux Through Duct', 'Num Pulls Attempted']].min(axis=1)
df_snapshot_diffs['Intermittancy'] = df_snapshot_diffs['Delivery Clumpiness']
df_snapshot_diffs['Inlet-Nanoseconds Elapsed'] = df_snapshot_diffs['Num Inlets'] * df_snapshot_diffs['Row Final Timepoint (ns) Inlet']
df_snapshot_diffs['Outlet-Nanoseconds Elapsed'] = df_snapshot_diffs['Num Outlets'] * df_snapshot_diffs['Row Final Timepoint (ns) Outlet']
df_snapshot_diffs['Simsteps Elapsed Inlet'] = df_snapshot_diffs['Num Puts Attempted'] / df_snapshot_diffs['Num Inlets']
df_snapshot_diffs['Simsteps Elapsed Outlet'] = df_snapshot_diffs['Num Pulls Attempted'] / df_snapshot_diffs['Num Outlets']
df_snapshot_diffs['Simstep Period Inlet (ns)'] = df_snapshot_diffs['Inlet-Nanoseconds Elapsed'] / df_snapshot_diffs['Num Puts Attempted']
df_snapshot_diffs['Simstep Period Outlet (ns)'] = df_snapshot_diffs['Outlet-Nanoseconds Elapsed'] / df_snapshot_diffs['Num Pulls Attempted']
df_snapshot_diffs['Latency Walltime Inlet (ns)'] = df_snapshot_diffs['Latency Simsteps Inlet'] * df_snapshot_diffs['Simstep Period Inlet (ns)']
df_snapshot_diffs['Latency Walltime Outlet (ns)'] = df_snapshot_diffs['Latency Simsteps Outlet'] * df_snapshot_diffs['Simstep Period Outlet (ns)']
# +
df_snapshot_diffs = df_snapshot_diffs.astype({
'Num Inlets' : 'int64',
'Num Outlets' : 'int64',
'proc' : 'int64',
'Snapshot' : 'int64',
'Replicate' : 'int64',
'Async Mode' : 'int64',
'Num Threads' : 'int64',
'Num Processes' : 'int64',
'Allocated Tasks Per Node' : 'int64',
'Cpus Per Node' : 'int64',
'Num Nodes' : 'int64',
'Num Simels Per Cpu' : 'int64',
allocation_idx_mapped_title : 'int64',
})
# -
# # End-state Data Analysis
#
# This data appears to be skewed by ragged network launch/completion.
#
def facet_boxplot(*, data, col=None, row=None, x, y, showfliers=False):
    """Draw a faceted grid of boxplots of `y` versus `x`.

    A facet dimension (`col`/`row`) with only one unique value in `data` is
    collapsed to None so seaborn does not render a degenerate single facet.
    """
    g = sns.FacetGrid(
        data,
        col=col if col is not None and data[col].nunique() > 1 else None,
        row=row if row is not None and data[row].nunique() > 1 else None,
        margin_titles=True,
        sharey='row',
    )
    g.map_dataframe(
        sns.boxplot,
        # pass by keyword: sns.boxplot's x/y are keyword-only in seaborn >= 0.12,
        # so the previous positional form breaks on current seaborn
        x=x,
        y=y,
        showfliers=showfliers,
    )
    # label axes like the sibling facet helpers (facet_barplot et al.) do
    # adapted from https://stackoverflow.com/a/48208266
    g.set_axis_labels(x_var=x, y_var=y)
# ## Latency Walltime
#
# tp.tee renders the plot with the given visualizer and also writes it to
# disk, tagging the output file with the teeplot_* metadata attributes.
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Latency Walltime Inlet (s)',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='latency-walltime-inlet-s',
)
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Latency Walltime Outlet (s)',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='latency-walltime-outlet-s',
)
# ## Latency Simsteps
#
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Latency Simsteps Inlet',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='latency-simsteps-inlet',
)
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Latency Simsteps Outlet',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='latency-simsteps-outlet',
)
# ## Delivery Failure Rate
#
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Delivery Failure Rate',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='delivery-failure-rate',
)
# ## Delivery Clumpiness
#
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Delivery Clumpiness',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='delivery-clumpiness',
)
# ## Simstep Period
#
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Simstep Period Inlet (s)',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='simstep-period-inlet-s',
)
tp.tee(
    facet_boxplot,
    data=df_world_sum,
    row='Num Simels Per Cpu',
    x=allocation_idx_mapped_title,
    y='Simstep Period Outlet (s)',
    showfliers=True,
    teeplot_outattrs={
        **{
            'transform' : 'endstate_sumedbyrep',
        },
        **nbm.collate_outattr_metadata(),
    },
    teeplot_subdir='simstep-period-outlet-s',
)
# # Live Snapshot Analysis
#
def facet_barplot(*, data, col=None, row=None, x, y, hue=None):
    """Draw a faceted grid of barplots of `y` versus `x`."""
    # collapse facet dimensions that would produce only a single facet
    facet_kwargs = {
        'col': col if col is not None and data[col].nunique() > 1 else None,
        'row': row if row is not None and data[row].nunique() > 1 else None,
    }
    grid = sns.FacetGrid(
        data,
        margin_titles=True,
        sharey='row',
        **facet_kwargs,
    )
    grid.map_dataframe(sns.barplot, x=x, y=y, hue=hue)
    # adapted from https://stackoverflow.com/a/48208266
    grid.set_axis_labels(x_var=x, y_var=y)
def facet_boxplot_withfliers(*, data, col=None, row=None, x, y, hue=None):
    """Draw a faceted grid of boxplots of `y` versus `x`, outliers shown."""
    def _collapse(dim):
        # drop a facet dimension when it would yield just one facet
        return dim if dim is not None and data[dim].nunique() > 1 else None
    grid = sns.FacetGrid(
        data,
        col=_collapse(col),
        row=_collapse(row),
        margin_titles=True,
        sharey='row',
    )
    grid.map_dataframe(sns.boxplot, x=x, y=y, hue=hue, showfliers=True)
    # adapted from https://stackoverflow.com/a/48208266
    grid.set_axis_labels(x_var=x, y_var=y)
def facet_boxplot_nofliers(*, data, col=None, row=None, x, y, hue=None):
    """Draw a faceted grid of boxplots of `y` versus `x`, outliers hidden."""
    if col is not None and data[col].nunique() <= 1:
        col = None  # single-valued: would render a degenerate facet column
    if row is not None and data[row].nunique() <= 1:
        row = None  # single-valued: would render a degenerate facet row
    grid = sns.FacetGrid(
        data,
        col=col,
        row=row,
        margin_titles=True,
        sharey='row',
    )
    grid.map_dataframe(sns.boxplot, x=x, y=y, hue=hue, showfliers=False)
    # adapted from https://stackoverflow.com/a/48208266
    grid.set_axis_labels(x_var=x, y_var=y)
# ## Latency Walltime
#
# For each metric: render bar / boxplot-with-fliers / boxplot-without-fliers
# variants of the raw snapshot diffs, then tabulate mean and median per
# allocation grouping.
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Inlet (ns)',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-inlet-ns',
    )
# adapted from https://stackoverflow.com/a/13592901
# NOTE(review): passing np.mean/np.median to .agg is deprecated in recent
# pandas — prefer the strings 'mean'/'median'; left as-is here (applies to
# every .agg call in this section).
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Latency Walltime Inlet (ns)' : [
        np.mean,
        np.median,
    ],
})
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Outlet (ns)',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-outlet-ns',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Latency Walltime Outlet (ns)' : [
        np.mean,
        np.median,
    ],
})
# ## Latency Simsteps
#
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Inlet',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-inlet',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Latency Simsteps Inlet' : [
        np.mean,
        np.median,
    ],
})
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Outlet',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-outlet',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Latency Simsteps Outlet' : [
        np.mean,
        np.median,
    ],
})
# ## Delivery Failure Rate
#
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Failure Rate',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-failure-rate',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Delivery Failure Rate' : [
        np.mean,
        np.median,
    ],
})
# ## Delivery Clumpiness
#
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Clumpiness',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-clumpiness',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Delivery Clumpiness' : [
        np.mean,
        np.median,
    ],
})
# ## Simstep Period
#
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Inlet (ns)',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-inlet-ns',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Simstep Period Inlet (ns)' : [
        np.mean,
        np.median,
    ],
})
for viz in facet_barplot, facet_boxplot_withfliers, facet_boxplot_nofliers:
    tp.tee(
        viz,
        data=df_snapshot_diffs,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Outlet (ns)',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-outlet-ns',
    )
# adapted from https://stackoverflow.com/a/13592901
df_snapshot_diffs.groupby([
    allocation_idx_mapped_title,
]).agg({
    'Simstep Period Outlet (ns)' : [
        np.mean,
        np.median,
    ],
})
# # Model Fits
#
def make_regression_row(*, data, independent_variable, dependent_variable, regression, row_filter):
    """Fit one regression of `dependent_variable` on `independent_variable`.

    Returns a dict (one summary-table row) containing slope/intercept
    estimates with 95% CIs, absolute and relative effect sizes, significance,
    and provenance fields.  Also dumps the full statsmodels fit summary to a
    text file under outplots/.

    NOTE(review): `row_filter` is a source string passed to eval() — only
    ever feed it trusted, notebook-authored strings.
    """
    filtered_data = data[ data.apply(eval(row_filter), axis=1) ]
    # mean-based (OLS) vs median-based (quantile) fit
    regfun = {
        'Ordinary Least Squares Regression' : smf.ols,
        'Quantile Regression' : smf.quantreg,
    }[regression]
    # Q('...') quoting lets patsy handle column names with spaces/parentheses
    model = regfun(f"Q('{dependent_variable}') ~ Q('{independent_variable}')", filtered_data)
    fit_model = model.fit()
    slope = fit_model.params[f"Q('{independent_variable}')"]
    intercept = fit_model.params['Intercept']
    slope_ci_lb, slope_ci_ub = fit_model.conf_int().loc[f"Q('{independent_variable}')"].tolist()
    intercept_ci_lb, intercept_ci_ub = fit_model.conf_int().loc['Intercept'].tolist()
    p = fit_model.pvalues.loc[f"Q('{independent_variable}')"]
    # normalize to "control", i.e., lowest num processes observed
    effect_size_normalization_data = data[
        data[independent_variable] == data[independent_variable].min()
    ][dependent_variable]
    effect_size_normalization_factor = {
        'Ordinary Least Squares Regression' : lambda x: x.mean(),
        'Quantile Regression' : lambda x: x.median(),
    }[regression](effect_size_normalization_data)
    relative_effect_size = slope / effect_size_normalization_factor
    relative_effect_size_ci_lb = slope_ci_lb / effect_size_normalization_factor
    relative_effect_size_ci_ub = slope_ci_ub / effect_size_normalization_factor
    relative_effect_size_ci_width = (
        relative_effect_size_ci_ub
        - relative_effect_size_ci_lb
    )
    # non-finite p (e.g., degenerate fit) -> significance undeterminable
    is_significant = p < 0.05 if np.isfinite(p) else None
    res = {
        'Independent Variable' : independent_variable,
        'Dependent Variable' : dependent_variable,
        'Dependent Variable Slug' : slugify(dependent_variable),
        'Cpus Per Node' : ib.dub( data['Cpus Per Node'] ),
        'Num Simels Per Cpu' : ip.pophomogeneous( data['Num Simels Per Cpu'] ),
        'Slope Estimate' : slope,
        'Slope Estimate 95% CI Lower Bound' : slope_ci_lb,
        'Slope Estimate 95% CI Upper Bound' : slope_ci_ub,
        'Absolute Effect Size' : slope,
        'Absolute Effect Size 95% CI Lower Bound' : slope_ci_lb,
        'Absolute Effect Size 95% CI Upper Bound' : slope_ci_ub,
        'Absolute Effect Size 95% CI Width' : slope_ci_ub - slope_ci_lb,
        'Relative Effect Size' : relative_effect_size,
        'Relative Effect Size 95% CI Lower Bound' : relative_effect_size_ci_lb,
        'Relative Effect Size 95% CI Upper Bound' : relative_effect_size_ci_ub,
        'Relative Effect Size 95% CI Width' : relative_effect_size_ci_width,
        'Intercept Estimate' : intercept,
        'Intercept Estimate 95% CI Lower Bound' : intercept_ci_lb,
        'Intercept Estimate 95% CI Upper Bound' : intercept_ci_ub,
        # NOTE(review): for quantile regression, rsquared's meaning differs
        # from the OLS R^2 — confirm interpretation before comparing
        'R^2' : fit_model.rsquared,
        'p' : fit_model.pvalues.loc[f"Q('{independent_variable}')"],
        'Significant?' : is_significant,
        'Significant Effect Sign' : (
            '-' if is_significant and slope < 0
            else '+' if is_significant and slope > 0
            else '0' if is_significant is not None
            else None
        ),
        'n' : len(filtered_data),
        'Filter' : row_filter,
        'Num Processes' : ib.dub(filtered_data['Num Processes']),
        'Num Processes Prettyprint' : (
            '/'.join(filtered_data['Num Processes'].sort_values().astype(str).unique())
        ),
        'Regression Model' : regression,
        'Regression Model Slug' : slugify(regression),
        'Statistic' : {
            'Ordinary Least Squares Regression' : 'mean',
            'Quantile Regression' : 'median',
        }[regression],
    }
    # dump regression summary to file; filename encodes identifying attrs
    summary_filename = kn.pack({
        **{
            'a' : 'regression_summary',
            'ext' : '.txt',
        },
        **{
            slugify(k) : slugify(str(v))
            for k, v in res.items()
            if k in [
                'Independent Variable',
                'Dependent Variable',
                'Cpus Per Node',
                'Num Simels Per Cpu',
                'Regression Model',
            ]
        },
    })
    pathlib.Path('outplots').mkdir(parents=True, exist_ok=True)
    with open(f'outplots/{summary_filename}', 'w') as file:
        print(fit_model.summary(), file=file)
    return res
# +
dependent_variables = [
    'Latency Walltime Inlet (ns)',
    'Latency Walltime Outlet (ns)',
    'Latency Simsteps Inlet',
    'Latency Simsteps Outlet',
    'Delivery Failure Rate',
    'Delivery Clumpiness',
    'Simstep Period Inlet (ns)',
    'Simstep Period Outlet (ns)',
]
# best-case approximation to replace infs/nans
# see listings of infs/nans below
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
df_snapshot_diffs_copy['Latency Walltime Inlet (ns)'] = (
    df_snapshot_diffs_copy['Inlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Inlet'], 1)
)
df_snapshot_diffs_copy['Latency Walltime Outlet (ns)'] = (
    df_snapshot_diffs_copy['Outlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Outlet'], 1)
)
df_snapshot_diffs_copy['Latency Simsteps Inlet'] = (
    df_snapshot_diffs_copy['Num Puts Attempted']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Inlet'], 1)
)
df_snapshot_diffs_copy['Latency Simsteps Outlet'] = (
    df_snapshot_diffs_copy['Num Pulls Attempted']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Outlet'], 1)
)
df_snapshot_diffs_copy['Simstep Period Inlet (ns)'] = (
    df_snapshot_diffs_copy['Inlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Puts Attempted'], 1)
)
df_snapshot_diffs_copy['Simstep Period Outlet (ns)'] = (
    df_snapshot_diffs_copy['Outlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Pulls Attempted'], 1)
)
# NOTE(review): df_snapshot_diffs_copy (the inf/nan-patched frame built just
# above) is never referenced below — the regression data tuples group the
# ORIGINAL df_snapshot_diffs. Confirm whether the patched copy was meant to
# feed the regressions.
regression_data_tuples = [
    (
        'Ordinary Least Squares Regression',
        df_snapshot_diffs.groupby([
            'Execution Instance UUID',
        ]).mean().reset_index().astype({
            'Num Processes' : 'int64',
            'Allocated Tasks Per Node' : 'int64',
            'Cpus Per Node' : 'int64',
            'Num Simels Per Cpu' : 'int64',
        })
    ),
    (
        'Quantile Regression',
        df_snapshot_diffs.groupby([
            'Execution Instance UUID',
        ]).median().reset_index().astype({
            'Num Processes' : 'int64',
            'Allocated Tasks Per Node' : 'int64',
            'Cpus Per Node' : 'int64',
            'Num Simels Per Cpu' : 'int64',
        })
    ),
]
# row-level predicates, as eval-able source strings, applied before fitting
row_filters = [
    'lambda row: True',
]
# one summary row per (filter, regression flavor, simels-per-cpu stratum,
# dependent variable)
regression_results = pd.DataFrame.from_records([
    make_regression_row(
        data=data_subset,
        independent_variable=allocation_idx_mapped_title,
        dependent_variable=dependent_variable,
        regression=regression,
        row_filter=row_filter,
    )
    for row_filter in row_filters
    for regression, data in regression_data_tuples
    for _, data_subset in data.groupby([
        'Num Simels Per Cpu',
    ])
    for dependent_variable in dependent_variables
])
# +
# widen pandas display so the full results table renders in-notebook
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# show fits whose p-value came out non-finite (degenerate regressions)
regression_results[ ~np.isfinite(regression_results['p']) ]
# +
# Collate identifying attributes shared across all source data filenames;
# kn.unpack parses keyname-packed attribute=value filename segments.
input_attrs = pd.DataFrame.from_records([
    {
        k : v
        for k, v in kn.unpack(source_filename).items()
        if k and k[0] != '_' and k != 'ext'
    }
    for source_filename in [
        *df['Source File Inlet'].unique(),
        *df['Source File Outlet'].unique(),
    ]
]).dropna(
    axis='columns',
    how='any',
)
# output filename embedding the shared input attributes
out_filename = lambda readability: kn.pack({
    **{
        col : ib.dub(input_attrs[col])
        for col in input_attrs.columns
    },
    **{
        'a' : 'with_lac_417_vs_sans_lac_417_regression_results',
        'readability' : readability,
        'ext' : '.csv',
    },
})
# human-readable CSV
out_filepath = f"outplots/{out_filename('human')}"
print(out_filepath)
pathlib.Path('outplots').mkdir(parents=True, exist_ok=True)
regression_results.to_csv(
    out_filepath,
    index=False,
)
# LaTeX-csvreader-friendly CSV: alphanumeric-only column names
out_filepath = f"outplots/{out_filename('latexcsvreader')}"
print(out_filepath)
pathlib.Path('outplots').mkdir(parents=True, exist_ok=True)
regression_results.rename(
    columns=lambda col: ''.join(filter(str.isalnum, col)),
).to_csv(
    out_filepath,
    index=False,
    # fixed: pandas applies a callable float_format to each float SCALAR;
    # the previous lambda iterated its argument as if handed a whole column
    # and returned a list, which raises TypeError when pandas passes a float.
    # Mid-magnitude values get apostrophe thousands separators; others get
    # two significant figures.
    float_format=lambda x: (
        '{:_.0f}'.format(float(f'{x:.2g}')).replace('_', "'")
        if 10 < abs(x) < 10e5
        else f'{x:.2g}'
    ),
    na_rep='NaN',
)
# -
# widen pandas display, then render the full regression results table
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
regression_results
# +
# adapted from https://stackoverflow.com/questions/30385975/seaborn-factor-plot-custom-error-bars
# and https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html#visualization-errorbars
def errplot(x, y, err_lb, err_ub, **kwargs):
    """Bar plot of column `y` with asymmetric error bars spanning the
    `err_lb`/`err_ub` columns; expects `data` in kwargs (FacetGrid style)."""
    frame = kwargs.pop('data')
    # matplotlib wants error extents as (2, n) offsets from the bar heights
    bounds = frame[[err_lb, err_ub]].to_numpy()
    centers = frame[[y, y]].to_numpy()
    yerr = np.abs(bounds - centers).transpose()
    # zero reference line, drawn beneath the bars
    plt.axhline(
        y=0,
        zorder=1,
        color='black',
        linewidth=2,
    )
    axes = frame.plot(
        x=x,
        y=y,
        yerr=yerr,
        kind='bar',
        ax=plt.gca(),
        zorder=3,
        **kwargs,
    )
    axes.grid(
        axis='y',
        zorder=0,
    )
    if x is None:
        # no meaningful x categories: hide all x-axis ticks and labels
        # adapted from https://stackoverflow.com/a/12998531
        plt.tick_params(
            axis='x',
            which='both',
            bottom=False,
            top=False,
            labelbottom=False,
        )
def facet_errplot(*, data, x=None, y, err_lb, err_ub, estimated_statistic, col=None, row=None, size_inches=None, **kwargs):
    """Faceted errplot of `y` with [err_lb, err_ub] error bars.

    `estimated_statistic` labels the figure suptitle; extra kwargs pass
    through to errplot / pandas .plot.
    """
    g = sns.FacetGrid(
        # fixed: previously referenced the global `subset` leaked from a
        # caller's loop instead of the `data` parameter — it only worked by
        # coincidence when callers happened to pass data=subset
        data,
        col=col if col is not None and data[col].nunique() > 1 else None,
        row=row if row is not None and data[row].nunique() > 1 else None,
        margin_titles=True,
        sharey=False,
    )
    g.map_dataframe(
        errplot,
        x,
        y,
        err_lb,
        err_ub,
        **kwargs,
    )
    if size_inches is not None:
        plt.gcf().set_size_inches(*size_inches)
    # adapted from https://stackoverflow.com/a/29814281
    plt.gcf().subplots_adjust(top=0.9)
    plt.gcf().suptitle(
        f"Estimated Statistic = {estimated_statistic}",
    )
# -
# relative effect sizes, all dependent variables on one figure per model
for regression, subset in regression_results.groupby([
    'Regression Model',
]):
    tp.tee(
        # prevent filename length error
        lambda *args, **kwargs: facet_errplot(
            err_lb='Relative Effect Size 95% CI Lower Bound',
            err_ub='Relative Effect Size 95% CI Upper Bound',
            *args,
            **kwargs,
        ),
        data=subset,
        row='Num Simels Per Cpu',
        x='Dependent Variable',
        y='Relative Effect Size',
        estimated_statistic={
            'Quantile Regression' : 'Median',
            'Ordinary Least Squares Regression' : 'Mean',
        }[regression],
        size_inches=(8, 8),
        teeplot_outattrs={
            **{
                'transform' : 'fit_regression',
            },
            **nbm.collate_outattr_metadata(),
        },
    )
# +
# relative estimates, alternate
# (one figure per dependent variable, saved into a per-variable subdir)
for (regression, dependent_variable), subset in regression_results.groupby([
    'Regression Model',
    'Dependent Variable',
]):
    tp.tee(
        # prevent filename length error
        lambda *args, **kwargs: facet_errplot(
            err_lb='Relative Effect Size 95% CI Lower Bound',
            err_ub='Relative Effect Size 95% CI Upper Bound',
            *args,
            **kwargs,
        ),
        data=subset,
        row='Num Simels Per Cpu',
        y='Relative Effect Size',
        estimated_statistic={
            'Quantile Regression' : f'{dependent_variable} Median',
            'Ordinary Least Squares Regression' : f'{dependent_variable} Mean',
        }[regression],
        teeplot_outattrs={
            **{
                'transform' : 'fit_regression',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir=slugify(dependent_variable),
    )
# +
# absolute estimates
for (regression, dependent_variable), subset in regression_results.groupby([
    'Regression Model',
    'Dependent Variable',
]):
    tp.tee(
        # prevent filename length error
        lambda *args, **kwargs: facet_errplot(
            err_lb='Absolute Effect Size 95% CI Lower Bound',
            err_ub='Absolute Effect Size 95% CI Upper Bound',
            *args,
            **kwargs,
        ),
        data=subset,
        row='Num Simels Per Cpu',
        y='Absolute Effect Size',
        estimated_statistic={
            'Quantile Regression' : f'{dependent_variable} Median',
            'Ordinary Least Squares Regression' : f'{dependent_variable} Mean',
        }[regression],
        teeplot_outattrs={
            **{
                'transform' : 'fit_regression',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir=slugify(dependent_variable),
    )
def quantile_regplot(fit_reg=True, color=None, *args, **kwargs):
    """Drop-in regplot variant fitting a median (q=0.5) quantile regression.

    Expects seaborn-style `x`, `y`, `data` in kwargs. Scatters the data via
    sns.regplot (with its built-in fit disabled), then, if `fit_reg`,
    overlays the quantile-regression line plus a band swept out by lines
    through the x-range midpoint whose slopes span the 95% CI on the slope.
    """
    x, y, data = kwargs['x'], kwargs['y'], kwargs['data']
    # scatter only; our quantile fit below replaces regplot's OLS fit
    sns.regplot(
        *args,
        **kwargs,
        fit_reg=False,
        color=color,
    )
    if fit_reg:
        # Q('...') quoting lets patsy handle column names with spaces/parens
        model = smf.quantreg(
            f"Q('{y}') ~ Q('{x}')",
            data
        )
        res = model.fit(q=0.5)
        m = res.params[f"Q('{x}')"]
        b = res.params['Intercept']
        m_ci = res.conf_int().loc[f"Q('{x}')"].tolist()
        # (an unused intercept-CI lookup was removed here)
        # pivot the CI band around the midpoint of the observed x range
        center_x = np.mean([data[x].min(), data[x].max()])
        center_y = m * center_x + b
        xs = sorted(set(data[x]) | {center_x})
        ys = [
            m * x_ + b
            for x_ in xs
        ]
        # lower/upper envelopes of lines through (center_x, center_y) with
        # CI-bounded slopes
        y1 = [ min(
            m_ * ( x_ - center_x ) + center_y
            for m_ in m_ci
        ) for x_ in xs ]
        y2 = [ max(
            m_ * ( x_ - center_x ) + center_y
            for m_ in m_ci
        ) for x_ in xs ]
        plt.gca().plot(
            xs,
            ys,
            color=color,
        )
        plt.gca().fill_between(
            xs,
            y1,
            y2,
            alpha=0.2,
            color=color,
        )
# +
def unsplit_regression(*args, regplot, **kwargs):
    """Scatter the data in black, then overlay one pooled regression fit in
    purple (rather than a per-hue split fit)."""
    # discard the color FacetGrid injects; we set colors explicitly below
    kwargs.pop('color')
    # pass 1: points only, no fit line
    regplot(
        *args,
        **kwargs,
        color='black',
        fit_reg=False,
    )
    # pass 2: fit line only, no points
    regplot(
        *args,
        **kwargs,
        color='purple',
        scatter=False,
    )
    # force integer-only x tick labels
    # adapted from https://www.scivision.dev/matplotlib-force-integer-labeling-of-axis/
    locator = matplotlib.ticker.MaxNLocator(integer=True)
    plt.gca().xaxis.set_major_locator(locator)
def facet_unsplit_regression(*, data, col=None, row=None, x, y, regression, **kwargs):
    """Faceted scatter + single pooled regression fit of `y` versus `x`,
    using OLS or median quantile regression per `regression`."""
    regplot_impl = {
        'Ordinary Least Squares Regression' : sns.regplot,
        'Quantile Regression' : quantile_regplot,
    }[regression]
    # collapse facet dimensions that would produce only a single facet
    grid = sns.FacetGrid(
        data,
        col=col if col is not None and data[col].nunique() > 1 else None,
        row=row if row is not None and data[row].nunique() > 1 else None,
        margin_titles=True,
        sharey=False,
    )
    grid.map_dataframe(
        unsplit_regression,
        regplot=regplot_impl,
        x=x,
        y=y,
        **kwargs,
    )
    # adapted from https://stackoverflow.com/a/48208266
    grid.set_axis_labels(x_var=x, y_var=y)
    # adapted from https://stackoverflow.com/a/29814281
    plt.gcf().subplots_adjust(top=0.8)
    plt.gcf().suptitle(regression)
# -
# ## Latency Walltime Inlet (ns)
#
# list rows where the metric came out non-finite (inf/nan)
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Latency Walltime Inlet (ns)'])
][[
    'Latency Walltime Inlet (ns)',
    'Latency Walltime Outlet (ns)',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs/nans
# see listing of infs/nans above
df_snapshot_diffs_copy['Latency Walltime Inlet (ns)'] = (
    df_snapshot_diffs_copy['Inlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Inlet'], 1)
)
# per-run means -> OLS scatter + fit
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Inlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-inlet-ns',
    )
# +
# per-run medians -> quantile regression
# NOTE(review): this cell groups the UNpatched frame, so inf rows feed the
# median (usually harmless since median ignores extremes) — confirm intended
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Inlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-inlet-ns',
    )
# -
# ## Latency Walltime Outlet (ns)
#
# same pattern as the inlet section above, for the outlet-side metric
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Latency Walltime Outlet (ns)'])
][[
    'Latency Walltime Inlet (ns)',
    'Latency Walltime Outlet (ns)',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs/nans
# see listing of infs/nans above
df_snapshot_diffs_copy['Latency Walltime Outlet (ns)'] = (
    df_snapshot_diffs_copy['Outlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Outlet'], 1)
)
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Outlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-outlet-ns',
    )
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Walltime Outlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-walltime-outlet-ns',
    )
# -
# ## Latency Simsteps Inlet
#
# list rows where the metric came out non-finite (inf/nan)
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Latency Simsteps Inlet'])
][[
    'Latency Simsteps Inlet',
    'Latency Simsteps Outlet',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs/nans
# see listing of infs/nans above
df_snapshot_diffs_copy['Latency Simsteps Inlet'] = (
    df_snapshot_diffs_copy['Num Puts Attempted']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Inlet'], 1)
)
# per-run means -> OLS scatter + fit
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    # added for consistency: every sibling cell also downcasts the
    # allocation index column so plots/fits see an integer x variable
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Inlet',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-inlet',
    )
# +
# per-run medians -> quantile regression
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Inlet',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-inlet',
    )
# -
# ## Latency Simsteps Outlet
#
# same pattern as the inlet section above, for the outlet-side metric
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Latency Simsteps Outlet'])
][[
    'Latency Simsteps Inlet',
    'Latency Simsteps Outlet',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs/nans
# see listing of infs/nans above
df_snapshot_diffs_copy['Latency Simsteps Outlet'] = (
    df_snapshot_diffs_copy['Num Pulls Attempted']
    / np.maximum(df_snapshot_diffs_copy['Num Round Trip Touches Outlet'], 1)
)
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Outlet',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-outlet',
    )
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Latency Simsteps Outlet',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='latency-simsteps-outlet',
    )
# -
# ## Delivery Failure Rate
#
# no inf-patch cell here: this metric has no division that can blow up
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Delivery Failure Rate'])
][[
    'Delivery Failure Rate',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Failure Rate',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-failure-rate',
    )
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Failure Rate',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-failure-rate',
    )
# -
# ## Delivery Clumpiness
#
# no inf-patch cell here either; listing below shows any non-finite rows
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Delivery Clumpiness'])
][[
    'Delivery Clumpiness',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Clumpiness',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-clumpiness',
    )
# +
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Delivery Clumpiness',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='delivery-clumpiness',
    )
# -
# ## Simstep Period Inlet (ns)
#
# List rows where Simstep Period Inlet is non-finite (division by zero puts
# attempted produces inf) before repairing them below.
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Simstep Period Inlet (ns)'])
][[
    'Simstep Period Inlet (ns)',
    'Simstep Period Outlet (ns)',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs
# see listing of infs above
# Clamping the denominator to at least 1 turns the inf rows (zero puts
# attempted) into "elapsed ns / 1", a best-case period estimate.
df_snapshot_diffs_copy['Simstep Period Inlet (ns)'] = (
    df_snapshot_diffs_copy['Inlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Puts Attempted'], 1)
)
# Per-execution-instance means (infs repaired above, so the mean is finite);
# astype restores integer dtypes promoted to float by the aggregation.
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Inlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-inlet-ns',
    )
# +
# Per-execution-instance medians with a quantile-regression fit.
# NOTE(review): this cell aggregates the raw df_snapshot_diffs, not the
# inf-repaired df_snapshot_diffs_copy used by the mean cell above —
# presumably because the median is robust to the few non-finite rows
# listed earlier, but confirm this asymmetry is intentional.
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Inlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-inlet-ns',
    )
# -
# ## Simstep Period Outlet (ns)
#
# List rows where Simstep Period Outlet is non-finite before repairing them
# below (mirrors the inlet-period treatment above).
df_snapshot_diffs[
    ~np.isfinite(df_snapshot_diffs['Simstep Period Outlet (ns)'])
][[
    'Simstep Period Inlet (ns)',
    'Simstep Period Outlet (ns)',
    'Snapshot',
    'Runtime Seconds Elapsed Outlet',
    'Hostname',
    'Replicate',
    'Num Simels Per Cpu',
    'Cpus Per Node',
    'Num Processes',
]]
# +
df_snapshot_diffs_copy = df_snapshot_diffs.copy()
# best-case approximation to replace infs
# see listing of infs above
# Clamping the denominator to at least 1 turns the inf rows (zero pulls
# attempted) into "elapsed ns / 1", a best-case period estimate.
df_snapshot_diffs_copy['Simstep Period Outlet (ns)'] = (
    df_snapshot_diffs_copy['Outlet-Nanoseconds Elapsed']
    / np.maximum(df_snapshot_diffs_copy['Num Pulls Attempted'], 1)
)
# Per-execution-instance means; astype restores integer dtypes promoted to
# float by the aggregation.
data = df_snapshot_diffs_copy.groupby([
    'Execution Instance UUID',
]).mean().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Outlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Ordinary Least Squares Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-mean',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-outlet-ns',
    )
# +
# Per-execution-instance medians with a quantile-regression fit.
# NOTE(review): like the inlet-period median cell, this aggregates the raw
# df_snapshot_diffs rather than the inf-repaired copy — presumably because
# the median tolerates the few non-finite rows; confirm intentional.
data = df_snapshot_diffs.groupby([
    'Execution Instance UUID',
]).median().reset_index().astype({
    'Num Processes' : 'int64',
    'Allocated Tasks Per Node' : 'int64',
    'Cpus Per Node' : 'int64',
    'Num Simels Per Cpu' : 'int64',
    allocation_idx_mapped_title : 'int64',
})
for viz in facet_unsplit_regression,:
    tp.tee(
        viz,
        data=data,
        row='Num Simels Per Cpu',
        x=allocation_idx_mapped_title,
        y='Simstep Period Outlet (ns)',
        marker='+',
        x_jitter=0.15,
        regression='Quantile Regression',
        teeplot_outattrs={
            **{
                'transform' : 'snapshot_diffs-groupby_exec_instance-median',
            },
            **nbm.collate_outattr_metadata(),
        },
        teeplot_subdir='simstep-period-outlet-ns',
    )
# -
# # Outlier Analysis
#
# Widen pandas display limits so the outlier rows print in full.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# High-latency outliers at the lowest simel density.
df_snapshot_diffs[
    (df_snapshot_diffs['Latency Simsteps Inlet'] > 50)
    & (df_snapshot_diffs['Num Simels Per Cpu'] == 1)
]
# (display options re-applied; redundant but harmless in a notebook cell)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# High-latency outliers at the highest simel density.
df_snapshot_diffs[
    (df_snapshot_diffs['Latency Simsteps Inlet'] > 50)
    & (df_snapshot_diffs['Num Simels Per Cpu'] == 2048)
]
| binder/date=2021+project=72k5n/a=with-lac-417-vs-sans-lac-417/a=with-lac-417-vs-sans-lac-417+compute-work=0+simels_per_cpu=2048+ext=.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **DSFM Demo**: Credit Default - Simple Models
# Creator: [Data Science for Managers - EPFL Program](https://www.dsfm.ch)
# Source: [https://github.com/dsfm-org/code-bank.git](https://github.com/dsfm-org/code-bank.git)
# License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository.
# -------------
# ## Overview
# In this demo, we try to predict the probability of default on credit card bills using a dataset of customers payments from a Taiwanese bank. A credit default happens when you fail to pay the minimum payment by the due date mentioned on your credit card bill for more than 6 months. From a risk management perspective, the accuracy of the predicted probability of default is more valuable than just a binary prediction (classification) of default itself.
# <img src="https://greendayonline.com/wp-content/uploads/2017/03/Recovering-From-Student-Loan-Default.jpg" width="500" height="500" align="center"/>
#
#
# Image: https://greendayonline.com/wp-content/uploads/2017/03/Recovering-From-Student-Loan-Default.jpg
# #### The Credit Card Default Dataset
# We will try to predict the probability of defaulting on a credit card account at a Taiwanese bank. A credit card default happens when a customer fails to pay the minimum due on a credit card bill for more than 6 months.
#
# We will use a dataset from a Taiwanese bank with 30,000 observations (Source: *Yeh, I. C., & Lien, <NAME>. (2009). The comparisons of data mining techniques for the predictive accuracy of probability of default of credit card clients. Expert Systems with Applications, 36(2), 2473-2480.*). Each observation represents an account at the bank at the end of October 2005. We renamed the variable default_payment_next_month to customer_default. The target variable to predict is `customer_default` -- i.e., whether the customer will default in the following month (1 = Yes or 0 = No). The dataset also includes 23 other explanatory features.
#
# Variables are defined as follows:
# | Feature name | Variable Type | Description
# |------------------|---------------|--------------------------------------------------------
# | customer_default | Binary | 1 = default in following month; 0 = no default
# | LIMIT_BAL | Continuous | Credit limit
# | SEX | Categorical | 1 = male; 2 = female
# | EDUCATION | Categorical | 1 = graduate school; 2 = university; 3 = high school; 4 = others
# | MARRIAGE | Categorical | 0 = unknown; 1 = married; 2 = single; 3 = others
# | AGE | Continuous | Age in years
# | PAY1 | Categorical | Repayment status in September, 2005
# | PAY2 | Categorical | Repayment status in August, 2005
# | PAY3 | Categorical | Repayment status in July, 2005
# | PAY4 | Categorical | Repayment status in June, 2005
# | PAY5 | Categorical | Repayment status in May, 2005
# | PAY6 | Categorical | Repayment status in April, 2005
# | BILL_AMT1 | Continuous | Balance in September, 2005
# | BILL_AMT2 | Continuous | Balance in August, 2005
# | BILL_AMT3 | Continuous | Balance in July, 2005
# | BILL_AMT4 | Continuous | Balance in June, 2005
# | BILL_AMT5 | Continuous | Balance in May, 2005
# | BILL_AMT6 | Continuous | Balance in April, 2005
# | PAY_AMT1 | Continuous | Amount paid in September, 2005
# | PAY_AMT2 | Continuous | Amount paid in August, 2005
# | PAY_AMT3 | Continuous | Amount paid in July, 2005
# | PAY_AMT4 | Continuous | Amount paid in June, 2005
# | PAY_AMT5 | Continuous | Amount paid in May, 2005
# | PAY_AMT6 | Continuous | Amount paid in April, 2005
# The measurement scale for repayment status is:
#
# -2 = payment two months in advance
# -1 = payment one month in advance
# 0 = pay duly
# 1 = payment delay for one month
# 2 = payment delay for two months
# 3 = payment delay for three months
# 4 = payment delay for four months
# 5 = payment delay for five months
# 6 = payment delay for six months
# 7 = payment delay for seven months
# 8 = payment delay for eight months
# 9 = payment delay for nine months or more
# -------
# ## **Part 0**: Setup
#
# Put all import statements, constants and helper functions at the top of your notebook.
# +
# Standard imports
import numpy as np
import pandas as pd
import pandas_profiling
import itertools
# Visualization packages
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="white")
# Modeling and evaluation utilities
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
# Special code to ignore un-important warnings
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# -
# Constants
# Default figure size (inches) used by every plot in this notebook.
FIGSIZE = (10, 6)
# +
# Define a helper function to visualize the confusion matrix
# Note optional parameter for normalization; apply normalization by setting `normalize=True`
def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion Matrix', cmap=plt.cm.Reds):
    """
    Function to plot a sklearn confusion matrix, showing number of cases per prediction condition
    Args:
        cm          an sklearn confusion matrix
        classes     levels of the class being predicted; default to binary outcome
                    (NOTE(review): currently unused by the plotting code — confirm whether
                    tick labels were intended)
        normalize   apply normalization by setting `normalize=True`
        title       title for the plot
        cmap        color map
    """
    # Normalize each row to proportions *before* drawing, so the heatmap colors,
    # the colorbar, and the cell annotations all reflect the same values.
    # (Previously the image was drawn from raw counts while the text showed
    # normalized values, making the plot internally inconsistent.)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure(figsize=FIGSIZE)
    plt.imshow(cm, aspect='auto', interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.locator_params(nbins=2)
    # threshold for flipping annotation color so text stays readable on dark cells
    thresh = cm.max() / 2.
    # add FP, TP, FN, TN counts
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, round(cm[i, j], 2), horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
# -
# ## **Part 1**: Data Preprocessing and EDA
#
# First, we would like to understand the main characteristics of the dataset. We might need to transform and clean some features before we can specify a statistical model.
# Load data into a dataframe
data = pd.read_csv('credit_data.csv')
data.head()
# Print data columns
data.columns
# Dimension of data in the form of (number of observations, number of features)
data.shape
# Check distribution of an arbitrary column
data['PAY_1'].hist(bins=40)
# Another way to check the distribution of values
data['MARRIAGE'].value_counts()
# Get overall statistics about data frame
data.describe()
# Investigate 'EDUCATION' field
# Notice the mismatch between data description and actual data!
data['EDUCATION'].value_counts()
# Cross-tab 'MARRIAGE' and the target
pd.crosstab(data['MARRIAGE'], data['customer_default'], margins = True)
# Check data types
data.dtypes
# Check distribution of target feature
data['customer_default'].hist()
# +
# Divide dataframe into two data frames, with and without credit default
data_def_1 = data[data['customer_default'] == 1]
data_def_0 = data[data['customer_default'] == 0]
# Check distribution of different features in with and without default dataframes as well as full dataframe
plt.figure(figsize=FIGSIZE)
plt.style.use('seaborn-deep')
col = 'LIMIT_BAL'
plt.xlabel(col)
plt.ylabel('count')
# Overlay the three histograms (non-default / default / all) for comparison
plt.hist([data_def_0[col], data_def_1[col], data[col]], label=['non_default-'+col, 'default-'+col, 'all-'+col])
plt.legend(loc='upper right')
plt.show()
# -
# Check distribution of different features using box plot
plt.figure(figsize=FIGSIZE)
data.boxplot(column='LIMIT_BAL')
plt.ylabel('Feature value')
plt.show()
# +
# Visualize correlation of different features with respect to each other
# Compute the correlation matrix
corr = data.corr()
# Generate a mask for the upper triangle
# `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `bool` is the supported spelling and behaves identically here.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Setup the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.2)
# -
# Visualize distribution of one feature with respect to another one
sns.jointplot(x = "LIMIT_BAL", y = "customer_default", data = data)
# Visualize pair-wise distribution of a set of features
sample_data = data.iloc[:,1:5]
sns.pairplot(sample_data)
# ### Exploratory Data Analysis (the automated approach)
#
# Instead of performing all of the steps above manually, you can also run a "profile" on the dataset first, and then drill down into specific cases of interest.
# +
# Use the automated pandas profiling utility to examine the dataset
# data.profile_report()
# -
# ## **Part 2**: Data Preprocessing and Designing Cross-validation Schema
# Among different features, SEX and MARRIAGE are categorical while others are either numerical or ordinal with acceptable labeling.
# One hot encoding of sex and marital status
# Applying one-hot encoding for other fields doesn't make sense since they are either numeric or ordinal
cols_to_transform = ['SEX', 'MARRIAGE']
# get_dummies expands each listed column into one indicator column per level
data_with_dummies = pd.get_dummies(data=data, columns = cols_to_transform)
data_with_dummies.head()
# +
# Separate features and target
cols_X = [ 'LIMIT_BAL', 'EDUCATION', 'AGE', 'PAY_1', 'PAY_2', 'PAY_3',
       'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3',
       'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2',
       'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6', 'SEX_1', 'SEX_2', 'MARRIAGE_0',
       'MARRIAGE_1', 'MARRIAGE_2', 'MARRIAGE_3']
col_y = 'customer_default'
X = data_with_dummies.loc[:, cols_X]
y = data_with_dummies[col_y]
# -
# Divide data into training and testing sets
# stratify=y keeps the default/no-default ratio equal in both splits
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify=y)
print(X_train.shape , X_test.shape , y_train.shape , y_test.shape)
# ## **Part 3**: Classification using Dummy Classifier and Logistic Regression
# ### Model 1: Baseline
#
# A good practice is to start with a "dumb" model that simply predicts the average (for a regression) or the most frequent outcome (for classification). That gives you sense of how well a complex model performs compared to the most simple model. As this is a classification problem, we will start by predicting No Default.
# +
# Define a baseline using dummy classifier that predicts the most frequent label
dummy_clf = DummyClassifier(strategy='most_frequent')
dummy_clf.fit(X_train, y_train)
# Predict default - as a binary outcome and a probability between 0 and 1
y_pred_dummy = dummy_clf.predict(X_test)
y_pred_dummy_proba = dummy_clf.predict_proba(X_test)
# Plot confusion matrix
cm_dummy = confusion_matrix(y_test, y_pred_dummy)
plot_confusion_matrix(cm_dummy)
# -
# How many errors did the dumb model make?
errors_dummy = cm_dummy[0][1] + cm_dummy[1][0]
# Total test samples = sum of all four confusion-matrix cells.
total_dummy = cm_dummy.sum()
print('Num Errors =', errors_dummy, '\n')
# Accuracy = correct predictions / all predictions.  The previous
# `sum(cm_dummy)[0]` summed only the first *column* (samples predicted as
# class 0); it happened to equal the total here only because the dummy
# predicts class 0 for everyone.
print('Accuracy =', "{0:.4f}".format(float((total_dummy - errors_dummy)/total_dummy)))
print('AUC Score =', "{0:.4f}".format(roc_auc_score(y_test, y_pred_dummy_proba[:, 1])))
# ### Model 2: Logit model (no regularization)
# +
# Binary classification using a logit model
# C is the inverse regularization strength; 10e9 (= 1e10) makes the
# penalty negligible, matching the "no regularization" heading above.
lr_clf = LogisticRegression(C=10e9)
lr_clf.fit(X_train, y_train)
# Predict default - as a binary outcome and a probability between 0 and 1
y_pred_lr = lr_clf.predict(X_test)
y_pred_lr_proba = lr_clf.predict_proba(X_test)
# Plot confusion matrix
cm_lr = confusion_matrix(y_test, y_pred_lr)
plot_confusion_matrix(cm_lr,)
# -
# How many errors did the logit model make?
# (comment previously said "dumb model" — copy-paste slip from the cell above)
errors_lr = cm_lr[0][1] + cm_lr[1][0]
# Total test samples = sum of all four confusion-matrix cells.
total_lr = cm_lr.sum()
print('Num Errors =', errors_lr, '\n')
# Accuracy = correct predictions / all predictions.  The previous
# `sum(cm_lr)[0]` summed only the first *column* (samples predicted as
# class 0), which misstates accuracy whenever the model predicts any
# positives — unlike the dummy, the logit model does.
print('Accuracy =', "{0:.4f}".format(float((total_lr - errors_lr)/total_lr)))
print('AUC Score =', "{0:.4f}".format(roc_auc_score(y_test, y_pred_lr_proba[:, 1])))
# ## **Summary of performance metrics**
# ### Accuracy
width = 30
models = ['Dummy', 'Logit']
# Accuracy = (total - errors) / total, using the full confusion-matrix sum.
# The previous `sum(cm)[0]` counted only samples *predicted* as class 0,
# overstating the logit model's accuracy.
results = [float((cm_dummy.sum() - errors_dummy)/cm_dummy.sum()), float((cm_lr.sum() - errors_lr)/cm_lr.sum())]
print('', '=' * width, '\n', 'Summary of Accuracy Scores'.center(width), '\n', '=' * width)
for i in range(len(models)):
    print(models[i].center(width-8), '{0:.4f}'.format(results[i]))
# ### AUC
width = 25
models = ['Dummy', 'Logit']
# AUC is computed from the predicted probabilities (column 1 = P(default)),
# not the hard labels, so it is threshold-independent.
results = [roc_auc_score(y_test, y_pred_dummy_proba[:, 1]), roc_auc_score(y_test, y_pred_lr_proba[:, 1])]
print('', '=' * width, '\n', 'Summary of AUC Scores'.center(width), '\n', '=' * width)
for i in range(len(models)):
    print(models[i].center(width-8), '{0:.4f}'.format(results[i]))
# ## **Part 4**: Discussion
# **Accuracy?**
# Note that the logit model did WORSE than the baseline model above (at least when predicting class labels with a default probability threshold and when evaluated by the total number of errors made by the model). Using accuracy to compare models, however, has two drawbacks for this credit default problem:
#
# 1. The distribution of the target outcome variable is not balanced. Therefore, a model that uniformly predicts that NO customer will default can still get a high accuracy.
#
# 2. Different probability thresholds for making a "positive" prediction can lead to different performance results in the model. Thus, it may be more sensible to configure the model to predict and use probabilities, instead of directly predicting a label.
# A more useful metric for classification is the "**Area under the ROC curve**" (also called the **AUC**). The AUC gives you the probability that a randomly selected customer that is labeled as defaulting will be assigned a higher probability of default than a randomly selected customer that is labeled as NOT defaulting. The AUC also does not suffer from class skewness, and therefore is able to adjust for probabilities directly (instead of the distribution of class labels). But looking above, how do AUC scores actually compare...?
# **Do we need to standardize the data?**
# It is not necessary to normalize or standardize data for linear models when there is no regularization or special optimization procedure involved. Estimated coefficients will adjust to non-normalized data and the accuracy of the prediction will not be affected.
# **Do we need to use a _Pipeline_?** (What is a Pipeline, anyway?)
# Regularized models and other, more-complex models often require **standardized data** to work properly. However, you cannot standardize all of the data up-front in the beginning; if you do, then information will "leak" from the training data into the testing data because the standardization will reflect both datasets. You therefore need a special way to process data and models so that processing steps can be defined up-front, but run later on only the relevant sub-section of data. You accomplish that by using a late-execution "**pipeline**." _Pipelines can be confusing at first, but eventually you will get the hang of it._
# **Was our training/testing method robust?**
#
# One concern with the previous training/testing approach is that the relative performance of the baseline model and the Logit model might depend on the particular training/testing split in the data that was performed. In a more advanced analysis, we might use k-fold cross-validation for a more reliable testing estimate. Another concern with the previous training/testing approach arises when we need to "tune" a model with a hyper-parameter for better performance. In that case, hyper-parameter tuning would pick up information from the testing data and overfit the model. In the models that follow, we will need to tune hyper-parameters, so we will now need to change our testing strategy and generate three data splits: **training** (60%), **validation** (20%) and **test** (20%). We can do this by first splitting a training/testing set 80/20, and then further split the training set into a smaller training set and a validation set. The terminology for these sets can be confusing (and different resources use different names for the sets), but in this example we will create a "Validation Set" that pulls from what was originally part of the training set. Most importantly, one should set aside a testing set that is *never used* until the very final test.
| demos/credit-default-simple/credit-default-simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Quality Report
# Change into the package source directory so `arche` imports from the repo.
# %cd -q ../../../src/
# Wildcard import is notebook convenience; it brings `Arche` into scope.
from arche import *
# Validate Scrapy Cloud job 381798/1/4 against the published books schema.
a = Arche("381798/1/4", schema="https://raw.githubusercontent.com/scrapinghub/arche/master/docs/source/nbs/data/books.json")
a.data_quality_report()
| docs/source/nbs/DQR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## import tensorflow and other packages
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Define initial input x and output y
# Python 2 notebook: `print` is a statement here, not a function.
# Build 50 evenly spaced (x, y) pairs along a line, then jitter them.
num_examples = 50
x=np.linspace(-2, 4, num_examples)
y=np.linspace(-6, 6, num_examples)
print x
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.show()
# ## Generate random perturbation
randnum=np.random.random([num_examples])
print randnum
x += randnum #an 1-d array with random numbers
y += np.random.random([num_examples])
plt.figure(figsize=(4,4))
plt.scatter(x, y)
plt.show()
# ## Add a constant element to input array
# Prepend a constant 1.0 to each x so the model can learn an intercept.
x_with_bias = np.array([(1., a) for a in x]).astype(np.float32)
print x_with_bias
# ## Train a neural network with Gradient Descent
# The objective is minimizing L2 loss
# +
# Fit y = b0 + b1*x by gradient descent on the L2 loss.
# NOTE: uses pre-1.0 TensorFlow APIs (tf.Session, tf.sub,
# tf.initialize_all_variables) that were removed in later releases.
losses = []
training_steps = 50
learning_rate = 0.002
with tf.Session() as sess:
    # Set up all the tensors, variables, and operations.
    # `input` shadows the Python builtin of the same name (scoped to this cell).
    input = tf.constant(x_with_bias)
    target = tf.constant(np.transpose([y]).astype(np.float32))
    # Weights [intercept, slope] initialized from N(0, 0.1).
    weights = tf.Variable(tf.random_normal([2, 1], 0, 0.1))
    tf.initialize_all_variables().run()
    # yhat is a vector in this case
    yhat = tf.matmul(input, weights)
    yerror = tf.sub(yhat, target)
    loss = tf.nn.l2_loss(yerror)
    update_weights = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    for _ in range(training_steps):
        update_weights.run()
        losses.append(loss.eval())
        #print _ #It takes on value from 0 to 49
        #print losses #this shows losses array keep increasing in size: [18] , [18, 13],...
    # Training is done, get the final values for the graphs
    betas = weights.eval()
    yhat = yhat.eval()
# -
# ## Show the actual and predicted data points
# +
# Scatter the data, overlay predictions, and draw the fitted line.
plt.figure(figsize=(4,4))
plt.scatter(x, y, alpha=.9) #plot original x and y
plt.scatter(x, np.transpose(yhat)[0], c="g", alpha=.6) #plot x and yhat
x_range = (-4, 6)
# Fitted line evaluated at the two endpoints: y = betas[0] + betas[1]*x.
plt.plot(x_range, [betas[0] + a * betas[1] for a in x_range], "g", alpha=0.6)
plt.show()
# -
# ## Plot the prediction error over time
# +
# Show the loss over time.
plt.figure(figsize=(4,4))
plt.plot(range(0, training_steps), losses)
#plt.set_ylabel("Loss")
#plt.set_xlabel("Training steps")
plt.show()
# -
# ## Exercise: Build a neural network to predict room occupancy
# +
import csv
# Split data into inputs (5 cols) and output (1 col)
def load_data(filename):
    """Read a CSV file; return (features, target) as float32 arrays.

    The first row (header) is skipped; the last column is the target
    and all preceding columns are the features.
    """
    # NOTE: these two initial assignments are dead — both names are
    # reassigned below before use.
    x=[]
    target=[]
    a=[]
    with open(filename) as csv_file:
        data_file = csv.reader(csv_file)
        for row in data_file:
            a.append(row)
    print a[0]
    np_a=np.array(a)
    x=np_a[1:,:-1].astype(np.float32) #read after first row
    target=np_a[1:,-1].astype(np.float32)
    return x, target
# -
room_X, room_y=load_data('data/room/train.csv')
print len(room_X)
print len(room_y)
#*** Inspect data
n_samples=len(room_X)
n_features = len(room_X[0])
print n_samples
print n_features
print room_X[0], room_y[0]
# +
# Logistic regression on the room-occupancy features, trained by gradient
# descent on the mean sigmoid cross-entropy.  Uses pre-1.0 TensorFlow APIs.
losses = []
# One update per training sample count (full-batch updates, n_samples times).
training_steps = n_samples
learning_rate = 0.01
with tf.Session() as sess:
    # Set up all the tensors, variables, and operations.
    input = tf.constant(room_X)
    target = tf.constant(np.transpose([room_y]).astype(np.float32))
    # One weight per feature (5 features), initialized from N(0, 0.1).
    weights = tf.Variable(tf.random_normal([5, 1], 0, 0.1))
    tf.initialize_all_variables().run()
    # yhat is a matrix
    yhat = tf.matmul(input, weights)
    # loss is a matrix
    lossDistribution = tf.nn.sigmoid_cross_entropy_with_logits(yhat, target, name="loss")
    # take the mean of the lossDistribution vector
    lossAvg = tf.reduce_mean(lossDistribution)
    update_weights = tf.train.GradientDescentOptimizer(learning_rate).minimize(lossAvg)
    for _ in range(training_steps):
        update_weights.run()
        losses.append(lossAvg.eval())
        #print _ #It takes on value from 0 to 49
        #print losses #this shows losses array keep increasing in size: [18] , [18, 13],...
    # Training is done, get the final values for the graphs
    finalWeightsBetas = weights.eval()
    yhat = yhat.eval()
    finalLossDistribution = lossDistribution.eval()
# -
print yhat
print finalWeightsBetas
print finalLossDistribution
print losses
# +
plt.figure(figsize=(4,4))
# NOTE(review): the hard-coded 1999 assumes len(losses) >= 1999; it will
# silently truncate (or plot fewer points) if training_steps differs —
# consider using training_steps here.
plt.plot(range(0, 1999), losses[0:1999])
# -
| jupyterhub/notebooks/zz_under_construction/zz_old/TensorFlow/Labs/lab2_NN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercício de Programação 5: Regressão
#
# <font color="red">**Prazo de submissão: 23:55 do dia 16/03/2021** </font>
#
# 2020.2 Álgebra Linear Computacional - DCC - UFMG
#
# Erickson - Fabricio
#
# Instruções:
# * Antes de submeter suas soluções, certifique-se de que tudo roda como esperado. Primeiro, **reinicie o kernel** no menu, selecione Kernel$\rightarrow$Restart e então execute **todas as células** (no menu, Cell$\rightarrow$Run All)
# * Apenas o arquivo .ipynb deve ser submetido. Ele não deve ser compactado.
# * Não deixe de preencher seu nome e número de matrícula na célula a seguir
#
# **Nome do aluno:** <NAME>
#
# **Matricula:** 2020054293
# ## Carregando os dados
#
# Iremos carregar os dados usando a biblioteca ```pandas```. Não se preocupe se você não conhece a biblioteca, pois o nosso objetivo é apenas extrair a matriz de dados $X$. Segue uma descrição do dataset, retirada [daqui](http://statweb.stanford.edu/~owen/courses/202/Cereals.txt).
#
# * Datafile Name: Cereals
# * Datafile Subjects: Food , Health
# * Story Names: Healthy Breakfast
# * Reference: Data available at many grocery stores
# * Authorization: free use
# * Description: Data on several variable of different brands of cereal.
#
# A value of -1 for nutrients indicates a missing observation.
# Number of cases: 77
# Variable Names:
#
# 1. Name: Name of cereal
# 2. mfr: Manufacturer of cereal where A = American Home Food Products; G =
# General Mills; K = Kelloggs; N = Nabisco; P = Post; Q = Quaker Oats; R
# = Ralston Purina
# 3. type: cold or hot
# 4. calories: calories per serving
# 5. protein: grams of protein
# 6. fat: grams of fat
# 7. sodium: milligrams of sodium
# 8. fiber: grams of dietary fiber
# 9. carbo: grams of complex carbohydrates
# 10. sugars: grams of sugars
# 11. potass: milligrams of potassium
# 12. vitamins: vitamins and minerals - 0, 25, or 100, indicating the typical percentage of FDA recommended
# 13. shelf: display shelf (1, 2, or 3, counting from the floor)
# 14. weight: weight in ounces of one serving
# 15. cups: number of cups in one serving
# 16. rating: a rating of the cereals
# +
#Execute esta célula para instalar o pandas caso já não tenha instalado
# import sys
# # !{sys.executable} -m pip install --user pandas
# -
import pandas as pd
df = pd.read_table('cereal.txt',sep='\s+',index_col='name')
df
# A seguir iremos remover as linhas correspondentes aos cereais que possuem dados faltantes, representados pelo valor -1.
# Também iremos remover as colunas com dados categóricos 'mfr' e 'type', e os dados numéricos, 'shelf', 'weight' e 'cups'.
import numpy as np
new_df = df.replace(-1,np.nan)
new_df = new_df.dropna()
new_df = new_df.drop(['mfr','type','shelf','weight','cups'],axis=1)
new_df
# Finalmente, iremos converter os dados nutricionais numéricos de ```new_df``` para uma matriz ```dados``` e as avaliações (ratings) para um vetor $y$. Os nomes dos cereais serão salvos em uma lista ```cereral_names``` e os nomes das colunas em uma lista ```col_names```.
# +
cereral_names = list(new_df.index)
print('Cereais:',cereral_names)
col_names = list(new_df.columns)
print('Colunas:',col_names)
dados = new_df.drop('rating', axis=1).values
print('As dimensões de dados são:',dados.shape)
y = new_df['rating'].values
print('As dimensões de y são:',y.shape)
# -
# ## Estimando os parâmetros da regressão linear simples
#
# Qual será a relação entre a avaliação $y$ e o número de calorias $x$ de um cereal? Para responder esta pergunta, considere uma regressão linear simples
# $$
# y = \beta_0 + \beta_1 x.
# $$
# Para encontrar os coeficientes $\beta_0$ e $\beta_1$ utilizando o método dos mínimos quadrados, basta resolver o sistema
# $$
# \begin{bmatrix}
# n & \sum_i x^{(i)} \\
# \sum_i x^{(i)} & \sum_i (x^{(i)})^2
# \end{bmatrix}
# \begin{bmatrix}
# \beta_0 \\ \beta_1
# \end{bmatrix}
# =
# \begin{bmatrix}
# \sum_i y^{(i)} \\ \sum_i x^{(i)} y^{(i)}
# \end{bmatrix}
# $$
#
# Portanto, para encontrar $\beta_0$ e $\beta_1$, você precisa
# 1. Calcular a matriz
# $$
# A = \begin{bmatrix}
# n & \sum_i x^{(i)} \\
# \sum_i x^{(i)} & \sum_i (x^{(i)})^2
# \end{bmatrix}
# $$
# e o vetor
# $$
# c = \begin{bmatrix}
# \sum_i y^{(i)} \\ \sum_i x^{(i)} y^{(i)}
# \end{bmatrix}
# $$
# 2. Resolver $A \beta = c$, onde $\beta$ é o vetor de coeficientes.
# **Exercício 1 - Regressão simples:** Encontre os coeficientes $\beta_0$ e $\beta_1$ quando a variável independente é ```calories```. Dica: A variavel X abaixo já armazena os valores deste atributo.
X = new_df['calories'].values
y = new_df['rating'].values
## sample means of the predictor and the response
x_bar = X.mean()
y_bar = y.mean()
## predictor centered around its mean, reused by both sums below
x_centered = X - x_bar
## closed-form least-squares estimates for the slope and the intercept
b1 = (x_centered * (y - y_bar)).sum() / (x_centered ** 2).sum()
b0 = y_bar - b1 * x_bar
b0, b1
## predictions implied by the fitted coefficients
y_hat_ex1 = b0 + b1 * X
import matplotlib.pyplot as plt
# %matplotlib inline
# Scatter the data and overlay the fitted regression line in red.
plt.scatter( X, y, alpha=0.7 )
plt.plot(X, b0 + (b1 * X), color='r')
# **Exercício 2 - Regressão múltipla:** Considerando a nova tabela de dados X abaixo com os atributos 'calories', 'protein', 'fat', 'sugars' e 'vitamins' selecionados, estime os parâmetros da regressão múltipla para obter a variavel resposta ```rating```
X = new_df.loc[:,['calories', 'protein', 'fat', 'sugars', 'vitamins']].values
## add a column of 1's so the model includes a bias (intercept) term
X = np.concatenate([np.ones(shape=[X.shape[0], 1]), X], axis=1)
## solve the normal equations (X^T X) beta = X^T y in matrix form.
## np.linalg.solve is numerically more stable (and cheaper) than forming
## the explicit inverse with np.linalg.inv; the solution is the same.
betas = np.linalg.solve(X.T @ X, X.T @ y.reshape(-1, 1))
betas
# Predictions from the multiple-regression coefficients, flattened to 1-D.
y_hat_ex2 = X @ betas
y_hat_ex2 = y_hat_ex2.reshape(-1)
import matplotlib.pyplot as plt
# %matplotlib inline
# Predicted vs. actual ratings; the black identity line marks perfect fit.
plt.scatter(y, y_hat_ex2.reshape(-1), alpha=0.3)
plt.plot(y, y, 'k-')
plt.xlabel('labels')
plt.ylabel('preds')
# **Exercício 3:** Nossos modelos de regressão linear são bons preditores da nota de avaliação do cereal? Qual o melhor modelo? Calcule os coeficientes de determinação e faça uma análise dos valores obtidos para responder a estas perguntas.
# insira seu código aqui
def calc_coef_det(y, y_hat):
    """Coefficient of determination (R^2) of predictions y_hat against targets y.

    R^2 = 1 - SS_res / SS_tot, where SS_res is the residual sum of squares
    and SS_tot is the total sum of squares around the mean of y.
    """
    ss_res = np.sum(np.square(y - y_hat))
    ss_tot = np.sum(np.square(y - np.mean(y)))
    return 1 - ss_res / ss_tot
# R^2 of the simple (ex1) and multiple (ex2) regression predictions;
# the larger value identifies the better-fitting model on this data
r2_ex1 = calc_coef_det(y, y_hat_ex1)
r2_ex2 = calc_coef_det(y, y_hat_ex2)
r2_ex1, r2_ex2
| src/eps/gabarito/EP5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="http://akhavanpour.ir/notebook/images/srttu.gif" alt="SRTTU" style="width: 150px;"/>
#
# [](https://notebooks.azure.com/import/gh/Alireza-Akhavan/class.vision)
# ## Sharpening
#
# By altering our kernels we can implement sharpening, which has the effect of strengthening or emphasizing edges in an image.
# +
import cv2
import numpy as np

# Load the input image and display it for comparison
img = cv2.imread('images/input.jpg')
cv2.imshow('Original', img)

# 3x3 sharpening kernel: the entries sum to 1, so overall image brightness
# is preserved and no normalization step is needed
sharpen_kernel = np.array([[-1, -1, -1],
                           [-1,  9, -1],
                           [-1, -1, -1]])

# Convolve the image with the kernel (ddepth=-1 keeps the source bit depth)
result = cv2.filter2D(img, -1, sharpen_kernel)
cv2.imshow('Image Sharpening', result)

# Block until any key is pressed, then close all OpenCV windows
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# <div class="alert alert-block alert-info">
# <div style="direction:rtl;text-align:right;font-family:B Lotus, B Nazanin, Tahoma"> دانشگاه تربیت دبیر شهید رجایی<br>مباحث ویژه - آشنایی با بینایی کامپیوتر<br>علیرضا اخوان پور<br>96-97<br>
# </div>
# <a href="https://www.srttu.edu/">SRTTU.edu</a> - <a href="http://class.vision">Class.Vision</a> - <a href="http://AkhavanPour.ir">AkhavanPour.ir</a>
# </div>
| 13-Sharpening.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="KJQPpBXZDZvQ" colab_type="text"
# ### IMPORTS
# + id="0R-G-GsR9wkC" colab_type="code" colab={}
# Imports that will be using in this notebook
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Input
from tensorflow.keras import layers
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
import pickle
# + [markdown] id="AMpP-qddDdsZ" colab_type="text"
# ### READING IN DATA
# + id="8qlxc0qawZOs" colab_type="code" colab={}
# Data Source
# https://www.kaggle.com/kingburrito666/cannabis-strains
# + id="YXa15Pxq97ae" colab_type="code" colab={}
# Reading in cannabis data
# NOTE(review): assumes cannabis.csv is present in the working directory
weed = pd.read_csv('cannabis.csv')
# + id="3b0qAi53-BPg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="2f4b76f6-7044-4ae7-ce25-7406d1a7c7af"
# Checking the whole dataframe and making sure I read it in correctly
weed
# + [markdown] id="q4lpvJh6DgR0" colab_type="text"
# ### DATA PREPROCESSING
# + id="XLdzdcag_R9X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="86418e21-7e95-4206-c334-7b9b4fd6361c"
# I know that there are some NaN values in the Flavor column so checking to see how many
weed[weed['Flavor'].isnull()]
# + id="B0g6e-1W2GWx" colab_type="code" colab={}
# Replacing the NaN values for none in the Flavor column
# (the literal string 'none' later becomes an ordinary vocabulary token in the vectorizer)
weed['Flavor'] = weed['Flavor'].replace(np.nan, 'none')
# + id="5Jj9B1EZ2S-X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 47} outputId="cb60672b-a6e6-448f-f9e0-5daadc06097a"
# Making sure that there are no NaN values left
weed[weed['Flavor'].isnull()]
# + id="NNzbVW0b0q5M" colab_type="code" colab={}
# Combining Effects and Flavor into one column to use for one hot encoding
weed['Effect_Flavor'] = weed['Effects'] + ',' + weed['Flavor'] # If you don't add the comma it just smooshes the words together
# + id="6Nu1DhpN1Wfb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 469} outputId="1fd8c894-d731-4d85-bf09-2c27207159a2"
# Checking to make sure it added correctly
weed
# + id="erG7-Epl1gCw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="228cc76a-ca35-4104-9594-778c1b5544d8"
# Checking one row to better see all the items and making sure the comma separated the words correctly
weed['Effect_Flavor'][0]
# + id="brEoFdsj1lh0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="fb2b4aa3-dcda-4d57-cf8f-391c6f12276d"
# Looking at my new column
weed['Effect_Flavor'].describe()
# + [markdown] id="bWUJVQq5Dvyr" colab_type="text"
# ### MODEL PREPARATION
# + id="wkg8Ovtb-B2V" colab_type="code" colab={}
# Split into X and y
# X: combined effect/flavor text per strain; y: the strain name (prediction target)
X = weed['Effect_Flavor']
y = weed['Strain']
# + id="71_6XFdptRqX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6b394070-c74f-4f86-88d0-eeb430566aac"
# Checking X shape
X.shape
# + id="xQsCK3MWtToL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fa432a52-ce72-4b9b-dc05-d4d87a2a3cdc"
# Checking y shape
y.shape
# + id="SaLKsP0aktJu" colab_type="code" colab={}
# Since the neural network cannot take in straight text I am going to vectorize my X data
# create the transformer
vect = CountVectorizer()
# build vocab
# fit() learns the token vocabulary from the entire corpus (no train/test split is used here)
vect.fit(X)
# transform text
# dtm is a sparse document-term matrix: one row per strain, one column per vocabulary token
dtm = vect.transform(X)
# + id="P6NKGoP8k3aA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="7db7aed7-a99d-4163-adc5-c7288b6fe375"
# Checking to see that the feature names are correct.
# CountVectorizer.get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement (returns an ndarray
# of tokens instead of a list, same contents).
print(vect.get_feature_names_out())
# + id="0zV1UT9nlMIO" colab_type="code" colab={}
# Now I am going to use todense() to turn X into a matrix
# NOTE(review): todense() yields a numpy.matrix; toarray() would give a plain ndarray
X = dtm.todense()
# + id="vT1tzQf8qUpF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="c3389c67-008d-475b-f2e6-2954942a75dc"
# Make sure it turned into a matrix correctly
X
# + id="D3JrP31IqbsT" colab_type="code" colab={}
# I am using label encoder for y
# Label encoder turns y into a numeric representation of the data
# (each unique strain name maps to one integer class id)
le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
# + id="rVMz-HhUxH9u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="50a2529f-1045-4c43-f72f-f5de92f43add"
# Check to make sure that y is an array now
y
# + id="uRNf7yknnHEE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b49fc4ee-29ab-4d80-e964-97f6c6146759"
# Checking to make sure the X shape looks good
X.shape
# + id="AS-g8VacnKq9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1d7ace85-019f-4454-b89b-44472952287b"
# Checking to make sure the y shape looks good
y.shape
# + id="Y6v4fwuEypUV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="83370934-5fd8-496e-ab89-63e37efd15c3"
# Checking to make sure my X and y look good
print(X[1])
print(y[0:5])
# + [markdown] id="ocSIwOSWD1jD" colab_type="text"
# ### CREATE NEURAL NETWORK
# + id="AGAxhgzD5Sf7" colab_type="code" colab={}
# Creating my own optimizer
# AdamamsGrad is using a super low learning_rate which means the changes between epochs will be smaller
# NOTE(review): despite the name, amsgrad=False, so this is plain Adam with a small learning rate
AdamamsGrad = tf.keras.optimizers.Adam(
    learning_rate=0.0001,
    amsgrad=False,
)
# + id="Cg75V1qHGrPd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="3c6a997b-a6ad-497f-96c0-5911837af897"
# Instantiating my model type and creating the model architecture
model = Sequential()
# Input width 66 — presumably the CountVectorizer vocabulary size; confirm against X.shape[1]
model.add(Input(66))
model.add(Dense(64, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(256, activation='relu'))
# 2350 output classes — presumably the number of unique strains; confirm against len(le.classes_)
model.add(Dense(2350, activation='softmax'))
# sparse_categorical_crossentropy matches the integer class ids produced by LabelEncoder
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=AdamamsGrad,
              metrics=['accuracy'])
model.summary()
# + [markdown] id="jRbJ7oxnD85b" colab_type="text"
# ### TEST PREDICTION MODEL
# + id="fle7WDrIG5kh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="80b756b5-92ef-48ab-ebd8-65c1d5a0ee51"
# Fitting my model and getting my accuracy
# NOTE(review): trains on the full dataset with no validation split, so the
# reported accuracy is training accuracy only
model.fit(X,y, epochs=100)
# + [markdown] id="D4JUQugWQM5B" colab_type="text"
# ### PICKLE THE MODEL
# + id="d6peXiHmQgCl" colab_type="code" colab={}
| model_notebooks/med_cabinet_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Environment preparation
# The first line is the **magic** command enabling matplotlib inline plots
# %matplotlib inline
# Then we have a round of imports:
# * **pandas** is our main data storage module
# * **glob** and **os** are used for filename manipulations
import dask.dataframe as dd
import pandas as pd
import glob
import os
# ## Files index and identification
# Strip directory and extension from every input workbook name, e.g.
# "../input/2015_PM10_1g.xlsx" -> "2015_PM10_1g"
filenames = [ os.path.splitext(wholeFilename)[0] for wholeFilename in
              [ os.path.basename(wholePath) for wholePath in glob.glob("../input/2*.xlsx") ] ]
dataFiles = pd.DataFrame({"filename": filenames})
# Split "year_pollutant_resolution" into three columns.
# str.split(..., expand=True) replaces the old ".str"-tuple-unpacking trick,
# which no longer works in modern pandas, and maxsplit must be passed as the
# keyword n= (at most 2 splits, so pollutant names may themselves contain "_").
dataFiles[["year", "pollutant", "resolution"]] = dataFiles["filename"].str.split('_', n=2, expand=True)
dataFiles.head()
dataFiles["year"].value_counts()
dataFiles["pollutant"].value_counts()
dataFiles["resolution"].value_counts()
# ## Fixing data files identification
# DataFrame.ix was removed in pandas 1.0; .loc provides the same
# boolean-mask/label assignment behavior used here.
# Order matters: each pair first rewrites the pollutant (while the broken
# resolution value is still in place), then normalizes the resolution.
dataFiles.loc[dataFiles["resolution"] == "(PM2.5)-24g", 'pollutant'] = "Cl_(PM2.5)"
dataFiles.loc[dataFiles["resolution"] == "(PM2.5)-24g", 'resolution'] = "24g"
dataFiles.loc[dataFiles["resolution"] == "(PM2.5)_24g", 'pollutant'] = "SO42_(PM2.5)"
dataFiles.loc[dataFiles["resolution"] == "(PM2.5)_24g", 'resolution'] = "24g"
dataFiles.loc[dataFiles["resolution"] == "w_PM25_24g", 'pollutant'] = "Jony_w_PM25"
dataFiles.loc[dataFiles["resolution"] == "w_PM25_24g", 'resolution'] = "24g"
# Now **resolution** column should be correct:
dataFiles["resolution"].value_counts()
# Let's also fix the wrongly labelled pollutant:
dataFiles.loc[dataFiles["pollutant"] == "PM25", 'pollutant'] = "PM2.5"
dataFiles.describe()
# There is still one empty cell in **resolution** column. Let's identify it:
dataFiles[dataFiles["resolution"].isnull()]
# After manually examining the **2015_depozycja** file I found that it contains a new type of data, which will be useless in the planned analysis. I decided to remove it from working memory.
dataFiles.drop(dataFiles[dataFiles["filename"] == "2015_depozycja"].index, inplace=True)
dataFiles.describe()
# ## Looking for worst measuring station for each pollutant in 2015
# Hourly-resolution ("1g") files from 2015 for the pollutants of interest
importantPollutants = ["PM10", "PM2.5", "O3", "NO2", "SO2", "C6H6", "CO"]
pollutants2015 = dataFiles[(dataFiles["year"] == "2015") & (dataFiles["resolution"] == "1g") &
                           (dataFiles["pollutant"].isin(importantPollutants))]
pollutants2015
from tqdm import tqdm
from collections import Counter
# +
#worstStation = {}
#for index, dataRow in tqdm(pollutants2015.iterrows(), total=len(pollutants2015.index)):
# dataFromFile = pd.read_excel("../input/" + dataRow["filename"] + ".xlsx", skiprows=[1,2])
# dataFromFile = dataFromFile.rename(columns={"Kod stacji":"Godzina"})
# dataFromFile = dataFromFile.set_index("Godzina")
# worstStation[dataRow["pollutant"]] = dataFromFile.max().sort_values(ascending = False).index[0]
# -
# ## Building one big data frame
pollutants = importantPollutants
years = sorted(list(dataFiles["year"].unique()))
pollutantsYears = dataFiles[(dataFiles["year"].isin(years)) & (dataFiles["resolution"] == "1g") &
                            (dataFiles["pollutant"].isin(pollutants))]
# Collect one wide frame per year and concatenate once at the end.
# DataFrame.append was removed in pandas 2.0, and per-iteration appends were
# quadratic anyway; a single pd.concat is the supported, faster replacement.
yearFrames = []
for dataYear in years:
    print(dataYear)
    yearDataFrame = pd.DataFrame()
    for index, dataRow in tqdm(pollutantsYears[pollutantsYears["year"] == dataYear].iterrows(), total=len(pollutantsYears[pollutantsYears["year"] == dataYear].index)):
        data = pd.read_excel("../input/" + dataRow["filename"] + ".xlsx", skiprows=[1,2])
        data = data.rename(columns={"Kod stacji":"Hour"})
        year = int(dataRow["year"])
        # Hourly timestamps spanning the whole year (01:00 Jan 1 .. 00:00 Jan 1 of the next year)
        rng = pd.date_range(start = str(year) + '-01-01 01:00:00', end = str(year+1) + '-01-01 00:00:00', freq='H')
        # workaround for 2006_PM2.5_1g, 2012_PM10_1g, 2012_O3_1g
        try:
            data["Hour"] = rng
        except ValueError:
            print("File {} has some mess with timestamps".format(dataRow["filename"]))
            continue
        data = data.set_index("Hour")
        # Reshape to long format: one (Hour, Station) row, one column per pollutant
        data = data.stack()
        data = pd.DataFrame(data, columns=[dataRow["pollutant"]])
        data.index.set_names(['Hour', 'Station'], inplace=True)
        yearDataFrame = pd.concat([yearDataFrame, data], axis=1)
    yearFrames.append(yearDataFrame)
# Guard against an empty file list: pd.concat([]) raises, unlike the old append loop
bigDataFrame = pd.concat(yearFrames) if yearFrames else pd.DataFrame()
bigDataFrame.to_pickle("../output/bigDataFrame.pkl")
bigDataFrame
# dd.from_pandas requires an explicit partitioning (npartitions or chunksize);
# a single partition mirrors the in-memory frame exactly.
daskBigDataFrame = dd.from_pandas(bigDataFrame, npartitions=1)
| workspace/01-preproc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (dev)
# language: python
# name: dev
# ---
# # Housing Rental Analysis for San Francisco
#
# In this projet, we use data visualization tools, including aggregation, interactive visualizations, and geospatial analysis, to find properties in the San Francisco market that are viable investment opportunities.
#
# Description:
#
# We use the `san_francisco_housing.ipynb` notebook to visualize and analyze the real-estate data.
#
# We create a visualization by using the integration between Plotly and the Mapbox API. Be sure to create your environment file (`.env`) and include your Mapbox API access token. Then import your Mapbox API access token into the `san_francisco_housing.ipynb` notebook, and set it by using the `px.set_mapbox_access_token` function.
#
# The project uses the `sfo_neighborhoods_census_data.csv` file from the `Resources` folder into the notebook and create the DataFrame that we’ll use in the analysis.
#
# The main task in this project is to visualize and analyze the real-estate data in a Jupyter notebook. We use the `san_francisco_housing.ipynb` notebook to complete the following tasks:
#
# * Calculate and plot the housing units per year.
#
# * Calculate and plot the average prices per square foot.
#
# * Compare the average prices by neighborhood.
#
# * Build an interactive neighborhood map.
#
# * Compose your data story.
#
# ##### Calculate and Plot the Housing Units per Year
#
# For this part, we use numerical and visual aggregation to calculate the number of housing units per year, and then visualize the results as a bar chart. To do so, we complete the following steps:
#
# 1. Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
#
# 2. Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# 3. Style and format the line plot to ensure a professionally styled visualization.
#
# 4. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Answer the following question:
#
# * What’s the overall trend in housing units over the period that you’re analyzing?
#
# ##### Calculate and Plot the Average Sale Prices per Square Foot
#
# For this part, we use numerical and visual aggregation to calculate the average prices per square foot, and then visualize the results as a bar chart. To do so, we complete the following steps:
#
# 1. Group the data by year, and then average the results. What’s the lowest gross rent that’s reported for the years that the DataFrame includes?
#
# 2. Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
#
# 3. Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
#
# * Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# * If so, did the gross rent increase or decrease during that year?
#
# ##### Compare the Average Sale Prices by Neighborhood
#
# For this part, we use interactive visualizations and widgets to explore the average sale price per square foot by neighborhood. To do so, we complete the following steps:
#
# 1. Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
#
# 2. Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
#
# 3. Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use the interactive visualization to answer the following question:
#
# * For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# ##### Build an Interactive Neighborhood Map
#
# For this part, we explore the geospatial relationships in the data by using interactive visualizations with Plotly and the Mapbox API. To build your map, use the `sfo_data_df` DataFrame (created during the initial import), which includes the neighborhood location data with the average prices. To do all this, complete the following steps:
#
# 1. Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
#
# 2. Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
#
# 3. Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame. Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`. The second cell cleans the data and sets the “Neighborhood” column. Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
#
# 4. Using Plotly Express, create a `scatter_mapbox` for the `all_neighborhoods_df` DataFrame. Remember that you need your MapBox API key. Be sure to do the following:
#
# * Set the `size` parameter to “sale_price_sqr_foot”.
#
# * Set the `color` parameter to “gross_rent”.
#
# * Set the `size_max` parameter to “25”.
#
# * Set the `zoom` parameter to “11”.
#
# 5. Style and format the line plot to ensure a professionally styled visualization.
#
# 6. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 7. Use the interactive map to answer the following question:
#
# * Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
# ##### Compose Your Data Story
#
# Based on the visualizations that you created, answer the following questions:
#
# * How does the trend in rental income growth compare to the trend in sales prices? Does this same trend hold true for all the neighborhoods across San Francisco?
#
# * What insights can you share with your company about the potential one-click, buy-and-rent strategy that they're pursuing? Do neighborhoods exist that you would suggest for investment, and why?
# Import the required libraries and dependencies
import os
import pandas as pd
import plotly.express as px
import hvplot.pandas
from pathlib import Path
from dotenv import load_dotenv
import dash
# ## Enable your Mapbox API access token
# +
# Load the .env file into the notebook
load_dotenv()
# Read in your MAPBOX_API_KEY
# NOTE(review): the entry must be named MAPBOX_API_ACCESS_TOKEN in the .env file
map_box_api_access_token = os.getenv("MAPBOX_API_ACCESS_TOKEN")
# Confirm the availability of your Mapbox API access token by checking its type
# (str means it was found; NoneType means the .env entry is missing)
type(map_box_api_access_token)
# -
# Set your Mapbox API access token
px.set_mapbox_access_token(map_box_api_access_token)
# ## Import the data
# +
# Using the read_csv function and Path module, create a DataFrame
# by importing the sfo_neighborhoods_census_data.csv file from the Resources folder
# The "year" column becomes the index, which later groupby("year") calls rely on
sfo_data_df = pd.read_csv(Path("Resources/sfo_neighborhoods_census_data.csv"), index_col="year")
# Review the first and last five rows of the DataFrame
display(sfo_data_df.head())
display(sfo_data_df.tail())
# -
# ---
# ## Calculate and Plot the Housing Units per Year
#
# For this part of the assignment, use numerical and visual aggregation to calculate the number of housing units per year, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
#
# 2. Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# 3. Style and format the line plot to ensure a professionally styled visualization.
#
# 4. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 5. Answer the following question:
#
# * What’s the overall trend in housing units over the period that you’re analyzing?
#
#
# ### Step 1: Use the `groupby` function to group the data by year. Aggregate the results by the `mean` of the groups.
# +
# Create a numerical aggregation that groups the data by the year and then averages the results.
# numeric_only=True restricts the mean to numeric columns: in pandas >= 2.0,
# calling .mean() with the string "neighborhood" column present raises a TypeError.
housing_units_by_year = sfo_data_df.groupby("year").mean(numeric_only=True)
# Total growth over the six-year window and the mean year-over-year growth rate
growth_6y=housing_units_by_year["housing_units"].pct_change(6).dropna()
average_growth_1y=housing_units_by_year["housing_units"].pct_change().dropna().mean()
# Review the DataFrame
display(housing_units_by_year)
print(f"During the period 2010-2016:")
print(f"Growth of housing units : {100*growth_6y.iloc[0]:.2f}%" )
print(f"Average Annual growth of housing: {100*average_growth_1y:.2f}%" )
# -
# ### Step 2: Use the `hvplot` function to plot the `housing_units_by_year` DataFrame as a bar chart. Make the x-axis represent the `year` and the y-axis represent the `housing_units`.
#
# ### Step 3: Style and format the line plot to ensure a professionally styled visualization.
# +
# Create a visual aggregation explore the housing units by year
# Scale to thousands so the y-axis labels stay compact
thousands_of_housing_units_by_year=housing_units_by_year["housing_units"]/1000
thousands_of_housing_units_by_year.hvplot.bar(
    rot=90
    ,title="Housing Units in San Francisco By Year (in Thousands) -- Period 2010-2016"
    ,xlabel= 'Year'
    ,ylabel='Housing Units (Thousands)'
    # ylim narrows the view so the small year-over-year changes stay visible
    ,ylim=[300,400]
).opts(
    color='pink'
)
# -
# ### Step 5: Answer the following question:
# **Question** What is the overall trend in housing_units over the period being analyzed?
#
# **Answer** # The overall trend is flat, as can be seen in the plot. Development of new housing appears to be very limited: over the 6 years, housing units in San Francisco increased by only 3.14%, an average annual increase of just 0.52%. Clearly, almost no new housing becomes available in the city each year.
# ---
# ## Calculate and Plot the Average Sale Prices per Square Foot
#
# For this part of the assignment, use numerical and visual aggregation to calculate the average prices per square foot, and then visualize the results as a bar chart. To do so, complete the following steps:
#
# 1. Group the data by year, and then average the results. What’s the lowest gross rent that’s reported for the years that the DataFrame includes?
#
# 2. Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
#
# 3. Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
#
# * Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# * If so, did the gross rent increase or decrease during that year?
#
#
# ### Step 1: Group the data by year, and then average the results.
# +
# Create a numerical aggregation by grouping the data by year and averaging the results
# numeric_only=True restricts the mean to numeric columns: in pandas >= 2.0,
# the string "neighborhood" column would make .mean() raise a TypeError.
prices_square_foot_by_year = sfo_data_df.groupby("year").mean(numeric_only=True)
# Review the resulting DataFrame
prices_square_foot_by_year
# -
#OBS: there is no standard deviation among years for the gross_rent.
# That means, the value presented in the data corresponds to an average.
sfo_data_df[["gross_rent"]].groupby("year").describe()
# **Question** What is the lowest gross rent reported for the years included in the DataFrame?
#
# **Answer** # The lowest monthly gross rent value is $1,239, and occur in the year 2010.
# ### Step 2: Create a new DataFrame named `prices_square_foot_by_year` by filtering out the “housing_units” column. The new DataFrame should include the averages per year for only the sale price per square foot and the gross rent.
# +
# Filter out the housing_units column, creating a new DataFrame
# Keep only sale_price_sqr_foot and gross_rent averages per year.
# Selecting the two columns *before* aggregating avoids averaging unused
# columns and sidesteps the pandas >= 2.0 TypeError on the string
# "neighborhood" column.
prices_square_foot_by_year = sfo_data_df.groupby("year")[["sale_price_sqr_foot","gross_rent"]].mean()
# Review the DataFrame
prices_square_foot_by_year
# -
# Changes in sale_price_sqr_foot & gross_rent
# Year-over-year percentage changes, relabelled for readable column headers
sale_price_and_rent_annual_changes=prices_square_foot_by_year.pct_change().dropna()
sale_price_and_rent_annual_changes=sale_price_and_rent_annual_changes.add_prefix("Annual_change_").add_suffix("(%)")
round(sale_price_and_rent_annual_changes*100,1)
# +
# Trend in income growth and sale price per sqr foot growth
# Horizontal grouped bars: one pair of bars (price change, rent change) per year
(sale_price_and_rent_annual_changes*100).hvplot(
    kind="barh"
    ,title= "Change in Sale Price Per Sqr Foot Growth and Rent Growth in San Francisco 2011-2016"
    ,ylabel= "Sale Price per Square Feet Change \n and Gross Rent Change \n per Year (%)"
    ,xlabel="Year"
    ,legend='top_right'
    ,stacked=False
    ,rot=45
    ,width=1000
    ,height=600
)
# -
# We can see than since 2012, consistently, the real estate market have had an appreciation in the price of properties as well as on the rent. If this trends remains, it would imply that it is a good business to buy a house, and rent it. You would expect future gains for the rent itself, as well as for the appreciation of the house when you decide to sell
# the property.
# ### Step 3: Use hvPlot to plot the `prices_square_foot_by_year` DataFrame as a line plot.
#
# > **Hint** This single plot will include lines for both `sale_price_sqr_foot` and `gross_rent`
#
# ### Step 4: Style and format the line plot to ensure a professionally styled visualization.
#
# +
# Plot prices_square_foot_by_year.
# Include labels for the x- and y-axes, and a title.
# One line per column: sale_price_sqr_foot and gross_rent share the y-axis (US dollars)
prices_square_foot_by_year.hvplot(
    xlabel="Year"
    ,ylabel="Sale Price per Sqr Foot \n & Gross Rent \n [US Dollars]"
    ,title="Sale Price per Square Foot & Average Gross Rent, in San Francisco -- 2010-2016"
    ,legend=True
    ,width=800
)
# -
# ### Step 6: Use both the `prices_square_foot_by_year` DataFrame and interactive plots to answer the following questions:
# **Question** * Did any year experience a drop in the average sale price per square foot compared to the previous year?
#
# **Answer** # Yes. In the year 2011 the sale price per square footage was 342 [USD], dropping a 7.4% from the 2010 value of 369 [USD].
# **Question** * If so, did the gross rent increase or decrease during that year?I
#
# **Answer** # The gross rent actually increased by a significant 23.5% from USD 1,239 to USD 1,530 during that same year 2011.
# ---
# ## Compare the Average Sale Prices by Neighborhood
#
# For this part of the assignment, use interactive visualizations and widgets to explore the average sale price per square foot by neighborhood. To do so, complete the following steps:
#
# 1. Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
#
# 2. Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
#
# 3. Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# 4. Style and format the line plot to ensure a professionally styled visualization.
#
# 5. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 6. Use the interactive visualization to answer the following question:
#
# * For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# ### Step 1: Create a new DataFrame that groups the original DataFrame by year and neighborhood. Aggregate the results by the `mean` of the groups.
# +
# Group by year and neighborhood and then create a new dataframe of the mean values
# (all remaining columns are numeric once "neighborhood" moves into the group keys)
prices_by_year_by_neighborhood = sfo_data_df.groupby(["year","neighborhood"]).mean()
# Review the DataFrame
prices_by_year_by_neighborhood.head()
# -
# ### Step 2: Filter out the “housing_units” column to create a DataFrame that includes only the `sale_price_sqr_foot` and `gross_rent` averages per year.
# +
# Filter out the housing_units
prices_by_year_by_neighborhood = prices_by_year_by_neighborhood.drop(columns=["housing_units"])
# Review the first and last five rows of the DataFrame
display(prices_by_year_by_neighborhood.head() )
display(prices_by_year_by_neighborhood.tail() )
# -
# ### Step 3: Create an interactive line plot with hvPlot that visualizes both `sale_price_sqr_foot` and `gross_rent`. Set the x-axis parameter to the year (`x="year"`). Use the `groupby` parameter to create an interactive widget for `neighborhood`.
#
# ### Step 4: Style and format the line plot to ensure a professionally styled visualization.
# Use hvplot to create an interactive line plot of the average price per square foot
# The plot should have a dropdown selector for the neighborhood
# groupby="neighborhood" is what produces the interactive dropdown widget
prices_by_year_by_neighborhood.hvplot(
    groupby="neighborhood"
    ,title="Average Price per Square Foot, and Gross Rent in San Francisco's Neighborhoods"
    ,xlabel="Year"
    ,ylabel="US Dollars"
)
# ### Step 6: Use the interactive visualization to answer the following question:
# **Question** For the Anza Vista neighborhood, is the average sale price per square foot for 2016 more or less than the price that’s listed for 2012?
#
# **Answer** In 2016 the price for the Anza Vista neighborhood is lower. Indeed, using the plot above, by selecting Anza Vista in the neighborhood drop down, and then looking at the hover on the respective years, we can see that for the Anza Vista neighborhood, the sale price per square foot in 2012 was 344.49. The averages sale price per square foot in 2016 was 88.40. The average sale price per square foot is significantly lower in 2016 than it was in 2012. This constrast with the price of rent that rise in all the city.
# ---
# ## Build an Interactive Neighborhood Map
#
# For this part, we explore the geospatial relationships in the data by using interactive visualizations with Plotly and the Mapbox API. To build the map, we use the `sfo_data_df` DataFrame (created during the initial import), which includes the neighborhood location data with the average prices. To do all this, we complete the following steps:
#
# 1. Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
#
# 2. Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
#
# 3. Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame. Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`. The second cell cleans the data and sets the “Neighborhood” column. Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
#
# 4. Using Plotly Express, create a `scatter_mapbox` for the `all_neighborhoods_df` DataFrame. Remember that you need your MapBox API key. Be sure to do the following:
#
# * Set the `size` parameter to “sale_price_sqr_foot”.
#
# * Set the `color` parameter to “gross_rent”.
#
# * Set the `size_max` parameter to “25”.
#
# * Set the `zoom` parameter to “11”.
#
# 5. Style and format the line plot to ensure a professionally styled visualization.
#
# 6. Note that your resulting plot should appear similar to the following image:
#
# 
#
# 7. Use the interactive map to answer the following question:
#
# * Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
#
# ### Step 1: Read the `neighborhood_coordinates.csv` file from the `Resources` folder into the notebook, and create a DataFrame named `neighborhood_locations_df`. Be sure to set the `index_col` of the DataFrame as “Neighborhood”.
# +
# Load neighborhoods coordinates data
# index_col="Neighborhood" gives this frame the same index as the
# per-neighborhood averages, so the two concatenate cleanly later.
# NOTE(review): `Path` must be imported from pathlib earlier in the notebook — verify.
neighborhood_locations_df = pd.read_csv(Path("Resources/neighborhoods_coordinates.csv"), index_col="Neighborhood")
# Review the DataFrame
neighborhood_locations_df
# -
# ### Step 2: Using the original `sfo_data_df` Dataframe, create a DataFrame named `all_neighborhood_info_df` that groups the data by neighborhood. Aggregate the results by the `mean` of the group.
# +
# Calculate the mean values for each neighborhood (all years pooled together)
all_neighborhood_info_df = sfo_data_df.groupby("neighborhood").mean()
# Review the resulting DataFrame
all_neighborhood_info_df
# -
# ### Step 3: Review the two code cells that concatenate the `neighborhood_locations_df` DataFrame with the `all_neighborhood_info_df` DataFrame.
#
# Note that the first cell uses the [Pandas concat function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html) to create a DataFrame named `all_neighborhoods_df`.
#
# The second cell cleans the data and sets the “Neighborhood” column.
#
# Be sure to run these cells to create the `all_neighborhoods_df` DataFrame, which you’ll need to create the geospatial visualization.
# +
# Using the Pandas `concat` function, join the
# neighborhood_locations_df and the all_neighborhood_info_df DataFrame
# The axis of the concatenation is "columns": rows are aligned on the shared
# neighborhood index, so coordinates and averages end up side by side.
all_neighborhoods_df = pd.concat(
    [neighborhood_locations_df, all_neighborhood_info_df],
    axis="columns",
    sort=False
)
# Review the resulting DataFrame
display(all_neighborhoods_df.head())
display(all_neighborhoods_df.tail())
# +
# Call the dropna function to remove any neighborhoods that do not have data
#all_neighborhoods_df = all_neighborhoods_df.dropna()
# reset_index first so the neighborhood name survives as a regular column
all_neighborhoods_df = all_neighborhoods_df.reset_index().dropna()
# Rename the "index" column as "Neighborhood" for use in the Visualization
all_neighborhoods_df = all_neighborhoods_df.rename(columns={"index": "Neighborhood"})
# Review the resulting DataFrame
display(all_neighborhoods_df.head())
display(all_neighborhoods_df.tail())
# -
# Quick look at the frame ordered by gross rent (lowest first)
all_neighborhoods_df.sort_values("gross_rent")
# ### Step 4: Using Plotly Express, create a `scatter_mapbox` for the `all_neighborhoods_df` DataFrame. Remember that you need your MapBox API key. Be sure to do the following:
#
# * Set the `size` parameter to “sale_price_sqr_foot”.
# * Set the `color` parameter to “gross_rent”.
# * Set the `size_max` parameter to “25”.
# * Set the `zoom` parameter to “11”.
#
# ### Step 5: Style and format the line plot to ensure a professionally styled visualization.
#
# Map of San Francisco neighborhoods: marker size encodes sale price per
# square foot, marker color encodes gross rent. Lat/Lon columns place the
# markers; they are hidden from the hover box via hover_data.
px.scatter_mapbox(
    all_neighborhoods_df
    ,title= "Average Gross Monthly Rent and Sell Price per Square Foot in San Francisco"
    ,size="sale_price_sqr_foot"
    ,color="gross_rent"
    ,lat="Lat"
    ,lon="Lon"
    ,size_max=25
    ,zoom=11
    ,color_continuous_scale="reds"
    ,hover_name="Neighborhood"
    ,hover_data={'Lat':False, 'Lon':False, "sale_price_sqr_foot":":.0f","gross_rent":":,.0f" }
    ,height=750
    ,width=750
    ,labels={"sale_price_sqr_foot":"Sale Price per Square Foot","gross_rent":"Gross Monthly Rent"}
)
# ### Step 7: Use the interactive map to answer the following question:
# **Question** Which neighborhood has the highest gross rent, and which has the highest sale price per square foot?
#
# **Answer** # The highest sale price per square foot can be found by looking for the largest circle on the map. This corresponds to Union Square District, at USD 904. As for rent, with the data we have we cannot really answer which neighborhood has the highest rent, because the rent in the data source for a particular year is the same for all neighborhoods. In the plot, however, some neighborhoods appear with a different rent amount, because the average is taken across the years available, and some of them are missing some years. The map shows the average across time for each neighborhood, so we get the highest gross rent for Westwood Park at $3,959 monthly, as shown by the darkest red on the map — but only because it has data points just for 2014 and 2016, so its average appears higher.
# # Preparing some additional calculations to create the story.
# Preparing data for calculating changes in prices per sqr foot across neighborhoods
# Pivot to one column per neighborhood and one row per year.
prices_by_year_by_neighborhood.sale_price_sqr_foot.unstack()
# Calculation of the change in prices per square feet per year and neighborhood sorted by the ones that have
# more decrease in price in 2016
change_in_prices_by_year=prices_by_year_by_neighborhood.sale_price_sqr_foot.unstack().pct_change()
change_in_prices_by_year.T.sort_values(2016).head(15)
# pct_change(6) compares each year with six years earlier, i.e. 2016 vs 2010 —
# the appreciation over the whole observed period.
appreciation_full_period=prices_by_year_by_neighborhood.sale_price_sqr_foot.unstack().pct_change(6)
print(appreciation_full_period.T.sort_values(2016).head(8))
# Neighborhoods with positive full-period appreciation, best performers last.
print(appreciation_full_period.T.sort_values(2016)[appreciation_full_period.loc[2016,:]>0].tail(8))
# Yearly change in prices during the period
change_in_prices_by_year_sorted=change_in_prices_by_year.T.sort_values(2016)*100
# Stacked horizontal bars: each neighborhood's bar sums its yearly % changes.
change_in_prices_by_year_sorted.hvplot.barh(
    height=1000
    ,stacked=True
    ,ylabel="Sum of annual appreciation 2010-2016 (%)"
)
# ## Compose Your Data Story
#
# Based on the visualizations, here we compose a data story that synthesizes the analysis by answering the following questions:
# **Question** How does the trend in rental income growth compare to the trend in sales prices? Does this same trend hold true for all the neighborhoods across San Francisco?
#
# **Answer** # The trend in rental income was to increase significantly in the period 2010-2016, and the price per square foot as well. At the same time, the number of housing units in the same period barely increased. That is to say, we are in the presence of a rental market with increasing demand but stagnant supply. The appreciation of the real estate is not uniform across neighborhoods. A few of them did not increase in value over the period, such as Anza Vista, South Beach, Hayes Valley, Park North, Ingleside, and Oceanview. However, most of them appreciated, many by more than double their price per square foot over the period — for example, Van Ness/Civic Center, Union Square District, Outer Richmond, Crocker Amazon, and Mission Terrace.
#
# This is the perfect scenario for the one-click buy-and-rent strategy. Buying a property would not only see returns from the appreciation of the property itself, but also from the increase in rent due to rising rental demand, since rent may be expected to continue increasing given the lack of real estate development.
#
# Since the general trend for real estate price appreciation is positive, it would be good to focus on buying in neighborhoods that may be selling at a discount. Those may be the ones that had a positive overall appreciation, but a negative change in price per square foot in the last year available, 2016. Those cases are Outer Mission, Anza Vista, Hayes Valley, Park North, Marina, and Alamo Square. Unfortunately, we do not have rent prices differentiated by neighborhood in each year in the census csv file, in order to add that element to the analysis. However, given the explosive increase in rent prices as a whole, we can assume that all rent prices follow an upward trend in the period.
#
# Given the pandemic, data up to 2016 does not appear to be enough for an updated picture on which to base a final recommendation. However, recent analysis, such as https://www.zumper.com/blog/state-of-the-san-francisco-bay-area-renter-2020/, shows that rent prices have dropped in San Francisco due to many people moving out, but many people have started moving in, especially coming from Sacramento, due to the fall in rent prices. This means that we can expect prices to stabilize at some point and reach a bottom. If we assume that the behavior of prices after the bottom will be similar to the behavior in 2010-2016 as we get out of the pandemic, and if the trend of almost no new housing development remains after 2021, then 2021 may well be a very good opportunity to enter the real estate market, buy houses at a discount, and enjoy rising rental prices in the near future.
#
# In conclusion, we see potential for the buy-and-rent strategy as a double source of profit: real estate appreciation and rental income. Among all the neighborhoods we would recommend investing in given the data available, Outer Mission presents a high overall appreciation with a large drop in 2016. Assuming that trend is still valid in 2021, it would be the first choice, since it may be the neighborhood selling at the largest discount.
| san_francisco_housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.svm import LinearSVR
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import StackingRegressor
from sklearn.ensemble import RandomForestRegressor
import lightgbm as lgb
from lightgbm import LGBMRegressor
# -
import sklearn
sklearn.show_versions()
# +
# Fix the PRNG seeds so splits and model fits are reproducible across runs.
import random
seed = 42
np.random.seed(seed)
random.seed(seed)
# -
# # Загрузка датасета
# +
# Load the preprocessed dataset; column 0 is the index written by to_csv.
data = pd.read_csv('preprocessed_data.csv', index_col=0)
data.head()
# -
data.shape
# +
# Column groups used throughout the notebook:
#   target_features      - final-slag chemistry values to be predicted
#   categorical_features - presumably one-hot grade/profile indicators
#                          ("_rare" a bucketed rare category) — inferred from
#                          names, TODO confirm against preprocessing step
#   numerical_features   - process measurements and first-sample chemistry
target_features = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний FeO', 'химшлак последний MgO',
                   'химшлак последний MnO', 'химшлак последний R',
                   'химшлак последний SiO2']
categorical_features = ['МАРКА_rare', 'МАРКА_Э76ХФ', 'МАРКА_Э90ХАФ',
                        'ПРОФИЛЬ_rare', 'ПРОФИЛЬ_Р65']
numerical_features = ['t вып-обр', 't обработка', 't под током', 't продувка',
                      'ПСН гр.', 'чист расход C', 'чист расход Cr', 'чист расход Mn',
                      'чист расход Si', 'чист расход V', 'температура первая',
                      'температура последняя', 'Ar (интенс.)', 'эл. энергия (интенс.)',
                      'произв жидкая сталь', 'расход газ Ar', 'сыпуч известь РП',
                      'сыпуч кокс пыль УСТК', 'сыпуч кокс. мелочь КМ1', 'сыпуч шпат плав.',
                      'ферспл CaC2', 'ферспл FeV-80', 'ферспл Mn5Si65Al0.5',
                      'ферспл SiMn18', 'ферспл фх850А', 'эл. энергия',
                      'химсталь первый Al_1', 'химсталь первый C_1', 'химсталь первый Cr_1',
                      'химсталь первый Cu_1', 'химсталь первый Mn_1', 'химсталь первый Mo_1',
                      'химсталь первый N_1', 'химсталь первый Ni_1', 'химсталь первый P_1',
                      'химсталь первый S_1', 'химсталь первый Si_1', 'химсталь первый Ti_1',
                      'химсталь первый V_1', 'химсталь последний Al', 'химсталь последний C',
                      'химсталь последний Ca', 'химсталь последний Cr',
                      'химсталь последний Cu', 'химсталь последний Mn',
                      'химсталь последний Mo', 'химсталь последний N',
                      'химсталь последний Ni', 'химсталь последний P', 'химсталь последний S',
                      'химсталь последний Si', 'химсталь последний Ti',
                      'химсталь последний V', 'химшлак первый Al2O3_1',
                      'химшлак первый CaO_1', 'химшлак первый FeO_1', 'химшлак первый MgO_1',
                      'химшлак первый MnO_1', 'химшлак первый R_1', 'химшлак первый SiO2_1']
# -
# We build a separate model for each target.
#
# **Some targets contain missing values - those rows must be dropped.**
scores = {}  # target label -> cross-validated MAPE, in percent
models = {}  # target label -> the (unfitted) estimator that produced the score
# # Al2O3
#
# Baseline MAPE = 11.38%
# Result here: **MAPE = 11.14%**
target = 'химшлак последний Al2O3'
# Drop the other six targets so they cannot leak into the feature matrix.
targets_to_drop = ['химшлак последний CaO', 'химшлак последний FeO',
                   'химшлак последний MgO', 'химшлак последний MnO', 'химшлак последний R',
                   'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()  # this target has gaps — drop incomplete rows
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
# Re-assemble X as the untouched categoricals joined with standard-scaled numericals.
# NOTE(review): the join relies on both frames getting the same fresh RangeIndex,
# and on X's numeric columns matching the order of `numerical_features` — verify upstream.
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Feature selection with Lasso
# +
# Keep only features with a non-zero Lasso coefficient.
sel_ = SelectFromModel(Lasso(alpha=0.005, random_state=seed))
sel_.fit(X, y)
selected_features = X.columns[(sel_.get_support())]
print('Было признаков: {}'.format((X.shape[1])))
print('Отобрано признаков: {}'.format(len(selected_features)))
print('Удалено признаков: {}'.format(np.sum(sel_.estimator_.coef_ == 0)))
# -
X = X[selected_features]
# ## Stacking model
# +
# Stack a linear model with two tree ensembles; score by cross-validated MAPE.
estimators = [('lr', LinearRegression()),
              ('lgb', LGBMRegressor()),
              ('rf', RandomForestRegressor())]
stacking_reg = StackingRegressor(estimators=estimators)
scores_stacking = cross_val_score(stacking_reg, X, y,
                                  scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_stacking.mean())*100))
# -
scores['Al2O3'] = np.abs(scores_stacking.mean())*100
models['Al2O3'] = stacking_reg
# # CaO
#
# Baseline MAPE = 4.82%
# Final pipeline: **MAPE = 4.76%**
#
# Same pipeline as the Al2O3 section: drop the other targets, scale, Lasso-select, stack.
target = 'химшлак последний CaO'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний FeO',
                   'химшлак последний MgO', 'химшлак последний MnO',
                   'химшлак последний R', 'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Feature selection with Lasso
# +
sel_ = SelectFromModel(Lasso(alpha=0.005, random_state=seed))
sel_.fit(X, y)
selected_features = X.columns[(sel_.get_support())]
print('Было признаков: {}'.format((X.shape[1])))
print('Отобрано признаков: {}'.format(len(selected_features)))
print('Удалено признаков: {}'.format(np.sum(sel_.estimator_.coef_ == 0)))
X = X[selected_features]
# +
estimators = [('lr', LinearRegression()),
              ('lgb', LGBMRegressor()),
              ('rf', RandomForestRegressor())]
stacking_reg = StackingRegressor(estimators=estimators)
scores_stacking = cross_val_score(stacking_reg, X, y,
                                  scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_stacking.mean())*100))
# -
scores['CaO'] = np.abs(scores_stacking.mean())*100
models['CaO'] = stacking_reg
# # FeO
#
# This one is difficult; improvements have not worked well.
# Baseline MAPE = 35.29%
# Final pipeline: **MAPE = 35.09%**
#
# Same pipeline as the Al2O3 section: drop the other targets, scale, Lasso-select, stack.
target = 'химшлак последний FeO'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний MgO', 'химшлак последний MnO',
                   'химшлак последний R', 'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Feature selection
# +
sel_ = SelectFromModel(Lasso(alpha=0.005, random_state=seed))
sel_.fit(X, y)
selected_features = X.columns[(sel_.get_support())]
print('Было признаков: {}'.format((X.shape[1])))
print('Отобрано признаков: {}'.format(len(selected_features)))
print('Удалено признаков: {}'.format(np.sum(sel_.estimator_.coef_ == 0)))
X = X[selected_features]
# +
estimators = [('lr', LinearRegression()),
              ('lgb', LGBMRegressor()),
              ('rf', RandomForestRegressor())]
stacking_reg = StackingRegressor(estimators=estimators)
scores_stacking = cross_val_score(stacking_reg, X, y,
                                  scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_stacking.mean())*100))
# -
scores['FeO'] = np.abs(scores_stacking.mean())*100
models['FeO'] = stacking_reg
# # MgO
#
# Baseline MAPE = 26.21%
# Final pipeline: **MAPE = 25.73%**
#
# Same pipeline as the Al2O3 section: drop the other targets, scale, Lasso-select, stack.
target = 'химшлак последний MgO'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний FeO', 'химшлак последний MnO',
                   'химшлак последний R', 'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Feature selection
# +
sel_ = SelectFromModel(Lasso(alpha=0.005, random_state=seed))
sel_.fit(X, y)
selected_features = X.columns[(sel_.get_support())]
print('Было признаков: {}'.format((X.shape[1])))
print('Отобрано признаков: {}'.format(len(selected_features)))
print('Удалено признаков: {}'.format(np.sum(sel_.estimator_.coef_ == 0)))
X = X[selected_features]
# -
# ## Stacking
# +
estimators = [('lr', LinearRegression()),
              ('lgb', LGBMRegressor()),
              ('rf', RandomForestRegressor())]
stacking_reg = StackingRegressor(estimators=estimators)
scores_stacking = cross_val_score(stacking_reg, X, y,
                                  scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_stacking.mean())*100))
# -
scores['MgO'] = np.abs(scores_stacking.mean())*100
models['MgO'] = stacking_reg
# # MnO
#
# **Baseline MAPE = 30.95%**. Could not beat it, so the plain linear
# regression baseline is kept as this target's model.
target = 'химшлак последний MnO'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний FeO', 'химшлак последний MgO',
                   'химшлак последний R', 'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Linear regression
lin_reg_base = LinearRegression()
scores_lin_reg_base = cross_val_score(lin_reg_base, X, y,
                                      scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_lin_reg_base.mean())*100))
scores['MnO'] = np.abs(scores_lin_reg_base.mean())*100
models['MnO'] = lin_reg_base
# # R
#
# Baseline MAPE = 6.19%
# Final pipeline: **MAPE = 5.98%**
#
# Same pipeline as the Al2O3 section, but with a smaller Lasso alpha (0.001).
target = 'химшлак последний R'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний FeO', 'химшлак последний MgO',
                   'химшлак последний MnO', 'химшлак последний SiO2']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Feature selection
# +
sel_ = SelectFromModel(Lasso(alpha=0.001, random_state=seed))
sel_.fit(X, y)
selected_features = X.columns[(sel_.get_support())]
print('Было признаков: {}'.format((X.shape[1])))
print('Отобрано признаков: {}'.format(len(selected_features)))
print('Удалено признаков: {}'.format(np.sum(sel_.estimator_.coef_ == 0)))
X = X[selected_features]
# -
# ## Stacking
# +
estimators = [('lr', LinearRegression()),
              ('lgb', LGBMRegressor()),
              ('rf', RandomForestRegressor())]
stacking_reg = StackingRegressor(estimators=estimators)
scores_stacking = cross_val_score(stacking_reg, X, y,
                                  scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_stacking.mean())*100))
# -
scores['R'] = np.abs(scores_stacking.mean())*100
models['R'] = stacking_reg
# # SiO2
#
# **Baseline MAPE = 6.98%**. Could not improve on it, so the plain linear
# regression baseline is kept as this target's model.
target = 'химшлак последний SiO2'
targets_to_drop = ['химшлак последний Al2O3', 'химшлак последний CaO',
                   'химшлак последний FeO', 'химшлак последний MgO',
                   'химшлак последний MnO', 'химшлак последний R']
cleaned_data = data.drop(targets_to_drop, axis=1)
cleaned_data = cleaned_data.dropna()
X = cleaned_data.drop(target, axis=1)
y = cleaned_data[target]
# ## Normalization
# +
X_categorical = pd.DataFrame(np.array(X[categorical_features]), columns=categorical_features)
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X.drop(categorical_features, axis=1)),
                        columns=numerical_features)
X = X_categorical.join(X_scaled)
# -
# ## Linear regression
lin_reg_base = LinearRegression()
scores_lin_reg_base = cross_val_score(lin_reg_base, X, y,
                                      scoring='neg_mean_absolute_percentage_error')
print("Кросс-валидация: {:.2f}%".format(np.abs(scores_lin_reg_base.mean())*100))
scores['SiO2'] = np.abs(scores_lin_reg_base.mean())*100
models['SiO2'] = lin_reg_base
# # Conclusions
# Per-target cross-validated MAPE, collected above.
pd.DataFrame.from_dict(scores, orient='index', columns=['MAPE score %'])
# В результате можно сделать следующие выводы по данному этапу:
#
# - Целевые переменные **'химшлак последний CaO'**, **'химшлак последний R'** и **'химшлак последний SiO2'** предсказываются с достаточно высокой точностью.
# - Целевая переменная **'химшлак последний Al2O3'** имеет погрешность предсказания 11.14%, что вполне удовлетворительно, однако следует внимательнее проработать модель, возможно метрику удастся улучшить.
# - Целевые переменные **'химшлак последний FeO'**, **'химшлак последний MgO'** и **'химшлак последний MnO'** предсказываются с довольно высокой погрешностью, которую не получается снизить до приемлемого уровня. Возможно, по имеющимся данным в принципе невозможно обеспечить низкую ошибку.
| Melekhin/2_Model_Selection_MiddleReport.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# name: ir
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/tbonne/IntroPychStats/blob/main/notebooks/DataViz1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="trouKhmEw-ii"
# <img src='http://drive.google.com/uc?export=view&id=1PmB2IttC7fpQdCjY9F03KDnV_Oe_MtCp' width=500>
# + [markdown] id="5Z2vKe5lxjuk"
# #<font color='darkorange'>Data visualization in R</font>
# + [markdown] id="_QeNGJdGx3tD"
# After collecting data it is often very useful to visualize your data. We'll take a look at some ways below!
#
# + id="_VFNLdMJ0J7Z"
library(ggplot2)
# + id="EgYORjPizOmA"
# load in movies data (FiveThirtyEight's Fandango score-comparison CSV)
movies <- read.csv("https://raw.githubusercontent.com/fivethirtyeight/data/master/fandango/fandango_score_comparison.csv", header=TRUE)
# + colab={"base_uri": "https://localhost:8080/", "height": 531} id="UGU1WtrozYix" outputId="8af30001-b311-4c19-c090-07320a0f35d0"
# Peek at the first rows of the data
head(movies)
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="06FXaqL5zuce" outputId="d2042f3e-08c8-4ce0-ecad-1e1cd57482d2"
# Distribution of IMDB scores as a histogram
ggplot(movies, aes(x=IMDB) ) + geom_histogram()
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="xMcBNlgF0F5x" outputId="f6914098-43b4-4458-b61f-c25cbc8dd09c"
# Same distribution as a smoothed density curve
ggplot(movies, aes(x=IMDB) ) + geom_density()
# + id="71cOMrO4JCHy"
# Scatter plot of IMDB vs Rotten Tomatoes scores.
# NOTE(review): the next three cells produce identical plots — likely leftover duplicates.
ggplot(movies, aes(x=IMDB,y=RottenTomatoes) ) + geom_point()
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="Of7HouKe1fxF" outputId="778ccab5-ed7a-4d6c-e6fa-62e8b0731aff"
ggplot(movies, aes(x=IMDB,y=RottenTomatoes) ) + geom_point()
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="lp-pR4NvJJ67" outputId="69830d11-8fee-4d21-9bfc-6ca3d057708a"
ggplot(movies, aes(x=IMDB,y=RottenTomatoes ) ) + geom_point()
| notebooks/DataViz1.ipynb |
# +
# #!/usr/bin/env python
# This Python file uses the following encoding: utf-8
# Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stigler diet example"""
from __future__ import print_function
from six.moves import xrange
from ortools.linear_solver import pywraplp
"""Entry point of the program"""
# Nutrient minimums.
# Each entry is [nutrient name, minimum daily amount]; the constraint loop
# below pairs entry i with column i + 3 of each `data` row.
nutrients = [['Calories (kcal)', 3], ['Protein (g)', 70],
             ['Calcium (g)', 0.8], ['Iron (mg)', 12],
             ['Vitamin A (KIU)', 5], ['Vitamin B1 (mg)', 1.8],
             ['Vitamin B2 (mg)', 2.7], ['Niacin (mg)', 18],
             ['Vitamin C (mg)', 75]]
# Commodity, Unit, 1939 price (cents), Calories (kcal), Protein (g), Calcium (g), Iron (mg),
# Vitamin A (KIU), Vitamin B1 (mg), Vitamin B2 (mg), Niacin (mg), Vitamin C (mg)
data = [[
'Wheat Flour (Enriched)', '10 lb.', 36, 44.7, 1411, 2, 365, 0, 55.4,
33.3, 441, 0
], ['Macaroni', '1 lb.', 14.1, 11.6, 418, 0.7, 54, 0, 3.2, 1.9, 68, 0], [
'Wheat Cereal (Enriched)', '28 oz.', 24.2, 11.8, 377, 14.4, 175, 0,
14.4, 8.8, 114, 0
], ['Corn Flakes', '8 oz.', 7.1, 11.4, 252, 0.1, 56, 0, 13.5, 2.3, 68, 0], [
'Corn Meal', '1 lb.', 4.6, 36.0, 897, 1.7, 99, 30.9, 17.4, 7.9, 106, 0
], [
'Hominy Grits', '24 oz.', 8.5, 28.6, 680, 0.8, 80, 0, 10.6, 1.6, 110, 0
], ['Rice', '1 lb.', 7.5, 21.2, 460, 0.6, 41, 0, 2, 4.8, 60, 0], [
'Rolled Oats', '1 lb.', 7.1, 25.3, 907, 5.1, 341, 0, 37.1, 8.9, 64, 0
], [
'White Bread (Enriched)', '1 lb.', 7.9, 15.0, 488, 2.5, 115, 0, 13.8,
8.5, 126, 0
], [
'Whole Wheat Bread', '1 lb.', 9.1, 12.2, 484, 2.7, 125, 0, 13.9, 6.4,
160, 0
], ['Rye Bread', '1 lb.', 9.1, 12.4, 439, 1.1, 82, 0, 9.9, 3, 66, 0], [
'Pound Cake', '1 lb.', 24.8, 8.0, 130, 0.4, 31, 18.9, 2.8, 3, 17, 0
], ['Soda Crackers', '1 lb.', 15.1, 12.5, 288, 0.5, 50, 0, 0, 0, 0, 0], [
'Milk', '1 qt.', 11, 6.1, 310, 10.5, 18, 16.8, 4, 16, 7, 177
], [
'Evaporated Milk (can)', '14.5 oz.', 6.7, 8.4, 422, 15.1, 9, 26, 3,
23.5, 11, 60
], ['Butter', '1 lb.', 30.8, 10.8, 9, 0.2, 3, 44.2, 0, 0.2, 2, 0], [
'Oleomargarine', '1 lb.', 16.1, 20.6, 17, 0.6, 6, 55.8, 0.2, 0, 0, 0
], ['Eggs', '1 doz.', 32.6, 2.9, 238, 1.0, 52, 18.6, 2.8, 6.5, 1, 0], [
'Cheese (Cheddar)', '1 lb.', 24.2, 7.4, 448, 16.4, 19, 28.1, 0.8, 10.3,
4, 0
], ['Cream', '1/2 pt.', 14.1, 3.5, 49, 1.7, 3, 16.9, 0.6, 2.5, 0, 17], [
'Peanut Butter', '1 lb.', 17.9, 15.7, 661, 1.0, 48, 0, 9.6, 8.1, 471, 0
], ['Mayonnaise', '1/2 pt.', 16.7, 8.6, 18, 0.2, 8, 2.7, 0.4, 0.5, 0, 0], [
'Crisco', '1 lb.', 20.3, 20.1, 0, 0, 0, 0, 0, 0, 0, 0
], ['Lard', '1 lb.', 9.8, 41.7, 0, 0, 0, 0.2, 0, 0.5, 5, 0], [
'Sirloin Steak', '1 lb.', 39.6, 2.9, 166, 0.1, 34, 0.2, 2.1, 2.9, 69, 0
], ['Round Steak', '1 lb.', 36.4, 2.2, 214, 0.1, 32, 0.4, 2.5, 2.4, 87, 0
], ['Rib Roast', '1 lb.', 29.2, 3.4, 213, 0.1, 33, 0, 0, 2, 0, 0], [
'Chuck Roast', '1 lb.', 22.6, 3.6, 309, 0.2, 46, 0.4, 1, 4, 120, 0
], ['Plate', '1 lb.', 14.6, 8.5, 404, 0.2, 62, 0, 0.9, 0, 0, 0], [
'Liver (Beef)', '1 lb.', 26.8, 2.2, 333, 0.2, 139, 169.2, 6.4, 50.8,
316, 525
], [
'Leg of Lamb', '1 lb.', 27.6, 3.1, 245, 0.1, 20, 0, 2.8, 3.9, 86, 0
], [
'Lamb Chops (Rib)',
'1 lb.', 36.6, 3.3, 140, 0.1, 15, 0, 1.7, 2.7, 54, 0
], [
'Pork Chops', '1 lb.', 30.7, 3.5, 196, 0.2, 30, 0, 17.4, 2.7, 60, 0
], [
'Pork Loin Roast',
'1 lb.', 24.2, 4.4, 249, 0.3, 37, 0, 18.2, 3.6, 79, 0
], ['Bacon', '1 lb.', 25.6, 10.4, 152, 0.2, 23, 0, 1.8, 1.8, 71, 0], [
'Ham, smoked', '1 lb.', 27.4, 6.7, 212, 0.2, 31, 0, 9.9, 3.3, 50, 0
], ['Salt Pork', '1 lb.', 16, 18.8, 164, 0.1, 26, 0, 1.4, 1.8, 0, 0], [
'Roasting Chicken', '1 lb.', 30.3, 1.8, 184, 0.1, 30, 0.1, 0.9, 1.8,
68, 46
], [
'Veal Cutlets', '1 lb.', 42.3, 1.7, 156, 0.1, 24, 0, 1.4, 2.4, 57, 0
], [
'Salmon, Pink (can)', '16 oz.', 13, 5.8, 705, 6.8, 45, 3.5,
1, 4.9, 209, 0
], ['Apples', '1 lb.', 4.4, 5.8, 27, 0.5, 36, 7.3, 3.6, 2.7, 5, 544], [
'Bananas', '1 lb.', 6.1, 4.9, 60, 0.4, 30, 17.4, 2.5, 3.5, 28, 498
], ['Lemons', '1 doz.', 26, 1.0, 21, 0.5, 14, 0, 0.5, 0, 4, 952], [
'Oranges', '1 doz.', 30.9, 2.2, 40, 1.1, 18, 11.1, 3.6, 1.3, 10, 1998
], [
'Green Beans', '1 lb.', 7.1, 2.4, 138, 3.7, 80, 69, 4.3, 5.8, 37, 862
], ['Cabbage', '1 lb.', 3.7, 2.6, 125, 4.0, 36, 7.2, 9, 4.5, 26, 5369], [
'Carrots', '1 bunch', 4.7, 2.7, 73, 2.8, 43, 188.5, 6.1, 4.3, 89, 608
], ['Celery', '1 stalk', 7.3, 0.9, 51, 3.0, 23, 0.9, 1.4, 1.4, 9, 313], [
'Lettuce', '1 head', 8.2, 0.4, 27, 1.1, 22, 112.4, 1.8, 3.4, 11, 449
], ['Onions', '1 lb.', 3.6, 5.8, 166, 3.8, 59, 16.6, 4.7, 5.9, 21,
1184], [
'Potatoes', '15 lb.', 34, 14.3, 336, 1.8, 118, 6.7, 29.4, 7.1,
198, 2522
], [
'Spinach', '1 lb.', 8.1, 1.1, 106, 0, 138, 918.4, 5.7, 13.8, 33,
2755
], [
'Sweet Potatoes', '1 lb.', 5.1, 9.6, 138, 2.7, 54, 290.7, 8.4,
5.4, 83, 1912
], [
'Peaches (can)', 'No. 2 1/2', 16.8, 3.7, 20, 0.4, 10, 21.5, 0.5,
1, 31, 196
], [
'Pears (can)', 'No. 2 1/2', 20.4, 3.0, 8, 0.3, 8, 0.8, 0.8, 0.8,
5, 81
], [
'Pineapple (can)', 'No. 2 1/2', 21.3, 2.4, 16, 0.4, 8, 2, 2.8,
0.8, 7, 399
], [
'Asparagus (can)', 'No. 2', 27.7, 0.4, 33, 0.3, 12, 16.3, 1.4,
2.1, 17, 272
], [
'Green Beans (can)', 'No. 2', 10, 1.0, 54, 2, 65, 53.9, 1.6, 4.3,
32, 431
], [
'Pork and Beans (can)', '16 oz.', 7.1, 7.5, 364, 4, 134, 3.5,
8.3, 7.7, 56, 0
], [
'Corn (can)', 'No. 2', 10.4, 5.2, 136, 0.2, 16, 12, 1.6, 2.7, 42,
218
], [
'Peas (can)', 'No. 2', 13.8, 2.3, 136, 0.6, 45, 34.9, 4.9, 2.5,
37, 370
], [
'Tomatoes (can)', 'No. 2', 8.6, 1.3, 63, 0.7, 38, 53.2, 3.4, 2.5,
36, 1253
], [
'Tomato Soup (can)', '10 1/2 oz.', 7.6, 1.6, 71, 0.6, 43, 57.9,
3.5, 2.4, 67, 862
], [
'<NAME>', '1 lb.', 15.7, 8.5, 87, 1.7, 173, 86.8, 1.2,
4.3, 55, 57
], [
'<NAME>', '1 lb.', 9, 12.8, 99, 2.5, 154, 85.7, 3.9, 4.3,
65, 257
], [
'<NAME>', '15 oz.', 9.4, 13.5, 104, 2.5, 136, 4.5, 6.3,
1.4, 24, 136
], [
'<NAME>', '1 lb.', 7.9, 20.0, 1367, 4.2, 345, 2.9, 28.7,
18.4, 162, 0
], [
'<NAME>', '1 lb.', 8.9, 17.4, 1055, 3.7, 459, 5.1,
26.9, 38.2, 93, 0
], [
'<NAME>', '1 lb.', 5.9, 26.9, 1691, 11.4, 792, 0,
38.4, 24.6, 217, 0
], ['Coffee', '1 lb.', 22.4, 0, 0, 0, 0, 0, 4, 5.1, 50,
0], ['Tea', '1/4 lb.', 17.4, 0, 0, 0, 0, 0, 0, 2.3, 42, 0],
['Cocoa', '8 oz.', 8.6, 8.7, 237, 3, 72, 0, 2, 11.9, 40, 0], [
'Chocolate', '8 oz.', 16.2, 8.0, 77, 1.3, 39, 0, 0.9, 3.4, 14, 0
], ['Sugar', '10 lb.', 51.7, 34.9, 0, 0, 0, 0, 0, 0, 0, 0],
['Corn Syrup', '24 oz.', 13.7, 14.7, 0, 0.5, 74, 0, 0, 0, 5, 0], [
'Molasses', '18 oz.', 13.6, 9.0, 0, 10.3, 244, 0, 1.9, 7.5, 146,
0
], [
'Strawberry Preserves', '1 lb.', 20.5, 6.4, 11, 0.4, 7, 0.2,
0.2, 0.4, 3, 0
]]
# Instantiate a Glop (linear programming) solver for the Stigler diet problem.
solver = pywraplp.Solver('StiglerDietExample',
                         pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
# Declare an array to hold our variables: one non-negative variable per
# commodity in `data`, named after the commodity.
foods = [solver.NumVar(0.0, solver.infinity(), item[0]) for item in data]
# Objective function: Minimize the sum of (price-normalized) foods.
objective = solver.Objective()
for food in foods:
    objective.SetCoefficient(food, 1)
objective.SetMinimization()
# Create the constraints, one per nutrient: the diet must supply at least
# nutrient[1] of each nutrient, with no upper bound.
constraints = []
for i, nutrient in enumerate(nutrients):
    constraints.append(solver.Constraint(nutrient[1], solver.infinity()))
    for j, item in enumerate(data):
        # Nutrient i lives in column i + 3 of each data row
        # (columns 0-2 are commodity, unit and price).
        constraints[i].SetCoefficient(foods[j], item[i + 3])
print('Number of variables =', solver.NumVariables())
print('Number of constraints =', solver.NumConstraints())

# Solve the system.
status = solver.Solve()

# Check that the problem has an optimal solution before reading results.
if status != pywraplp.Solver.OPTIMAL:
    print("The problem does not have an optimal solution!")
    # `exit` is injected by the `site` module and is absent under `python -S`;
    # raising SystemExit directly is the portable equivalent of exit(1).
    raise SystemExit(1)
# Per-nutrient totals delivered by the optimal diet, accumulated below.
nutrients_result = [0] * len(nutrients)
print('')
print('Annual Foods:')
for i, food in enumerate(foods):
    if food.solution_value() > 0.0:
        # Scale the daily solution up to an annual cost for this food.
        print('{}: ${}'.format(data[i][0], 365. * food.solution_value()))
        for j, nutrient in enumerate(nutrients):
            nutrients_result[j] += data[i][j + 3] * food.solution_value()
print('')
print('Optimal annual price: ${:.4f}'.format(365. * objective.Value()))
print('')
print('Nutrients per day:')
for i, nutrient in enumerate(nutrients):
    print('{}: {:.2f} (min {})'.format(nutrient[0], nutrients_result[i],
                                       nutrient[1]))
print('')
print('Advanced usage:')
print('Problem solved in ', solver.wall_time(), ' milliseconds')
print('Problem solved in ', solver.iterations(), ' iterations')
| examples/notebook/examples/stigler_diet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Playful: Find your new favorite computer game
# Here is the basic outline of how I built [an app that recommends computer games on Steam](http://www.playful.live/) using a combination of python and PostgreSQL.
#
# ## Import stuff
# My config.py file is not on GitHub. You need your own Steam API key and database information.
import json
import pandas as pd
import numpy as np
from app.config import api_key, db_username, db_password, db_host, db_port
from urllib.request import Request, urlopen
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
import pickle
from lightfm import LightFM
from lightfm.evaluation import recall_at_k
from lightfm.cross_validation import random_train_test_split
from scipy import sparse
import math
import random
# ## Scrape reviews for user IDs
# Scraping hub has [a detailed example of how to scrape reviews from the Steam store using scrapy](https://blog.scrapinghub.com/2017/07/07/scraping-the-steam-game-store-with-scrapy/), complete with code in a GitHub repo.
#
# I scraped all of the reviews, which took about 4 days, in case later on I want to incorporate some of that information into the recommendations. For now the only thing I'm using from that exercise is a list of ~400,000 unique Steam user IDs of the review writers. I did not include any other Steam users, so my recommendations are biased toward games owned by people who have written reviews.
#
# Due to space limitations on GitHub, I am sharing only a small part of 1 of the 3 scrapy output files.
# +
def load_reviews():
    """Read every scraped review (one JSON object per line) into a list of dicts."""
    path_to_scraped_data = 'example_data//'
    files = ['scraped_reviews.jl']
    reviews = []
    for file in files:
        # Each .jl ("JSON lines") file holds one review object per line.
        with open(''.join((path_to_scraped_data, file)), 'r') as f:
            reviews.extend(json.loads(line) for line in f)
    return reviews
# Collect the Steam ID of every review author; reviews without one are skipped.
scraped_reviews = load_reviews()
user_ids = [review['user_id'] for review in scraped_reviews if 'user_id' in review]
unique_users = list(set(user_ids))
print('There are', len(unique_users), 'unique steam user IDs in the sample data.')
# -
# ## API calls for game ownership
# This took about 5 minutes, and you have to be online for the API call to work.
#
# In the real app, I'm using a pickled version of the results to avoid complications in case a user deletes their account.
# +
def getGamesOwned(player_id):
    """Return the list of games owned by the given Steam user.

    Calls the Steam GetOwnedGames web API. Returns an empty list when the
    request fails or the response has no 'games' key (e.g. private profile).
    """
    req = Request('http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?key=%s&steamid=%s&format=json&include_played_free_games=True&include_appinfo=True'%(api_key, player_id))
    try:
        data_raw = urlopen(req).read()
        data_json = json.loads(data_raw)
        return data_json['response']['games']
    except (OSError, ValueError, KeyError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
        # longer swallowed. OSError covers URLError/HTTPError, ValueError covers
        # malformed JSON, KeyError covers responses without a 'games' entry.
        return []
def get_all_games_owned_by_players(user_ids):
    """Map each Steam user ID to that user's list of owned games (via the API)."""
    return {gamer_id: getGamesOwned(gamer_id) for gamer_id in user_ids}
users_and_their_games = get_all_games_owned_by_players(unique_users)
# -
# ## Put the ownership data into pandas and PostgreSQL
# Every user-game pair gets its own row in the database. For example, say I have data for only 2 unique Steam users, Katie and Minchun. If Katie owns 20 games and Minchun owns 3 games, I'll end up with 23 rows.
#
# You have to have a SQL server installed and running with appropriate password information for this section to work. Also, I used Windows. The syntax will be different on a Mac or Linux.
# +
# Flatten the {user: [games]} mapping into one row per (user, game) pair.
user_column = []
app_column = []
for user in unique_users:
    for game in users_and_their_games[user]:
        user_column.append(user)
        app_column.append(game['appid'])
user_game_df = pd.DataFrame({'user_id':user_column, 'app_id':app_column})
# Write the ownership table to PostgreSQL, creating the database if needed.
db_name = 'playful'
engine = create_engine('postgresql+psycopg2://%s:%s@%s:%s/%s'%(db_username,db_password,db_host,db_port,db_name))
if not database_exists(engine.url):
    create_database(engine.url)
user_game_df.to_sql('user_games_table', engine, if_exists='replace')
user_game_df.head()
# -
# ## SQL query for most popular games
# This is how I came up with the list of the 12 most popular games on the app homepage. I'll convert the game IDs into actual names shortly.
#
# At scale, this SQL query was much faster than a similar analysis in pandas.
# +
# Top-12 most-owned games, counted directly in SQL (faster than pandas at scale).
sql_query = """ SELECT app_id, COUNT(user_id) AS "n_owners"
FROM user_games_table
GROUP BY app_id
ORDER BY n_owners DESC
LIMIT 12
"""
con = None
# Bug fix: the literal placeholder `<PASSWORD>` was not valid Python; use the
# credential imported from app.config like the other connection parameters.
con = psycopg2.connect(database=db_name, user=db_username, password=db_password, host=db_host, port=db_port)
most_popular_game_ids = pd.read_sql_query(sql_query, con).app_id.values
print('Here are the IDs of the most-owned games')
for game in most_popular_game_ids:
    print(game)
# -
# ## Determine unique users and games
# +
# Pull the full ownership table back out of PostgreSQL and summarize it.
sql_query = """ SELECT *
FROM user_games_table
"""
df = pd.read_sql_query(sql_query, con)
df.drop_duplicates(inplace=True)
unique_users = df.user_id.unique()
unique_games = df.app_id.unique()
n_users = len(unique_users)
n_games = len(unique_games)
n_datapoints = len(df)
# Fraction of the (users x games) matrix that actually holds data, in percent.
sparsity = 100* n_datapoints / (n_users*n_games)
print('number of data points', n_datapoints)
print('number of users:', n_users)
print('number of games:', n_games)
print('Sparsity of data in the example interactions matrix: {:4.3f}%'.format(sparsity))
# -
# ## Mappers
# Each game has 3 different ways we can refer to it:
# * the game's name (gamename)
# * the game's Steam ID (gameid)
# * the game's location in the interactions matrix (idx)
#
# I made 6 different mapper dictionaries to convert from one representation of a game to another. The game name to Steam ID mapping is from the API, but here and in the app I'm using stored data for that data and 2 of the mapper dictionaries.
#
# The users also get mapped to indexes in the matrix.
# +
## Game name and game ID information from API
# req = Request('http://api.steampowered.com/ISteamApps/GetAppList/v2/?key=%s'%(api_key))
# data_raw = urlopen(req).read()
# data_json = json.loads(data_raw)['applist']['apps']
## Saved game name and game ID info
with open('app//playful//static//data//all_game_info.txt', 'r') as f:
    all_game_info = json.load(f)
# Two-way lookup between Steam app IDs and human-readable game names.
gameid_to_name = {}
gamename_to_gameid = {}
for app in all_game_info:
    gameid_to_name[app['appid']] = app['name']
    gamename_to_gameid[app['name']] = app['appid']
# Lookups between matrix column index (idx), Steam app ID, and game name.
idx_to_name = {}
idx_to_gameid = {}
name_to_idx = {}
gameid_to_idx = {}
for idx, gameid in enumerate(unique_games):
    idx_to_gameid[idx] = gameid
    gameid_to_idx[gameid] = idx
    try:
        idx_to_name[idx] = gameid_to_name[gameid]
    except KeyError:
        # Game owned by some user but absent from the saved app list.
        idx_to_name[idx] = "Could not identify this game. Maybe it's new?"
    try:
        name_to_idx[gameid_to_name[gameid]] = idx
    except KeyError:
        pass
# Map Steam user IDs to matrix row indexes and back.
userid_to_idx = {}
idx_to_userid = {}
for (idx, userid) in enumerate(unique_users):
    userid_to_idx[userid] = idx
    idx_to_userid[idx] = userid
# examples
game_idx = 2000
game_id = idx_to_gameid[game_idx]
game_name = gameid_to_name[game_id]
print(game_name, 'will be game number', game_idx, 'in the interactions matrix and has Steam game ID', game_id)
print('\nThe most-owned games in this sample of data by name instead of game ID:')
for gameid in most_popular_game_ids:
    print(gameid_to_name[gameid])
# -
# ## Build the sparse interactions matrix
# I and J specify the locations in the sparse matrix where the data V will go.
#
# ### Ownership data
# The data in this case are all 1's that we put in the matrix to indicate which owner owns which game. All of the remaining entries in the matrix are zeroes, meaning we don't have any information about whether a given user is interested in a particular game.
#
# ### Hours played data
# The API calls also give me the number of hours each user has played, so I could use some function of that number instead of just the binary owns/doesn't own. I played around with this a little bit, and LightFM can do that, but it's not as simple as just swapping the ones in the data for the hours played. They need to go in as sample weights instead, and in a sparse matrix form that matches the training data. If only I had another two weeks...
#
# Here are some additional considerations if I were to use hours played data.
# * **What does it mean when a user owns a game but hasn't played it?**
# Maybe they just bought the game and are really super excited about it, but I would assume that means they weren't that interested in the game, and so ideally I would put a -1 in the matrix. I don't think LightFM can handle that.
# * **Sometimes people leave a game on even when they aren't playing it.**
# I could either apply a time cutoff or use the log of the hours played.
# * **Some games end quickly while others lend themselves to much longer playtimes.**
# I could normalize the times by average time played or perhaps based on genre.
# * **Older games have an advantage.**
# This is true, and my model also totally fails to account for changes in user preferences over time. However! The API call also tells me how long a user has spent playing each game in the last two weeks, so I could train on just that data.
# +
def map_id(idx_to_switch_out, mapper):
    """Look up *idx_to_switch_out* in the *mapper* dictionary and return the mapped value."""
    mapped = mapper[idx_to_switch_out]
    return mapped
# Build the COO sparse interactions matrix: row I = user index,
# column J = game index, value V = 1 (ownership indicator).
I = df.user_id.apply(map_id, args=[userid_to_idx]).values
J = df.app_id.apply(map_id, args=[gameid_to_idx]).values
V = np.ones_like(I)
interaction_matrix = sparse.coo_matrix((V, (I, J)), dtype=np.float64)
# -
# ## Split the data into training and test sets
# This split is not as straightforward as for some other machine learning algorithms because I need *some* information about what a user owns to make recommendations, so I can't just hold a group of users out entirely. Instead, I split the data into two sets with the same users, but my training data contains 80% of the users' games, and the test data contains the other 20%. The python package LightFM includes a handy function for doing that for me.
# LightFM's splitter keeps every user in both sets: per the note above, 80% of
# each user's interactions go to training and 20% to test.
traindata, testdata = random_train_test_split(interaction_matrix)
# ## Implement matrix factorization
# LightFM uses stochastic gradient descent to solve for the latent vectors, or embeddings, that characterize each game and user in the interactions matrix.
#
# Hyperparameters that must be chosen for the model include:
# * the length of the latent vectors (no_components)
# * the learning rate to use during gradient descent
# * the number of iterations, or epochs, to use when trying to fit the data
# * the exact form of the loss function (the default is called WARP)
#
# Ideally one would use a grid search or start with random points within a grid search to decide what values to use for the various hyperparameters. That takes awhile, so here I'm showing the fit with the hyperparameters I used. Note that I did not do a proper grid search, but there is graph in backup slides at playful.live showing that the number of components in particular is certainly improved from the default value of 10.
# Hyperparameters chosen from the manual exploration described above.
model = LightFM(no_components=25, learning_rate=0.045)
model.fit(traindata, epochs=40)
# ## Recall@k
# There are a lot of different validation metrics one can use to evaluate recommender systems. The one I used when optimizing my hyperparameters is called recall@k.
#
# Recall refers to the number of true positives / (the number of true positives + the number of false negatives), and I like it better than precision (true positives / (true positives + false positives)) here because recall, unlike precision, does not assume that a zero in the matrix (lack of ownership) means that person won't like the game if we recommended it.
#
# Recall@k tells us this: if I recommend only k games (12 games in this example) out of my list of ~20,000 games to users based on their games in the training data, how likely am I to recommend the games they own that I held out when training the model?
#
# And again LightFM has a handy function.
# +
# Mean recall@12 over users in the held-out test set.
example_recall = recall_at_k(model, testdata, k=12).mean()
# Recall achieved by the full model trained on all ~400K users (hard-coded here).
true_model_recall = 0.083
print('recall@12 for this example:', example_recall)
print('recall@12 for my actual model:', true_model_recall)
# -
# ### Comparison with just recommending the most popular games
# This is a super relevant and important comparison to make, but the math is not straightforward. I tried simulating it with a for loop, but that approach hadn't found a single hit (a randomly dropped game that was one of the 12 most popular games) even after running all night. In contrast, LightFM's recall_at_k function is incredibly fast, I think because they're making good use of things like cython and sparse matrices. If I had another two weeks, this comparison is definitely something I would want to sort out. Just qualitatively though, I will note that there is a lot of diversity in the genres of those 12 most-owned games (e.g., a physics sandbox vs a first-person shooter vs a strategy game), and the recommendations my model produces have a lot more game features that are obviously in common with each other.
#
# ### Comparison with random guessing
# If we randomly pick 12 games out of 20K and don't care about the order within that list of 12, the probability of picking the 12 games that we dropped is related to the [hypergeometric distribution](https://en.wikipedia.org/wiki/Hypergeometric_distribution) and works out 12 / 20K. Note the exact number of unique games in the Steam store changed between when I first created my model and when I created this example.
# Baseline: probability that 12 uniform random picks cover a held-out game.
print('Chance of picking the 12 dropped games by random guessing:', 12./len(unique_games))
print('which is', round(true_model_recall/(12./len(unique_games))), 'times worse than my model')
# ## The item similarity matrix
# The model item embeddings are vectors that represent each game. (These are the things that the matrix factorization model fitting figured out). We take the dot product of this matrix by its transpose, normalize, and voila, there is a matrix of similarities between games.
# Cosine similarity between every pair of game embeddings: take the Gram
# matrix of the embeddings, then divide each entry by both row and column norms.
raw_scores = model.item_embeddings.dot(model.item_embeddings.T)
norms = np.sqrt(np.diagonal(raw_scores))[np.newaxis, :]
game_similarity_matrix = raw_scores / norms / norms.T
# ## The cold start problem
# One major drawback of collaborative filtering is that if a user or game isn't in the interactions matrix, the model has no way to make recommendations. That's why recommenders still need things like game features (developer studio, genre, tags, etc.) and user features (games owned, demographics, etc.).
#
# ### New games
# My model never recommends any bright, shiny, brand new games. If I were to retrain the model every week (which I would definitely set up if I had another 2 weeks to work on this), then I would start to pick up the new games, but they won't show up right away. If that's the kind of recommendations you want (i.e., of the games that came out in the last, say, month, which ones are most relevant to me as a user?), you are in luck because that is exactly what the Steam store already does, or at least, is trying to do.
#
# ### New users
# For a brand new user, I show them the most popular games by number of owners (see list above), but 'new user' in this context doesn't only mean brand new users who don't own any games. It means any user who isn't in the interactions matrix. My app works for any Steam user who owns games, which means I need some information about the user. Specifically, I use the games they own and how many hours they have played each game.
# ## API call for user information
#
# This example uses my Steam vanityurl (which has to be set by the user in their Steam settings - just having a Steam account name is not enough!), but the app can also use the 17-digit Steam user ID.
# +
def convert_input_to_userid(input_id):
    """
    Take user input from app (Steam user ID or vanity URL) and output Steam user ID for further API calls.

    If the vanity-URL lookup fails (HTTP error or no 'steamid' in the
    response), the input is returned unchanged on the assumption that it is
    already a 17-digit Steam ID.
    """
    # Bug fix: HTTPError was referenced without being imported (it lives in
    # urllib.error, and only Request/urlopen were imported), so a failed
    # request raised NameError instead of falling through to `return input_id`.
    from urllib.error import HTTPError
    req = Request('http://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/?key=%s&vanityurl=%s'%(api_key, input_id))
    try:
        data_raw = urlopen(req).read()
    except HTTPError:
        return input_id
    data_json = json.loads(data_raw)
    try:
        return int(data_json['response']['steamid'])
    except KeyError:
        return input_id
def get_user_games(user_id):
    """
    Take Steam ID and make an API call to return the user's owned games and hours played.

    Returns an empty list when the request fails or the response has no
    'games' key (e.g. a private profile).
    """
    req = Request('http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?key=%s&steamid=%s&format=json&include_played_free_games=True&include_appinfo=True'%(api_key, user_id))
    try:
        data_raw = urlopen(req).read()
        data_json = json.loads(data_raw)
        return data_json['response']['games']
    except (OSError, ValueError, KeyError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
        # longer swallowed. OSError covers URLError/HTTPError, ValueError covers
        # malformed JSON, KeyError covers responses without a 'games' entry.
        return []
# Example lookup using a Steam vanity URL name rather than a numeric ID.
example_steam_urlname = 'elizabethferriss'
user_id = convert_input_to_userid(example_steam_urlname)
user_game_info = get_user_games(user_id)
print('My games')
print(user_game_info)
# -
# ## Rank user's games based on hours played
# Rank the user's games by total playtime, most-played first.
user_game_ids = [app['appid'] for app in user_game_info]
# NOTE(review): the source field is 'playtime_forever' but the column is named
# 'hours_played' — confirm the unit against the Steam API docs (it may be minutes).
user_hours_played = [app['playtime_forever'] for app in user_game_info]
userdf = pd.DataFrame({'appid': user_game_ids, 'hours_played' : user_hours_played})
userdf = userdf.sort_values(by='hours_played', ascending=False)
userdf['game_name'] = [gameid_to_name[gameid] for gameid in userdf.appid]
user_game_ids = userdf.appid.values
user_hours_played = userdf.hours_played.values
userdf.head()
# ## Make recommendations based on the user's most-played games
# For each game, get the column in game similarity matrix for the user's most-played game and sort.
#
# The recommendations here are much different from the ones on the actual app because here I'm only using a very small selection of users to train my model.
# +
def idx_to_recs(game_idx):
    """Return all game names ranked by similarity to *game_idx*, excluding games the user already owns."""
    similarity_scores = game_similarity_matrix[game_idx]
    recs = pd.DataFrame({'game_idx': list(idx_to_name.keys()), 'scores': similarity_scores})
    recs = recs.sort_values(by='scores', ascending=False)
    recs['gameID'] = [idx_to_gameid[idx] for idx in recs.game_idx]
    recs['games'] = [idx_to_name[idx] for idx in recs.game_idx]
    owned = recs.gameID.isin(user_game_ids)
    recs = recs[~owned]  # filter out games already owned
    return recs['games'].values
# For each of the user's 10 most-played games, print 8 similar games,
# tracking what has already been shown so nothing is recommended twice.
nrecgroups = 10
nrecs_per_group = 8
games_already_recommended = []
for n in range(nrecgroups):
    user_gameid= user_game_ids[n]
    print(' People who own', gameid_to_name[user_gameid], 'also own:')
    recs = idx_to_recs(gameid_to_idx[user_gameid])
    recs = [rec for rec in recs if rec not in games_already_recommended] # don't recommend anything twice
    for rec in recs[0:nrecs_per_group]:
        games_already_recommended.append(rec)
        print(rec)
    print()
| Playful's Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title:generic,gcp"
# # E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Tensorboard
#
# <table align="left">
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/ml_ops_stage2/get_started_vertex_tensorboard.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# <td>
# <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/ml_ops_stage2/get_started_vertex_tensorboard.ipynb">
# Open in Google Cloud Notebooks
# </a>
# </td>
# </table>
# <br/><br/><br/>
# + [markdown] id="overview:mlops"
# ## Overview
#
#
# This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Tensorboard.
# + [markdown] id="objective:mlops,stage2,get_started_vertex_tensorboard"
# ### Objective
#
# In this tutorial, you learn how to use `Vertex TensorBoard` when training with `Vertex AI`.
#
# This tutorial uses the following Google Cloud ML services:
#
# - `Vertex TensorBoard`
# - `Vertex Training`
#
# The steps performed include:
#
# - Create a TensorBoard callback when training a model.
# - Using Tensorboard with locally trained model.
# - Using Vertex TensorBoard with Vertex Training.
# + [markdown] id="recommendation:mlops,stage2,vertex,tensorboard"
# ### Recommendations
#
# When doing E2E MLOps on Google Cloud, the following are best practices for visualizing your training with TensorBoard.
#
# #### Local TensorBoard
#
# Use the OSS version of TensorBoard, either command-line or daemon version, when doing ad-hoc training locally.
#
# #### Cloud TensorBoard
#
# Use the Tensorboard.dev, when doing training on the cloud -- unless you have a privacy issue.
#
# #### Experiments
#
# Use Vertex TensorBoard when you have a privacy issue or doing experiments to compare results for different experiment configurations.
# + id="install_aip:mbsdk,tensorboard"
import os
# Google Cloud Notebook images carry this metadata file; on them, pip must
# install into the user site with --user.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
# ! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG
# + id="install_tensorflow"
if os.environ["IS_TESTING"]:
# ! pip3 install --upgrade tensorflow $USER_FLAG
# + [markdown] id="restart"
# ### Restart the kernel
#
# Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="restart"
import os
# Skip the kernel restart when running under the automated test harness.
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="project_id"
# #### Set your project ID
#
# **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
# + id="set_project_id"
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# + id="autoset_project_id"
# Fall back to the active gcloud configuration when no project was entered.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
#
# Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
# + id="region"
# Vertex AI region used for all resources in this notebook (see notes above).
REGION = "us-central1" # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
# + id="timestamp"
from datetime import datetime
# Suffix appended to resource names to avoid collisions in shared projects.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="bucket:mbsdk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
#
# Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
# + id="bucket"
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
# + id="autoset_bucket"
# Derive a unique default bucket name when none was entered.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    # Fix: insert a separator so the default bucket reads
    # "<project>-aip-<timestamp>" instead of fusing the project id with "aip-".
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="create_bucket"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="validate_bucket"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="set_service_account"
# #### Service Account
#
# **If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.
# + id="set_service_account"
SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"}
# + id="autoset_service_account"
# Derive the service account from gcloud when none was entered.
if (
    SERVICE_ACCOUNT == ""
    or SERVICE_ACCOUNT is None
    or SERVICE_ACCOUNT == "[your-service-account]"
):
    # Get your GCP project id from gcloud
    # shell_output = !gcloud auth list 2>/dev/null
    # NOTE(review): the `gcloud auth list` magic above is commented out, so
    # `shell_output` below still holds the earlier `gcloud config list`
    # result — confirm the intended magic actually executes in the notebook.
    SERVICE_ACCOUNT = shell_output[2].strip()
    print("Service Account:", SERVICE_ACCOUNT)
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
# + id="import_aip:mbsdk"
import google.cloud.aiplatform as aip
# + [markdown] id="import_tf"
# #### Import TensorFlow
#
# Import the TensorFlow package into your Python environment.
# + id="import_tf"
import tensorflow as tf
# + [markdown] id="init_aip:mbsdk"
# ### Initialize Vertex SDK for Python
#
# Initialize the Vertex SDK for Python for your project and corresponding bucket.
# + id="init_aip:mbsdk"
# All subsequent Vertex SDK calls default to this project and staging bucket.
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
# + [markdown] id="accelerators:training,mbsdk"
# #### Set hardware accelerators
#
# You can set hardware accelerators for training.
#
# Set the variable `TRAIN_GPU/TRAIN_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:
#
# (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
#
# Otherwise specify `(None, None)` to use a container image to run on a CPU.
#
# Learn more [here](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators) hardware accelerator support for your region
# + id="accelerators:training,mbsdk"
if os.getenv("IS_TESTING_TRAIN_GPU"):
    # The test harness can override the GPU count via this environment variable.
    TRAIN_GPU, TRAIN_NGPU = (
        aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
        int(os.getenv("IS_TESTING_TRAIN_GPU")),
    )
else:
    # Default: one NVIDIA Tesla K80.
    TRAIN_GPU, TRAIN_NGPU = (aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
# + [markdown] id="container:training"
# #### Set pre-built containers
#
# Set the pre-built Docker container image for training.
#
# - Set the variable `TF` to the TensorFlow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15. The following list shows some of the pre-built images available:
#
#
# For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
# + id="container:training"
# Choose the pre-built training container image version.
if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2.1".replace(".", "-")
# Both halves of the original TF-major-version check selected the exact same
# image string, so that branch was dead code; only the GPU/CPU choice matters.
if TRAIN_GPU:
    TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
    TRAIN_VERSION = "tf-cpu.{}".format(TF)
# Image registry host is derived from the region's continent prefix.
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
    REGION.split("-")[0], TRAIN_VERSION
)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
# + [markdown] id="machine:training"
# #### Set machine type
#
# Next, set the machine type to use for training.
#
# - Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for for training.
# - `machine type`
# - `n1-standard`: 3.75GB of memory per vCPU.
# - `n1-highmem`: 6.5GB of memory per vCPU
# - `n1-highcpu`: 0.9 GB of memory per vCPU
# - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
#
# *Note: The following is not supported for training:*
#
# - `standard`: 2 vCPUs
# - `highcpu`: 2, 4 and 8 vCPUs
#
# *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
# + id="machine:training"
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
    # The test harness can override the machine family via this variable.
    MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
    MACHINE_TYPE = "n1-standard"
VCPU = "4"
# e.g. "n1-standard-4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
# + [markdown] id="tensorboard"
# ## Training with TensorBoard
#
# Tensorboard provides the means to visualize your training in-real time and to visualize the results (metrics).
#
# You can use Tensorboard in conjunction with local training, cloud training and with `Vertex Training`, which is referred to as `Vertex TensorBoard`
# + [markdown] id="tensorboard_local"
# ### Local training with TensorBoard
#
# When using TensorBoard with TF.Keras training locally or on the cloud, you do the following steps:
#
# 1. Create a TensorBoard callback.
# 2. Pass the callback to the `fit()` method, specifying a local directory to write the tensorboard logs to.
# 3. Upload the log directory to TensorBoard.
#
# Learn more about [TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard).
# + id="tensorboard_local"
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten
# Directory the TensorBoard callback writes event logs into.
LOG_DIR = "./logs"
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel values to [0, 1].
x_train = (x_train / 255.0).astype(np.float32)
x_test = (x_test / 255.0).astype(np.float32)
# Small fully-connected classifier for MNIST digits.
model = Sequential(
    [
        Flatten(),
        Dense(512, activation="relu"),
        Dense(512, activation="relu"),
        Dense(10, activation="softmax"),
    ]
)
model.compile(
    loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
# The callback streams per-epoch metrics to LOG_DIR during fit().
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=LOG_DIR)
history = model.fit(x_train, y_train, epochs=4, batch_size=32, callbacks=[tensorboard])
# ! ls {LOG_DIR}/train/
# + [markdown] id="tensorboard_upload:dev"
# ### Uploading and sharing TensorBoard logs using `tensorboard dev`
#
# You can upload your TensorBoard logs and share with others using `tensorboard dev` command. Once uploaded, a URL is returned to open up the TensorBoard instance in a browser for visualizing.
#
# *Note:* Your TensorBoard instance is publicly visible.
#
# *Note:* In this example, while running within a notebook, the command will freeze since it is waiting for an interactive yes/no input. You can kill the command with a Ctrl C or kernel interrupt.
#
# Learn more about [What is TensorBoard.dev](https://tensorboard.dev/).
# + id="tensorboard_upload:dev"
# ! tensorboard dev upload --logdir {LOG_DIR} \
# --name "Simple experiment with MNIST" \
# --description "Training results" \
# --one_shot
# + [markdown] id="create_vertex_tensorboard"
# ### Uploading TensorBoard logs using Vertex TensorBoard
#
# You can upload your TensorBoard logs by first creating a TensorBoard instance and then using the `tb-gcp-uploader` command to upload the logs. Once uploaded, the command will return a URL for connecting to the TensorBoard instance via the browser.
#
# Learn more about [TensorBoard overview](https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview).
# + id="create_vertex_tensorboard"
TENSORBOARD_DISPLAY_NAME = "example"
# Create a managed Vertex TensorBoard instance; logs are uploaded to it below
# using its fully qualified resource name.
tensorboard = aip.Tensorboard.create(display_name=TENSORBOARD_DISPLAY_NAME)
tensorboard_resource_name = tensorboard.gca_resource.name
print("TensorBoard resource name:", tensorboard_resource_name)
# + [markdown] id="tensorboard_upload:tb-gcp-uploader"
# Upload the logs with the command `tb-gcp-uploader`, with the following arguments:
#
# --tensorboard_resource_name: The resource name of the TensorBoard instance.
#
# --logdir: location of the TensorBoard logs.
#
# --experiment_name: name for the experiment (training run).
#
#
# Once uploaded retrieve the URL to the TensorBoard instance to visualize the training/results.
# + id="tensorboard_upload:tb-gcp-uploader"
# Reuse an existing experiment name if one is already defined in this session.
if 'EXPERIMENT_NAME' not in globals():
    EXPERIMENT_NAME='example-' + TIMESTAMP
# output = !tb-gcp-uploader --tensorboard_resource_name={tensorboard_resource_name} \
# --logdir={LOG_DIR} \
# --experiment_name={EXPERIMENT_NAME} --one_shot=True
# NOTE(review): the uploader magic above is commented out, so `output` below
# relies on a previously bound shell result — confirm the magic actually runs
# in the notebook before parsing its second line for the URL.
url = output[1].split(' ')[-1]
print(url)
from IPython.core.display import display, HTML
display(HTML("<a href='" + url + "'>click here for TensorBoard instance</a>"))
# + [markdown] id="tensorboard_customjob"
# ### CustomTrainingJob training with Vertex TensorBoard
#
# To use Vertex TensorBoard in conjunction with custom training with Vertex training, you make the following modifications:
#
# **Python Training Script**:
#
# 1. Get the value of the environment variable `AIP_TENSORBOARD_LOG_DIR`. This is set by Vertex Training service.
# 2. Create a TensorBoard callback with the log_dir parameter set to the value of `AIP_TENSORBOARD_LOG_DIR`.
#
# **CustomTrainingJob**:
#
# 1. On the `run()` method, add two additional parameters:
#
# - `tensorboard`: The resource name of the Vertex TensorBoard instance.
# - `service_account`: The service account with permissions to access Cloud Storage and Vertex TensorBoard.
# + [markdown] id="examine_training_package"
# ### Examine the training package
#
# #### Package layout
#
# Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
#
# - PKG-INFO
# - README.md
# - setup.cfg
# - setup.py
# - trainer
# - \_\_init\_\_.py
# - task.py
#
# The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
#
# The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).
#
# #### Package Assembly
#
# In the following cells, you will assemble the training package.
# + id="examine_training_package"
# Make folder for Python training script
# ! rm -rf custom
# ! mkdir custom
# Add package information
# ! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
# ! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
# ! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: example image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: <EMAIL>.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
# ! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
# ! mkdir custom/trainer
# ! touch custom/trainer/__init__.py
# + [markdown] id="taskpy_contents:mnist,tensorboard"
# #### Task.py contents
#
# In the next cell, you write the contents of the custom training script task.py. In summary:
#
# - Parse the command line arguments for the training configuration and hyperparameter settings.
# - Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
# - Get the resource name of the Vertex TensorBoard instance, and if not specified, then from the environment variable `AIP_TENSORBOARD_LOG_DIR`.
#
# - Load and preprocess the MNIST dataset.
# - Build and compile a DNN model.
# - Create a training callback to the TensorBoard instance.
# - Train the model with the `fit()` method with the callback to the TensorBoard instance.
# + id="taskpy_contents:mnist,tensorboard"
# %%writefile custom/trainer/task.py
# Single Instance Training for MNIST
#
# Training entry point written to custom/trainer/task.py by the %%writefile
# magic above. It parses hyperparameters and Vertex-provided environment
# variables, then trains a small DNN on MNIST (see the functions below).
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.datasets import mnist
import numpy as np
parser = argparse.ArgumentParser()
# Where to save model artifacts; Vertex Training sets AIP_MODEL_DIR when the
# location is not passed explicitly on the command line.
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=1e-4, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int,
                    help='Number of epochs.')
# NOTE(review): --steps is parsed but never passed to model.fit() below —
# confirm whether steps_per_epoch was intended.
parser.add_argument('--steps', dest='steps',
                    default=200, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--batch_size', dest='batch_size',
                    default=20, type=int,
                    help='Batch size.')
# Log directory for the TensorBoard callback; Vertex Training sets
# AIP_TENSORBOARD_LOG_DIR when the job is run with a tensorboard resource.
parser.add_argument('--tensorboard_log_dir', dest='tensorboard_log_dir',
                    default=os.getenv('AIP_TENSORBOARD_LOG_DIR'), type=str,
                    help='Log directory for TensorBoard instance')
args = parser.parse_args()
def make_datasets():
    """Load MNIST and rescale pixel intensities from [0, 255] to [0, 1] float32."""
    train_split, test_split = mnist.load_data()
    train_images, train_labels = train_split
    test_images, test_labels = test_split
    train_images = (train_images / 255.0).astype(np.float32)
    test_images = (test_images / 255.0).astype(np.float32)
    return (train_images, train_labels), (test_images, test_labels)
(x_train, y_train), _ = make_datasets()
def build_and_compile_model():
    """Build and compile a two-hidden-layer DNN classifier for MNIST.

    Uses sparse categorical cross-entropy with Adam at the learning rate
    taken from the parsed command-line arguments.
    """
    hidden = [Dense(512, activation='relu') for _ in range(2)]
    dnn = Sequential([Flatten()] + hidden + [Dense(10, activation='softmax')])
    dnn.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(args.lr),
        metrics=['accuracy'],
    )
    return dnn
model = build_and_compile_model()
def train_model():
    """Fit the global model on MNIST, streaming metrics to TensorBoard.

    The TensorBoard callback writes to the log dir supplied on the command
    line (or via AIP_TENSORBOARD_LOG_DIR). Returns the Keras History object.
    """
    tb_callback = tf.keras.callbacks.TensorBoard(log_dir=args.tensorboard_log_dir)
    return model.fit(
        x_train,
        y_train,
        epochs=args.epochs,
        batch_size=args.batch_size,
        callbacks=[tb_callback],
    )
history = train_model()
model.save(args.model_dir)
# + [markdown] id="tarball_training_script"
# #### Store training script on your Cloud Storage bucket
#
# Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
# + id="tarball_training_script"
# ! rm -f custom.tar custom.tar.gz
# ! tar cvf custom.tar custom
# ! gzip custom.tar
# ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_example.tar.gz
# + [markdown] id="create_custom_training_job:mbsdk,no_model"
# ### Create and run custom training job
#
#
# To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.
#
# #### Create custom training job
#
# A custom training job is created with the `CustomTrainingJob` class, with the following parameters:
#
# - `display_name`: The human readable name for the custom training job.
# - `container_uri`: The training container image.
# - `requirements`: Package requirements for the training container image (e.g., pandas).
# - `script_path`: The relative path to the training script.
# + id="create_custom_training_job:mbsdk,no_model"
# Define (but do not yet run) the custom training job: the training script is
# uploaded and executed inside the TRAIN_IMAGE container with the extra pip
# requirements installed.
job = aip.CustomTrainingJob(
    display_name="example_" + TIMESTAMP,
    script_path="custom/trainer/task.py",
    container_uri=TRAIN_IMAGE,
    requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"],
)
print(job)
# + [markdown] id="prepare_custom_cmdargs"
# ### Prepare your command-line arguments
#
# Now define the command-line arguments for your custom training container:
#
# - `args`: The command-line arguments to pass to the executable that is set as the entry point into the container.
# - `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts.
# - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
# - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
# - `"--epochs=" + EPOCHS`: The number of epochs for training.
# - `"--steps=" + STEPS`: The number of steps per epoch.
# + id="prepare_custom_cmdargs"
# Command-line arguments passed to trainer/task.py. With DIRECT=True the model
# artifact location is passed explicitly via --model-dir; otherwise the service
# supplies it through the AIP_MODEL_DIR environment variable.
MODEL_DIR = f"{BUCKET_NAME}/{TIMESTAMP}"
EPOCHS = 20
STEPS = 100
DIRECT = True
CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--steps=" + str(STEPS),
]
if DIRECT:
    CMDARGS = ["--model-dir=" + MODEL_DIR] + CMDARGS
# + [markdown] id="run_custom_job:mbsdk,no_model,tensorboard"
# #### Run the custom training job
#
# Next, you run the custom job to start the training job by invoking the method `run`, with the following parameters:
#
# - `args`: The command-line arguments to pass to the training script.
# - `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).
# - `machine_type`: The machine type for the compute instances.
# - `accelerator_type`: The hardware accelerator type.
# - `accelerator_count`: The number of accelerators to attach to a worker replica.
# - `base_output_dir`: The Cloud Storage location to write the model artifacts to.
# - `tensorboard`: The resource name of the Vertex TensorBoard instance.
# - `service_account`: The service account with permissions to access Cloud Storage and Vertex TensorBoard.
# - `sync`: Whether to block until completion of the job.
# + id="run_custom_job:mbsdk,no_model,tensorboard"
# Launch the custom training job. The two branches are identical except that
# GPU workers attach an accelerator; both stream logs to the Vertex
# TensorBoard instance created earlier and block until completion (sync=True).
if TRAIN_GPU:
    job.run(
        args=CMDARGS,
        replica_count=1,
        machine_type=TRAIN_COMPUTE,
        accelerator_type=TRAIN_GPU.name,
        accelerator_count=TRAIN_NGPU,
        base_output_dir=MODEL_DIR,
        tensorboard=tensorboard_resource_name,
        service_account=SERVICE_ACCOUNT,
        sync=True,
    )
else:
    job.run(
        args=CMDARGS,
        replica_count=1,
        machine_type=TRAIN_COMPUTE,
        base_output_dir=MODEL_DIR,
        tensorboard=tensorboard_resource_name,
        service_account=SERVICE_ACCOUNT,
        sync=True,
    )
# Remember where the artifacts were written for the later deployment steps.
model_path_to_deploy = MODEL_DIR
# + [markdown] id="tensorboard_customjob_view"
# ### View your TensorBoard instance
#
# To view a Vertex TensorBoard associated with a training job, navigate to the Training page in the Vertex AI section of the Google Cloud Console. Click the training job to view the Training Detail page, then click the Open TensorBoard button on the top of the page.
#
# Alternatively, you can navigate to the Experiments tab and view the list of all experiments. Your experiment will have the same name as the training job.
# + [markdown] id="cleanup:mbsdk"
# # Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - AutoML Training Job
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket
# + id="cleanup:mbsdk"
# Best-effort cleanup: each resource is deleted inside its own try/except so
# that one failure (e.g. the resource was never created in this session) does
# not stop the remaining deletions.
delete_all = True
if delete_all:
    # Delete the dataset using the Vertex dataset object
    try:
        if "dataset" in globals():
            dataset.delete()
    except Exception as e:
        print(e)
    # Delete the model using the Vertex model object
    try:
        if "model" in globals():
            model.delete()
    except Exception as e:
        print(e)
    # Delete the endpoint using the Vertex endpoint object
    try:
        if "endpoint" in globals():
            endpoint.delete()
    except Exception as e:
        print(e)
    # Delete the AutoML or Pipeline training job
    try:
        if "dag" in globals():
            dag.delete()
    except Exception as e:
        print(e)
    # Delete the custom training job
    try:
        if "job" in globals():
            job.delete()
    except Exception as e:
        print(e)
    # Delete the batch prediction job using the Vertex batch prediction object
    try:
        if "batch_predict_job" in globals():
            batch_predict_job.delete()
    except Exception as e:
        print(e)
    # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
    try:
        if "hpt_job" in globals():
            hpt_job.delete()
    except Exception as e:
        print(e)
    # Finally remove the staging bucket (jupytext-escaped shell command; only
    # valid when executed as a notebook cell).
    if "BUCKET_NAME" in globals():
        # ! gsutil rm -r $BUCKET_NAME
| notebooks/community/ml_ops/stage2/get_started_vertex_tensorboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Purpose = get population for any 10 countries using latest population numbers
# https://pypi.org/project/world-bank-data/
# https://github.com/mwouts/world_bank_data/blob/master/examples/A%20sunburst%20plot%20of%20the%20world%20population.ipynb
# IMPORTANT install the World Bank Data python package....pip install world_bank_data --upgrade
# -
# Exploratory scratch cells for pulling World Bank population / GNI data.
import pandas as pd
import world_bank_data as wb
pd.set_option('display.max_rows', 6)
# +
import pandas as pd
# NOTE(review): these package-relative imports cannot work in a notebook —
# there is no enclosing package, so they raise ImportError. Likely copied from
# the world_bank_data source tree; confirm and remove.
from .request import wb_get_table
from .search import search
import wbdata
import pandas
import matplotlib.pyplot as plt
#set up the countries I want
countries = ["CL","UY","HU"]
#set up the indicator I want (just build up the dict if you want more than one)
indicators = {'NY.GNP.PCAP.CD':'GNI per Capita'}
#grab indicators above for countries above and load into data frame
df = wbdata.get_dataframe(indicators, country=countries, convert_date=False)
#df is "pivoted", pandas' unstack function helps reshape it into something plottable
dfu = df.unstack(level=0)
# a simple matplotlib plot with legend, labels and a title
dfu.plot();
plt.legend(loc='best');
plt.title("GNI Per Capita ($USD, Atlas Method)");
plt.xlabel('Date'); plt.ylabel('GNI Per Capita ($USD, Atlas Method');
# -
#population = wb.get_series('SP.POP.TOTL', mrv=1)
# Latest (mrv=1) total population per country, indexed by country name.
population = wb.get_series('SP.POP.TOTL', id_or_value='countries', simplify_index=True, mrv=1)
population[2:5]
# NOTE(review): `countries` is a plain Python list here, so the DataFrame-style
# indexing below raises TypeError; this line appears to expect the countries
# *table* from wb.get_countries() — confirm intent.
df = countries[['region', 'name']].rename(columns={'name': 'country'}).loc[countries.region != 'Aggregates']
df['population'] = population
df
countries = ["CL","UY","HU"]
#indicators = {'NY.GNP.PCAP.CD':'GNI per Capita'}
indicators = {'SP.POP.TOTL'}
#df = wb.get_dataframe(indicators, country=countries, convert_date=False)
#data = {'Indicators': ['SP.POP.TOTL'], 'countries': ["CL","UY","HU"]}
#df = pd.DataFrame(data, columns=['Indicator', 'countries'])
#df = pd.DataFrame(boston.data, columns=boston.feature_names)
# NOTE(review): `data_dates` is never defined in this notebook (NameError), and
# this call signature matches wbdata.get_dataframe rather than world_bank_data —
# verify which library was intended.
data = wb.get_dataframe({'SP.POP.TOTL':'values'},
                        country=('BGD', 'SAU'),
                        data_date=data_dates,
                        convert_date=False, keep_levels=True)
df = pd.DataFrame(data, columns=['Indicator', 'countries'])
df.head()
population = wb.get_series('SP.POP.TOTL', mrv=1)
population
| test/test2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Problem 1
# Given: $N \sim Poisson(\lambda)$ and $X_1, \dots, X_n \sim \vec{\pi}$
#
# $X_k(t)$ is a continuous-time Markov chain with $X_k(0) = X_k$
# $N_t(a) = |\{k : X_k(t) = a\}|$
#
# i.e. $N_t$ is the number of visits to state $a$ in time $t$.
#
# $\sum_a\pi(a)Q_{ab}=0$ for each $b$ with the constraint $\sum_a\pi(a)=1$
#
#
# $\sum_a\pi(a)Q_{ab}=0$ $\implies$ $\vec{\pi}^TQ=0$ $\implies$
#
# $$
# \begin{align*}
# \vec{\pi}^TQ&=0\\
# \Longleftrightarrow \vec{\pi}^TQ^n&=0\ \ \forall n \geq 1\\
# \Longleftrightarrow \sum_{n\geq 1}\vec{\pi}\frac{t^n}{n!}Q^n &=0 \ \ \forall t \geq 0\\
# \Longleftrightarrow \vec{\pi}\sum_{n\geq 0}\frac{t^n}{n!}Q^n &=\vec{\pi}\\
# \Longleftrightarrow \vec{\pi}P &=\vec{\pi}\\
# \Longleftrightarrow \vec{\pi}\ \text{is a stationary distribution}
# \end{align*}
# $$
#
# Now, $P(X_k(t)=a)=\pi(a)$ and $N_t(a) = |\{k : X_k(t) = a\}|$ $\implies$ $N_t(a)\mid N \sim Binom(N, \pi(a))$, and since
# $N \sim Poisson(\lambda)$, Poisson thinning gives $\boxed{N_t(a) \sim Poisson(\lambda \pi(a))}$
# ## Problem 2
# +
# %matplotlib inline
from __future__ import division
import pandas as pd
import matplotlib
import itertools
matplotlib.rcParams['figure.figsize'] = (16,12)
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
def propose(S):
    """Metropolis proposal: reverse a random contiguous segment of permutation S.

    Draws two cut points uniformly (with replacement), orders them, and
    returns a copy of S with the inclusive segment between them reversed.
    """
    picks = np.sort(np.random.choice(len(S), 2))
    lo, hi = picks[0], picks[1]
    candidate = np.copy(S)
    candidate[lo:hi + 1] = candidate[lo:hi + 1][::-1]
    return candidate
def count_cycles(S):
    """Return the number of cycles of the permutation S (0-indexed).

    S maps position i to S[i]; the result counts disjoint cycles, including
    fixed points (so the identity of length n has n cycles).

    BUG FIX: the previous implementation only ever walked the cycle containing
    index 0 (plus any leading fixed points) and re-traversed it until it had
    taken len(S) steps, so permutations with more than one non-trivial cycle
    were miscounted (e.g. [1, 0, 3, 2] returned 1 instead of 2). Since the
    MCMC above weights states by c(s)**alpha, that bug skewed the sampled
    distribution. This version visits every index exactly once.
    """
    n = len(S)
    seen = [False] * n
    n_cycles = 0
    for start in range(n):
        if not seen[start]:
            # New cycle discovered: follow it, marking every member visited.
            n_cycles += 1
            idx = start
            while not seen[idx]:
                seen[idx] = True
                idx = S[idx]
    return n_cycles
# -
# Permutation sizes to simulate and the exponent in the target pi(s) ~ c(s)**alpha.
N = [2,3,4, 100]
alpha = 3
# Sanity checks for the cycle counter on small permutations.
assert count_cycles([0,1]) == 2
assert count_cycles([0,2,1]) == 2
assert count_cycles([1,0]) == 1
# +
N_iterations = 10000
def theoretical(S, alpha, denom):
    """Exact target probability pi(S) = c(S)**alpha / denom for permutation S."""
    n_cycles = count_cycles(S)
    return n_cycles**alpha/denom
def run(n, show=True):
    """Metropolis sampler over permutations of size n with pi(s) ~ c(s)**alpha.

    Starts from the identity, proposes segment reversals, and accepts with
    probability min(1, (c(new)/c(old))**alpha). Returns a DataFrame of visit
    frequencies per state; when show=True it also tabulates the exact target
    probability (feasible only for small n, since the denominator sums over
    all n! permutations) and the percentage error.
    """
    oldS = np.arange(n)
    old_n_cycles = count_cycles(oldS)
    count_dict = {}
    if show:
        # Exact normalizing constant: sum of c(s)**alpha over all permutations.
        denom = sum([count_cycles(x)**alpha for x in itertools.permutations(range(n))])
    for i in range(N_iterations):
        proposedS = propose(oldS)
        new_n_cycles = count_cycles(proposedS)
        # Metropolis ratio; the reversal proposal is symmetric, so only the
        # target densities appear.
        pi_ab = new_n_cycles**alpha/(old_n_cycles**alpha)
        q = min(1,pi_ab)
        if q>= np.random.uniform():
            oldS = proposedS
            old_n_cycles = new_n_cycles
        # Record the (possibly unchanged) current state, keyed by its
        # 1-indexed string representation, e.g. "[2,1,3]".
        tkey = ','.join([str(x+1) for x in oldS.tolist()])
        key="["+tkey+"]"
        if key not in count_dict:
            if show:
                count_dict[key] = [0,0,0]
                count_dict[key][1] = theoretical(oldS,alpha,denom)
                count_dict[key][2] = old_n_cycles
            else:
                count_dict[key] = [0,0]
                count_dict[key][1] = old_n_cycles
        count_dict[key][0]+=1
    df = pd.DataFrame(count_dict)
    df=df.transpose()
    if show:
        df.columns=[r'Simulated $\pi(s)$', 'Theoretical', 'c(s)']
        df[r'Simulated $\pi(s)$'] = df[r'Simulated $\pi(s)$']/N_iterations
        df['Percentage Error'] = 100*(df[r'Simulated $\pi(s)$']/df['Theoretical']-1)
    else:
        df.columns=[r'Simulated $\pi(s)$', 'c(s)']
        df[r'Simulated $\pi(s)$'] = df[r'Simulated $\pi(s)$']/N_iterations
    df.index.name='State'
    return df
# -
# ## n=2
df = run(N[0])
df
# ## n=3
#
df = run(N[1])
df
# ## n=4
count_dict = run(N[2])
count_dict
# ## N=100
df = run(N[3], show=False)
# $$\sum_{s \in S_a}\pi(s)c(s)=E[c(s)]$$
#
# and similarly,
#
# $$\sum_{s \in S_a}\pi(s)c^2(s)=E[c^2(s)]=Var(c(s))+E^2[c(s)]$$
# First and second moments of c(s) under the simulated distribution
# (frequency-weighted sums over the visited states).
expectation = sum(df[r'Simulated $\pi(s)$']*df['c(s)'])
expectation2 = sum(df[r'Simulated $\pi(s)$']*df['c(s)']*df['c(s)'])
# Method-of-moments comparison values from the raw visited-state sample.
t_expectation = np.mean(df['c(s)'])
t_expectation2 = np.var(df['c(s)'])+np.mean(df['c(s)'])**2
print 'Simulated E[c(s)] = {}\t\t Theoretical(M0M) E[c(s)] = {}'.format(expectation, t_expectation)
print 'Simulated E[c^2(s)] = {}\t\t Theoretical(M0M) E[c^2(s)] = {}'.format(expectation2, t_expectation2)
# I use method of moments to calculate the Theoretical values. They seem to be in sync with the simulated values. The estimates seem to be in sync even though MOM is just a first approximation because the sample size is large enough to capture the dynamics of the population distribution.
cycles = df['c(s)']
# NOTE(review): hist(..., normed=True) was removed in matplotlib 3.x in favor
# of density=True; this runs only on the old Python 2 stack this notebook targets.
plt.hist(cycles, normed=True)
# ## Problem 3
# ## Part (A)
def run():
    """Simulate transposon spread in a Wright-Fisher-style population.

    N individuals each carry a list of transposon insertion positions on a
    chromosome of length 3e9. Each generation every copy transposes with
    probability mu (moving to a random position and leaving a new copy), then
    N offspring are formed by sampling two parents and inheriting each
    parental copy independently with probability 1/2.

    Returns a list of (total transposon count, number of "common" positions
    appearing in at least N*mu copies) per generation.
    """
    N = 1000
    N_iterations = 200
    chrom_length = 3*(10**9)
    transposon_length = 3*1000
    mu = 0.05
    t_positions = []
    # Seed a single transposon at a random position in a random individual.
    # NOTE(review): np.random.random_integers is deprecated (removed in newer
    # NumPy) — np.random.randint is the modern equivalent.
    n_initial = np.random.random_integers(N-1)
    x_initial = np.random.random_integers(chrom_length-1)
    offspring_positions = []
    all_positions = [[] for t in range(N)]
    all_positions[n_initial].append(x_initial)
    all_t_count =[]
    for nn in range(N_iterations):
        # Transposition phase: each copy jumps with probability mu and also
        # spawns one additional copy at a fresh random position.
        for i in range(N):
            indicator = np.random.binomial(1,mu,len(all_positions[i]))
            temp_indices = []
            for ind, ind_value in enumerate(indicator):
                if ind_value == 1:
                    temp_indices.append(ind)
            for j in temp_indices:
                x_temp = np.random.random_integers(chrom_length-1)
                all_positions[i][j] = x_temp
                all_positions[i].append(np.random.random_integers(chrom_length-1))
        # Reproduction phase: each offspring draws two random parents and
        # inherits each of their copies independently with probability 1/2.
        offspring_positions = [[] for t in range(N)]
        for j in range(N):
            y,z = np.random.random_integers(0,N-1,2)
            y_parent = np.random.binomial(1,0.5,len(all_positions[y]))
            z_parent = np.random.binomial(1,0.5,len(all_positions[z]))
            temp_y = []
            temp_z = []
            for index,value in enumerate(y_parent):
                if value>=1:
                    temp_y.append(all_positions[y][index])
            for index,value in enumerate(z_parent):
                if value>=1:
                    temp_z.append(all_positions[z][index])
            for t_y in temp_y:
                offspring_positions[j].append(t_y)
            for t_z in temp_z:
                offspring_positions[j].append(t_z)
        all_positions = offspring_positions
        # Per-generation tallies: total copy count and number of positions
        # present in at least N*mu copies across the population.
        count_t = 0
        count_x = []
        for p in range(N):
            count_t += len(all_positions[p])
            count_x.append(all_positions[p])
        # NOTE(review): count_x is a ragged list of per-individual lists;
        # np.unique only flattens it correctly on old NumPy versions — verify
        # before porting this notebook forward.
        survived_t = np.unique(count_x, return_counts=True)[1]
        all_t_count.append((count_t, len(survived_t[survived_t>=N*mu])))
    return all_t_count
all_t_count = run()
die_out_transposons = all_t_count
# +
# Summary plots for the non-spreading run stored in die_out_transposons:
# total copies, common copies, and the per-generation growth factor.
fig, axs = plt.subplots(2,2)
axs[0][0].plot([x[0] for x in die_out_transposons])
axs[0][0].set_title('No. of Transpososn v/s Generations')
axs[0][0].set_xlabel('Generations')
axs[0][0].set_ylabel('No. of Transpososn')
axs[0][1].plot([x[1] for x in die_out_transposons])
axs[0][1].set_title('No. of Common Transpososn v/s Generations')
axs[0][1].set_xlabel('Generations')
axs[0][1].set_ylabel('No. of Common Transposons')
# Growth factor N(t)/N(t-1); the epsilon avoids division by zero once the
# transposon count hits zero.
increasing_rate = []
for i in range(1,len(die_out_transposons)):
    increasing_rate.append(die_out_transposons[i][0]/(die_out_transposons[i-1][0]+0.000001))
axs[1][0].plot(increasing_rate)
axs[1][0].set_title('Increasing rate v/s Generations')
axs[1][0].set_xlabel('Generations')
axs[1][0].set_ylabel('Increasing Rate')
# -
# ### The above example shows one case when the "the transposon does not spread"
all_t_count = run()
nondie_out_transposons = all_t_count
# +
# Summary plots for the *spreading* run stored in nondie_out_transposons
# (produced by the second call to run() above).
fig, axs = plt.subplots(2,2)
axs[0][0].plot([x[0] for x in nondie_out_transposons])
axs[0][0].set_title('No. of Transpososn v/s Generations')
axs[0][0].set_xlabel('Generations')
axs[0][0].set_ylabel('No. of Transpososn')
# BUG FIX: the original cell plotted die_out_transposons in this panel and in
# the growth-rate panel below (copy-paste from the previous figure), so the
# "surviving transposons" and rate plots showed the non-spreading run's data.
axs[0][1].plot([x[1] for x in nondie_out_transposons])
axs[0][1].set_title('No. of Surviving Transpososn v/s Generations')
axs[0][1].set_xlabel('Generations')
axs[0][1].set_ylabel('No. of Common Transposons')
# Growth factor N(t)/N(t-1); the epsilon avoids division by zero if a
# generation ends up with no transposons.
increasing_rate = []
for i in range(1,len(nondie_out_transposons)):
    increasing_rate.append(nondie_out_transposons[i][0]/(nondie_out_transposons[i-1][0]+0.000001))
axs[1][0].plot(increasing_rate)
axs[1][0].set_title('Increasing rate v/s Generations')
axs[1][0].set_xlabel('Generations')
axs[1][0].set_ylabel('Increasing Rate')
# -
# ### The above example shows one case when the "the transposon does spread", with rate being exponential and the common transposons still being limited
# ## Part (B)
#
# Treating the total number of transposons $N(t)$ at any time $t$ to be a branching process, then $N(t+1) = \sum_{i=1}^{N(t)} W_{t,i}$ Where $W_{t,i}$ is the number of locations of the $i^{th}$ transposon in the offspring.
#
# Now consider $E[N_t]$
#
# Claim: $E[N_t] = (1+\mu)^t$
#
# Proof:
# With probability $\mu$ the transposon duplicates (one copy becomes two), and hence $W_{t,i}$, the number of locations of the $i^{th}$ transposon in the offspring, is a Poisson random variable with mean $1+\mu$
#
# $W_k \sim Poisson(1+\mu)$
# $$
# \begin{align*}
# E[N_t] = E[\sum_{i=1}^{N(t-1)} W_{t,i}] &= E[E[\sum_{i=1}^{n} W_{t,i}|N(t-1)=n]]\\
# &= E[N(t-1)] \times (1+\mu)\\
# &= N(1+\mu)^t
# \end{align*}
# $$
#
#
#
#
# Thus, the expected number of total transposons is an exponential.
#
# ## Part (C)
#
# $P(X>0) \leq EX$
#
# Consider $X_t$ as the total number of trasposon copies at location $x$ at generation $t$
# For each new generation, the new arrival at $x$ is a poisson process with mean = $2 \times \mu \times N(t) \times \frac{1}{L}$. Let $R(t)$ represent the new arrivals at x
#
# $N(t)$ represents the number of copies of the transposon surviving for $t$ generations.
# Then $E[N_1]=1+\mu$; by induction, $E[N_t] = (1+\mu)^t$
#
# Thus, $R(t) \sim \text{Poisson}(\frac{2\mu N(t)}{L})$
#
# Now Using a branching process model for number of transposon copies located at location $x$, the offspring mean number of offspring transposons is = $(1-\mu)*1 + \mu*2 = 1+\mu$
#
# Let $Z_{t,k}(u)$ = number of offspring copies of the $k^{th}$ transposon at $x$ that occurred at time $t$, inserted at time $u$ ($u \leq t$)
#
# Using branching process property, $E[Z_{t,k}(t+u)] = (1+\mu)^u$
#
# Then
#
# $$
# \begin{align*}
# EX_t &= \sum_{u \leq 0} \sum_{k=1}^{R(u)} Z_{u,k}(0)\\
# &= \sum_{u \leq 0}E[R(u)]E[Z_{u,1}(0)]\\
# &= \sum_{u \leq 0} (1+\mu)^t \frac{2 N \mu }{L} \times (1+\mu)^u \\
# &= \sum_{u\geq 0}(1+\mu)^t \frac{2 N \mu }{L(1+\mu)^u}\\
# &\approx \frac{2 \mu }{L}\times(1+\frac{1}{\mu})\\
# &= \frac{2}{L}(1+\mu)^{t+1}
# \end{align*}
# $$
#
# Thus, $P(X>0) \leq \frac{2}{L}(1+\mu)^{t+1}$
# ## Part (D)
# $\mu = 10^{-2}$
#
# $N = 10^7$
#
# For an individual $ EX = \frac{2}{L}(1+\mu)^{t+1} \times \frac{1}{N} = \frac{2}{NL}(1+\mu)^{t+1} $
#
# Now, $\frac{2}{NL}(1+\mu)^{t+1}=0.1L$ $\implies$ $(1+\mu)^{t+1}=0.1NL^2/2$
#
from math import log
# Part (D) parameters: population size, transposition rate, genome length (bp).
N = 10 ** 7
mu = 0.01
L = 3 * (10 ** 9)
# Solve (1 + mu)^(t + 1) = 0.1 * N * L^2 / 2 for t, i.e. the generation count
# at which the expected coverage reaches 10% of the genome.
t = log(0.1 * N * L ** 2 / 2) / log(1 + mu)
print(t)
# Thus it takes `t=5703` generations for transposons to cover 10% of the genome (ignoring the length of the transposon itself)
| 2015_Fall/MATH-578B/Homework5/Homework5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Teonancatl Penetration map
# ## Left Hemisphere
# + pycharm={"name": "#%%\n"}
import pathlib as pl
from nems_lbhb.penetration_map import penetration_map
import matplotlib.pyplot as plt
# + pycharm={"name": "#%%\n"}
# the anterior-posterior axis is placed with the knob to the left, i.e. higher values are more anterior (left), and
# lower values are most posterior (right)
# Site IDs grouped by recording phase; their concatenation is what gets mapped.
# mapping prior to viral injections
first_mapping = ['TNC025a', 'TNC026a', 'TNC027a'] # PEG#1 A1#1 PEG#2
# first pair of array recordings
first_arrays = ['TNC028a', 'TNC029a']
# mapping after array breakage, trying to find a good PEG spot
second_mapping = ['TNC031a', 'TNC032a', 'TNC033a', 'TNC034a', 'TNC035a']
sites = first_mapping + first_arrays + second_mapping # last batch added 2021-07-30
# Original landmark measurements
# landmarks = {}
# corrected to better align with corresponding penetrations
# NOTE(review): landmarks is currently empty and unused by the call below —
# confirm whether the corrected measurements were meant to be filled in.
landmarks = {}
# fig, coords = penetration_map(sites, equal_aspect=True, flip_X=True, flatten=False, landmarks=landmarks)
# Flattened 2-D projection of the penetration coordinates; flip_X/flip_YZ set
# the plot orientation described in the comment at the top of this cell.
fig, coords = penetration_map(sites, equal_aspect=True, flip_X=True,
                              flatten=True, flip_YZ=True,)
fig.axes[0].grid()
# saves the scatter
# mappath = pl.Path('/auto/data/lbhb/photos/Craniotomies/Teonancatl/TNC_LH_map.png')
# fig.savefig(mappath, transparent=True)
| reports/211015_TNC_left_penetration_map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import geopandas as gp
import matplotlib.pyplot as plt
from geopandas import GeoDataFrame
from shapely.geometry import Point
#Import Shape files
map_df_Area = gp.read_file("Maps Esri/Areas.shp")
map_df_District = gp.read_file("Maps Esri/Districts.shp")
map_df_Sector = gp.read_file("Maps Esri/Sectors.shp")
#Import and convert raw data to SHP POINTS
data_df = pd.read_csv('Maps Esri/levels.csv')
geometry = [Point(xy) for xy in zip(data_df.Lon, data_df.Lat)]
crs = {'init': 'epsg:4326'}
gdf = GeoDataFrame(data_df, crs=crs, geometry=geometry)
#SJOIN to add postcode data to raw
points_with_Area = gp.sjoin(gdf, map_df_Area, how="inner", op="intersects")
points_with_Area.rename(columns = {'name_right':'name'}, inplace = True)
points_with_District = gp.sjoin(gdf, map_df_District, how="inner", op="intersects")
points_with_District.rename(columns = {'name_right':'name'}, inplace = True)
points_with_Sector = gp.sjoin(gdf, map_df_Sector, how="inner", op="intersects")
points_with_Sector.rename(columns = {'name_right':'name'}, inplace = True)
#Run some counts for outputted raw data
count_Area = points_with_Area.groupby('name').count()
count_Area.drop(['geometry', 'Lat', 'index_right', 'name_left'], axis = 1)
count_Area.to_csv('count_Area.csv')
count_District = points_with_District.groupby('name').count()
count_District.drop(['geometry', 'Lat', 'index_right', 'name_left'], axis = 1)
count_District.to_csv('count_District.csv')
count_Sector = points_with_Sector.groupby('name').count()
count_Sector.drop(['geometry', 'Lat', 'index_right', 'name_left'], axis = 1)
count_Sector.to_csv('count_Sector.csv')
#Re Import tables to bypass inconsistancy with pandas groupby indexing
ct_area = pd.read_csv('count_Area.csv')
ct_dist = pd.read_csv('count_District.csv')
ct_sect = pd.read_csv('count_Sector.csv')
map_df_Area.head(100)
#Merge tables for plotting
merged_Area = map_df_Area.set_index('name').join(ct_area.set_index('name'))
points_with_Area.head()
count_Area.head()
variable = 'name_left'
vmin, vmax = 0, 350
fig, ax = plt.subplots(1, figsize=(125, 30))
ax.axis('off')
ax.set_title('Volume RQS 1 by district 2019', fontdict={'fontsize': '45', 'fontweight' : '5'})
sm = plt.cm.ScalarMappable(cmap='Greens', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm)
merged_Area.plot(column=variable, cmap='Greens', linewidth=0.4, ax=ax, edgecolor='0.8')
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 08 - Saving and loading the data in .npz format
#
# The .npz file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in .npy format
# Use [__np.savez__](https://numpy.org/doc/stable/reference/generated/numpy.savez.html) to save the data:
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the inflammation study data: one row per patient, one column per day.
data = np.loadtxt('../data/inflammation-01.csv', delimiter=',')
# -
filename = 'datainfo.npz'
# Save both the raw data and its per-day mean under named keys in one archive.
np.savez(filename, data=data, mean_daily=np.mean(data,0))
# Use [__np.load()__](https://numpy.org/doc/stable/reference/generated/numpy.load.html) to load it:
patient = np.load(filename)
# To check the keys in the loaded data:
list(patient)
patient['mean_daily']
# We can plot this data using [`matplotlib.pyplot.plot`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html):
plt.plot(patient['mean_daily'])
# Previous: [07 - K-means clustering](k_means.ipynb)<span style="float:right;">Next: [09 - Fancy indexing](fancy_indexing.ipynb)
| Day_1_Scientific_Python/numpys/savez.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyEI Overview
#
# PyEI is a Python package for performing ecological inference. It is meant to gather together in one place several approaches to ecological inference, to provide helpful features for plotting and reporting and results, and to integrate with other tools that facilitate model comparison and model checking.
#
# Start here if you have some familiarity with ecological inference (EI), and want an overview of how to use PyEI and its main features.
#
# If you are new to ecological inference, the Introduction notebook might be a better place to start.
#
# For a more detailed gallery of plotting functionality, see the Plotting notebook.
#
# For information about model checking and model comparison, see the Model Checking notebook.
# # Contents:
#
# - Loading data to use
# - Fitting a model for EI
# - Plotting overview
# - Reporting results overview
# - Accessing samples
# - Currently available models
# - Convergence issues and warnings
# +
import numpy as np
import pymc3 as pm
from pyei.data import Datasets
from pyei.two_by_two import TwoByTwoEI
from pyei.goodmans_er import GoodmansER
from pyei.goodmans_er import GoodmansERBayes
from pyei.r_by_c import RowByColumnEI
# -
# # Loading data to use
#
# `PyEI` provides access to several example datasets for getting started. We can load the Santa Clara dataset, with its 42 precincts, and convert it to a DataFrame with `data = Datasets.Santa_Clara.to_dataframe().` We'll be passing various columns of this DataFrame to PyEI.
#
# Whether you are using an example data set or your own data, here is what you will need to pass to the various EI methods:
#
# - A vector of length `num_precincts` giving the population of each precincts. Depending on your purposes and data, the appropriate measure of population may be CVAP, VAP, etc. Below we name this `precinct_pops`.
#
# - For 2 x 2 EI:
#     * A vector of length `num_precincts` whose entries are each numbers between zero and 1 that give the fraction of the population of each precinct who belong to the demographic group of interest. Below we name this `group_fraction`.
#     * A vector of length `num_precincts` whose entries are each numbers between zero and 1 that give the fraction of voters in each precinct voting for the candidate of interest (or, if we are estimating turnout, who voted at all). Below, we name this `votes_fraction`.
# * Optionally: name of the candidate of interest, name of the demographic group of interest, and/or names of precincts (for use in PyEI's plotting and reporting).
#
# - For $r$ x $c$ EI:
#     * An array of shape $r$ x `num_precincts` whose entries are each numbers between zero and 1 that give the fraction of the population of each precinct who belong to each of the $r$ demographic groups of interest (these fractions should sum to 1 for each precinct). Each row should sum to 1. Below we name this `group_fractions`.
#     * An array of shape $c$ x `num_precincts` whose entries are each numbers between zero and 1 that give the fraction of voters in each precinct voting for each of $c$ candidates of interest (or, if we are considering turnout, who voted or did not vote). Each row should sum to 1. Below, we name this `votes_fractions`.
# * Optionally: A vector of length $c$ giving the names of the candidates and a vector of length $r$ giving the name of the demographic groups and/or names of precincts (will be used in PyEI's plotting and reporting).
# +
# Example 2x2 data
santa_clara_data = Datasets.Santa_Clara.to_dataframe()
# Fraction of each precinct's population in the demographic group of interest.
group_fraction_2by2 = np.array(santa_clara_data["pct_e_asian_vote"])
# Fraction of each precinct's voters voting for the candidate of interest.
votes_fraction_2by2 = np.array(santa_clara_data["pct_for_hardy2"])
# Population of each precinct (weights used to aggregate to district level).
precinct_pops = np.array(santa_clara_data["total2"])
# Display names used by PyEI's plotting and reporting.
demographic_group_name_2by2 = "e_asian"
candidate_name_2by2 = "Hardy"
precinct_names = santa_clara_data['precinct']
santa_clara_data.head()
# +
# Example rxc data (here r=c=3)
santa_clara_data = Datasets.Santa_Clara.to_dataframe() # This line same as 2x2 example
# r x num_precincts array: per-precinct fraction in each of the 3 demographic
# groups (rows should sum to 1 per precinct; .T puts groups on the first axis).
group_fractions_rbyc = np.array(santa_clara_data[['pct_ind_vote', 'pct_e_asian_vote', 'pct_non_asian_vote']]).T
# c x num_precincts array: per-precinct vote fraction for each of 3 candidates.
votes_fractions_rbyc = np.array(santa_clara_data[['pct_for_hardy2', 'pct_for_kolstad2', 'pct_for_nadeem2']]).T
precinct_names = santa_clara_data['precinct'] # This line same as 2x2 example
# Names for reporting; order must match the column order used above.
candidate_names_rbyc = ["Hardy", "Kolstad", "Nadeem"]
demographic_group_names_rbyc = ["ind", "e_asian", "non_asian"]
precinct_pops = np.array(santa_clara_data['total2']) # This line same as 2x2 example
# -
# # Fitting a model for EI
#
# We fit most models by first creating a `TwoByTwoEI`, `RowByColumnEI` or `GoodmanERBayes` object, which is initialized with a (required) model name that is chosen among the supported models and (if desired) the applicable model parameters, then calling its `fit` method, with the relevant data as arguments. We can also, optionally, pass arguments to specify sampling choices like the number of MCMC draws.
#
# We also can pass to the fit() function of `TwoByTwoEI`, `RowByColumnEI`, or `GoodmanERBayes` additional keyword arguments for the sampler, such as:
#
# - draws - the number of samples to draw
# - chains - the number of chains
# - tune - the number of iterations to tune
# - cores - the number of chains to run in parallel
#
# (see `pymc3.sampling.sample` at https://docs.pymc.io/api/inference.html for more)
# E.g.:
#
# We fit a Goodman's ecological regression model by creating a `GoodmansER` or `GoodmansERBayes` object and then calling its fit method.
# +
# Fitting a first 2 x 2 model
# Create a TwobyTwoEI object
# (pareto_scale / pareto_shape are hyperparameters specific to the
#  king99_pareto_modification model)
ei_2by2 = TwoByTwoEI(model_name="king99_pareto_modification", pareto_scale=15, pareto_shape=2)
# Fit the model.  draws / tune / target_accept are forwarded to the PyMC3
# sampler (see the "Fitting a model" notes above).
ei_2by2.fit(group_fraction_2by2,
    votes_fraction_2by2,
    precinct_pops,
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2,
    precinct_names=precinct_names,
    draws=1200, # optional
    tune=3000, # optional
    target_accept=.99# optional
)
# Generate a simple report to summarize the results
print(ei_2by2.summary())
# +
# Fitting a first r x c model
# Create a RowByColumnEI object
# (pareto_shape / pareto_scale are hyperparameters of the modified
#  multinomial-dirichlet model)
ei_rbyc = RowByColumnEI(model_name='multinomial-dirichlet-modified', pareto_shape=100, pareto_scale=100)
# Fit the model (precinct_names is optional, hence left commented out here).
ei_rbyc.fit(group_fractions_rbyc,
    votes_fractions_rbyc,
    precinct_pops,
    demographic_group_names=demographic_group_names_rbyc,
    candidate_names=candidate_names_rbyc,
    #precinct_names=precinct_names,
)
# Generate a simple report to summarize the results
print(ei_rbyc.summary())
# +
# Fitting Goodman's ER - precincts not weighted by population
# Create a GoodmansER object
goodmans_er = GoodmansER()
# Fit the model (no precinct_pops: each precinct contributes equally).
goodmans_er.fit(
    group_fraction_2by2,
    votes_fraction_2by2,
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2
)
# Generate a simple report to summarize the results
print(goodmans_er.summary())
# Fitting Goodman's ER - precincts weighted by population
# BUGFIX: pass the boolean True rather than the string "True".  The string is
# truthy, so a plain truthiness check behaves the same, but "True" == True is
# False in Python, so any strict comparison inside the library would silently
# fall back to the unweighted regression.
goodmans_er = GoodmansER(is_weighted_regression=True)
goodmans_er.fit(group_fraction_2by2,
    votes_fraction_2by2,
    precinct_pops, # Must include populations if weighting by population
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2
)
print(goodmans_er.summary())
# +
# Fitting Goodman's ER - Bayesian variant with priors over the intercepts with x=0 and x=1
# sigma sets the prior scale; weighted_by_pop=True weights precincts by
# population, so precinct_pops is required here.
bayes_goodman_ei = GoodmansERBayes("goodman_er_bayes", weighted_by_pop=True, sigma=1)
bayes_goodman_ei.fit(
    group_fraction_2by2,
    votes_fraction_2by2,
    precinct_pops,
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2
)
# -
# # Plotting overview
#
# PyEI has a number of plots available. Here is a list of available plots.
#
# Plotting methods for any fitted EI model where inference involves sampling -- i.e. all approaches except the (non-Bayesian) Goodman's ER
#
# - Summary plots for distributions of polity-wide voter preferences
# - `plot`
# - `plot_kde` (2 by 2)
# - `plot_kdes` ($r$ by $c$)
# - `plot_boxplot` (2 by 2)
# - `plot_boxplots` ($r$ by $c$)
# - `plot_intervals` (2 by 2)
#
# - Plots of polarization
# - `plot_polarization_kde` (2 by 2)
# - `plot_polarization_kdes` ($r$ by $c$)
#
# For all approaches except Goodman's ER and the Bayesian Goodman's ER (which do not generate samples for each precinct)
#
# - Plots of **precinct-level** voter preferences
# - `precinct_level_plot`
# - `plot_intervals_by_precinct`
#
#
# `GoodmansER` objects also have a plot method
# - `plot`
#
# Additional plotting utilities: for tomography plotting and comparing precinct-level posterior means:
#
# - `plot_utils.tomography_plot`
# - `plot_precinct_scatterplot`
#
# Here we only display a few of these plots as examples. For a more detailed look at the plotting and reporting functionality, see the Plotting and Reporting notebook.
# Summary plot of district-level voter-preference distributions (2x2 model).
ei_2by2.plot()
# Goodman's ER regression plot (here, the population-weighted fit from above).
goodmans_er.plot()
# One KDE panel per candidate (alternatively group the panels by group).
ei_rbyc.plot_kdes(plot_by="candidate") # or, plot_by="group"
ei_2by2.plot_polarization_kde(percentile=95, show_threshold=True) #set show_threshold to false to just view the kde
# Precinct-level preference plot (not available for the Goodman's ER models).
ei_2by2.precinct_level_plot()
# # Reporting results overview
#
# See below for examples of how to access/report:
# - Polity-wide posterior means and credible intervals of voter preferences
# - Precinct-level posterior means and credible intervals of voter preferences
# - Information about polarization and candidate of choice
# **Polity-wide posterior means and credible intervals of voter preferences:**
# +
# EI objects have a summary method that gives a report
print(ei_2by2.summary())
# Can also use for r x c
#print(ei_rbyc.summary())
# Can also use for GoodmansERBayes
#print(bayes_goodman_ei.summary())
# Note: Goodman's ER also has a summary function, although it just gives point estimates
#print(goodmans_er.summary())
# -
# Polity-wide posterior means: index 0 is the specified group, index 1 its
# complement (same indexing convention as sampled_voting_prefs below).
posterior_mean_voting_prefs = ei_2by2.posterior_mean_voting_prefs
print(demographic_group_name_2by2, " support for ", candidate_name_2by2, posterior_mean_voting_prefs[0])
print("non-",demographic_group_name_2by2, " support for ", candidate_name_2by2, posterior_mean_voting_prefs[1])
# 95% credible intervals for the same two quantities.
credible_interval_95_mean_voting_prefs = ei_2by2.credible_interval_95_mean_voting_prefs
print(demographic_group_name_2by2, " support for ", candidate_name_2by2, credible_interval_95_mean_voting_prefs[0])
print("non-", demographic_group_name_2by2, " support for ", candidate_name_2by2, credible_interval_95_mean_voting_prefs[1])
# Means and intervals for the GoodmansERBayes fitted regression line.
x_vals, means, lower_bounds, upper_bounds = bayes_goodman_ei.compute_credible_int_for_line()
# **Precinct-level posterior means and credible intervals**:
precinct_posterior_means, precinct_credible_intervals = ei_2by2.precinct_level_estimates()
#precinct_posterior_means has shape num_precincts x 2 (groups) x 2 (candidates)
print(precinct_posterior_means[5][0][0]) # Estimated (posterior mean) support for the candidate from the group in precinct 5
#precinct_credible_intervals has shape num_precincts x 2 (groups) x 2 (candidates) x 2 (endpoints)
print(precinct_credible_intervals[5][0][0]) # 95% credible interval of support for the candidate from the group in precinct 5
# **Polarization and candidates of choice**
# Polarization between the reference group and its complement: either report a
# credible interval at a given percentile, or the probability the difference
# exceeds a threshold.
print(ei_2by2.polarization_report(percentile=95, reference_group=0, verbose=True))
print(ei_2by2.polarization_report(threshold=0.25, reference_group=0, verbose=True))
# set verbose=False to just show numerical value
ei_2by2.polarization_report(threshold=0.25, reference_group=0, verbose=False)
# For the r by c case, specify the groups you wish to compare and the candidate
print(ei_rbyc.polarization_report(percentile=90, groups=['ind', 'e_asian'], candidate='Kolstad', verbose=True))
# + tags=[]
ei_rbyc.candidate_of_choice_report(verbose=True) #pass a verbose=False argument to just output the dictionary
# -
# For each pair of groups, this function reports the fraction of samples for which
# the `preferred candidate` of one group (as measured by: who is the candidate supported
# by the plurality within that group according to the sampled district-level support value)
# is different from the `preferred candidate` of the other group
ei_rbyc.candidate_of_choice_polarization_report() #pass a verbose=False argument to just output the relevant numerical values
# # Accessing samples
#
# Most of the models that PyEI supports (although not Goodman's regression) are Bayesian models, and inference proceeds by using MCMC methods to sample from the posterior distributions of interest. Samples are accessible so that you can work with them directly if desired.
#
# We can use the `sampled_voting_prefs` property of a fitted EI object to access the samples of district-level voter preferences of each group for each candidate, which are calculated by weighting each set of precinct-level samples by the population and summing.
#
# We can also access samples from the posterior distribution by accessing them by name from the fitted ei object's `sim_trace` property.
# +
# Using `sampled_voting_prefs`
sampled_voting_prefs = ei_2by2.sampled_voting_prefs # ei.sampled_voting_prefs is samples of district-level voter preference: list of length 2
sampled_voting_prefs[0] #samples of district-wide support of specified group for specified candidate
sampled_voting_prefs[1] #samples of district-wide support of (complement of specified group) for specified candidate
sampled_voting_prefs[0].mean() #posterior mean
sampled_voting_prefs[1].mean() #posterior mean
# +
# By name of parameters, using the fitted ei object's `sim_trace` property.
# Parameter names ('b_1', 'kappa', ...) are model-specific; see the model
# definitions for their meaning.
# Shape is: num_samples x (dimensionality of parameter)
ei_2by2.sim_trace['b_1']
# Shape is: num_samples x (dimensionality of parameter)
ei_rbyc.sim_trace['kappa']
# -
# # Currently supported models - quick reference
#
# The names of currently supported models are as follows:
#
# - TwoByTwoEI
# * `king99`
# * `king99_pareto_modification`
# * `wakefield_beta`
# * `wakefield_normal` (caution: sampling difficulties)
# * `truncated_normal`
#
# - RowByColumnEI
# * `multinomial-dirichlet`
# * `multinomial-dirichlet-modified`
#
# - GoodmansER
# * `is_weighted_regression=False` (not weighted by precinct populations)
# * `is_weighted_regression=True` (weighted by precinct populations)
#
# - GoodmansERBayes
#
# Using pymc3's `model_to_graphviz` function is a nice way to visualize the Bayesian models. For example:
# Render the underlying PyMC3 model graphs (requires graphviz to display).
model = ei_2by2.sim_model
pm.model_to_graphviz(model)
model = ei_rbyc.sim_model
pm.model_to_graphviz(model)
# # Convergence issues and warnings
#
# There may be [convergence](https://mc-stan.org/docs/2_27/reference-manual/convergence.html) issues when fitting models to data. The presence of [divergences](https://mc-stan.org/docs/2_27/reference-manual/divergent-transitions.html) (especially a large number of divergences), a high value of [$\hat{R}$](https://mc-stan.org/docs/2_27/reference-manual/notation-for-samples-chains-and-draws.html) (a convergence diagnostic calculated by comparing multiple chains - more samples will generally lower it, and looking for a value below 1.1 is a good rule of thumb), and a low [effective sample size](https://mc-stan.org/docs/2_27/reference-manual/effective-sample-size-section.html) are three signs that there were computational difficulties during sampling that may have made the results inaccurate.
#
# `PyEI` relies on `PyMC3` for sampling, and thus benefits from the diagnostic checks that `PyMC3` automatically performs, and their associated warnings. (For some examples of how to compute and access convergence diagnostics with `PyEI` (using the [`ArviZ`](https://arviz-devs.github.io/arviz/) library), see `examples/model_eval_and_comparison_demo.ipynb`.) `PyEI` is thus intentionally vocal about computational difficulties that may be encountered. The warnings issued should be taken seriously, as they indicate that the samples generated may not be representative of the posterior distribution, in which case results derived from these samples should not be trusted.
#
# If convergence issues are encountered, there are several steps the user can take that may help mitigate; we mention several here and encourage the user to consult helpful resources in e.g. within the [`PyMC3`](https://pymc3.readthedocs.io/en/latest/notebooks/Diagnosing_biased_Inference_with_Divergences.html) and [`Stan`](https://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) documentation.
#
# Increasing the optional `target_accept` argument to the fit method might help ("might" because the default value of 0.99 is already quite high, and the sorts of geometric problems that frustrate the sampler require an exponentially different scale). Increasing the number of tuning steps (by setting the `tune` argument of the fit method) is sometimes helpful by giving `PyMC3` more time to estimate a step size and covariance matrix, and increasing the total number of draws (with the `draws` argument of the fit method) will allow for a larger effective sample size (at the cost of more computation time).
#
# Another, more involved approach to take would be [reparametrizing](https://mc-stan.org/docs/2_27/stan-users-guide/reparameterization-section.html) the model, which changes the posterior geometry in ways that (hopefully) ease computational difficulties. For example, the `king99_pareto_modification` model implemented in `PyEI` is a reparametrization of the `king99` model that is in the literature. On the Santa Clara example dataset, the modified (reparameterized) model encounters fewer computational issues (see example below). However, `king99_pareto_modification` does not so far seem to outperform `king99` in all settings, so the user may wish to try both.
#
# Computational difficulties can be a symptom of poor choice of model for the data at hand, and so it is possible that computational issues indicate a fundamental unsuitability of the ecological inference model to the data at hand. It is the authors' hope that additional ecological inference approaches may be developed that avoid some of the computational difficulties that seem to occur in sampling from existing models.
# +
# Fitting this particular EI model to the Santa Clara example data
# results in computational difficulties (expect divergence warnings;
# see the discussion above).
ei_2by2orig = TwoByTwoEI(model_name="king99", lmbda=0.5)
# Fit the model
ei_2by2orig.fit(group_fraction_2by2,
    votes_fraction_2by2,
    precinct_pops,
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2,
    precinct_names=precinct_names,
    draws=1200, # optional
    tune=3000, # optional
    target_accept=.99# optional
)
# Generate a simple report to summarize the results
print(ei_2by2orig.summary())
# +
# Using this reparametrized version of the model helps mitigate the problems,
# although some difficulties persist
# Create a TwobyTwoEI object (same model and data as the first 2x2 fit above,
# repeated here for side-by-side comparison with the original king99).
ei_2by2 = TwoByTwoEI(model_name="king99_pareto_modification", pareto_scale=15, pareto_shape=2)
# Fit the model
ei_2by2.fit(group_fraction_2by2,
    votes_fraction_2by2,
    precinct_pops,
    demographic_group_name=demographic_group_name_2by2,
    candidate_name=candidate_name_2by2,
    precinct_names=precinct_names,
    draws=1200, # optional
    tune=3000, # optional
    target_accept=.99 # optional
)
# Generate a simple report to summarize the results
print(ei_2by2.summary())
# -
| pyei/intro_notebooks/PyEI_overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="zxWsQffj1ysi"
# # Advanced Feature Engineering in BQML
#
# **Learning Objectives**
#
# 1. Evaluate the model
# 2. Extract temporal features, feature cross temporal features
# 3. Apply `ML.FEATURE_CROSS` to categorical features
# 4. Create a Euclidian feature column, feature cross coordinate features
# 5. Apply the `BUCKETIZE` function, `TRANSFORM` clause, L2 Regularization
#
#
# ## Overview
# In this lab, we utilize feature engineering to improve the prediction of the fare amount for a taxi ride in New York City. We will use BigQuery ML to build a taxifare prediction model, using feature engineering to improve and create a final model. We continue applying feature engineering throughout the lab, reducing the RMSE and thereby improving the prediction of the fare amount.
#
# In this Notebook, we perform a feature cross using BigQuery's `ML.FEATURE_CROSS`, derive coordinate features, feature cross coordinate features, clean up the code, apply the `BUCKETIZE` function, the `TRANSFORM` clause, L2 Regularization, and evaluate model performance throughout the process.
# -
# !echo "Your current GCP Project Name is: "$(gcloud config list project --format "value(core.project)")
# + colab={} colab_type="code" id="F78xGb8xgKkd" outputId="4e1b0020-4fe7-4594-824c-6246a0e7edae" language="bash"
# export PROJECT_ID=#[your-gcp-project-id]
# + [markdown] colab_type="text" id="L0-vOB4y2BJM"
# ## The source dataset
#
# Our dataset is hosted in [BigQuery](https://cloud.google.com/bigquery/). The taxi fare data is a publicly available dataset, meaning anyone with a GCP account has access. Click [here](https://console.cloud.google.com/bigquery?project=bigquery-public-data&p=nyc-tlc&d=yellow&t=trips&page=table) to access the dataset.
#
# The Taxi Fare dataset is relatively large at 55 million training rows, but simple to understand, with only six features. The fare_amount is the target, the continuous value we’ll train a model to predict.
#
# + [markdown] colab_type="text" id="X8HpUXTAgKkh"
# ## Create a BigQuery Dataset
#
# A BigQuery dataset is a container for tables, views, and models built with BigQuery ML. Let's create one called __feat_eng__ if we have not already done so in an earlier lab. We'll do the same for a GCS bucket for our project too.
# + colab={} colab_type="code" id="i_PC0J4BgKkh" outputId="6b794a09-d144-405f-8178-25ada7825681" language="bash"
#
# # Create a BigQuery dataset for feat_eng if it doesn't exist
# datasetexists=$(bq ls -d | grep -w feat_eng)
#
# if [ -n "$datasetexists" ]; then
# echo -e "BigQuery dataset already exists, let's not recreate it."
#
# else
# echo "Creating BigQuery dataset titled: feat_eng"
#
# bq --location=US mk --dataset \
# --description 'Taxi Fare' \
# $PROJECT_ID:feat_eng
# echo "\nHere are your current datasets:"
# bq ls
# fi
# + [markdown] colab_type="text" id="b2TuS1s9vREL"
# ## Create the training data table
#
# Since there is already a publicly available dataset, we can simply create the training data table using this raw input data. Note the WHERE clause in the below query: This clause allows us to TRAIN a portion of the data (e.g. one hundred thousand rows versus one million rows), which keeps your query costs down. If you need a refresher on using MOD() for repeatable splits see this [post](https://www.oreilly.com/learning/repeatable-sampling-of-data-sets-in-bigquery-for-machine-learning).
#
# * Note: The dataset in the create table code below is the one created previously, e.g. `feat_eng`. The table name is `feateng_training_data`. Run the query to create the table.
# + colab={} colab_type="code" id="CMNRractvREL" outputId="d2a2084b-ec56-48a5-8522-409cc32ea75e"
# %%bigquery
-- Build the training table from the public NYC yellow-cab trips table.
CREATE OR REPLACE TABLE
  feat_eng.feateng_training_data AS
SELECT
  -- Target: total amount paid (tolls folded into the fare).
  (tolls_amount + fare_amount) AS fare_amount,
  -- *1.0 casts the integer passenger count to FLOAT64.
  passenger_count*1.0 AS passengers,
  pickup_datetime,
  pickup_longitude AS pickuplon,
  pickup_latitude AS pickuplat,
  dropoff_longitude AS dropofflon,
  dropoff_latitude AS dropofflat
FROM
  `nyc-tlc.yellow.trips`
WHERE
  -- Repeatable ~1/10000 sample keyed on a hash of pickup_datetime
  -- (see the note on MOD() for repeatable splits above).
  MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 10000) = 1
  -- Sanity filters: minimum fare, at least one passenger, and pickup/dropoff
  -- coordinates inside a bounding box around the NYC area.
  AND fare_amount >= 2.5
  AND passenger_count > 0
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
# + [markdown] colab_type="text" id="clnaaqQsXkwC"
# ## Verify table creation
#
# Verify that you created the dataset.
#
# + colab={} colab_type="code" id="gjNcSsaDgKkm" outputId="d0ef8389-89e3-48b7-9576-5c3104370172"
# %%bigquery
# LIMIT 0 is a free query; this allows us to check that the table exists.
-- (returns only the schema; no rows are scanned or billed)
SELECT
  *
FROM
  feat_eng.feateng_training_data
LIMIT
  0
# + [markdown] colab_type="text" id="RhgXan8wvREN"
# ### Baseline Model: Create the baseline model
#
# Next, you create a linear regression baseline model with no feature engineering. Recall that a model in BigQuery ML represents what an ML system has learned from the training data. A baseline model is a solution to a problem without applying any machine learning techniques.
#
# When creating a BQML model, you must specify the model type (in our case linear regression) and the input label (fare_amount). Note also that we are using the training data table as the data source.
# + [markdown] colab_type="text" id="kb_5NlfU7oyT"
# Now we create the SQL statement to create the baseline model.
# + colab={} colab_type="code" id="ixxd1ugPgKkr" outputId="ae2bb206-aaa3-42ce-83ef-2f22e059a81c"
# %%bigquery
-- Baseline linear regression with no feature engineering: raw columns in,
-- fare_amount as the label.
CREATE OR REPLACE MODEL
  feat_eng.baseline_model OPTIONS (model_type='linear_reg',
    input_label_cols=['fare_amount']) AS
SELECT
  fare_amount,
  passengers,
  pickup_datetime,
  pickuplon,
  pickuplat,
  dropofflon,
  dropofflat
FROM
  feat_eng.feateng_training_data
# + [markdown] colab_type="text" id="Tq2KYJOM9ULC"
#
# Note, the query takes several minutes to complete. After the first iteration is complete, your model (baseline_model) appears in the navigation panel of the BigQuery web UI. Because the query uses a CREATE MODEL statement to create a model, you do not see query results.
#
# You can observe the model as it's being trained by viewing the Model stats tab in the BigQuery web UI. As soon as the first iteration completes, the tab is updated. The stats continue to update as each iteration completes.
# + [markdown] colab_type="text" id="HO5d50Eic-X1"
# Once the training is done, visit the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) and look at the model that has been trained. Then, come back to this notebook.
# + [markdown] colab_type="text" id="RSgIJqN6vREV"
# ### Evaluate the baseline model
# Note that BigQuery automatically split the data we gave it, and trained on only a part of the data and used the rest for evaluation. After creating your model, you evaluate the performance of the regressor using the `ML.EVALUATE` function. The `ML.EVALUATE` function evaluates the predicted values against the actual data.
#
# NOTE: The results are also displayed in the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) under the **Evaluation** tab.
# + [markdown] colab_type="text" id="Ofrjof9ngKku"
# Review the learning and eval statistics for the baseline_model.
# + colab={} colab_type="code" id="pKw8SkXjgKku" outputId="5be55f39-f3df-4e7c-aa39-12a681bd8d32"
# %%bigquery
# Per-iteration training statistics (ML.TRAINING_INFO returns one row per
# iteration, with loss on the training and held-out data).
-- `loss` is mean squared error for a linear_reg model, so SQRT(loss) is RMSE.
SELECT
  *,
  SQRT(loss) AS rmse
FROM
  ML.TRAINING_INFO(MODEL feat_eng.baseline_model)
# -
# **Exercise.** Evaluate the model
# + colab={} colab_type="code" id="v0-yRVp2gKkw" outputId="72a3067c-5ce1-43c4-d44f-506837cf03f7"
# %%bigquery
# TODO: Your code goes here
# + [markdown] colab_type="text" id="xJGbfYuD8a9d"
# **NOTE:** Because you performed a linear regression, the results include the following columns:
#
# * mean_absolute_error
# * mean_squared_error
# * mean_squared_log_error
# * median_absolute_error
# * r2_score
# * explained_variance
#
# **Resource** for an explanation of the [Regression Metrics](https://towardsdatascience.com/metrics-to-evaluate-your-machine-learning-algorithm-f10ba6e38234).
#
# **Mean squared error** (MSE) - Measures the difference between the values our model predicted using the test set and the actual values. You can also think of it as the distance between your regression (best fit) line and the predicted values.
#
# **Root mean squared error** (RMSE) - The primary evaluation metric for this ML problem is the root mean-squared error. RMSE measures the difference between the predictions of a model, and the observed values. A large RMSE is equivalent to a large average error, so smaller values of RMSE are better. One nice property of RMSE is that the error is given in the units being measured, so you can tell very directly how incorrect the model might be on unseen data.
#
# **R2**: An important metric in the evaluation results is the R2 score. The R2 score is a statistical measure that determines if the linear regression predictions approximate the actual data. Zero (0) indicates that the model explains none of the variability of the response data around the mean. One (1) indicates that the model explains all the variability of the response data around the mean.
# + [markdown] colab_type="text" id="p_21sAIR7LZw"
# Next, we write a SQL query to take the SQRT() of the mean squared error as your loss metric for evaluation for the benchmark_model.
# + cellView="form" colab={} colab_type="code" id="8mAXRTvbvRES" outputId="65784ff8-522b-4c43-f7c6-379fe7c901a8"
# %%bigquery
-- RMSE = SQRT(MSE); reported in the label's units (dollars of fare).
SELECT
  SQRT(mean_squared_error) AS rmse
FROM
  ML.EVALUATE(MODEL feat_eng.baseline_model)
# + [markdown] colab_type="text" id="nW6fnqAW8vyI"
# #### Model 1: EXTRACT dayofweek from the pickup_datetime feature.
#
# * As you recall, dayofweek is an integer representing the 7 days of the week. Note that BigQuery's `EXTRACT(DAYOFWEEK ...)` numbers days from 1 (Sunday) through 7 (Saturday), which differs from the ISO-8601 convention of 1 (Monday) to 7 (Sunday).
#
# * If you were to extract the dayofweek from pickup_datetime using BigQuery SQL, the datatype returned would be integer.
# + [markdown] colab_type="text" id="tStXdJYhgKk1"
# **Exercise.** We will create a model titled "model_1" from the benchmark model and extract out the DayofWeek.
# + cellView="both" colab={} colab_type="code" id="ZQ0kT2jN-vpm" outputId="599e3b87-c772-4474-c959-2fe80de33c64"
# %%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_1 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
# TODO: Your code goes here
# + [markdown] colab_type="text" id="T24XjIJgdLCH"
# Once the training is done, visit the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) and look at the model that has been trained. Then, come back to this notebook.
# + [markdown] colab_type="text" id="SRLxpccX_Tin"
# Next, two distinct SQL statements show the TRAINING and EVALUATION metrics of model_1.
# + colab={} colab_type="code" id="XfN4oxOigKk4" outputId="300723d5-18d8-44f4-a600-ff2ffd91eaeb"
# %%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_1)
# + colab={} colab_type="code" id="jvke4qzIgKk8" outputId="30a07042-034e-4bbb-fc02-e87306a13a0d"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
# + [markdown] colab_type="text" id="wf-9FBmL_Ti_"
# Here we run a SQL query to take the SQRT() of the mean squared error as your loss metric for evaluation for the benchmark_model.
# + cellView="both" colab={} colab_type="code" id="CsVBzNef_TjC" outputId="74f91e84-494f-4699-fa99-c0b165fae2b2"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
# + [markdown] colab_type="text" id="Lw30UexH8v9P"
# ### Model 2: EXTRACT hourofday from the pickup_datetime feature
#
# As you recall, **pickup_datetime** is stored as a TIMESTAMP, where the Timestamp format is retrieved in the standard output format – year-month-day hour:minute:second (e.g. 2016-01-01 23:59:59). Hourofday returns the integer number representing the hour number of the given date.
#
# Hourofday is best thought of as a discrete ordinal variable (and not a categorical feature), as the hours can be ranked (e.g. there is a natural ordering of the values). Hourofday has an added characteristic of being cyclic, since 12am follows 11pm and precedes 1am.
# + [markdown] colab_type="text" id="zXeGHeWogKlC"
# Next, we create a model titled "model_2" and EXTRACT the hourofday from the pickup_datetime feature to improve our model's rmse.
# + cellView="both" colab={} colab_type="code" id="FHeqcYz-B9F1" outputId="f682ba71-66d8-4550-a2e8-3260dc04354b"
# %%bigquery
-- Model 2: add two temporal features extracted from pickup_datetime.
CREATE OR REPLACE MODEL
  feat_eng.model_2 OPTIONS (model_type='linear_reg',
    input_label_cols=['fare_amount']) AS
SELECT
  fare_amount,
  passengers,
  -- Integer day of week; BigQuery numbers 1 = Sunday ... 7 = Saturday.
  EXTRACT(DAYOFWEEK
  FROM
    pickup_datetime) AS dayofweek,
  -- Integer hour of day, 0-23.
  EXTRACT(HOUR
  FROM
    pickup_datetime) AS hourofday,
  pickuplon,
  pickuplat,
  dropofflon,
  dropofflat
FROM
  `feat_eng.feateng_training_data`
# + cellView="both" colab={} colab_type="code" id="h2yjF6uGCiZh" outputId="a53802ff-821a-427b-e358-1fa4e916576a"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
# + cellView="both" colab={} colab_type="code" id="bhfabG8XCiZm" outputId="75c94b12-7d46-4324-8aa7-5774aa15544d"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
# + [markdown] colab_type="text" id="vbOSxv6BDqB-"
# ### Model 3: Feature cross dayofweek and hourofday using CONCAT
#
# First, let’s allow the model to learn traffic patterns by creating a new feature that combines the time of day and day of week (this is called a [feature cross](https://developers.google.com/machine-learning/crash-course/feature-crosses/video-lecture)).
#
# Note: BQML by default assumes that numbers are numeric features, and strings are categorical features. We need to convert both the dayofweek and hourofday features to strings because the model (Neural Network) will automatically treat any integer as a numerical value rather than a categorical value. Thus, if not cast as a string, the dayofweek feature will be interpreted as numeric values (e.g. 1,2,3,4,5,6,7) and hourofday will also be interpreted as numeric values (e.g. the day begins at midnight, 00:00, and the last minute of the day begins at 23:59 and ends at 24:00). As such, there is no way to distinguish the "feature cross" of hourofday and dayofweek "numerically". Casting the dayofweek and hourofday as strings ensures that each element will be treated like a label and will get its own coefficient associated with it.
# + [markdown] colab_type="text" id="cNVO4nr6gKlK"
# **Exercise.** Create the SQL statement to feature cross the dayofweek and hourofday using the CONCAT function. Name the model "model_3"
# + cellView="both" colab={} colab_type="code" id="F7l02C9KFMy7" outputId="9d43f216-c138-4c0d-cf7b-4d938ba33d78"
# %%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_3 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
# TODO: Your code goes here
# + colab={} colab_type="code" id="Glzorg3YgKlM" outputId="3bd57903-74da-46d7-ff26-4348b302053e"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
# + colab={} colab_type="code" id="6GtPp2QugKlO" outputId="fc5c0942-4da7-4ab7-9fcd-fd4b6ce82917"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
# + [markdown] colab_type="text" id="FbSRbuJ-fYtK"
# ### Model 4: Apply the ML.FEATURE_CROSS clause to categorical features
#
# BigQuery ML now has ML.FEATURE_CROSS, a pre-processing clause that performs a feature cross.
#
# * ML.FEATURE_CROSS generates a [STRUCT](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#struct-type) feature with all combinations of crossed categorical features, except for 1-degree items (the original features) and self-crossing items.
#
# * Syntax: ML.FEATURE_CROSS(STRUCT(features), degree)
#
# * The feature parameter is a categorical features separated by comma to be crossed. The maximum number of input features is 10. An unnamed feature is not allowed in features. Duplicates are not allowed in features.
#
# * Degree(optional): The highest degree of all combinations. Degree should be in the range of [1, 4]. Default to 2.
#
# Output: The function outputs a STRUCT of all combinations except for 1-degree items (the original features) and self-crossing items, with field names as concatenation of original feature names and values as the concatenation of the column string values.
#
# + [markdown] colab_type="text" id="1zZI0X6s1ysl"
# Examine the components of ML.Feature_Cross
# + colab={} colab_type="code" id="hfKmfGqw1ysq" outputId="adb978c5-90cb-4ee2-eaad-ad8060276d16"
# %%bigquery
-- Model 4: same inputs as model 2/3, but the day-of-week x hour-of-day
-- feature cross is produced by the ML.FEATURE_CROSS preprocessing clause
-- instead of a manual CONCAT. Both time parts are CAST to STRING so they
-- are treated as categorical rather than numeric.
CREATE OR REPLACE MODEL feat_eng.model_4
OPTIONS
(model_type='linear_reg',
input_label_cols=['fare_amount'])
AS
SELECT
fare_amount,
passengers,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM `feat_eng.feateng_training_data`
# + [markdown] colab_type="text" id="G6tpoYhcIgs4"
# Next, two distinct SQL statements show the TRAINING and EVALUATION metrics of model_4.
# + colab={} colab_type="code" id="NZudI67BgKlU" outputId="2dc86a64-8879-4ee3-d1ce-55f3c698371b"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_4)
# + colab={} colab_type="code" id="AZPAQozDgKlW" outputId="5f7f8fe4-f3a9-4ca1-ed9f-af5b7bfabdb6"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_4)
# + [markdown] colab_type="text" id="ymzRp28Q1ys1"
# ### Sliding down the slope toward a loss minimum (reduced taxi fare)!
# * Our fourth model above gives us an RMSE of 9.65 for estimating fares. Recall our heuristic benchmark was 8.29. This may be the result of feature crossing. Let's apply more feature engineering techniques to see if we can't get this loss metric lower!
# + [markdown] colab_type="text" id="2NAPkAlEEg6C"
# ### Model 5: Feature cross coordinate features to create a Euclidean feature
#
#
# Pickup coordinate:
# * pickup_longitude AS pickuplon
# * pickup_latitude AS pickuplat
#
# Dropoff coordinate:
# * dropoff_longitude AS dropofflon
# * dropoff_latitude AS dropofflat
#
# **Coordinate Features**:
# * The pick-up and drop-off longitude and latitude data are crucial to predicting the fare amount as fare amounts in NYC taxis are largely determined by the distance traveled. As such, we need to teach the model the Euclidean distance between the pick-up and drop-off points.
#
# * Recall that latitude and longitude allows us to specify any location on Earth using a set of coordinates. In our training data set, we restricted our data points to only pickups and drop offs within NYC. New York city has an approximate longitude range of -74.05 to -73.75 and a latitude range of 40.63 to 40.85.
#
# * The dataset contains information regarding the pickup and drop off coordinates. However, there is no information regarding the distance between the pickup and drop off points. Therefore, we create a new feature that calculates the distance between each pair of pickup and drop off points. We can do this using the Euclidean Distance, which is the straight-line distance between any two coordinate points.
#
# * We need to convert those coordinates into a single column of a spatial data type. We will use the ST_DISTANCE and the ST_GEOGPOINT functions.
#
# * ST_DISTANCE: ST_DISTANCE(geography_1, geography_2). Returns the shortest distance in meters between two non-empty GEOGRAPHYs (e.g. between two spatial objects).
#
# * ST_GEOGPOINT: ST_GEOGPOINT(longitude, latitude). Creates a GEOGRAPHY with a single point. ST_GEOGPOINT creates a point from the specified FLOAT64 longitude and latitude parameters and returns that point in a GEOGRAPHY value.
#
# + [markdown] colab_type="text" id="kCYniJnejNz0"
# Next we convert the feature coordinates into a single column of a spatial data type. Use the ST_Distance and the ST_GeogPoint functions.
#
# SAMPLE CODE:
# ST_Distance(ST_GeogPoint(value1,value2), ST_GeogPoint(value3, value4)) AS euclidean
#
#
#
# -
# **Exercise.** Create a Euclidean feature
# + cellView="both" colab={} colab_type="code" id="P8mFocaKj9oA" outputId="77cb597a-b130-4b8a-a253-a2d72dc7da2e"
# %%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_5 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
# TODO: Your code goes here
# + [markdown] colab_type="text" id="uy7eK6iXlYPu"
# Next, two distinct SQL statements show metrics for model_5.
# + cellView="both" colab={} colab_type="code" id="9atctYyGlYP7" outputId="aa2e5513-4df8-4c0c-d08b-63690b567ecb"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_5)
# + cellView="both" colab={} colab_type="code" id="Lk42mvjzlYQE" outputId="e77d8fcc-ef55-40e5-aac1-5db287e0c4ea"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_5)
# + [markdown] colab_type="text" id="uUoQmADkmlnV"
# ### Model 6: Feature cross pick-up and drop-off locations features
#
# In this section, we feature cross the pick-up and drop-off locations so that the model can learn pick-up-drop-off pairs that will require tolls.
#
# This step takes the geographic point corresponding to the pickup point and grids to a 0.1-degree-latitude/longitude grid (approximately 8km x 11km in New York—we should experiment with finer resolution grids as well). Then, it concatenates the pickup and dropoff grid points to learn “corrections” beyond the Euclidean distance associated with pairs of pickup and dropoff locations.
#
# Because the lat and lon by themselves don't have meaning, but only in conjunction, it may be useful to treat the fields as a pair instead of just using them as numeric values. However, lat and lon are continuous numbers, so we have to discretize them first. That's what SnapToGrid does.
#
#
# * ST_SNAPTOGRID: ST_SNAPTOGRID(geography_expression, grid_size). Returns the input GEOGRAPHY, where each vertex has been snapped to a longitude/latitude grid. The grid size is determined by the grid_size parameter which is given in degrees.
#
# **REMINDER**: The ST_GEOGPOINT creates a GEOGRAPHY with a single point. ST_GEOGPOINT creates a point from the specified FLOAT64 longitude and latitude parameters and returns that point in a GEOGRAPHY value. The ST_Distance function returns the minimum distance between two spatial objects. It also returns meters for geographies and SRID units for geometrics.
# + [markdown] colab_type="text" id="B9viHV3l1ytF"
# The following SQL statement is incorrect. Modify the code to feature cross the pick-up and drop-off locations features.
# + cellView="both" colab={} colab_type="code" id="7VjZawfZpQ7Y" outputId="10772712-3259-4f42-da8a-921a33c1e2bd"
# %%bigquery
-- Model 6: adds (a) a Euclidean-style distance feature between pickup and
-- dropoff points, and (b) a pickup/dropoff location cross built by snapping
-- each point to a lat/lon grid and concatenating the two grid cells.
CREATE OR REPLACE MODEL
feat_eng.model_6 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
-- day-of-week x hour-of-day categorical cross (as in model 4/5)
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
-- straight-line pickup->dropoff distance in meters (ST_Distance on geographies)
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
-- NOTE(review): grid size here is 0.01 degrees, while the markdown above
-- describes a 0.1-degree grid -- confirm which resolution is intended.
CONCAT(ST_AsText(ST_SnapToGrid(ST_GeogPoint(pickuplon,
pickuplat),
0.01)), ST_AsText(ST_SnapToGrid(ST_GeogPoint(dropofflon,
dropofflat),
0.01))) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
# + [markdown] colab_type="text" id="zvOfj_k1qijv"
# Next, we evaluate model_6.
# + cellView="both" colab={} colab_type="code" id="G8rM09jLqij4" outputId="b24aa617-c46c-4cb9-f943-2e5a2d920945"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_6)
# + cellView="both" colab={} colab_type="code" id="CQUfOjPlqij8" outputId="b72347ac-3de0-49d4-b385-2955edf195ab"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_6)
# + [markdown] colab_type="text" id="Y4RL3f9K1ytN"
# We should now have a total of five input features for our model.
# 1. fare_amount
# 2. passengers
# 3. day_hr
# 4. euclidean
# 5. pickup_and_dropoff
# + colab={} colab_type="code" id="-UxZXY18rWG8" outputId="1901b6e0-78ea-4dbb-80a7-ae2da365b509"
# %%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_6 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
CONCAT(ST_AsText(ST_SnapToGrid(ST_GeogPoint(pickuplon,
pickuplat),
0.01)), ST_AsText(ST_SnapToGrid(ST_GeogPoint(dropofflon,
dropofflat),
0.01))) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
# + [markdown] colab_type="text" id="vSWHC_9Z1ytS"
# ## BQML's Pre-processing functions:
#
# Here are some of the preprocessing functions in BigQuery ML:
# * ML.FEATURE_CROSS(STRUCT(features)) does a feature cross of all the combinations
# * ML.POLYNOMIAL_EXPAND(STRUCT(features), degree) creates x, x<sup>2</sup>, x<sup>3</sup>, etc.
# * ML.BUCKETIZE(f, split_points) where split_points is an array
# + [markdown] colab_type="text" id="ENp6mUvB1ytT"
# ### Model 7: Apply the BUCKETIZE Function
#
#
# ##### BUCKETIZE
# Bucketize is a pre-processing function that creates "buckets" (e.g bins) - e.g. it bucketizes a continuous numerical feature into a string feature with bucket names as the value.
#
# * ML.BUCKETIZE(feature, split_points)
#
# * feature: A numerical column.
#
# * split_points: Array of numerical points to split the continuous values in feature into buckets. With n split points (s1, s2 … sn), there will be n+1 buckets generated.
#
# * Output: The function outputs a STRING for each row, which is the bucket name. bucket_name is in the format of bin_<bucket_number>, where bucket_number starts from 1.
#
# * Currently, our model uses the ST_GeogPoint function to derive the pickup and dropoff feature. In this lab, we use the BUCKETIZE function to create the pickup and dropoff feature.
# + [markdown] colab_type="text" id="B-VzlYCG1ytU"
# Next, apply the BUCKETIZE function to model_7 and run the query.
# + colab={} colab_type="code" id="bE3ZRGV21yta" outputId="83952ef3-62a2-4719-e647-5e0285424082"
# %%bigquery
-- Model 7: replaces the ST_SnapToGrid-based location cross of model 6 with
-- ML.BUCKETIZE: each coordinate is binned into 0.01-degree buckets
-- (GENERATE_ARRAY produces the split points), and the four bucket names are
-- concatenated into one categorical pickup/dropoff feature.
CREATE OR REPLACE MODEL
feat_eng.model_7 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
-- straight-line pickup->dropoff distance in meters
ST_Distance(ST_GeogPoint(pickuplon,
pickuplat),
ST_GeogPoint(dropofflon,
dropofflat)) AS euclidean,
-- day-of-week x hour-of-day categorical cross
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
-- lon range [-78,-70] and lat range [37,45] cover NYC with margin
CONCAT( ML.BUCKETIZE(pickuplon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(pickuplat,
GENERATE_ARRAY(37, 45, 0.01)), ML.BUCKETIZE(dropofflon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(dropofflat,
GENERATE_ARRAY(37, 45, 0.01)) ) AS pickup_and_dropoff
FROM
`feat_eng.feateng_training_data`
# + [markdown] colab_type="text" id="AVPXGKZ374v7"
# Next, we evaluate model_7.
# + colab={} colab_type="code" id="yRZc6S101ytc" outputId="237ba0f9-c2f7-4351-b2ce-879b9b301c3c"
# %%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_7)
# + colab={} colab_type="code" id="-X1oUFpm1yte" outputId="ef90ec44-eb0f-4201-fbb5-9e29a2c240d3"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_7)
# + colab={} colab_type="code" id="PXpHTWhv1ytg" outputId="f15ca9f6-b5aa-4084-ce9f-3b7579f7c7eb"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_7)
# + [markdown] colab_type="text" id="z8PojyIe1ytk"
# ### Final Model: Apply the TRANSFORM clause and L2 Regularization
#
# Before we perform our prediction, we should encapsulate the entire feature set in a `TRANSFORM` clause. BigQuery ML now supports defining data transformations during model creation, which will be automatically applied during prediction and evaluation. This is done through the TRANSFORM clause in the existing CREATE MODEL statement. By using the TRANSFORM clause, user specified transforms during training will be automatically applied during model serving (prediction, evaluation, etc.)
#
# In our case, we are using the `TRANSFORM` clause to separate out the raw input data from the `TRANSFORMED` features. The input columns of the `TRANSFORM` clause is the query_expr (AS SELECT part). The output columns of `TRANSFORM` from select_list are used in training. These transformed columns are post-processed with standardization for numerics and one-hot encoding for categorical variables by default.
#
# The advantage of encapsulating features in the `TRANSFORM` clause is the client code doing the `PREDICT` doesn't change, e.g. our model improvement is transparent to client code. Note that the `TRANSFORM` clause MUST be placed after the `CREATE` statement.
#
# ##### [L2 Regularization](https://developers.google.com/machine-learning/glossary/#L2_regularization)
# Sometimes, the training RMSE is quite reasonable, but the evaluation RMSE illustrate more error. Given the severity of the delta between the EVALUATION RMSE and the TRAINING RMSE, it may be an indication of overfitting. When we do feature crosses, we run into the risk of overfitting (for example, when a particular day-hour combo doesn't have enough taxi rides).
#
# Overfitting is a phenomenon that occurs when a machine learning or statistics model is tailored to a particular dataset and is unable to generalize to other datasets. This usually happens in complex models, like deep neural networks. Regularization is a process of introducing additional information in order to prevent overfitting.
#
# Therefore, we will apply L2 Regularization to the final model. As a reminder, a regression model that uses the L1 regularization technique is called Lasso Regression while a regression model that uses the L2 Regularization technique is called Ridge Regression. The key difference between these two is the penalty term. Lasso shrinks the less important feature’s coefficient to zero, thus removing some features altogether. Ridge regression adds “squared magnitude” of coefficient as a penalty term to the loss function.
#
# In other words, L1 limits the size of the coefficients. L1 can yield sparse models (i.e. models with few coefficients); Some coefficients can become zero and eliminated.
#
# L2 regularization adds an L2 penalty equal to the square of the magnitude of coefficients. L2 will not yield sparse models and all coefficients are shrunk by the same factor (none are eliminated).
#
# The regularization terms are ‘constraints’ by which an optimization algorithm must ‘adhere to’ when minimizing the loss function, apart from having to minimize the error between the true y and the predicted ŷ. This in turn reduces model complexity, making our model simpler. A simpler model can reduce the chances of overfitting.
# + [markdown] colab_type="text" id="YZuZPtES1ytl"
# **Exercise.** Apply the `TRANSFORM` clause and L2 Regularization to the final model.
# + colab={} colab_type="code" id="rKf_4I771ytn" outputId="c3ed43f9-59de-44e0-b4bb-a35e3e34874e"
# %%bigquery
CREATE OR REPLACE MODEL
feat_eng.final_model
# TODO: Your code goes here
ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING) AS dayofweek,
CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING) AS hourofday)) AS day_hr,
CONCAT( ML.BUCKETIZE(pickuplon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(pickuplat,
GENERATE_ARRAY(37, 45, 0.01)), ML.BUCKETIZE(dropofflon,
GENERATE_ARRAY(-78, -70, 0.01)), ML.BUCKETIZE(dropofflat,
GENERATE_ARRAY(37, 45, 0.01)) ) AS pickup_and_dropoff ) OPTIONS(input_label_cols=['fare_amount'],
model_type='linear_reg',
l2_reg=0.1) AS
SELECT
*
FROM
feat_eng.feateng_training_data
# + [markdown] colab_type="text" id="3Sb4U38_1yto"
# Next, we evaluate the final model.
# + colab={} colab_type="code" id="RqUNost_1ytq" outputId="998edad7-c023-4afb-a820-633a5ed30124"
# %%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.final_model)
# + colab={} colab_type="code" id="bdQnFs8q1yts" outputId="ba9df792-bb74-49d1-a284-7efdcc8b6ba7"
# %%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.final_model)
# + colab={} colab_type="code" id="MhaD7-rI1ytu" outputId="ef5c5b2c-2451-40eb-ee96-81b48de1542f"
# %%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.final_model)
# + [markdown] colab_type="text" id="g5cnCLq72Uu8"
# ### Predictive Model
#
#
# Now that you have evaluated your model, the next step is to use it to predict an outcome. You use your model to predict the taxifare amount.
# The `ML.PREDICT` function is used to predict results using your model: `feat_eng.final_model`.
#
# Since this is a regression model (predicting a continuous numerical value), the best way to see how it performed is to evaluate the difference between the value predicted by the model and the benchmark score. We can do this with an `ML.PREDICT` query.
# + [markdown] colab_type="text" id="wt97FSfvgKmJ"
# Now, apply the `ML.PREDICT` function.
# + colab={} colab_type="code" id="-ohaHCFW204X" outputId="b865a56b-af5b-4144-f3f1-75455f2215c8"
# %%bigquery
SELECT
*
FROM
ML.PREDICT(MODEL feat_eng.final_model,
(
SELECT
-73.982683 AS pickuplon,
40.742104 AS pickuplat,
-73.983766 AS dropofflon,
40.755174 AS dropofflat,
3.0 AS passengers,
TIMESTAMP('2019-06-03 04:21:29.769443 UTC') AS pickup_datetime ))
# -
# ### Lab Summary
# Our ML problem: Develop a model to predict taxi fare based on distance -- from one point to another in New York City.
#
# Create a RMSE summary table:
# + [markdown] colab_type="text" id="m8qPoEd7gKmM"
# | Model       | RMSE      | Description                                        |
# |-------------|-----------|----------------------------------------------------|
# | model_4     | 9.65      | --Feature cross categorical features               |
# | model_5     | 5.58      | --Create a Euclidean feature column                |
# | model_6     | 5.90      | --Feature cross Geo-location features              |
# | model_7     | 6.23      | --Apply the BUCKETIZE function                     |
# | final_model | 5.39      | --Apply the TRANSFORM clause and L2 regularization |
# + [markdown] colab_type="text" id="nAsY468W4BuK"
# Execute the cell below to visualize a RMSE bar chart.
# + colab={} colab_type="code" id="kRNfpJvW3V2n" outputId="3de43107-1b50-4e32-c5aa-79e5fa030557"
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('ggplot')

# RMSE per model, copied from the evaluation queries above.
models = ['m4', 'm5', 'm6', 'm7', 'final']
rmse_values = [9.65, 5.58, 5.90, 6.23, 5.39]
positions = range(len(models))

plt.bar(positions, rmse_values, color='green')
plt.xlabel("Model")
plt.ylabel("RMSE")
plt.title("RMSE Model Summary")
plt.xticks(positions, models)
# Fixed: the original called plt.show() twice; the second call was a no-op.
plt.show()
# + [markdown] colab_type="text" id="YfDZh0QZgKmQ"
# Copyright 2021 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| notebooks/feature_engineering/labs/2_bqml_adv_feat_eng.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Absenteeism - Applying Machine Learning
# #### Importing the relevant libraries
# Standard library
import os

# Third-party
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Override the default matplotlib styles with those of Seaborn.
# (Fixed: a stray duplicated section comment was fused onto this call.)
sns.set()
# +
# Load the preprocessed absenteeism data; the first CSV column is the row index.
data_preprocessed = pd.read_csv(
    os.path.join(os.path.pardir,'data','processed','Absenteeism_data_preprocessed.csv'), index_col=0)
data_preprocessed.head()
# -
# Work on a copy so the raw loaded frame stays untouched.
df = data_preprocessed.copy()
# #### Create targets
# - For this case we are going to classify the workers into two categories, `Moderately absent` and `Excessively absent`.
# - We will use `median` as the basis.
# - Our task will be to predict whether we will obtain a 0 (Moderately absent) or a 1 (Excessively absent).
# Get the median
df['Absenteeism Time in Hours'].median()
# +
# Moderately absent: <=3
# Excessively absent >=4
# +
# create targets for our logistic regression
# they have to be categories and we must find a way to say if someone is 'being absent too much' or not
# what we've decided to do is to take the median of the dataset as a cut-off line
# in this way the dataset will be balanced (there will be roughly equal number of 0s and 1s for the logistic regression)
# as balancing is a great problem for ML, this will work great for us
# alternatively, if we had more data, we could have found other ways to deal with the issue
# for instance, we could have assigned some arbitrary value as a cut-off line, instead of the median
# note that what line does is to assign 1 to anyone who has been absent 4 hours or more (more than 3 hours)
# that is the equivalent of taking half a day off
# initial code from the lecture
# targets = np.where(data_preprocessed['Absenteeism Time in Hours'] > 3, 1, 0)
# parameterized code
# Binary target: 1 when hours absent exceed the dataset median, else 0.
# Cutting at the median implicitly balances the classes (~50/50).
targets = np.where(df['Absenteeism Time in Hours']>
                   df['Absenteeism Time in Hours'].median(), 1, 0)
# -
targets
# Attach the target to the working frame for inspection.
df['Excessive Absenteeism'] = targets
df
# **Note: About our target**
# ***
# Using the `median` as a cutoff line is numerically stable and rigid.
# That's because by using the median we have implicitly balanced the dataset: roughly half of the targets are zeros while the other half are ones. This will prevent our model from learning to output one of the two classes exclusively while still appearing to perform well.
# Get the ratio of Excessive Absenteeism
targets.sum()/targets.shape[0]
# As we can see around 46% of the targets are 1s
# Drop the Absenteeism Time in Hours feature
# Drop the raw target column plus the features excluded from the model.
data_with_targets = df.drop(['Absenteeism Time in Hours', 'Distance to Work',
                             'Daily Work Load Average', 'Day of week'], axis=1)
data_with_targets
# Sanity check: drop() returns a new DataFrame, so this is a different object than df.
data_with_targets is df
# (Removed the stray lecture line `1 is 2`: comparing literals with `is` raises a
# SyntaxWarning on Python 3.8+ and carried no information here.)
# #### Select the inputs for the regresion
data_with_targets.shape
# The inputs will be all features except `Excessive Absenteeism`
# data_with_targets.iloc[:,:14]
data_with_targets.iloc[:,:-1]
unscalled_inputs = data_with_targets.iloc[:,:-1]
unscalled_inputs
# #### Standardize the inputs
# +
# Import the relevant libraries
# from sklearn.preprocessing import StandardScaler
# +
# absenteeism_scalar = StandardScaler()
# -
# ## NOTE:
# ***
# Since the dummy variables are either in 0s or 1s, we do not have to include them when standardizing. Hence we have to create a custom scalar
#
# The idea is that this is a custom scalar based on the `StandardScalar` from sklearn.
#
# However when we declare the scalar object there's an extra argument, `columns to scale`.
#
# So our custom scalar will not standardise all inputs but only the ones we choose.
#
# In this way we will be able to preserve the dummys untouched.
#
# In practice we would avoid this step by standardizing prior to creating the dummies but we didn't do
# +
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler


class CustomScalar(BaseEstimator, TransformerMixin):
    """Standardize only the chosen columns, leaving the rest (e.g. dummies) untouched.

    Wraps sklearn's StandardScaler so that fit/transform apply to `columns`
    only, while all other columns pass through unchanged and the original
    column order is preserved.
    """

    def __init__(self, columns, copy=True, with_mean=True, with_std=True):
        # Fixed: StandardScaler's parameters are keyword-only in modern
        # scikit-learn, so passing them positionally raises a TypeError.
        self.scalar = StandardScaler(copy=copy, with_mean=with_mean, with_std=with_std)
        self.columns = columns
        self.mean_ = None
        self.var_ = None

    def fit(self, X, y=None):
        """Learn mean/variance of the selected columns; y is ignored (sklearn API)."""
        self.scalar.fit(X[self.columns], y)
        self.mean_ = np.mean(X[self.columns])
        self.var_ = np.var(X[self.columns])
        return self

    def transform(self, X, y=None, copy=None):
        """Return X with the selected columns standardized, in the original column order."""
        init_col_order = X.columns
        # Fixed: keep X's index on the scaled block. With the default
        # RangeIndex, pd.concat(axis=1) would misalign rows (and introduce
        # NaNs) whenever X carries a non-default index.
        X_scaled = pd.DataFrame(self.scalar.transform(X[self.columns]),
                                columns=self.columns, index=X.index)
        X_not_scaled = X.loc[:, ~X.columns.isin(self.columns)]
        return pd.concat([X_not_scaled, X_scaled], axis=1)[init_col_order]
# -
unscalled_inputs
unscalled_inputs.columns.values
# +
columns_to_ommit = ['Reason_1', 'Reason_2', 'Reason_3', 'Reason_4']
columns_to_scale = [column for column in unscalled_inputs.columns.values if column not in columns_to_ommit]
# -
absenteeism_scalar = CustomScalar(columns_to_scale)
# absenteeism_scalar will contain information about the mean and standard deviation.
absenteeism_scalar.fit(unscalled_inputs)
scaled_inputs = absenteeism_scalar.transform(unscalled_inputs)
scaled_inputs
# Whenever we get new data we will just apply `absenteeism_scalar.transform(new_data)` to reach the same transformation
# ```
# new_data = pd.read_csv('new_data.csv')
# new_data_scaled = absenteeism_scalar.transform(new_data)
# ```
scaled_inputs
scaled_inputs.shape
# ### Train Test Split
# We want to shuffle the data so that we remove all types of dependencies that come from the order of
# the data set like Day of the week
# import the relevant library
from sklearn.model_selection import train_test_split
# ##### Split
# +
# train_test_split(inputs, target) will split the data into 4 arrays
# array 1: A training dataset with inputs
# array 2: A training dataset with targets
# array 3: A testing dataset with inputs
# array 4: A testing dataset with targets
# Split the variables with an 80-20 split and some random state
# To have the same split as mine, use random_state = 365
# 80/20 split; random_state is pinned so the split is reproducible.
train_test_split(scaled_inputs, targets, test_size=0.2, random_state=365)
# -
x_train, x_test, y_train, y_test = train_test_split(scaled_inputs, targets, test_size=0.2, random_state=365)
print('\n x_train: ',x_train.shape, '\n x_test: ',
      x_test.shape, '\n y_train:', y_train.shape, '\n y_test: ',y_test.shape)
# ##### Logistic regression with sklearn
# Import the relevant libraries
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# liblinear is well suited to small datasets and binary targets.
reg = LogisticRegression(solver='liblinear')
# Train the model
reg.fit(x_train, y_train)
# ###### accuracy
# To get the model accuracy we use .score(inputs, targets)
# (mean accuracy of predictions on the training inputs vs. training targets)
reg.score(x_train, y_train)
# Our model has an accuracy of around 76%
# ##### Manually checking the accuracy
# ***
#
# ***
# Accuracy means that x% of the model outputs match the targets.
#
# So if we want to find the accuracy of a model manually we should find the outputs and compare them with
#
# the targets.
# +
# .predict()method will find the predicted outputs of the regression
# The model itself is contained in the variable reg and we are choosing to predict the outputs associated
# with the training inputs and contained an X train.
model_outputs = reg.predict(x_train)
# -
model_outputs
y_train
# +
model_outputs == y_train
# we get an array which compares the elements of the two variables. If there is a match the result is true.
# otherwise it is false.
# Now we can clearly see which elements have been guessed correctly and which haven't.
# If we divide the number of matches by the total number of elements we will get the accuracy
# -
# total number of true entries.
np.sum(model_outputs == y_train)
# total number of elements
model_outputs.shape[0]
# Get the accuracy = Correct predictions / # observations
np.sum(model_outputs == y_train)/model_outputs.shape[0]
# ###### intercept and coefficients
reg.intercept_
reg.coef_.shape
# Feature names in the same order as the model's learned coefficients.
feature_name = unscalled_inputs.columns.values
summary_table = pd.DataFrame(columns=['Feature name'], data=feature_name)
summary_table
# we must transpose this array because by default the coefficients come as a row, not a column
summary_table['Coefficient'] = np.transpose(reg.coef_)
summary_table
# Adding the intercept to the table: shift all indices up by 1 to free slot 0,
# place the intercept there, then restore ascending index order.
summary_table.index = summary_table.index + 1
summary_table.loc[0] = ['Intercept', reg.intercept_[0]]
summary_table = summary_table.sort_index()
summary_table
# ##### Weight (coefficient) and bias (intercept)
# another notion we must emphasize is that whenever we are dealing with a logistic regression the
#
# coefficients we are predicting or the so-called log(odds).
#
# This is a consequence of the choice of model logistic regression.
#
# By default these models are nothing but a linear function predicting log(odds).
#
# These log odds are later transformed into zeros and ones.
# Odds ratio = exp(coefficient), since logistic coefficients are log-odds.
summary_table['Odds Ratio'] = np.exp(summary_table.Coefficient)
# The Odds Ratio feature will hold the exponentials of the coefficients
summary_table
# Sort descending so the largest odds ratios appear first.
summary_table.sort_values('Odds Ratio', ascending=False)
# ## Interpretation
# ***
# If a `coefficient` is around 0 or it's `odds ratio` is close to 1.
#
# This means that the corresponding feature is not particularly important.
#
# It is important to note that `the further away from 0 a coefficient is, the bigger its importance`.
#
# So by looking at the coefficients table we will notice that the most strongly pronounced features seem
#
# to be the four reasons for absence, transportation expense, whether a person has children pets, and education
#
# Note that pet and education are at the `bottom of the table but their weights are still far away from zero`. They are indeed important.
#
# The daily work load average distance to work and day of the week would seem to have the smallest impact their weight is almost zero so regardless of the particular values they will barely affect our model
#
# ***
# ***
#
# I'll quickly recap what the 5 reason variable stand for
# ```
# Reason 0 or no reason which is the baseline model.
# Reason 1 which comprises of various diseases.
#
# Reason 2 relating to pregnancy and giving birth
# Reason 3 regarding poisoning and peculiar reasons not categorized elsewhere.
# Reason 4 which relates to light diseases in the light of this clarification.
# ```
#
# **We can easily understand our coefficients.**
#
# 1. The most crucial reason for excessive absence is `poisoning`. Not much of a surprise there. If you are poisoned you just won't go to work. The weight means the odds of someone being excessively absent after being poisoned are 20 times higher than when no reason was reported
#
# 1. Another very important reason seems to be number one or various diseases. I'd call this the normal absenteeism case you got sick, you skipped work. No drama. A person who has reported this is 14 times more likely to be excessively absent than a person who didn't specify a reason.
#
# 1. Then we have pregnancy and giving birth. I particularly like this one because it's a prominent cause of absenteeism but at the same time it is way less pronounced than reasons 1 and 3. My explanation for this is a woman is pregnant. She goes to the gynecologist gets a regular pregnancy check and comes back to work. Nothing excessive about that from time to time. There are some emergencies but the odds ratio we can verify that it's only around two times more likely to be excessively absent than the base model.
#
# 1. Transportation expense. This is the most important non-dummy feature in the model. But here's the problem: it is one of our standardized variables, so we don't have direct interpretability of it. Its odds ratio implies that for one standardized unit, or one standard deviation increase in transportation expense, a person is close to twice as likely to be excessively absent.
#
# ```
# This is the main drawback of standardization standardized models almost always yield higher accuracy because the optimization algorithms work better in this way.
#
# Machine learning engineers prefer models with higher accuracy. So they normally go for standardization.
#
# Econometricians and statisticians however prefer less accurate but more interpretable models because they care about the underlying reasons behind different phenomena.
#
# Data scientists may be in either position. Sometimes they need higher accuracy. Other times they must find the main drivers of a problem.
#
# So it makes sense to create two different models one with standardized features and one without them and then draw insights from both. However should we opt for predicting values. We definitely prefer higher accuracy. So standardization is more often the norm.
#
# ```
# ***
# ***
# The **reasoning in terms of weights** is that, a weight of zero implies that no matter the feature value, we will multiply it by 0 and the whole result will be zero.
#
# The **meaning in terms of odds ratios** is the following. For one unit change in the standardized feature the odds increase by a multiple equal to the odds ratio.
#
# So if the odds ratio is 1 then the odds don't change at all.
#
# For example if the odds are 5 to 1 and the odds ratio is two we would say that for one unit change
#
# the odds change from 5 to 1 to 10 to 1 because we multiply them by the odds ratio.
#
# Alternatively if the odds ratio is 0.2 the odds would change to 1 to 1
#
# when the odds ratio is 1. We don't have a change as multiplication with the number one keeps things equal.
#
# This makes sense as the odds ratio was one whenever the weight is zero.
# ***
# ***
# Consider the daily work load average. Its weight is -0.03, so almost zero, and its odds ratio is 0.97, so almost 1.
#
# So this feature is almost useless for our model and with or without it the result would likely be the same.
# ***
# #### Backward Elimination
# ***
#
# The idea of backward elimination is that we can simplify our model by removing all features which have close to no contribution to the model.
#
# Usually when we have the p-values of variables we get rid of all coefficients with p-values above 0.05.
#
# When learning with sklearn we don't have p-values because we don't necessarily need them.
#
# The reasoning of the engineers who created the package is that if the weight is small enough it will make a difference anyway. And we trust their work.
#
# So if we remove these variables the rest of our model should not really change in terms of coefficient values.
# ### Testing our model
# ###### Accuracy
# +
reg.score(x_test, y_test)
# So based on data that the model has never seen before, we can say that in 76% of the cases the
# model will predict whether a person is going to be excessively absent
# -
# Instead of 0 and 1, we can get the probability of an output being 0 or 1. There is an sklearn method
# called predict_proba() which returns the probability estimates for all possible outputs.
# Bug fix: the narrative here describes predict_proba() — rows that sum to
# one and a class-1 probability column — but the code called
# predict_log_proba(), which returns log-probabilities. The later slice
# predicted_proba[:, 1] would then have yielded log values, not probabilities.
predicted_proba = reg.predict_proba(x_test)
predicted_proba
predicted_proba.shape
# What we get is a 140 x 2 array. There are 140 test observations and 2 columns.
#
# The first column shows the probability our model assigned to the observation being zero and the second, the probability the model assigned to the observation being one.
# That's why summing any two numbers horizontally will give you an output of one.
#
#
# What we're interested in is the probability of excessive absenteeism right. So the probability of getting one.
# Therefore we can simply slice out all values from the second column. This will give us the probabilities of excessive absenteeism
predicted_proba[:,1]
# #### Saving the model
# We must save:
# ***
# 1. The model object which in our case is `reg`. This object has the
# - Type of regression
# - coefficient
# - Intercept
# 1. Scalar object. What it did was store the:
# - columns to scale,
# - mean and,
# - standard deviation of each feature.
# ***
# The information in the absenteeism_scalar is needed to preprocess any new data using the same rules as the ones applied to the training data.
import pickle
# Save the model
# NOTE(review): relies on `os` having been imported earlier in the notebook — confirm.
with open(os.path.join(os.path.pardir, 'src', 'models', 'absenteeism_model_1.pickle'), 'wb') as file:
    pickle.dump(reg, file)
# Save the scalar (the fitted CustomScalar, needed to preprocess new data)
with open(os.path.join(os.path.pardir, 'src', 'models', 'absenteeism_scalar_1.pickle'), 'wb') as file:
    pickle.dump(absenteeism_scalar, file)
# ***A Note on Pickling***
#
#
# There are several popular ways to save (and finalize) a model. To name some, you can use Joblib (a part of the SciPy ecosystem), and JSON. Certainly, each of those choices has its pros and cons. Pickle is probably the most intuitive and definitely our preferred choice.
#
# Once again, ‘pickle’ is the standard Python tool for serialization and deserialization. In simple words, pickling means: converting a Python object (no matter what) into a string of characters. Logically, unpickling is about converting a string of characters (that has been pickled) into a Python object.
#
#
#
# There are some potential issues you should be aware of, though!
#
# ###### Pickle and Python version.
#
# Pickling is strictly related to Python version. It is not recommended to (de)serialize objects across different Python versions. Logically, if you’re working on your own this will never be an issue (unless you upgrade/downgrade your Python version).
#
#
#
# ###### Pickle is slow.
#
# Well, you will barely notice that but for complex structures it may take loads of time to pickle and unpickle.
#
#
#
# ###### Pickle is not secure.
#
# This is evident from the documentation of pickle, quote: “Never unpickle data received from an untrusted or unauthenticated source.” The reason is that just about anything can be pickled, so you can easily unpickle malicious code.
#
#
#
# Now, if you are unpickling your own code, you are more or less safe.
#
#
#
# If, however, you receive pickled objects from someone you don’t fully trust, you should be very cautious. That’s how viruses affect your operating system.
#
#
#
# Finally, even your own file may be changed by an attacker. Thus, the next time you unpickle, you can unpickle just about anything (that this unethical person put there).
#
#
#
# Certainly, all these cases are very rare, but you must be aware of them. Generally, it is recommended to use JSON
# ## Creating a module to automate the process
predict_absenteeism_script_file = os.path.join(os.path.pardir,'src','data','predict_absenteeism.py')
# +
# %%writefile predict_absenteeism_script_file
# -*- coding: utf-8 -*-
# Import the relevant libraries
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
import pickle
import os
class CustomScalar(BaseEstimator, TransformerMixin):
    """Standard-scale only a chosen subset of columns, leaving the rest
    of the DataFrame untouched and preserving the original column order."""

    def __init__(self, columns, copy=True, with_mean=True, with_std=True):
        # Bug fix: StandardScaler's parameters are keyword-only in recent
        # scikit-learn releases; passing them positionally raises TypeError.
        self.scalar = StandardScaler(copy=copy, with_mean=with_mean, with_std=with_std)
        self.columns = columns
        self.mean_ = None
        self.var_ = None

    def fit(self, X, y=None):
        """Fit the underlying scaler (and record mean/var) on the selected columns."""
        self.scalar.fit(X[self.columns], y)
        self.mean_ = np.mean(X[self.columns])
        self.var_ = np.var(X[self.columns])
        return self

    def transform(self, X, y=None, copy=None):
        """Scale the selected columns; return a frame in X's original column order."""
        init_col_order = X.columns
        # Bug fix: build the scaled frame on X's own index. The previous
        # default RangeIndex misaligned rows in pd.concat whenever X's index
        # was not 0..n-1 (e.g. after a train/test split), producing NaNs.
        X_scaled = pd.DataFrame(self.scalar.transform(X[self.columns]),
                                columns=self.columns, index=X.index)
        X_not_scaled = X.loc[:, ~X.columns.isin(self.columns)]
        return pd.concat([X_not_scaled, X_scaled], axis=1)[init_col_order]
# Create a class that will be used to predict new data
class AbsenteeismModel:
    """Wrap the pickled logistic regression and custom scaler: preprocess raw
    absenteeism CSVs with the same rules used in training, then predict the
    probability/category of excessive absenteeism."""

    def __init__(self, model_file, scalar_file):
        # read the saved model and scalar file
        with open(model_file, 'rb') as model_file, open(scalar_file, 'rb') as scalar_file:
            self.reg = pickle.load(model_file)
            self.scalar = pickle.load(scalar_file)
            self.data = None

    # Take the *.csv file and preprocess it
    def load_and_clean_data(self, data_file):
        """Read `data_file`, replay the training-time preprocessing pipeline,
        and store the scaled feature matrix in self.data."""
        # import the data
        df = pd.read_csv(data_file, delimiter=',')
        # store an untouched copy for later reporting
        self.df_with_predictions = df.copy()
        # drop the ID column
        df = df.drop(['ID'], axis=1)
        # placeholder column so the training-time code paths stay unchanged
        df['Absenteeism Time in Hours'] = 'NaN'
        # dummy variables for all the reason codes present in the file
        reason_columns = pd.get_dummies(df['Reason for Absence'], drop_first=True)
        # group the reason dummies into the 4 types used in training
        # NOTE(review): these slices assume the new data contains reason codes
        # in each group's range — confirm behaviour on small input files.
        reason_type_1 = reason_columns.loc[:, 1:14].max(axis=1)
        reason_type_2 = reason_columns.loc[:, 15:17].max(axis=1)
        reason_type_3 = reason_columns.loc[:, 18:21].max(axis=1)
        reason_type_4 = reason_columns.loc[:, 22:].max(axis=1)
        # to avoid multicollinearity, drop the `Reason for Absence` column from df
        df = df.drop(['Reason for Absence'], axis=1)
        # concatenate df with the 4 grouped reason columns
        df = pd.concat([df, reason_type_1, reason_type_2, reason_type_3, reason_type_4], axis=1)
        # assign names to the 4 reason type columns
        # Note: There is a more universal version of this code. However, this will best suit our current purpose
        column_names = ['Date', 'Transportation Expense', 'Distance to Work', 'Age',
                        'Daily Work Load Average', 'Body Mass Index', 'Education',
                        'Children', 'Pets', 'Absenteeism Time in Hours',
                        'Reason_1', 'Reason_2', 'Reason_3', 'Reason_4']
        df.columns = column_names
        # reorder the columns (reasons first, as in training)
        column_names_reordered = ['Reason_1', 'Reason_2', 'Reason_3', 'Reason_4', 'Date',
                                  'Transportation Expense', 'Distance to Work', 'Age',
                                  'Daily Work Load Average', 'Body Mass Index', 'Education',
                                  'Children', 'Pets', 'Absenteeism Time in Hours']
        df = df[column_names_reordered]
        # convert the date column into datetime
        df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%Y')
        # derive 'Month Value' and 'Day of the Week' from the date
        # (vectorized .dt accessors replace the original per-row lambdas)
        df['Month Value'] = df['Date'].dt.month
        df['Day of the Week'] = df['Date'].dt.weekday
        # drop the 'Date' column from the df
        df = df.drop('Date', axis=1)
        # reorder the columns so the derived features follow the reasons
        column_names_updated = ['Reason_1', 'Reason_2', 'Reason_3', 'Reason_4', 'Month Value',
                                'Day of the Week', 'Transportation Expense', 'Distance to Work', 'Age',
                                'Daily Work Load Average', 'Body Mass Index', 'Education',
                                'Children', 'Pets', 'Absenteeism Time in Hours']
        df = df[column_names_updated]
        # map 'Education': category 1 -> 0, categories 2-4 -> 1 (binary dummy)
        df['Education'] = df['Education'].map({1:0, 2:1, 3:1, 4:1})
        # replace the NaN values
        df = df.fillna(value=0)
        # drop the original 'Absenteeism Time in Hours'
        df = df.drop(['Absenteeism Time in Hours'], axis=1)
        # drop the variables excluded during model selection
        df = df.drop(['Distance to Work', 'Daily Work Load Average', 'Day of the Week'], axis=1)
        # keep the unscaled frame for predicted_outputs()
        self.preprocessed_data = df.copy()
        # scale with the training-time scaler
        self.data = self.scalar.transform(self.preprocessed_data)

    # a function which outputs the probability of a data point being 1
    def predict_probability(self):
        """Return P(excessive absenteeism) per row, or None before
        load_and_clean_data() has been called."""
        if self.data is not None:
            pred = self.reg.predict_proba(self.data)[:,1]
            return pred

    # a function that outputs 0 or 1 based on the model
    # Bug fix: this method was declared without `self`, so every call —
    # including the one in predicted_outputs() below — raised a TypeError.
    def predicted_output_category(self):
        if self.data is not None:
            pred_outputs = self.reg.predict(self.data)
            return pred_outputs

    # predict the outputs and the probability, and add columns with these
    # values at the end of the preprocessed df
    def predicted_outputs(self):
        if self.data is not None:
            self.preprocessed_data['Probability'] = self.predict_probability()
            self.preprocessed_data['Prediction'] = self.predicted_output_category()
            return self.preprocessed_data
# -
| notebooks/Absenteeism - Applying Machine Learning with Logistic regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python385jvsc74a57bd02db524e06e9f5f4ffedc911c917cb75e12dbc923643829bf417064a77eb14d37
# ---
# # Train
#
# Use this notebook to train a model that solves our regression task and uploads the train model artefact to AWS S3.
# ## Imports
# +
import os
from urllib.request import urlopen
import aporia
import boto3 as aws
import joblib
import pandas as pd
import seaborn as sns
from numpy import floating ,ndarray
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.tree import DecisionTreeRegressor
# -
# ## Configuration
# +
# Apply seaborn's default plot styling.
sns.set()
# S3 bucket holding the project's datasets and model artefacts.
AWS_S3_PROJECT_BUCKET = "bodywork-pipeline-with-aporia-monitoring"
# Public HTTP endpoint of the training dataset (CSV) in the bucket above.
DATASET_URL = (
    "http://bodywork-pipeline-with-aporia-monitoring"
    ".s3.eu-west-2.amazonaws.com/datasets/dataset_t0.csv"
)
# -
# ## Load Data from Cloud Object Storage
# Download the CSV straight from S3 into a DataFrame.
# NOTE(review): `data` is an extra alias for the same frame; only `dataset`
# appears to be used below — confirm before removing.
dataset = data = pd.read_csv(urlopen(DATASET_URL))
dataset
# ## Data Preparation
#
# Split labels from features and process categorical features.
# +
# Fixed encoding of the F_2 category labels to integer codes.
category_to_integer_map = {"c0": 0, "c1": 1, "c2": 2}
def preprocess(df):
    """Return a copy of *df* with the categorical F_2 column encoded as integers."""
    encoded = df.copy()
    encoded["F_2"] = encoded["F_2"].apply(category_to_integer_map.__getitem__)
    return encoded
# Feature matrix (F_1 numeric, F_2 categorical) and target vector.
X = dataset[["F_1", "F_2"]]
y = dataset["y"]
X
# -
# ## Split Data into Train and Test Subsets
# Hold out 20% for testing, stratified on the categorical feature so both
# splits see every category; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    stratify=dataset["F_2"].values,
    random_state=42
)
# ## Setup Model Metrics
#
# We will use the Mean Absolute Percentage Error (MAPE) for this regression task.
def compute_metrics(y_true: ndarray, y_pred: ndarray) -> floating:
    """Compute, print and return the mean absolute percentage error (MAPE).

    sklearn returns MAPE as a fraction (0.05 == 5%).
    """
    mape = mean_absolute_percentage_error(y_true, y_pred)
    # Bug fix: the '%' format spec already multiplies by 100, so the previous
    # f"{mape/100:.2%}" under-reported the error 100-fold (5% printed as 0.05%).
    print(f"MAPE = {mape:.2%}")
    return mape
# ## Train Model
#
# We will train a decision tree, so that we can capture the non-linearities in the dataset and we will only use the default parameters, as the relationships between the labels, when conditioned on the categorical feature, is linear and should be easy to capture.
# Fit a decision tree with default hyperparameters on the encoded training features.
model = DecisionTreeRegressor()
model.fit(preprocess(X_train), y_train)
# ### Diagnostics
# +
# Score the model on the held-out test set, then plot predicted vs actual
# with a fitted trend line.
y_test_pred = model.predict(preprocess(X_test))
compute_metrics(y_test, y_test_pred)
_ = sns.lmplot(
    y="y_test_pred",
    x="y_test",
    data=pd.DataFrame({"y_test": y_test, "y_test_pred": y_test_pred}),
    line_kws={"color": "red", "alpha": 0.5}
)
# -
# Not bad!
# ## Save Model to Cloud Object Storage
# +
# persist trained model locally
joblib.dump(model, "model.joblib")
# upload trained model to AWS S3
# NOTE(review): assumes AWS credentials/region are configured in the environment.
s3_client = aws.client('s3')
s3_client.upload_file(
    "model.joblib",
    AWS_S3_PROJECT_BUCKET,
    "models/model.joblib"
)
# remove local files
os.remove("model.joblib")
# -
# ## Send Datasets to Aporia
#
# To use for monitoring live prediction performance.
# +
# Initialise the Aporia monitoring client (placeholders must be replaced
# with real credentials at deployment time).
aporia.init(token="<APORIA_TOKEN>", environment="training", verbose=True)
# Register this model version: raw_inputs describes the pre-encoding schema
# (F_2 is a string category), features the post-encoding schema fed to the
# model, and predictions the output schema.
apr_model = aporia.create_model_version(
    model_id="<APORIA_MODEL_ID>",
    model_version="<APORIA_MODEL_VERSION>",
    model_type="regression",
    raw_inputs={
        "F_1": "numeric",
        "F_2": "string",
    },
    features={
        "F_1": "numeric",
        "F_2": "numeric",
    },
    predictions={
        "y": "numeric"
    },
)
# Upload the training split (raw and encoded) with its labels.
apr_model.log_training_set(
    raw_inputs=X_train,
    features=preprocess(X_train),
    labels=y_train.to_frame(),
)
# Upload the test split together with the model's predictions on it.
apr_model.log_test_set(
    raw_inputs=X_test,
    features=preprocess(X_test),
    labels=y_test.to_frame(),
    predictions=pd.DataFrame(columns=["y"], data=y_test_pred),
)
| notebooks/train_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
from cnn.numpy_model import Conv2d
# +
# 2x2 kernel used throughout the backprop demo.
kernel = np.array([
    [1, 2],
    [3, 4]
])
# Upstream gradient dE/dx^l for a 3x3 output map (all zeros here —
# presumably only the traversal pattern matters for the printed demo).
dEdx_l = np.zeros((3, 3))
# -
# ## View <u>convolution</u> backprop operations through input matrix y<sup>l-1</sup> on each step
# ### It is worth noting that the kernel retains its orientation on dE/dx<sup>l</sup> matrix, while during feedforward <u>convolution</u> step the kernel was turned upside down on y<sup>l-1</sup> (and It is vice versa for <u>cross-correlation</u>).
# +
# Conv layer: 2x2 kernel, single in/out channel, stride 1, kernel centre at
# (0, 0), true convolution rather than cross-correlation.
conv2d = Conv2d(
    kernel_size=(2, 2),
    in_channels=1,
    out_channels=1,
    stride=1,
    kernel_center=(0, 0),
    convolution=True
)
# Backprop dE/dy^{l-1} through the input; (3, 3) is presumably the shape of
# y^{l-1} — print_demo=True prints each step of the walkthrough. TODO confirm.
x = conv2d.convolution_back_dEdy_l_minus_1(dEdx_l, kernel, (3, 3), print_demo=True)
# -
# ### With stride=2 the kernel "decays" and zeros appear between its elements.
# +
# Same layer but with stride 2, demonstrating the "decayed" kernel with
# zeros inserted between its elements (see markdown above).
conv2d = Conv2d(
    kernel_size=(2, 2),
    in_channels=1,
    out_channels=1,
    stride=2,
    kernel_center=(0, 0),
    convolution=True
)
# Backprop through the input for a (2, 2) target shape, printing each step.
x = conv2d.convolution_back_dEdy_l_minus_1(dEdx_l, kernel, (2, 2), print_demo=True)
| src/scripts/notebooks/demo_of_conv_backprop_through_input.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Investiga la forma de mostrar un botón upload en tu servidor flask al entrar en la URL y que permita subir un archivo (csv, json, etc) que se almacene en tu pc.
#
# Una vez esté hecho, carga con pandas el archivo y muestra en la web el número de filas y columnas que tiene el dataframe.
| week9_ML_svm_poly_norm/day2_svm_poly/exercises/6.flask_pro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# # ASSIGNMENT CONFIG
# template_pdf: true
# solutions_pdf: true
# generate:
# seed: 42
# show_stdout: true
# files:
# - defaults.csv
# + [markdown] tags=["ignore"]
# <table style="width: 100%;">
# <tr style="background-color: transparent;"><td>
# <img src="https://data-88e.github.io/assets/images/blue_text.png" width="250px" style="margin-left: 0;" />
# </td><td>
# <p style="text-align: right; font-size: 10pt;"><strong>Economic Models</strong>, Spring 2020<br>
# Dr. <NAME><br>
# Notebook by <NAME></p></td></tr>
# </table>
# -
# # Project 3: Econometrics and Data Science
# + [markdown] tags=["ignore"]
# This project focuses on the application of the data science techniques from lecture. You will practice single variable ordinary least squares regression in the Data 8 style, go through a guided introduction to multivariate OLS using the package `statsmodels`, and finally create your own multivariate OLS model.
#
# After this project, you should be able to
#
# 1. Write and apply the necesssary functions to perform single variable OLS
# 2. Use the `statsmodels` package to create multivariate OLS models
# 3. Understand how to quantitatively evaluate models using the root-mean-squared error
# 4. Look for and use relationships between variables to select features for regression
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
import warnings
from ipywidgets import interact, Dropdown, IntSlider
# Suppress all warnings for cleaner notebook output.
warnings.simplefilter(action='ignore')
# %matplotlib inline
# NOTE(review): the 'seaborn-muted' style name was renamed to
# 'seaborn-v0_8-muted' in matplotlib >= 3.6 — confirm the pinned version.
plt.style.use('seaborn-muted')
plt.rcParams["figure.figsize"] = [10,7]
# + [markdown] tags=["ignore"]
# In this project, we will be working with data on credit card defaults and billing. The data covers April to September 2005, with one row for each cardholder. It has the following columns:
#
# | Column | Description |
# |-----|-----|
# | `credit` | Total amount of credit |
# | `sex` | Cardholder sex |
# | `education` | Cardholder education level |
# | `martial_status` | Cardholder marital status |
# | `age` | Cardholder age |
# | `bill_{month}05` | Bill amount for specific month |
# | `paid_{month}05` | Amount paid in specified month |
# | `default` | Whether the cardholder defaulted |
#
# In the cell below, we load the dataset.
# -
# Load the credit-card default dataset (one row per cardholder).
defaults = pd.read_csv("defaults.csv")
defaults
# + active=""
# # BEGIN QUESTION
# name: q0_1
# + [markdown] tags=["ignore"]
# **Question 0.1:** Which of the columns in `defaults` would we need dummies for in order to use in an OLS model? Assign `q0_1` to an list of these column _labels_.
# + active=""
# # BEGIN SOLUTION
# + tags=["ignore"]
# The non-numeric categorical columns that need dummy encoding for OLS.
q0_1 = ["sex", "education", "marital_status"] # SOLUTION
q0_1
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks for q0_1.
assert len(q0_1) in [3, 4]
assert "sex" in q0_1
assert "education" in q0_1
assert "marital_status" in q0_1
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# In order to use the columns you chose, we will need to create dummies for them. In lecture, we showed a function (defined in the imports cell) that will get dummies for a variable for you.
# + active=""
# # BEGIN QUESTION
# name: q0_2
# -
# **Question 0.2:** Use `pd.get_dummies` to get dummies for the variables you listed in `q0_1`.
# + active=""
# # BEGIN SOLUTION
# -
defaults = pd.get_dummies(defaults, columns=q0_1) # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks for q0_2.
assert "education" not in defaults.columns
assert "marital_status" not in defaults.columns
assert "sex_male" in defaults.columns
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# ## Part 1: Single Variable OLS
#
# We'll start by doing some single variable linear regression, ala Data 8. To begin, recall that we can model $y$ based on $x$ using the form
#
# $$\Large
# \hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x
# $$
#
# We can define the **correlation coefficient** of two values to be the mean of the product of their values in standard units.
# + active=""
# # BEGIN QUESTION
# name: q1_1
# -
# **Question 1.1:** Complete the `corr` function below to compute the correlation coefficient of two arrays `x` and `y` based on the formula
#
# $$\Large
# r = \text{mean} \left ( x_\text{SU} \cdot y_\text{SU} \right )
# $$
#
# _Hint:_ You may find the `su` function, which converts an array to standard units, helpful.
# + active=""
# # BEGIN SOLUTION
# +
def su(arr):
    """Convert the array *arr* to standard units (mean 0, SD 1)."""
    centered = arr - np.mean(arr)
    return centered / np.std(arr)
def corr(x, y):
    """Return the correlation coefficient of two equal-length arrays."""
    x_su, y_su = su(x), su(y)
    return np.mean(x_su * y_su) # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for corr on seeded random arrays.
np.random.seed(1234)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(corr(x2, y2), 0.6410799722591175)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
np.random.seed(2345)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(corr(x2, y2), -0.4008555019904271)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# From this $r$ value that we have calculated above, we can compute the slope $\beta_1$ and intercept $\beta_0$ of the best-fit line using the formulas below.
#
# $$\Large
# \beta_1 = r \frac{\hat{\sigma}_y}{\hat{\sigma}_x}
# \qquad \text{ and } \qquad
# \beta_0 = \hat{\mu}_y - \beta_1 \cdot \hat{\mu}_x
# $$
# + active=""
# # BEGIN QUESTION
# name: q1_2
# -
# **Question 1.2:** Using your `corr` function, fill in the `slope` and `intercept` functions below which compute the values of $\beta_1$ and $\beta_0$ for the line of best fit that predicts `y` based on `x`. Your function should use vectorized arithmetic (i.e. no `for` loops).
#
# _Hint:_ You may find your `slope` function useful in `intercept`.
# + active=""
# # BEGIN SOLUTION
# +
def slope(x, y):
    """Computes the slope of the best-fit line of y based on x"""
    r = corr(x, y)
    return np.std(y) * r / np.std(x) # SOLUTION
def intercept(x, y):
    """Computes the intercept of the best-fit line of y based on x"""
    b1 = slope(x, y)
    return np.mean(y) - b1 * np.mean(x) # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for slope and intercept.
np.random.seed(1234)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(slope(x2, y2), 0.853965497371089)
np.random.seed(1234)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(intercept(x2, y2), 1.5592892975597108)
""" # BEGIN TEST CONFIG
points: 0.5
hidden: true
""" # END TEST CONFIG
np.random.seed(2345)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(slope(x2, y2), -0.5183482739336265)
# + tags=[]
""" # BEGIN TEST CONFIG
points: 0.5
hidden: true
""" # END TEST CONFIG
np.random.seed(2345)
x2 = np.random.uniform(0, 10, 5)
y2 = np.random.uniform(0, 10, 5)
assert np.isclose(intercept(x2, y2), 7.777051922080558)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# Now let's look at how we can predict the `bill_sep05` column based on some other column of our data. We'll start by looking at the `credit` as the explanatory variable. To use our functions above, we must extract the values of each column as arrays, which we define below as `x` and `y`. We then compute the fitted values `y_hat` using the slope-intercept formula and plot the results.
# + active=""
# # BEGIN QUESTION
# name: q1_3
# -
# **Question 1.3:** Using the functions you defined in Question 1.2, regress `bill_sep05` on `credit`. Assign your predictions to `y_hat`.
# + active=""
# # BEGIN SOLUTION
# + tags=["ignore"]
# Regress September 2005 bill (y) on total credit (x): fit the best-fit
# line's slope and intercept, then compute the fitted values y_hat.
x = defaults["credit"]
y = defaults["bill_sep05"]
beta_1 = slope(x, y) # SOLUTION
beta_0 = intercept(x, y) # SOLUTION
y_hat = beta_1 * x + beta_0 # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for the q1_3 regression.
assert len(y_hat) == len(y)
assert x.shape == (30000,)
assert 0.1 <= beta_1 <= 0.2
assert 23000 <= beta_0 <= 25000
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert np.isclose(beta_1, 0.16199038569776522)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert np.isclose(beta_0, 24092.480872897704)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
np.random.seed(1001)
sub_y = np.random.choice(y_hat, 5)
assert np.allclose(sub_y, [105087.67372178, 40291.51944267, 27332.28858685, 27332.28858685, 27332.28858685])
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# -
# Now that we have some predictions, let's plot the original data and the regression line.
# Scatter the raw data and overlay the fitted regression line.
plt.scatter(x, y, color="tab:blue", alpha=0.3)
plt.plot(x, y_hat, color="tab:red")
plt.title("Predict September bill with credit");
# + active=""
# # BEGIN QUESTION
# name: q1_4
# manual: true
# -
# **Question 1.4:** Does the line we found fit the data well? Explain.
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> Nope
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# -
# Let's estimate how confident we are in the significance of our $\hat{\beta}_1$ coefficient.
# + active=""
# # BEGIN QUESTION
# name: q1_5
# -
# **Question 1.5:** Fill in the code below to bootstrap our $\hat{\beta}_1$ and find the 95% confidence interval. Store the lower and upper bounds as `ci95_lower` and `ci95_upper`, respectively. (The cell may take a couple minutes to run.)
#
# _Hint:_ Since we're only interested in $\hat{\beta}_1$, we don't need to find the intercept or fit our $x$ values.
# + active=""
# # BEGIN SOLUTION
# + tags=[]
np.random.seed(42) # SEED
# Bootstrap beta_1: 200 resamples of 5000 rows each, collecting the slope.
betas = []
for i in np.arange(200):
    sample = defaults.sample(5000) # defaults is a huge table, so we'll only sample 5000 rows
    sample_x = sample["credit"] # SOLUTION
    sample_y = sample["bill_sep05"] # SOLUTION
    betas.append(slope(sample_x, sample_y)) # SOLUTION
# The 95% CI spans the middle 95% of the bootstrapped slopes.
ci95_lower = np.percentile(betas, 2.5) # SOLUTION
ci95_upper = np.percentile(betas, 97.5) # SOLUTION
print("95% CI: ({}, {})".format(ci95_lower, ci95_upper))
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for the bootstrap CI.
assert 0.13 <= ci95_lower <= 0.17
assert 0.16 <= ci95_upper <= 0.20
""" # BEGIN TEST CONFIG
points: 0.5
hidden: true
""" # END TEST CONFIG
assert np.isclose(ci95_lower, 0.1442893482315043)
""" # BEGIN TEST CONFIG
points: 0.5
hidden: true
""" # END TEST CONFIG
assert np.isclose(ci95_upper, 0.1863526283850078)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q1_6
# manual: true
# -
# **Question 1.6:** Using your 95% confidence interval, is it likely that the credit has no effect on the September 2005 bill? Justify your answer.
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> No, the CI does not contain 0.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# Obviously, we can see that our best-fit line does not predict perfectly. There are plenty of points in the scatterplot that do not fall on the line. But how do we quantify the error of our model? There are many so-called *loss functions*, but in this notebook we will use the **root-mean-squared error**, which is defined as
#
# $$\Large
# \text{RMSE} = \sqrt{ \frac{1}{n} \sum_{i=1}^n \left ( y_i - \hat{y}_i \right ) ^2 }
# $$
#
# where $n$ is the number of observations. The effect of this is to take the mean of the distance of each value of $\hat{y}$ from its corresponding value in $y$; squaring these values keeps them positive, and then we take the square root to correct the units of the error.
# + active=""
# # BEGIN QUESTION
# name: q1_7
# -
# **Question 1.7:** Complete the function `rmse` below which computes the root-mean-squared error of the prediction `y_hat` on `y`. Again, no `for` loops.
# + active=""
# # BEGIN SOLUTION
# -
def rmse(y, y_hat):
    """Computes the RMSE of prediction y_hat based on y"""
    squared_errors = (y - y_hat)**2
    return np.sqrt(np.mean(squared_errors)) # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for rmse on seeded random arrays.
np.random.seed(1234)
y2 = np.random.uniform(0, 10, 5)
y_hat2 = np.random.uniform(0, 10, 5)
assert np.isclose(rmse(y2, y_hat2), 2.440102731334708)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
np.random.seed(2345)
y2 = np.random.uniform(0, 10, 5)
y_hat2 = np.random.uniform(0, 10, 5)
assert np.isclose(rmse(y2, y_hat2), 4.034226624125118)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q1_8
# + [markdown] tags=["ignore"]
# **Question 1.8:** Use your `rmse` function to compute the RMSE of our prediction `y_hat` based on `y` from above.
# + active=""
# # BEGIN SOLUTION
# -
# RMSE of the single-variable (credit) model on the full dataset.
single_var_error = rmse(y, y_hat) # SOLUTION
single_var_error
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Autograder checks (public + hidden) for single_var_error.
assert 70000 <= single_var_error <= 80000
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert np.isclose(single_var_error, 70571.40305975602)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# Now that we know how to predict based on and quantify the error of a model, let's write a function that will encapsulate this pipeline for us.
# + active=""
# # BEGIN QUESTION
# name: q1_9
# manual: true
# -
# **Question 1.9:** Fill in the function `pred_and_plot` below which models `bill_sep05` based on a column `col`, plots the scatterplot and line of best fit, and computes the RMSE of the model. Then choose a column you think might be related to `bill_sep05` and use your `pred_and_plot` function to determine its prediction RMSE and plot the regression line.
#
# _Hint:_ Your code from Question 1.3 may be helpful here...
# + active=""
# # BEGIN SOLUTION
# + tags=["include"]
def pred_and_plot(col):
    """Single-variable OLS of ``bill_sep05`` on column ``col`` of ``defaults``.

    Prints the model RMSE and draws a scatterplot of the data with the
    fitted regression line overlaid. Relies on the ``slope``, ``intercept``
    and ``rmse`` helpers defined earlier in the notebook.
    """
    x = defaults[col] # SOLUTION
    y = defaults["bill_sep05"] # SOLUTION
    beta_1 = slope(x, y) # SOLUTION
    beta_0 = intercept(x, y) # SOLUTION
    y_hat = beta_1 * x + beta_0 # SOLUTION
    # NOTE(review): model_rmse is never used below; the print statement
    # recomputes rmse(y, y_hat) directly.
    model_rmse = rmse(y, y_hat) # SOLUTION
    ### DO NOT EDIT THE REST OF THIS FUNCTION ###
    print("RMSE: {:.5f}".format(rmse(y, y_hat)))
    plt.scatter(x, y, color="tab:blue", alpha=0.3)
    plt.plot(x, y_hat, color="tab:red")
    plt.title("Predict September bill with {}".format(col))
""" # BEGIN PROMPT
### Provide your column name below ###
pred_and_plot(...)
""" # END PROMPT
pred_and_plot("paid_apr05") # SOLUTION NO PROMPT
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# In looking through different features, you should have noticed that most of them don't follow a linear relationship very well. In practice, you often need _multiple_ features (explanatory variables) to predict an outcome variable, and it is for this reason that we often use **multiple linear regression** to predict variables.
# -
# Finally, before moving on to the multivariable case, let's think about using whether or not an individual defaults as a predictor of their September 2005 bill.
# + active=""
# # BEGIN QUESTION
# name: q1_10
# -
# **Question 1.10:** Assign `default_beta_1` and `default_beta_0` to the slope and intercept of your regression of `bill_sep05` on the `default` column of the table `defaults`.
#
# _Hint:_ Our outcome variable hasn't changed, so we can reuse the array `y` defined earlier.
# + active=""
# # BEGIN SOLUTION
# +
# Regress the September bill on the binary default indicator (0/1).
default_x = defaults["default"] # SOLUTION
default_beta_1 = slope(default_x, y) # SOLUTION
default_beta_0 = intercept(default_x, y) # SOLUTION
print("y_hat = {} * x + {}".format(default_beta_1, default_beta_0))
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Visible range checks on the fitted slope and intercept.
assert -4000 <= default_beta_1 <= -3000
assert 51000 <= default_beta_0 <= 52000
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
# Hidden exact-value checks.
assert np.isclose(default_beta_1, -3485.0649761630766)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert np.isclose(default_beta_0, 51994.22727272727)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q1_11
# manual: true
# -
# **Question 1.11:** Interpret the value of `default_beta_1`. Basically, what do we expect to happen when `default` changes from 0 to 1? Explain.
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> We expect the bill to go down by approx \\$3,485.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# ## Part 2: Guided Multivariable OLS
# + [markdown] tags=["ignore"]
# When we predict a variable $y$ based on some set of $p$ explanatory variables $x$, we create a model of the world with set of weights $\left \{ \beta_i \right \}$ such that we have
#
# $$\Large
# y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \cdots + \beta_p x_p + \varepsilon
# $$
#
# Because of the error term $\varepsilon$, we will instead create predictions $\hat{y}$, such that
#
# $$\Large
# \hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1 + \hat{\beta}_2 x_2 + \cdots + \hat{\beta}_p x_p
# $$
#
# Let's model the September bill based on the other bills in the data set (April to August). Recall from lecture that we can model an outcome variable `Y` based on columns from our data `defaults` by extracting the values of the table into an array. In the cell below, we create the arrays `X` and `Y`.
# -
# Outcome Y (September 2005 bill) and explanatory matrix X (the five
# preceding monthly bills, August back to April).
X = defaults[["bill_aug05", "bill_jul05", "bill_jun05", "bill_may05", "bill_apr05"]]
Y = defaults["bill_sep05"]
# + [markdown] tags=["ignore"]
# Recall that we can fit a multivariate OLS model using `statsmodels` by calling the function `sm.OLS` on the outcome and explanatory variables. In the cell below, we create a model based on _all_ the columns in the table (except, of course, the outcome variable).
# -
# create an OLS object with the data
# sm.add_constant appends an intercept column so the fit includes beta_0.
model = sm.OLS(Y, sm.add_constant(X))
result = model.fit()
result.summary()
# + active=""
# # BEGIN QUESTION
# name: q2_1
# + [markdown] tags=["ignore"]
# **Question 2.1:** What is the standard error of the coefficient of `bill_jun05`?
#
# <ol style="list-style-type: lower-alpha;">
# <li>0.005</li>
# <li>0.010</li>
# <li>0.039</li>
# <li>0.007</li>
# </ol>
#
# Assign your answer to `q2_1` below.
# + active=""
# # BEGIN SOLUTION
# -
q2_1 = "d" # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Both checks below previously lacked `assert`: the bare expressions
# evaluated to a bool that was silently discarded, so these tests could
# never fail regardless of the student's answer.
assert q2_1 in ["a", "b", "c", "d"]
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert q2_1 == "d"
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q2_2
# manual: true
# -
# **Question 2.2:** Which bills are likely good predictors of `bill_sep05`? Justify your response.
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> August, July, and June. These have CIs that don't contain 0, and their $t$ statistics are high.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# Now let's look and see what values our model predicts for our outcome variable. Recall that we can extract the fitted values from the result using `result.fittedvalues`.
# + active=""
# # BEGIN QUESTION
# name: q2_3
# -
# **Question 2.3:** Assign `Y_hat` to the fitted values of `result`. Then assign `multi_rmse` to the RMSE of this prediction based on `Y`.
# + active=""
# # BEGIN SOLUTION
# +
# Fitted values from the multivariable OLS model and their RMSE against Y.
Y_hat = result.fittedvalues # SOLUTION
multi_rmse = rmse(Y, Y_hat) # SOLUTION
multi_rmse
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Visible checks: one prediction per row, RMSE in the expected range.
assert len(Y_hat) == defaults.shape[0]
assert 22000 <= multi_rmse <= 23000
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
# Hidden check: spot-check five randomly sampled fitted values.
np.random.seed(1234)
expected = np.array([131575.00462172, 26777.22647566, 2573.18036936, 2824.68190761, 50299.39627643])
actual = np.random.choice(Y_hat, 5)
assert np.allclose(expected, actual)
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
assert np.isclose(multi_rmse, 22561.189743524323)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# We see from this RMSE that the prediction is (much) better than the single variable case, but it's still not too good. Let's try and select better features to see if we can lower our RMSE.
# + active=""
# # BEGIN QUESTION
# name: q2_4
# -
# **Question 2.4:** Add one more column label to the array `new_features` below. Then fill in the code below to create a new OLS model based on the columns in `new_features`, storing the fitted values in `new_Y_hat`. **Don't forget to apply `sm.add_constant` to `new_X` in your `sm.OLS` call!**
#
# _Hint:_ Our outcome variable `Y` hasn't changed, so we can reuse the same array as earlier.
# + active=""
# # BEGIN SOLUTION
# +
# BEGIN SOLUTION NO PROMPT
# Feature set for the refined model; "paid_apr05" is the added sixth column.
new_features = ["bill_aug05", "bill_jul05", "paid_aug05", "paid_jul05", "sex_male", "paid_apr05"]
# END SOLUTION
""" # BEGIN PROMPT
new_features = ["bill_aug05", "bill_jul05", "paid_aug05", "paid_jul05", "sex_male", ...]
""" # END PROMPT
# Refit OLS on the new feature matrix (intercept added via sm.add_constant).
new_X = defaults[new_features] # SOLUTION
new_model = sm.OLS(Y, sm.add_constant(new_X)) # SOLUTION
new_result = new_model.fit() # SOLUTION
new_Y_hat = new_result.fittedvalues # SOLUTION
new_Y_hat
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Structural checks: six features selected and one prediction per row.
assert len(new_features) == 6
assert new_X.shape[1] == 6
assert len(new_Y_hat) == defaults.shape[0]
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# Now that we have some predictions, let's look at the accuracy of our model.
# + active=""
# # BEGIN QUESTION
# name: q2_5
# -
# **Question 2.5:** Calculate the RMSE of `new_Y_hat` based on `Y` and store this value as `new_rmse`.
# + active=""
# # BEGIN SOLUTION
# -
# RMSE of the refined model's predictions against Y.
new_rmse = rmse(Y, new_Y_hat) # SOLUTION
new_rmse
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
assert 22000 <= new_rmse <= 23000
""" # BEGIN TEST CONFIG
points: 1
hidden: true
""" # END TEST CONFIG
# The hidden check previously lacked `assert`: np.isclose returned a bool
# that was silently discarded, so this test could never fail.
assert np.isclose(new_rmse, 22495.963258409643)
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q2_6
# manual: true
# -
# **Question 2.6:** Did the RMSE go up or down in Question 2.5 compared to Question 2.3? Why do you think so?
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> You will get full points as long as you provide a good reason for why you think your RMSE went up or down.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q3
# points: 3
# + [markdown] tags=["ignore"]
# ## Part 3: Unguided Multivariable OLS
# + [markdown] tags=["ignore"]
# In this section of the assignment, you will use `statsmodels` and OLS to create your own model to predict the September 2005 bill. Your model will be scored out of **5 points**, and a portion of your score will be determined based on your RMSE. The scores you will receive are given in the table below.
#
# | RMSE | Score (out of 5) |
# |-----|-----|
# | $\le$ 20,000 | 6 |
# | $\le$ 30,000 | 5 |
# | $\le$ 50,000 | 4 |
# | $\le \infty$ | 3 |
#
# Note that it is possible to receive a 6 out of 5 for an especially good model, and that as long as you *create a model*, you are guaranteed a 3 out of 5. **To submit your model, you must assign `my_labels` to an array of the columns you want your model to use. You may not use more than 10 columns and, of course, you can't use the column `bill_sep05` in your features.** Your model RMSE will be calculated using the following code:
#
# ```python
# X, Y = defaults[my_labels], defaults["bill_sep05"]
# model = sm.OLS(Y, sm.add_constant(X))
# result = model.fit()
# Y_hat = result.fittedvalues
# rmse(Y, Y_hat)
# ```
#
# To select your features, use the widget below to look for correlations between variables and the September 2005 bill. It requires your `pred_and_plot` function to work, so you will need to finish that function before using the widget.
# + tags=["ignore"]
# Interactive explorer: pick any column from `defaults` and see its
# single-variable fit against bill_sep05 (requires pred_and_plot, Q1.9).
interact(pred_and_plot, col=Dropdown(options=defaults.columns));
# + [markdown] tags=["ignore"]
# Add and remove cells below as needed, but *make sure you define `my_labels`*. We have provided code for you to create your `X` array; just fill in the `...` in `my_labels` with your columns and use the space at the bottom to work on your model. Good luck!
# + active=""
# # BEGIN SOLUTION
# +
# BEGIN SOLUTION NO PROMPT
# Example full-credit model: four of the other monthly bill amounts plus
# all six monthly payment amounts (10 columns, the allowed maximum).
my_labels = ['bill_aug05','bill_jun05','bill_may05','bill_apr05','paid_sep05',
             'paid_aug05','paid_jul05','paid_jun05','paid_may05','paid_apr05']
my_X = defaults[my_labels]
my_model = sm.OLS(Y, sm.add_constant(my_X))
my_result = my_model.fit()
my_Y_hat = my_result.fittedvalues
rmse(Y, my_Y_hat)
# END SOLUTION
""" # BEGIN PROMPT
my_labels = [...]
my_X = defaults[my_labels]
my_model = ...
my_result = ...
my_Y_hat = ...
rmse(...)
"""; # END PROMPT
# + active=""
# # END SOLUTION
# + active=""
# # BEGIN TESTS
# -
# Visible checks: at most 10 features and the outcome column is excluded.
assert len(my_labels) <= 10
assert "bill_sep05" not in my_labels
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q3_1
# + active=""
# # BEGIN TESTS
# -
# HIDDEN
# q3_1 (tiered scoring, RMSE <= 50,000): refit the submitted model from
# my_labels and check its error.
my_X = defaults[my_labels]
my_model = sm.OLS(Y, sm.add_constant(my_X))
my_result = my_model.fit()
my_Y_hat = my_result.fittedvalues
assert rmse(Y, my_Y_hat) <= 50000
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q3_2
# + active=""
# # BEGIN TESTS
# -
# HIDDEN
# q3_2 (RMSE <= 30,000): same refit with a tighter threshold.
my_X = defaults[my_labels]
my_model = sm.OLS(Y, sm.add_constant(my_X))
my_result = my_model.fit()
my_Y_hat = my_result.fittedvalues
assert rmse(Y, my_Y_hat) <= 30000
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q3_3
# + active=""
# # BEGIN TESTS
# -
# HIDDEN
# q3_3 (RMSE <= 20,000): tightest tier, awards the bonus score.
my_X = defaults[my_labels]
my_model = sm.OLS(Y, sm.add_constant(my_X))
my_result = my_model.fit()
my_Y_hat = my_result.fittedvalues
assert rmse(Y, my_Y_hat) <= 20000
# + active=""
# # END TESTS
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# ## Part 4: Reflection
#
# In this section of the assignment, you will answer some conceptual questions about the choices you made in creating your model in Part 3. **This section heavily influences your grade, as we are looking to ensure that you are using econometric intuition while modeling. Please answer thoughtfully and, as always, *show us the numbers*.**
# + active=""
# # BEGIN QUESTION
# name: q4_1
# manual: true
# -
# **Question 4.1:** Explain one choice you made in selecting features while modeling in Part 3 and why you made it. (Your explanation should take at least a few sentences, and should justify your choice mathematically (i.e. with numerical evidence).)
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> You should describe a choice you made and give mathematical justifications for why you made it. For example, I replace feature A with feature B because A's correlation with $y$ was <a number> but B's was <a number>, and this lowered the RMSE from <a number> to <a number>. Basically, show me the numbers.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q4_2
# manual: true
# -
# **Question 4.2:** Use your `pred_and_plot` function in the cell below to generate a visualization that helped you choose a feature in Part 3.
# + active=""
# # BEGIN SOLUTION
# -
# Example visualization used to justify a feature choice in Part 3.
pred_and_plot("bill_apr05") # SOLUTION
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + active=""
# # BEGIN QUESTION
# name: q4_3
# manual: true
# -
# **Question 4.3:** Choose a column you regressed on. Report its coefficient, $t$ statistic, and 95% CI. Interpret the coefficient's value. Is the variable likely significant? Explain.
# + active=""
# # BEGIN SOLUTION
# -
# <div class="alert alert-danger">
#
# <strong>SOLUTION:</strong> Full points with reporting all values and explanation using $t$ statistic and/or 95% CI.
#
# </div>
# + active=""
# # END SOLUTION
# + active=""
# # END QUESTION
# + [markdown] tags=["ignore"]
# ---
#
# ### References
#
# * Data from https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients#
| examples/data-88e-proj03/proj03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import numpy as np
import os
import string
import requests
import io
import nltk
from zipfile import ZipFile
from sklearn.feature_extraction.text import TfidfVectorizer
from tensorflow.python.framework import ops
# NOTE(review): this script uses TensorFlow 1.x graph/session APIs
# (tf.Session, placeholders); under TF 2.x it would need tf.compat.v1.
ops.reset_default_graph()
sess = tf.Session()
batch_size = 200
max_features = 1000
save_file_name = 'temp_spam_data.csv'
# Load the SMS spam dataset from a local cache if present; otherwise
# download the UCI zip, extract it, and write the cache for next time.
if os.path.isfile(save_file_name):
    text_data = []
    with open(save_file_name, 'r') as temp_output_file:
        reader = csv.reader(temp_output_file)
        for row in reader:
            text_data.append(row)
else:
    zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
    r = requests.get(zip_url)
    z = ZipFile(io.BytesIO(r.content))
    file = z.read('SMSSpamCollection')
    text_data = file.decode()
    # Round-trip through ASCII to drop non-ASCII characters, then split
    # into [label, message] rows (tab-separated, one message per line).
    text_data = text_data.encode('ascii',errors='ignore')
    text_data = text_data.decode().split('\n')
    text_data = [x.split('\t') for x in text_data if len(x)>=1]
    with open(save_file_name, 'w') as temp_output_file:
        writer = csv.writer(temp_output_file)
        writer.writerows(text_data)
# Split rows into message texts and binary labels (spam -> 1., ham -> 0.).
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
target = [1. if x=='spam' else 0. for x in target]
# Normalize: lowercase, strip punctuation and digits, collapse whitespace.
texts = [x.lower() for x in texts]
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
texts = [' '.join(x.split()) for x in texts]
def tokenizer(text):
    """Split a text string into word tokens using NLTK's tokenizer."""
    return nltk.word_tokenize(text)
# Vectorize messages into a sparse TF-IDF matrix over the top 1000 terms.
tfidf = TfidfVectorizer(tokenizer=tokenizer, stop_words='english', max_features=max_features)
sparse_tfidf_texts = tfidf.fit_transform(texts)
# 80/20 train/test split over document indices (sampled without replacement).
train_indices = np.random.choice(sparse_tfidf_texts.shape[0], round(0.8*sparse_tfidf_texts.shape[0]), replace=False)
test_indices = np.array(list(set(range(sparse_tfidf_texts.shape[0])) - set(train_indices)))
texts_train = sparse_tfidf_texts[train_indices]
texts_test = sparse_tfidf_texts[test_indices]
# Membership tests against a set are O(1); the original tested
# `ix in train_indices` / `ix in test_indices` against NumPy arrays, a
# linear scan per element (quadratic overall). test_indices is exactly the
# complement of train_indices, so `ix in test_indices` is equivalent to
# `ix not in train_index_set`.
train_index_set = set(train_indices.tolist())
target_train = np.array([x for ix, x in enumerate(target) if ix in train_index_set])
target_test = np.array([x for ix, x in enumerate(target) if ix not in train_index_set])
# Logistic regression graph: logits = x @ A + b, sigmoid cross-entropy loss.
A = tf.Variable(tf.random_normal(shape=[max_features,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
x_data = tf.placeholder(shape=[None, max_features], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
model_output = tf.add(tf.matmul(x_data, A), b)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Accuracy: round the sigmoid output to 0/1 and compare with the labels.
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
my_opt = tf.train.GradientDescentOptimizer(0.0025)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
# Histories recorded every 100 generations for the plots below.
train_loss = []
test_loss = []
train_acc = []
test_acc = []
i_data = []
# Stochastic gradient descent: sample a random mini-batch each generation.
for i in range(10000):
    rand_index = np.random.choice(texts_train.shape[0], size=batch_size)
    # Sparse rows must be densified before feeding the placeholder.
    rand_x = texts_train[rand_index].todense()
    rand_y = np.transpose([target_train[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # Every 100 generations, record train/test loss and accuracy.
    if (i+1)%100==0:
        i_data.append(i+1)
        train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        train_loss.append(train_loss_temp)
        test_loss_temp = sess.run(loss, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})
        test_loss.append(test_loss_temp)
        train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y})
        train_acc.append(train_acc_temp)
        test_acc_temp = sess.run(accuracy, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])})
        test_acc.append(test_acc_temp)
        # Every 500 generations, also print a progress line.
        if (i+1)%500==0:
            acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
            acc_and_loss = [np.round(x,2) for x in acc_and_loss]
            print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
# Plot loss curves over generations.
plt.plot(i_data, train_loss, 'k-', label='Train Loss')
plt.plot(i_data, test_loss, 'r--', label='Test Loss', linewidth=4)
plt.title('Cross Entropy Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Cross Entropy Loss')
plt.legend(loc='upper right')
plt.show()
# Plot accuracy curves over generations.
plt.plot(i_data, train_acc, 'k-', label='Train Set Accuracy')
plt.plot(i_data, test_acc, 'r--', label='Test Set Accuracy', linewidth=4)
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
| Section02/Implementing TF-IDF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="oYmLuFpOyZMr"
# # Introducción a Google Colaboratory
#
# <!-- badges: start -->
# [](https://jvelezmagic.com/)
# [](https://platzi.com/datos/)
# <!-- badges: end -->
# + [markdown] id="VCtwnpKEyl4b"
# ## Primeros pasos
#
# **Colaboratory**, o mejor conocido como **Colab**, provee de un entorno interactivo a nivel de **archivo**. Dentro de él podrás **escribir** y **ejecutar código** python de una forma **sencilla** y **colaborativa** en tiempo real **sin** ninguna **configuración** extra.
#
# El componente de trabajo **clave** en **Colab** son los **notebooks**, también conocidos como libretas interactivas o bloc de notas. Un notebook es una herramienta en la que puedes combinar **código ejecutable** y **texto enriquecido** en un sólo lugar. Además, podrás incluir **imágenes**, **HTML**, **LaTeX** y mucho más.
#
# Por defecto, **Colab** te provee de múltiples **fragmentos de código** que te permitirán agilizar tu trabajo. Por ejemplo, montando tu espacio de Google Drive para acceder a los datos. 😉
#
# Si eres curioso (y yo sé que sí) y deseas aprender más sobre lo básico de **Colab**, te recomiendo seguir su [documentación oficial](https://colab.research.google.com/notebooks/intro.ipynb). 🎯
#
# Colaborar en ciencia de datos nunca había sido tan sencillo. Es tu momento de crear, analizar y visualizar datos. ¡Tu camino como científico de datos está comenzando y el éxito clama por ti! 🙌
#
#
#
#
#
# + [markdown] id="tWuEwHcFyfJP"
# ### Instrucciones
#
# 1. En tu navegador, da clic en el botón `Conectar` situado en la parte superior derecha de este notebook.
# 2. Ingresa con tu cuenta de **Google**.
# 3. Lee con atención el contenido del notebook.
# 4. Explora la interfaz. Nada mejor como conocer tu espacio de trabajo.
# 5. Ejecuta las celdas de código y juega a cambiar las cosas para ver qué pasa. Es la mejor manera de aprender.
# 6. ¡**Disfruta**, estás un paso más cerca de convertirte en un científico de datos! 🎉
# + [markdown] id="PfeqtFtrygoH"
# ### Ejecución y edición de celdas
#
# #### Celdas de código
#
# A continuación, se encuentra una **celda de código** con una pequeña instrucción de **Python** que imprime un texto:
#
#
# + id="muSlAETY-_Hg" colab={"base_uri": "https://localhost:8080/"} outputId="cda4ef60-a4c4-4836-ff63-fc74ea9f699b"
# First example code cell: print a greeting.
print("¡Hola, Platzinauta!")
# + [markdown] id="ykK9FAazIMsh"
# Interactuar con las **celdas de código** es tan sencillo como dar clic en ella y presionar `Command/Ctrl + Enter`para **ejecutarla** y simplemente `escribir` para **editarla**.
#
# **Nota:** Los comandos de teclado suelen abreviarse como `Command/Ctrl + ...`, indicando si debes presionas `Command` en macOS o `Ctrl` en Windows o Ubuntu. 👀
#
# > Edita, ejecuta, observa y repite. Ese es el ciclo de trabajo en los **notebooks**. 😋
# + [markdown] id="P43s2UaN_ASi"
# #### Celdas de texto
#
# Las celdas de texto dentro de los notebooks funcionan con **markdown**, el cual es un lenguaje de marcado ligero cuyo objetivo es **maximizar** la **legibilidad** y **sencillez** de escritura. Es decir, si tú observaras texto plano de markdown y lo compararas con su versión renderizada, el resultado debería lucir muy similar. ✨
#
# > De hecho, todo lo que estás leyendo en este momento ha sido escrito con **markdown**. Para observar el contenido crudo y editarlo, puedes dar doble clic sobre la celda o presionar `Enter`. Se ve muy similar, ¿cierto? 👀
#
# Para conocer más sobre cómo escribir utilizando **markdown**, te recomiendo revisar la [documentación oficial](https://www.markdownguide.org/). ¡Te aseguro que no te arrepentirás de aprender más sobre este lenguaje de marcado! 🎉
#
# Incluso puedes incluir matemáticas utilizando [LaTeX](https://www.latex-project.org/) que será renderizado por [MathJax](https://www.mathjax.org/). Por ejemplo:
# `$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$` se convierte en $x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$
# + [markdown] id="auNMnauzAJ6G"
# ### Añadir, mover y remover celdas
#
# Puedes **añadir** celdas utilizando los botones `+ Código` y `+ Texto` que se muestran cuando te desplazas entre celdas. Estos botones también los puedes encontrar en la barra de herramientas situada en la parte superior izquierda de este notebook. También, si lo prefieres, puedes **agregar** celdas de código arriba o abajo de la celda actual utilizando `Command/Ctrl + M + A` o `Command/Ctrl + M + B`, respectivamente. 🙌
#
# Si deseas **mover** una celda de posición, puedes dar clic sobre la celda deseada, aparecerá una barra de herramientas en la esquina superior derecha de la celda y deberás dar clic en los botones de `Arriba`-`Abajo`, o a través de atajos de teclado, con las combinaciones `Command/Ctrl + M + K` y `Command/Ctrl + M + J`, respectivamente.
#
# Para **eliminar** una celda puedes dar clic sobre ella y, en la barra de herramientas que aparecerá en la esquina superior derecha de la celda, dar clic en el **ícono** de `basura`. También, puedes utilizar el atajo de teclado `Command/Ctrl + M + D`.
# + [markdown] id="KWFwe9kAygIL"
# ### Variables
#
# Entre las características más destacables de las sesiones interactivas dentro de los notebooks es que las **variables** que definas en una celda pueden ser utilizadas después en otras:
# + id="cEh1WCr1ABbF"
# Variables defined in one cell remain available in later cells of the
# same notebook session.
mi_primera_variable = "tiene un contenido muy extenso"
mi_segunda_variable = "es muy interesante"
# + id="69EXBUlmAC1N" colab={"base_uri": "https://localhost:8080/"} outputId="20e7eb42-1544-4a84-d187-d1996e62accb"
print(f"¡Este curso {mi_primera_variable} y {mi_segunda_variable}!")
# + [markdown] id="6GDhIPCBJCvH"
# ### Uso de la línea de comandos (shell)
#
# En **Colab** y, en general, en cualquier sistema con base en **Jupyter Notebooks** o **Ipython** puedes ejecutar **comandos** de la **línea de comandos** (_shell_) en las celdas de los **notebooks** al poner un signo de exclamación o bang (`!`) al inicio del comando. 🙌
#
# Esto puede ser múy útil para muchas cosas, como **crear**, **mover** o **eliminar** archivos. También, podrás utilizarlo para **instalar** dependencias del sistema o librerías para Python. Prácticamente, podrás hacer **todo**. 👀
#
# Aquí te dejo el [Curso de Introducción a la Terminal y Línea de Comandos](https://platzi.com/clases/terminal/) de [Enrique Devars](https://twitter.com/codevars/) por si deseas profundizar más en el tema. 😉
# + colab={"base_uri": "https://localhost:8080/"} id="Xvdca_KwJdEh" outputId="6017f311-f840-4a61-d957-2a876c14cdd8"
# !echo "Cada línea es un comando y tiene su función:"
# !ls # Listar archivos
# !pwd # Directorio actual
# !cd .. && pwd # Moverte un directorio atrás.
# !echo "¡Lo estás haciendo increíble!"
# + [markdown] id="FOpU-pdhyfii"
# ### Instalación de librerías
#
# Las **librerías** son un conjunto de implementaciones funcionales en un lenguaje de programación. Instalar **librerías**, es decir, código de terceros, dentro de **Colab** es sencillo. 👇
#
# Por ejemplo, para este notebook es necesaria la librería **session_info**. Para instalarla necesitas utilizar el manejador de paquetes de python [pip](https://pypi.org/project/pip/). Ahora, para **instalar** las **dependencias** sólo necesitas **utilizar** el comando `pip install session-info` en la **línea de comandos**.
#
# Entonces, para instalar la librería dentro de un notebook necesitas escribir lo siguiente:
# + colab={"base_uri": "https://localhost:8080/"} id="B5GzDFdl2lTv" outputId="50e5377e-1300-433d-d716-59a9f8303fb4"
# !pip install session-info
# + [markdown] id="aZLWYxCqAGjt"
# ¡Listo, has instalado tus primeras librerías! 🙌
# + [markdown] id="3YFE4MiBFvr5"
# ### Subir archivos
#
# Puedes cargar **archivos** a **colab** dirigiéndote a `Archivos` en la barra lateral y usando el botón `Subir archivo`. Alternativamente, también puedes simplemente arrastrar y soltar estos archivos en la sección `Archivos` de la barra lateral.
#
# Para más información sobre formas de subir archivos a **Colab** te recomiendo su [documentación oficial](https://colab.research.google.com/notebooks/io.ipynb).
#
# 
# + [markdown] id="0hykjeSF0Ary"
# ## Ciencia de Datos
#
# Con **Colab** tendrás acceso a notebooks con todo el poder de Python para analizar y visualizar datos.
#
# Muchas de las **librerías** más comunes para la ciencia de datos vienen **preinstaladas** por defecto. No obstante, podrás instalar más librerías o programas requeridos utilizando la sintaxis de ejecución de comandos en la terminal `!` descrita en el bloque anterior.
#
# > Si bien podrías conseguir trabajar con distintas versiones de Python o inclusive otros lenguajes de programación en **Colab**, su fuerte es Python y el acceso a mayor poder de cómputo a través de GPUs o TPUs. Tenlo en cuenta al momento de decidir qué herramienta utilizar. 👀
#
# A continuación, puedes ver una gráfico generado con código de python utilizando **vega_datasets** para obtener los datos y **altair** para la visualización. 😋
# + colab={"base_uri": "https://localhost:8080/", "height": 368} id="CsC6ks09PBYJ" outputId="b4f82bf3-43ac-4b16-ab5a-c5fa4f468ef6"
# Load the example "cars" dataset and draw a line chart of record counts
# per year, colored by origin (uses vega_datasets + altair).
from vega_datasets import data
import altair as alt
cars = data.cars()
(
    alt.Chart(cars)
    .mark_line()
    .encode(
        x="Year:T",
        y="count(Year):Q",
        color="Origin:N"
    )
)
# + [markdown] id="EBBu4KQXPHeB"
# Además, **Colab** incluye una extensión que convierte los datos tabulares de **pandas** en pantallas interactivas que se pueden **filtrar**, **clasificar** y **explorar** de forma dinámica. 🤯
#
# En seguida hay un ejemplo de cómo se visualizaría la tabla utilizando **Data Tables**:
# + colab={"base_uri": "https://localhost:8080/", "height": 1454} id="OKQmQunAS2I7" outputId="00cdbbd2-9e16-4c68-af15-6699f4d066ee"
# Enable Colab's interactive Data Table rendering for pandas DataFrames.
# %load_ext google.colab.data_table
cars
# + [markdown] id="vTTX8Q16begL"
# Si quieres desactivar la visualización de tablas con **Data Table** y utilizar la visualización nativa de **pandas**, usa lo siguiente:
# + colab={"base_uri": "https://localhost:8080/", "height": 660} id="J9jQNBhKbadH" outputId="0abe7bc2-4800-471e-b72e-f9fabde3c211"
# Disable Data Table and fall back to pandas' native DataFrame display.
# %unload_ext google.colab.data_table
cars
# + [markdown] id="Buxy04w40FWW"
# ## Atajos de teclado
#
# **Colab** tiene múltiples atajos de teclado para agilizar tu desarrollo de código dentro de él. Aprendértelos todos no es obligatorio, ni mucho menos necesario. Siéntete tranquilo. Siempre podrás consultarlos presionando `Command/Ctrl + P` y escribiendo _Mostrar combinaciones de teclas_ o simplemente _teclas_ (`Command/Ctrl + M + H`). 😋
#
# Personalmente, los únicos que te recomendaría memorizar son los comandos para **agregar**, **editar** y **ejecutar** bloques. ✨
#
# ### Comandos
#
# | Comando | Función |
# |---|---|
# | ⌘/Ctrl+O | Abrir notebook |
# | ⌘/Ctrl+Alt+N | Abrir celda de código temporal |
# | Tab | Activar o desactivar la ayuda de docstring de código |
# | ⌘/Ctrl+M L | Activar o desactivar números de línea |
# | ⌘/Ctrl+M D | Borrar celda o selección |
# | ⌘/Ctrl+M P | Celda anterior |
# | ⌘/Ctrl+M N | Celda siguiente |
# | ⌘/Ctrl+/ | Comentar la línea actual |
# | ⌘/Ctrl+M Y | Convertir a celda de código |
# | ⌘/Ctrl+M M | Convertir a celda de texto |
# | Esc | Desenfocar celda actual |
# | ⌘/Ctrl+M Z o ⌘/Ctrl+Shift+Z | Deshacer la última acción de las celdas |
# | Alt+Enter | Ejecutar la celda e insertar una nueva |
# | ⌘/Ctrl+Enter | Ejecutar la celda enfocada |
# | Shift+Enter | Ejecutar la celda y seleccionar la siguiente |
# | ⌘/Ctrl+S | Guardar notebook |
# | ⌘/Ctrl+M B | Insertar celda de código abajo |
# | ⌘/Ctrl+M A | Insertar celda de código arriba |
# | ⌘/Ctrl+M I | Interrumpir la ejecución |
# | ⌘/Ctrl+M H | Mostrar combinaciones de teclas |
# | ⌘/Ctrl+Shift+P | Mostrar paleta de comandos |
# | ⌘/Ctrl+M J | Mover las celdas seleccionadas hacia abajo |
# | ⌘/Ctrl+M K | Mover las celdas seleccionadas hacia arriba |
# + [markdown] id="VUnNsTmY1rJu"
# ## Información de sesión
#
# 👌 Es una buena práctica poner la información de sesión al final de cada notebook que realices. De esta forma, otras personas podrán ver qué versiones de librerías utilizaste para tu análisis. Además, si llegas a tener un problema, puedes compartir esta información con la comunidad para que sea más fácil replicar el error y te puedan ayudar.
#
# En el caso de **Colab**, podrías **invitarles** a colaborar para que te ayuden o podrías compartirles una invitación vía **email** o un **link** para que ellos ejecuten el proyecto en su máquina de Google. Tal como te he compartido este notebook. Para **compartir** tu notebook sólo tienes que **presionar** el botón `Compartir` situado en la parte superior derecha de la barra de navegación, ir a la sección `Obtener vínculo` y, finalmente, **escoger** el **modo** en el que deseas compartir tu notebook: **Lector**, **Comentarista** o **Editor**. 😋
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="bU5C-lYx24wD" outputId="7a4278b1-977c-4369-e0ce-8c067c1aa6c7"
import session_info
session_info.show()
| Datos-Jupyter-Notebooks-Anaconda/introduccion_colab-2_b57bfddc-9484-4701-a638-64bcb1061ae6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## About Dataset
# Each row in the dataset represents a different major in college and contains information on gender diversity, employment rates, median salaries, and more. Here are some of the columns in the dataset:
#
# * **Rank** - Rank by median earnings (the dataset is ordered by this column).
# * **Major_code** - Major code.
# * **Major** - Major description.
# * **Major_category** - Category of major.
# * **Total** - Total number of people with major.
# * **Sample_size** - Sample size (unweighted) of full-time.
# * **Men** - Male graduates.
# * **Women** - Female graduates.
# * **ShareWomen** - Women as share of total.
# * **Employed** - Number employed.
# * **Median** - Median salary of full-time, year-round workers.
# * **Low_wage_jobs** - Number in low-wage service jobs.
# * **Full_time** - Number employed 35 hours or more.
# * **Part_time** - Number employed less than 35 hours.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Line magic to plot data inside this notebook only
# %matplotlib inline
recent_grads = pd.read_csv("recent-grads.csv")
recent_grads.iloc[0]
recent_grads.head()
recent_grads.tail()
# generate summary statistics for all of the numeric columns.
recent_grads.describe()
raw_data_counts = len(recent_grads)
raw_data_counts
recent_grads = recent_grads.dropna()
cleaned_data_counts = len(recent_grads)
cleaned_data_counts
# ### Plotting with pandas only
# Most of the plotting functionality in pandas is contained within the **DataFrame.plot()** method. When we call this method, we specify the data we want plotted as well as the type of plot. We use the kind parameter to specify the type of plot we want. We use x and y to specify the data we want on each axis. You can read about the different parameters in the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html).
# scatter plot for Sample_size and Median
recent_grads.plot(x = "Sample_size",\
y = "Median",\
kind = "scatter",\
title = "Sample_size vs. Median")
# scatter plot for Sample_size and Unemployment_rate
recent_grads.plot(x = "Sample_size",\
y = "Unemployment_rate",\
kind = "scatter",\
title = "Sample_size vs. Unemployment_rate")
# scatter plot for Full_time and Median
recent_grads.plot(x = "Full_time",\
y = "Median",\
kind = "scatter",\
title = "Full_time vs. Median")
# scatter plot for ShareWomen and Unemployment_rate
recent_grads.plot(x = "ShareWomen",\
y = "Unemployment_rate",\
kind = "scatter",\
title = "ShareWomen vs. Unemployment_rate")
# scatter plot for Men and Median
recent_grads.plot(x = "Men",\
y = "Median",\
kind = "scatter",\
title = "Men vs. Median")
# scatter plot for Women and Median
recent_grads.plot(x = "Women",\
y = "Median",\
kind = "scatter",\
title = "Women vs. Median")
# #### Visualising plots to a answer some question:
#
# * Do students in more popular majors make more money?
# - for this we will plot scatter plot between the **Total**(Total number of people with major) column and the **Median** column
# scatter plot for Total and Median
recent_grads.plot(x = "Total",\
y = "Median",\
kind = "scatter",\
title = "Total vs. Median",\
xlim = (-10000,250000))
# **Inference:** From the graph above it is not true that the students in more popular majors make more money
# * Do students that majored in subjects that were majority female make more money?
# - for this we will plot scatter plot between the **ShareWomen** (Women as share of total) column and the **Median** column
# scatter plot for ShareWomen and Median
recent_grads.plot(x = "ShareWomen",\
y = "Median",\
kind = "scatter",\
title = "ShareWomen vs. Median",\
xlim = (-0.5,1.5),\
ylim = (20000, 90000))
# **Inference:** The graph shows students that majored in subjects that were majority female make actually less money than the ones where majority is male.
# This actually tells us that men are given more salaries for the same work that women does.
# * Is there any link between the number of full-time employees and median salary?
# - Scatter plot for **Full_time** (Number employed 35 hours or more) and **Median**
# scatter plot for Full_time and Median
recent_grads.plot(x = "Full_time",\
y = "Median",\
kind = "scatter",\
title = "Full_time vs. Median",\
xlim = (-10000,200000))
# ### creating histograms
#
sns.set_style("white")
# Histogram for Sample_size
recent_grads["Sample_size"].hist()
# Histogram for Median
recent_grads["Median"].hist()
# Histogram for Employed
recent_grads["Employed"].hist()
# Histogram for ShareWomen
recent_grads["ShareWomen"].hist()
# Histogram for Unemployment_rate
recent_grads["Unemployment_rate"].hist()
# Histogram for Men
recent_grads["Men"].hist()
# Histogram for Women
recent_grads["Women"].hist()
# ### Analysis from above histograms :-
#
# 1. What percent of majors are predominantly male? Predominantly female?
# - From the histogram of ShareWomen, it shows around 45% of majors are predominantly male and 55% are predominantly female.
#
# 2. What's the most common median salary range?
# - From the histogram plot for Median, it shows that the salary range between \$30,000 and \$40,000 is most common
# ## Scatter Matrix Plot
#
# In the last 2 steps, we created individual scatter plots to visualize potential relationships between columns and histograms to visualize the distributions of individual columns. A **scatter matrix plot** combines both scatter plots and histograms into one grid of plots and allows us to explore potential relationships and distributions simultaneously. A scatter matrix plot consists of n by n plots on a grid, where n is the number of columns, the plots on the diagonal are histograms, and the non-diagonal plots are scatter plots.
#
# Because scatter matrix plots are frequently used in the exploratory data analysis, pandas contains a function named **scatter_matrix()** that generates the plots for us. This function is part of the **pandas.plotting** module and needs to be imported separately.
from pandas.plotting import scatter_matrix
scatter_matrix(recent_grads[["Sample_size", "Median"]], figsize = (10,10))
# Creating a 3 by 3 scatter matrix plot
scatter_matrix(recent_grads[["Sample_size", "Median", "Unemployment_rate"]])
# ## Bar plots
# Using bar plots to compare the percentages of women (**ShareWomen**) from the first 10 rows and last 10 rows of a sorted dataframe.
recent_grads[0:10]["ShareWomen"].plot(kind = "bar", ylim = (0,1))
recent_grads[-10:]["ShareWomen"].plot(kind = "bar", ylim=(0,1))
# Use bar plots to compare the unemployment rate (**Unemployment_rate**) from the first 10 rows and last 10 rows of a sorted dataframe.
recent_grads[0:10]["Unemployment_rate"].plot(kind = "bar", ylim = (0, 0.16))
recent_grads[-10:]["Unemployment_rate"].plot(kind = "bar", ylim = (0, 0.16))
# ## Additional Analysis
# * Using a grouped bar plot to compare the number of men with the number of women in each category of majors.
import numpy as np
pivot_df = recent_grads.pivot_table(index = "Major_category",\
values = ["Men", "Women"],\
aggfunc = np.sum)
pivot_df
pivot_df.plot.bar()
fig = plt.figure(figsize = (10,10))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.boxplot(recent_grads["Median"])
ax1.set_ylim(20000, 80000)
ax2.boxplot(recent_grads["Unemployment_rate"])
plt.show()
# * Using hexagonal bin plot to visualise on Sample_size vs. Median
recent_grads.plot(x = "Sample_size", y = "Median", kind = "hexbin",\
ylim = (30000, 60000))
| Earnings_College_major.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import traceback
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
#sanity check to get price with cumsum from logged return.
np.random.seed(3)
mocked_ret = np.random.normal(loc=1e-4,scale=1e-2,size=1000)
mocked_price=(20*(1+np.cumsum(mocked_ret)))
computed_ret = np.diff(np.log(mocked_price))
computed_ret = np.insert(computed_ret,0,0,axis=0) # add 0 as first item
computed_price = 20*(1+np.cumsum(computed_ret))
plt.scatter(mocked_price,computed_price)
plt.grid(True)
plt.plot(mocked_price)
plt.plot(computed_price)
plt.grid(True)
print(len(mocked_price),len(computed_price))
# yfinance scribble
import yfinance as yf
msft = yf.Ticker("MSFT")
# get stock info
msft.info
# get historical market data
hist = msft.history(period="max")
hist.Close.plot()
| finance/unusualwhales/sanity_checks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # <span style='background:DarkOliveGreen'> The Trading Strategy using Moving Averages 9x21 🐍</span>
# <strong> Introduction </strong>
#
# The objective of this code is to be able to reduce the work of analysing a large number of stocks.
# The idea is that the investor has already selected a list of stocks he wants to analyse, and he will use this code to decide which ones to buy, to hold or to stop ("close the position").
# He can and should combine Fundamental Analysis and Technical Analysis.
#
# He should use Fundamental Analysis to generate the list of companies he would like to invest on - this part is not developed as a code in this Jupyter NoteBook (JNB), but is briefly discussed below.
#
# <strong> Fundamental Analysis </strong>
#
# The idea is to get a list of companies that are solid and could be interesting to hold for some time.
# In the beginning of the code, we provide a list of stocks traded in the Brazilian Market (called B3) based on the IDIV Index.
# This index gathers stocks that pay good dividends and have good liquidity. It's composition can be downloaded at B3 site:
# https://www.b3.com.br/pt_br/market-data-e-indices/indices/indices-de-segmentos-e-setoriais/indice-dividendos-idiv-composicao-da-carteira.htm
#
# Therefore, using the IDIV is a good way to select good companies and then we can use Technical Analysis to decide WHEN to: buy, hold or stop.
#
#
# <strong> Technical Analysis </strong>
#
# The list will be be tested by a classic Technical Indicator: the combination of two Moving Averages (MAs).
# In this code I use the MAs of 9 and 21 days, as it is a classic. But the code can be easily adapted for any other combination of MAs.
#
# <strong> Why using MAs? </strong>
#
# MAs are used to identify trends. The idea is that if the "fast" one (9 periods) crosses the "slow" one (21 periods) upwards, the stock is in a bullish trend, so we should buy it;
# if it crosses downwards, we should sell it (in this case we will use it as a STOP sign, we should "close the position" (that is, sell all the stocks we have).
# We could also open a short position on that specific stock, however we are not this risk-taker and, due to liquidity issues in the Brazilian market, that might be not a good idea.
#
#
# Hence, the idea is that this code should be run daily. </p>
# It will give you the list of stocks classified in 4 cases:
# 1. if MA9 crossed MA21 up on the previous day: buy
# 2. if MA9 is higher than MA21 but it was already higher on the day before: hold (if you already bought it before, otherwise wait)
# 3. if MA9 crossed MA21 down: sell (if you already bought it before, otherwise wait)
# obs: If we sell above the price we bought, it is considered "Take Profit" (TP), if we sell below the price we paid, it is considered "Stop Loss" (SL).
# 4. if MA9 is lower than MA21 and it was already lower on the day before: wait (you should not have a stock in this classification in your portfolio)
# (for more on this topic: https://www.daytradetheworld.com/trading-blog/alexander-elder-trade-market-beginners-2/#5_-_Create_a_money_Management_Plan)
#
# As we can see, the movement of crossing is used as signal for buying or selling.
# However, I think it should not be used alone, you should combine it with a few more technical indicators.
# I use this code to reduce the number of stocks I will analyse - I open a full Graph of the stocks in situation i) and check a few other indicators, as the RSI, for example (I like to use TradingView website for it). Then I decide to buy it or not.
#
# However, it is strongly advised to use iii) as a STOP signal!
#
# Stocks in situation ii) are more complicated - you can chose to use another criteria to make a partial TP. That could be a RSI > 70, for example.
# Stocks in situation iv): if you have time you can check on them, maybe one of them is "giving" a signal of reversing the tendency (for example: a hammer candle) that the criteria MA9x21 will not identify. However, if you buy a stock classified on "wait" it is a riskier trade and you should define another criteria for SL - as MA9 is already below MA21 this code will not give you a SL signal.
# importing libraries we'll need to get and treat data
import yfinance as yf
import pandas as pd
# creating the list of tickers: IDIV index as of 23/mar/22
idiv = ['ABCB4.SA', 'BRSR6.SA', 'BBSE3.SA', 'BBDC3.SA', 'BBDC4.SA', 'BRAP4.SA', 'BBAS3.SA', 'AGRO3.SA', 'CCRO3.SA', 'CMIG3.SA', 'CMIG4.SA', 'CESP6.SA', 'CSMG3.SA', 'CPLE3.SA', 'CPLE6.SA', 'CPFE3.SA', 'CYRE3.SA', 'DIRR3.SA', 'ELET3.SA', 'ELET6.SA', 'ENAT3.SA', 'ENBR3.SA', 'EGIE3.SA', 'ROMI3.SA', 'MYPK3.SA', 'ITSA4.SA', 'ITUB3.SA', 'ITUB4.SA', 'JHSF3.SA', 'MRVE3.SA', 'PSSA3.SA', 'QUAL3.SA', 'SAPR4.SA', 'SANB11.SA', 'CSNA3.SA', 'SYNE3.SA', 'TAEE11.SA', 'TGMA3.SA', 'VIVT3.SA', 'TRPL4.SA', 'UNIP6.SA', 'WIZS3.SA']
# getting the quotes for the list and saving it as a Pandas dataframe (df)
# we will get just the "Adj Close" because that is the one used by Technical Indicators / to build Graphs
dfidiv = yf.download(idiv, start="2022-01-01", end = "2022-03-22")["Adj Close"]
# checking the df
dfidiv.head()
# checking if we got all quotes for all the tickers on the list
print(len(idiv))
dfidiv.shape
# +
# ok! 42 tickers on the list and 45 columns on the df
# -
dfidiv.tail()
# deleting the null lines
dfidiv = dfidiv.dropna(axis = 0, how ='any')
# and visualizing the last lines
dfidiv.tail()
dfidiv.shape
# +
# so we cut 2 lines, before it was 56 lines...
# -
# we'll make a copy of this df in order to keep the original if we need to check anything...
# the copy will be used to calculate the MAs 9 and 21
dfidivb = dfidiv.copy()
dfidivb.head()
dfidivb.tail()
# In this step we will calculate the MAs 9 and 21 and their difference (MA9 minus MA21), and will
# replace the value of every cell with this difference (this will generate null values for the first 21 lines)
for (columnName) in dfidivb:
dfidivb[columnName] = (dfidivb[columnName].rolling(9).mean()) - (dfidivb[columnName].rolling(21).mean())
# having a look at this new df
dfidivb.head(25)
dfidivb.tail()
dfidivb.shape
# deleting the null lines
dfidivb = dfidivb.dropna(axis = 0, how ='any')
# and visualizing the df
dfidivb.shape
# +
# Creating the messages that will be displayed for the tickers (regarding the 4 cases explained in the introduction of this JNB)
# The messages will be based in the values in the last 2 lines of the df (if we call the day you are checking d, they will be d-1 and d-2)
# The expression "d-1 > 0" will be used to represent that the value on d-1 is positive, etc.
# i) if d-1 > 0 and d-2 < 0 : BUY
# ii) if d-1 < 0 and d-2 > 0 : STOP
# iii) if d-1 > 0 and d-2 > 0 : HOLD
# iv) if d-1 < 0 and d-2 < 0 : WAIT
# -
# creating functions for colored messages to be used
def prRed(skk): print("\033[91m {}\033[00m" .format(skk))
def prGreen(skk): print("\033[92m {}\033[00m" .format(skk))
def prYellow(skk): print("\033[93m {}\033[00m" .format(skk))
def prCyan(skk): print("\033[96m {}\033[00m" .format(skk))
def prLightGray(skk): print("\033[97m {}\033[00m" .format(skk))
# creating the function to classify the tickers and print the messages
def alertacolor (acao):
if (dfidivb.at[dfidivb.index[-1],acao] > 0) and (dfidivb.at[dfidivb.index[-2],acao] < 0):
prGreen(' Buy! : ) ')
elif (dfidivb.at[dfidivb.index[-1],acao] < 0) and (dfidivb.at[dfidivb.index[-2],acao] > 0):
prRed(' Stoooop!!')
elif (dfidivb.at[dfidivb.index[-1],acao] > 0) and (dfidivb.at[dfidivb.index[-2],acao] > 0):
prGreen(' Keep Calm and... Hold : )')
else:
prYellow(' Wait...')
# running this function for the list of tickers
for papel in idiv:
prCyan (papel+': ')
alertacolor(papel)
print(' ')
# <div class="alert-info">
# ok!
# </div>
# <div class="alert-warning">
# </br>
# <strong> References </strong>
# </div>
# + [markdown] tags=[]
# ###### Youtube Video (in Portuguese): "PYTHON PARA INVESTIMENTOS #4: Simulando uma CARTEIRA DE AÇÕES e comparando com o IBOVESPA"
# https://youtu.be/TiNLwmLN-iE?list=PLCAhGm8nJ9CBn51o0x3j1p1LuMRqpeqCy
# -
# https://www.geeksforgeeks.org/print-colors-python-terminal/
# ###### last update: 23.mar.22
# ###### to go back to my GitHub Page: https://rafsz.github.io/
| Python+Pandas21/MAs_9x21_23mar22_18h.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# We'll expand on our python data cleaning, analysis, and visualization skills by working with [Baltimore City Police Department data]() from the Baltimore City Open Data portal.
# ## import packages
# import packages to conduct data analysis and to make interactive charts and maps
# ## import data
# [Previously](https://colab.research.google.com/drive/14PJV4aPg01xX7-XnIUtrC6_GqgeriZj1) we imported data from a link to a csv uploaded to GitHub. Here, we'll upload data from a file on our computer. This might be useful if we don't have a GitHub profile or if we have a file that:
# - Is larger than 25 MB, which won't upload to GitHub
# - We don't want to convert to a CSV (e.g. part of already performed Excel analysis, etc.)
# - Is proprietary data/not our own data that we don't want to upload even to a private GitHub repository
#
#
#
# ### Importing a Data File from Your Local Machine
# Similar to saving the website link as a __variable__, here we'll save the file path name to our file as a variable. If we drag the data file into the same file as our jupyter lab notebook, we don't need to list the path file names outside of the folder we're working in, however, if our data file is in a different location on our machine--and if we don't want to move it--then we'll need to list the entire path name.
#
# We can get the path name for our file by:
# - __MacOS:__ CONTROL + click and while menu is up, press the OPTION key. The menu options should change, and you should see a menu option that says "Copy "filename.csv" as Pathname." Click this to copy the file pathname.
# - __Windows:__ SHIFT + right click, choose "Copy as Path"
# save file name as a variable with the file name in ""
# import data as a dataframe to manipulate
# preview the data
# ## Inspecting Data
# Now we can see what our data looks like, but we don't know much about what's happening within the dataset and what we can and can't potentially do with this. There are a few functions that we can use to gain some high-level insights into what our data has.
# ### General Big-Picture Counts and Stats
# +
# to look at the stats on the columns we have numbers in we can use df.describe() to give us
#the count, mean, maximum, minimum, standard deviation, and percentiles within those columns
# -
# we can also use df.info() to see the data types within columns
# this will be important if some of our "number" columns aren't actually "numbers"
# or if we need to convert columns to dates or times
# ## Data Cleaning
# There are a few types of __data types__ that we'll need to be concerned with in our analysis for this class:
#
# - __object or string (str)__: these are any combination of letters, numbers, and characters that are essentially an entity of data. We can manipulate these objects by splitting on specific characters, adding or subtracting, grouping by similarities, etc.
# - __integer (int)__: these are integers. We can perform any arithmetic function with these as long as the equation elements are also integers or...
# - __float (float)__: these are numbers that have a decimal in them. Similar to integers, we can perform any arithmetic functions on these values
# - __datetime (datetime)__: these are dates, times, or both dates and times. It's advantageous to convert actual date/time data into a datetime format so that we can perform arithmetic on these values (e.g. subtracting dates to get the number of days in between, adding times to get an end time, etc.)
# - __Boolean (bool)__: these are data types that are either True or False. We use boolean data types a lot in Python logic expressions
# Most of our numerical data in our BPD dataset is already classified as an int, but our date and time values are classified as "objects." We can convert our date and time columns into date/time values by "redefining" them.
# remove all non numerical values from ArrestDate and ArrestTime
# Next, we'll create a new column that combines the arrest date and time so that we can convert this to a datetime datatype and manipulate the data.
# combine arrest date and time columns
# preview the new column
# +
# convert ArrestDateTime column to datetime
# -
# now check to see that we converted our column to a datetime format
# preview data
# ## Data Manipulation
# Now that we have our data set up with the correct data types, we can start to dive deep and aggregate this information to better understand what's happening with the Baltimore City Police Department and the arrests over time.
# #### Arrest Year Column
# Let's look at how a few of these column values change each year. To make this easier, we'll create a new column for the _ArrestYear_ from the ArrestDate column, and then aggregate some variables by the year column.
# make a new column for the ArrestDate year
# We may also want to look at arrests by month or day of the week, so we'll add these columns as well
# +
# make columns to define the month and day of the week
#Monday = 0, Sunday = 6
# -
# check new columns
# #### How have the number of crimes in the daytime, evening, and night changed over the years?
# We'll define daytime, evening, and night as:
# - __morning__: 12 AM-8 AM
# - __daytime__: 8 AM - 4 PM
# - __night__: 4 PM-12 AM
#
# and categorize each arrest automatically by separating the time of day into equal bins
# first, make a new column that extracts the arrest time hour
# separate our ArrestHour column into bins
# +
# preview new column
# -
# Now, we want to create an aggregated table of the number of arrests per day "segment" for each year. Previously we did this by using the Python version of a pivot table. Here, we'll use the pandas.groupby function to similarly group our column values.
#
# The general formula for a pandas groupby function is:
#
# ```
# new_df = df.groupby("columns you want to aggregate")["columns you want as the values/what you will perform functions on"].agg([calculation_you_want_to_perform])
# ```
#
# Here, we'll aggregate/group the dataframe by arrest year __and__ day segment and count the number of arrests in each segment by using the arrest ID as the unique identifier to count values.
# +
# make an aggregated dataframe to look at the number of arrests in each day segment over the years available
# -
# preview new dataframe
# ## Data Visualizations
# Here, we'll make a few visualizations to look at this distribution in interactive charts with plotly express
#
# ```
# line_timeofday_arrests = px.line(df,
# x = "",
# y = "",
# color = "",
# hover_name = "",
# title = "",
# labels = {"": "", "": ""},
# )
# ```
# #### Line Graph to Compare Number of Arrests per year
# +
# line graph of number of arrests in each time period,
# -
# view line graph
# export the visual to an html to share
line_timeofday_arrests.write_html("line_graph_bpd_arrest_dayperiod.html")
# #### Pie Chart to compare the distribution of arrests throughout the week
# make a pie chart to show distribution of arrests during the week
week_dist_arrests = px.pie(df_bpd,
values="",
names="")
# show pie chart
# #### Plotly Animation to show changes in number of arrests over years
# aggregate data to count the number of arrests per weekday per year
# preview new df
# look up the maximum value for number of arrests to add into the animation
# make animated bar chart to show changes in number of arrests during each week day over the years provided
# view animation
# export animation to html
animation_bar_arrests.write_html("weekday_arrest_trend_animation.html")
| 2020-02-27-plotly-animations-bpd-melanieshimano-STARTER.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
# # Automatically generate new Python files from Sportradar API info page
# ---
import time
import json
import re
from selenium import webdriver
# ## Start the Selenium browser (Chrome)
# Start a new instance of Chrome
url = "https://developer.sportradar.com/io-docs"
browser = webdriver.Chrome()
browser.get(url)
# ## Get the method names and descriptions for a given API
def getEndpointInfo():
"""Return a list with info on each endpoint listed on the Sportradar API info page.
The method will only return info on an API that is currently displayed on page.
"""
list_of_endpoints = browser.find_elements_by_class_name('endpointList')
for n,item in enumerate(list_of_endpoints):
eps = item.find_element_by_class_name('endpoint')
title = eps.find_element_by_class_name('title')
name = title.find_element_by_class_name('name')
if name.text != "":
print(name.text + '\n'+str(n))
# Expand all endpoint descriptions.
actions = title.find_element_by_class_name('actions')
expand = actions.find_element_by_class_name('expand-methods')
expand.click()
time.sleep(0.5)
# Scrape the GET method names, URIs, and descriptions
endpoints = []
method_list = eps.find_element_by_class_name("methods")
methods = method_list.find_elements_by_class_name('method')
for method in methods:
m = method.find_element_by_class_name('title')
# Get the name of and URI for the current endpoint
name = m.find_element_by_class_name('name').text
uri = m.find_element_by_class_name('uri').text
# Get the endpoint's description
h = method.find_element_by_class_name('hidden')
d = method.find_element_by_class_name('description')
p = d.find_element_by_tag_name('p')
description = p.text
# Get default values for the parameters for each method (for tests)
table_items = h.find_elements_by_class_name('type-pathReplace')
param_names, param_values = [], []
for t in table_items[:-1]:
param_name = t.find_element_by_class_name('name')
p_name = param_name.get_attribute('textContent').strip().split(':',1)[-1]
param_names.append(p_name)
if p_name in ['year', 'month', 'day']:
options = t.find_elements_by_tag_name('option')
for o in options:
if o.get_property('selected'):
param_values.append(o.get_property('value'))
else:
try:
options = t.find_elements_by_tag_name('option')
param_values.append(t.find_elements_by_tag_name('option')[-1].get_property('value'))
except:
param_values.append(t.find_elements_by_tag_name('input')[-1].get_property('value'))
default_params = dict(zip(param_names, param_values))
endpoints.append({'name':name, 'uri': uri,
'description': description, 'defaults': default_params})
print('-'*20)
print(default_params)
print('{n}: {u}\n{d}'.format(n=name, u=uri, d=description))
return endpoints
# ## Get information on all possible APIs provided by Sportradar
api_names_to_scrape = ['Beach Volleyball Trial', 'Darts Trial', 'eSports Dota 2 Trial', 'Global Basketball Trial', 'Global Ice Hockey Trial', 'Golf Trial', 'eSports LoL Trial', 'MLB v6.5 Trial', 'NASCAR Official Trial', 'NBA Official Trial', 'NFL Official Trial v2', 'NHL Official Trial', 'Rugby v2 API', 'Soccer EU Trial v3', 'Tennis v2 Trial', 'WNBA Trial']
# +
services = browser.find_element_by_class_name("services")
# Names of the APIs
api_selector = services.find_element_by_tag_name('h2')
selector = api_selector.find_element_by_tag_name('select')
options = selector.find_elements_by_tag_name('option')[1:]
# Descriptions of the APIs
api_elems = services.find_elements_by_class_name('apiDescriptionList')
# Extract the names and descriptions of the desired APIs
apis = []
for api, option in zip(api_elems, options):
name = option.text
if name in api_names_to_scrape:
description = api.get_attribute('textContent').strip()
print("-----------\n{n}: {d}".format(n=name, d=description))
option.click() # Select the API, changing the displayed methods
time.sleep(1)
# Extract the endpoint names and descriptions for the current API
endpoint_info = getEndpointInfo()
apis.append({'name': name, 'description': description, 'endpoints': endpoint_info})
# Save the API information
filename = 'api_names_and_endpoints'
with open(filename + '.json', 'w') as outfile:
json.dump(apis, outfile)
# -
# ## Load the saved API data
apis = json.load(open('api_names_and_endpoints.json', 'rb'))
# ## Generate new Python classes and files from API info
apis[0].keys()
[a['name'] for a in apis]
endpoints = apis[10]['endpoints']
endpoints[2]
uri = endpoints[2]['uri']
uri
# +
max_width = 79
def formatArgListAndPathFromURI(uri, format_for_tests=False):
uri = uri.replace('//','/') # Deal with a typo on Sportradar
regex = re.compile('(?!:format)(:[\w_]+)', re.VERBOSE)
# Identify the arguments to the URI, excluding :format
parameters = [s.strip(':') for s in regex.findall(uri) if s != '']
if format_for_tests:
arg_list = ''
for p in parameters:
arg_list += "self.{p}, ".format(p=p)
if parameters:
arg_list = arg_list.strip(', ')
else:
arg_list = ", ".join(parameters)
# Create the path string, allowing user to substitute in arguments
path = (regex.sub('{\g<1>}', uri).split(':format')[0] + '"').replace('{:','{')
print(path)
# Make sure dates are formatted properly
if 'year' in parameters:
path = path.replace('{year}', '{year:4d}')
if 'month' in parameters:
path = path.replace('{month}', '{month:02d}')
if 'day' in parameters:
path = path.replace('{day}', '{day:02d}')
# Append .format() to the end of the path string
format_suffix = ""
for p in parameters:
format_suffix += "{arg}={val}, ".format(arg=p, val=p)
format_suffix = ".format({})".format(format_suffix.strip(', '))
path += format_suffix
# path = path.split('/',3)[-1]
# Comply to max width of lines
if len(path) > max_width-8:
regex = re.compile(r'\.format\(', re.VERBOSE)
path = regex.sub(r'.format(\n\t\t\t', path)
path = '"' + path
return arg_list, path
def formatPathFromURI(uri):
parameters = [s.strip(':') for s in re.findall(r'(:[\w_]+)*', uri) if s != ''][:-1]
def formatDocString(doc, mw=79):
if len(doc) > mw-7:
idx_cut = (mw-len(doc[mw::-1].split(' ')[0])) # kludge
doc = doc[:idx_cut] + '\n\t\t\t' + formatDocString(doc[idx_cut:].strip(' '), mw)
return doc
def paramValsAreComplete(endpoint):
for val in endpoint['defaults'].values():
if val == '':
return False
return True
def formatDefaultParamVals(endpoints):
assert type(endpoints)==list, 'Must provide a list of endpoints'
varlist = ''
used_vars = []
for ep in endpoints:
defaults = ep['defaults']
for var,val in defaults.items():
if var not in used_vars and val != '':
if var in ['year', 'month', 'day']:
varlist += '\t\tcls.{var} = {val}\n'.format(var=var, val=int(val))
else:
varlist += '\t\tcls.{var} = "{val}"\n'.format(var=var, val=val)
used_vars.append(var)
return varlist
def formatClassName(name):
    """Normalize an API display name into a valid Python identifier.

    Drops the ' Trial' and 'Official' qualifiers, removes spaces and
    parentheses, and maps '.', '-' and ',' to underscores.

    :param name: (str) Raw API name as scraped
    :return: (str) Sanitized class name
    """
    # Multi-character removals must run first so ' Trial' is matched before
    # the space stripping below would break it apart.
    name = name.replace(' Trial', '').replace('Official', '')
    # One translate() pass replaces the former chain of six .replace() calls.
    return name.translate(str.maketrans({' ': None, '.': '_', '(': None,
                                         ')': None, '-': '_', ',': '_'}))
def writeToPythonFile(api_info):
    """Write methods based on the endpoint attributes

    Generates two files in the working directory: ``<ClassName>.py`` with one
    GET method per endpoint, and ``test_<ClassName>.py`` with one unittest per
    endpoint whose default parameter values are all present.

    :param api_info: (dict) Contains API name, description and list of endpoints
    """
    # --------------------------------------------------------
    # Write the methods
    # --------------------------------------------------------
    # Class name
    class_name = formatClassName(api_info['name'])
    # Strip version tags such as 'v2' or 'v6_5' from the class name
    regex = re.compile(r'v[\d_]+', re.IGNORECASE)
    class_name = regex.sub('', class_name)
    # Add comment heading and import statement
    txt = "# Sportradar APIs\n# Copyright 2018 <NAME>\n# See LICENSE for details.\n\n"
    txt += "from sportradar.api import API\n\n\n"
    # Add class name to file
    txt += "class {}(API):\n\n".format(class_name)
    # Add __init__ function to class
    txt += "\tdef __init__(self, api_key, format_='json', timeout=5, sleep_time=1.5):\n"
    txt += "\t\tsuper().__init__(api_key, format_, timeout, sleep_time)\n\n"
    # Generated source uses 4-space indentation; tabs above are placeholders
    txt = txt.replace('\t', 4*' ')
    # Write the method, including arguments, doc string, and URI path
    endpoints = api_info['endpoints']
    for n,ep in enumerate(endpoints):
        mn = 'get_' + formatClassName(ep['name']).lower()
        # max_width is a module-level constant defined earlier in this notebook
        doc = formatDocString(ep['description'], max_width)
        # Close the docstring inline when it fits, otherwise on its own line
        doc = doc + '"""' if len(doc) < max_width-8 else doc + '\n\t\t"""'
        args, path = formatArgListAndPathFromURI(ep['uri'])
        arglist = 'self, {a}'.format(a=args) if args != '' else 'self'
        # Assemble the method string
        txt += '\tdef {method_name}({args}):\n'.format(method_name=mn, args=arglist)
        txt += '\t\t"""{doc}\n\t\tpath = {path}\n'.format(doc=doc, path=path)
        txt += '\t\treturn self._make_request(path)\n\n'
        txt = txt.replace('\t', 4*' ')
    print('-'*20)
    print(api_info['name'])
    # print(path)
    # print(txt)
    # Save to Python file
    filename = class_name + '.py'
    with open(filename, 'w+') as pyfile:
        pyfile.write(txt)
    # --------------------------------------------------------
    # Write the tests
    # --------------------------------------------------------
    # Write the test front matter
    txt = "import os\nimport unittest\nfrom sportradar import {}\n\n".format(class_name)
    txt += '# Import API keys from environment variables\n'
    txt += 'api_key_name = "SPORTRADAR_API_KEY_{}"\n'.format(class_name.upper())
    txt += 'api_key = os.environ.get(api_key_name, None)\n'
    # The braces below are written literally; the *generated* file calls
    # .format() on them at its own import time
    txt += 'assert api_key is not None, "Must declare environment variable: {key_name}".format(\n'
    txt += '\tkey_name=api_key_name)\n'
    txt += 'api = {}.{}(api_key, format_="json", timeout=5, sleep_time=1.5)\n\n\n'.format(
        class_name, class_name)
    txt += 'class TestAPI(unittest.TestCase):\n\n'
    txt += '    @classmethod\n    def setUpClass(cls):\n'
    txt += '\t\tprint("\\n---------------------\\nSetting up {} tests...\\n".format("{}"))\n'.format("{}", class_name)
    txt += '\t\tcls.auth = api_key\n\t\tcls.api = api\n'
    txt += '{defaults}\n'.format(defaults=formatDefaultParamVals(endpoints))
    # Write the test methods
    # Write the method, including arguments, doc string, and URI path
    for n,ep in enumerate(endpoints):
        # Only endpoints with fully specified default values get a test
        if paramValsAreComplete(ep):
            mn = 'test_get_' + formatClassName(ep['name']).lower()
            doc = '"""Test the {} GET query"""'.format(ep['name'].lower())
            args, path = formatArgListAndPathFromURI(ep['uri'], format_for_tests=True)
            arglist = '{a}'.format(a=args) if args != '' else ''
            print(arglist)
            # Assemble the method string
            txt += '\tdef {method_name}(self):\n\t\t{doc}\n\t\tmsg = "Response status is not 200"\n'.format(
                method_name=mn, doc=doc)
            txt += '\t\tresponse = self.api.{}({})\n'.format(
                mn.split('test_')[1], arglist.split('self, ', 1)[-1])
            # Last generated test omits the trailing blank line
            if n == len(endpoints)-1:
                txt += '\t\tself.assertEqual(response.status_code, 200, msg)\n'
            else:
                txt += '\t\tself.assertEqual(response.status_code, 200, msg)\n\n'
            txt = txt.replace('\t', 4*' ')
    # Save to Python file
    filename = 'test_' + class_name + '.py'
    with open(filename, 'w+') as pyfile:
        pyfile.write(txt)
# -
# api_names_to_scrape = ['Beach Volleyball Trial', 'Darts Trial', 'Golf Trial', 'MLB v6.5 Trial', 'NASCAR Official Trial', 'NBA Official Trial', 'NFL Official Trial v2', 'NHL Official Trial', 'Tennis v2 Trial', 'WNBA Trial']
api_names_to_scrape = ['eSports Dota 2 Trial']
# `apis` is the scraped endpoint catalogue built earlier in this notebook.
for api in apis:
    if api['name'] in api_names_to_scrape:
        writeToPythonFile(api)
# +
| assets/code/scrapeSportradarEndpoints.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.0
# language: julia
# name: julia-1.3
# ---
# +
using DifferentialEquations, Plots
g = 9.79 # Gravitational constant (m/s^2)
L = 1.00 # Length of the pendulum (m)
#Initial Conditions
u₀ = [0, π / 60] # Initial speed and initial angle
tspan = (0.0, 6.3) # time domain (roughly one period for L = 1 m)
#Define the problem
function simplependulum(du, u, p, t)
    # Linearized (small-angle) pendulum: u'' = -(g/L) u, which matches the
    # cosine closed form plotted below. NOTE(review): u[1] is named θ but,
    # given u₀ = [0, π/60], it holds the speed while u[2] holds the angle;
    # the SHM system is symmetric so the results are unaffected — confirm.
    θ = u[1]
    dθ = u[2]
    du[1] = dθ
    du[2] = -(g/L)*θ
end
#Pass to solvers
prob = ODEProblem(simplependulum, u₀, tspan)
sol = solve(prob, Tsit5(), reltol = 1e-6)
# -
# Analytic solution
# +
# Small-angle closed form θ(t) = θ₀ cos(√(g/L) t), compared against the solver.
u = u₀[2] .* cos.(sqrt(g / L) .* sol.t)
plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
plot!(sol.t, u, label = "Analytic")
# -
# [Numbers with Uncertainties](http://tutorials.juliadiffeq.org/html/type_handling/02-uncertainties.html)
# +
using Measurements
g = 9.79 ± 0.02; # Gravitational constant with measurement uncertainty
L = 1.00 ± 0.01; # Pendulum length with measurement uncertainty
#Initial Conditions
u₀ = [0 ± 0, π / 60 ± 0.01] # Initial speed and initial angle
#Define the problem
function simplependulum(du, u, p, t)
    # Same linearized pendulum as above; redefined so the Measurement-valued
    # g and L captured from the enclosing scope are used this time.
    θ = u[1]
    dθ = u[2]
    du[1] = dθ
    du[2] = -(g/L)*θ
end
#Pass to solvers
# NOTE(review): tspan is reused from the earlier plain-Float64 cell — confirm
# that is intentional when running cells out of order.
prob = ODEProblem(simplependulum, u₀, tspan)
sol = solve(prob, Tsit5(), reltol = 1e-6);
nothing # hide
# -
# Analytic solution
# +
# Uncertainties propagate through both the solver and the closed form.
u = u₀[2] .* cos.(sqrt(g / L) .* sol.t)
plot(sol.t, getindex.(sol.u, 2), label = "Numerical")
plot!(sol.t, u, label = "Analytic")
# -
# *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| notebooks/01.Measurements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sacred import Experiment
import tensorflow as tf
import threading
import numpy as np
import os
import Datasets
from Input import Input as Input
from Input import batchgenerators as batchgen
import Models.WGAN_Critic
import Models.Unet
import Utils
import cPickle as pickle
import Test
# -
import pickle
dsd_train, dsd_test = Datasets.getDSDFilelist("DSD100.xml")
dataset = dict()
dataset["train_sup"] = dsd_train # 50 training tracks from DSD100 as supervised dataset
dataset["train_unsup"] = [] # Initialise unsupervised dataset structure (fill up later)
dataset["valid"] = [dsd_test[0][:25], dsd_test[1][:25], dsd_test[2][:25]] # Validation and test contains 25 songs of DSD each, plus more (added later)
dataset["test"] = [dsd_test[0][25:], dsd_test[1][25:], dsd_test[2][25:]]
#Zip up all paired dataset partitions so we have (mixture, accompaniment, drums) tuples
# NOTE(review): under Python 3 zip() returns a one-shot iterator that cannot be
# pickled; this notebook appears to target Python 2 (see `import cPickle` above),
# where zip() returns a list — wrap in list(...) if porting.
dataset["train_sup"] = zip(dataset["train_sup"][0], dataset["train_sup"][1], dataset["train_sup"][2])
dataset["valid"] = zip(dataset["valid"][0], dataset["valid"][1], dataset["valid"][2])
dataset["test"] = zip(dataset["test"][0], dataset["test"][1], dataset["test"][2])
from Sample import Sample
import glob
import os.path
dataset['train_unsup'] = [] #list of tuples of sample objects (mix, acc, drums)
# Load the unsupervised mixture tracks (files numbered 1..39)
unsup_mix = []
for i in range(1, 40):
    unsup_mix.append(Sample.from_path('/home/ubuntu/AAS/data/unsup/mix/' + str(i) + '.mp3'))
#load drums
# Unsupervised drum stems (files numbered 1..177)
unsup_drums = []
for i in range(1, 178):
    unsup_drums.append(Sample.from_path('/home/ubuntu/AAS/data/unsup/drums/' + str(i) + '.wav'))
import librosa
def add_audio(audio_list, path_postfix):
'''
Reads in a list of audio files, sums their signals, and saves them in new audio file which is named after the first audio file plus a given postfix string
:param audio_list: List of audio file paths
:param path_postfix: Name to append to the first given audio file path in audio_list which is then used as save destination
:return: Audio file path where the sum signal was saved
'''
save_path = audio_list[0] + "_" + path_postfix + ".wav"
if not os.path.exists(save_path):
for idx, instrument in enumerate(audio_list):
instrument_audio, sr = librosa.load(instrument, sr=None)
if idx == 0:
audio = instrument_audio
else:
audio += instrument_audio
if np.min(audio) < -1.0 or np.max(audio) > 1.0:
print("WARNING: Mixing tracks together caused the result to have sample values outside of [-1,1]. Clipping those values")
audio = np.minimum(np.maximum(audio, -1.0), 1.0)
librosa.output.write_wav(save_path, audio, sr)
return save_path
unsup_acc = []
#TODO: wrap this in a loop, check for num of folders in unsup/acc, check for num of files for inner loop
path = '/home/ubuntu/AAS/data/unsup/acc/5/'
audio_list = []
for i in range(1,6):
    audio_list.append(path + str(i) + '.wav') #TODO: check for filetype
# Mix the five stems of folder 5 into one accompaniment track
summed_path = add_audio(audio_list, "a")
unsup_acc.append(Sample.from_path(summed_path))
unsup_acc.append(Sample.from_path('/home/ubuntu/AAS/data/unsup/acc/3/1.mp3'))
unsup_acc
dataset['train_unsup'] = [unsup_mix, unsup_acc, unsup_drums]
len(dataset)
# NOTE(review): unsup_acc holds only two samples at this point, so index
# [1][2] looks like it would raise IndexError — confirm against the state
# in which this cell was actually executed.
dataset['train_unsup'][1][2].path
with open('dataset.pkl', 'wb') as file:
    pickle.dump(dataset, file)
print("Created dataset structure")
# unsup_drums = unsup_mix[39:]
# unsup_mix = unsup_mix[:39]
unsup_drums[-1].path
# +
#replaced
# root = '/home/ubuntu/AAS'
# if dataset == 'train_unsup':
# mix_list = glob.glob(root+dataset+'/*.wav')
# voice_list = list()
# else:
# mix_list = glob.glob(root+dataset+'/Mixed/*.wav')
# voice_list = glob.glob(root+dataset+'/Drums/*.wav')
# mix = list()
# voice = list()
# for item in mix_list:
# mix.append(Sample.from_path(item))
# for item in voice_list:
# voice.append(Sample.from_path(item))
# return mix, voice
# -
# Prefix test-set sample paths with the absolute project root
for i in range(len(dataset['test'])):
    for samp in dataset['test'][i]:
        samp.path = '/home/ubuntu/AAS/' + samp.path
# NOTE(review): tup[0] here is a single element of the (mix, acc, drums)
# tuple; iterating it only works if that element is itself a sequence of
# samples — confirm the intended structure.
for tup in dataset['train_sup']:
    for samp in tup[0]:
        print(samp.path)
dataset['test'][0][0].path
from pysndfile import formatinfo, sndfile
from pysndfile import supported_format, supported_endianness, supported_encoding, PyaudioException, PyaudioIOError
# Pickle files are binary: opening with 'r' (text mode) fails under Python 3
# (UnicodeDecodeError / TypeError on load), so use 'rb' — matching the 'wb'
# used when the file was written above.
with open('dataset.pkl', 'rb') as file:
    dataset = pickle.load(file)
print("Loaded dataset from pickle!")
| Untitled.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Overfitting & Regularization
# State-of-the-art neural networks used in deep learning typically come with millions of weights. Unsurprisingly, it is therefore rarely an issue to push the training error to 0. In particular, without any regularization there is instant death through overfitting. In today's lecture, we discuss possible avenues for combatting overfitting via regularization methods tailored for DL-applications.
# ## Data Augmentation
# The cleanest and most effective way to avoid overfitting is to get more labelled data. For instance, searching for *white mug* yields the following results. What could be the problem when training on this data set?
# <img src="images/mug.png" alt="Images for mugs on google" style="width: 1000px;"/>
#
# Unfortunately though, getting more labelled data also happens to be the most expensive option and can in many cases even be infeasible. What you can do is to artificially inflate the labelled data at your disposal. This is known as **data augmentation**.
# More precisely, data augmentation consists of taking samples from the training set and applying small random geometric perturbations. This could be rotations, shifts, flipping, color changes, etc.
library(keras)
options(repr.plot.width=10, repr.plot.height=5)
# +
mnist <- dataset_mnist()
# %<-% (zeallot) destructures the train list into images and labels.
c(X_train, y_train) %<-% mnist$train
# -
plot(as.raster(X_train[3,,] / 255))
# +
# 'horizontal_shift' is not an image_data_generator argument; the horizontal
# translation parameter in keras is width_shift_range (values > 1 are
# interpreted as a shift in pixels).
gen <- image_data_generator(rotation_range = 90,
                            width_shift_range = 2)
flow <- flow_images_from_data(array_reshape(X_train[1:5,,], c(5, 28, 28, 1)),
                              generator = gen)
augm_images <- reticulate::iter_next(flow)
plot(as.raster(augm_images[1,,,1] / 255))
# -
# +
mnist <- dataset_mnist()
c(X_train, y_train) %<-% mnist$train
# -
# Let's visualize a sample of the digits.
ndigits <- 5
# One row of ndigits panels with no margins between them.
par(mfrow=c(1, ndigits))
# NOTE(review): par(mar=) expects 4 margin values; rep(0, ndigits - 1)
# yields 4 only because ndigits == 5 — confirm if ndigits ever changes.
par(mar = rep(0, ndigits - 1))
for(i in 1:ndigits)
    plot(as.raster(X_train[i,,] / 255))
# Then, we augment the data via an ``ImageDataGenerator``.
# +
datagen = image_data_generator(rotation_range=45)
# shuffle = F keeps the augmented outputs in the same order as the inputs.
flow <- datagen$flow(array_reshape(X_train[1:5,,], c(ndigits, 28, 28, 1)),
                     shuffle = F)
augm <- reticulate::iter_next(flow)
par(mfrow=c(1, ndigits))
par(mar = rep(0, ndigits - 1))
for(i in 1:ndigits)
    plot(as.raster(augm[i,,,]/255))
# -
# Here is an example from the [keras blog](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)
# on what data augmentation may look like on cat images.
# <img src="images/augment.png" style="width: 1000px;"/>
# https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
# ## $L_1$ & $L_2$ Regularization
# As in classical statistics, we can also regularize the model by penalizing large weights in a suitable norm. For $L_2$-norm, this corresponds to [ridge regression](https://en.wikipedia.org/wiki/Tikhonov_regularization), whereas $L_1$-norm yields [Lasso](https://en.wikipedia.org/wiki/Lasso_(statistics%29).
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/L1_and_L2_balls.svg/800px-L1_and_L2_balls.svg.png" style="width: 1000px;"/>
# By Nicoguaro - Own work, CC BY 4.0, https://commons.wikimedia.org/w/index.php?curid=58258966
model <- keras_model_sequential()
# L2 (ridge) penalty of 1e-2 applied to the dense layer's weight matrix.
model %>%
    layer_dense(65, input_shape=c(10), kernel_regularizer=regularizer_l2(1e-2))
model
# In contrast to classical statistics, regularizing weights is only moderately effective in deep learning. On a second thought, this is not surprising, since the role of weights in deep nets is in stark contrast to the one in classical statistics. Deep nets are overparametrized on purpose -- we do not aim to reduce the weights to a small number of interpretable coefficients.
# ## Dropout
# Dropout is a regularization technique specifically devised for deep learning. It was introduced in 2014 by [<NAME>, <NAME>, <NAME>, <NAME> & <NAME>](http://jmlr.org/papers/v15/srivastava14a.html).
# Deep nets tend to fit the training data ridiculously well by creating highly elaborate interdependencies between different activation patterns. When seeing a new image, these highly elaborate interdependencies break down and the model is lost.
# **Dropout** prevents the development of intricate dependencies by randomly resetting outputs of groups of neurons to 0 during training. During production all weights are used, but are rescaled by the dropout retention probability to account for the difference from the training setting.
# This is motivated from sexual reproduction in biology. This principle leads to a random mixing of genes, which ensures robustness in the face of changing environments. Make sure to watch https://www.youtube.com/watch?v=DleXA5ADG78 for details.
# <img src="images/dropout.png" alt="Dropout" style="width: 700px;"/>
# Srivastava, Nitish, et al. [Dropout: a simple way to prevent neural networks from
# overfitting](http://jmlr.org/papers/v15/srivastava14a.html), JMLR 2014
# This scaling rule can be established rigorously for basic architectures, see [Chapter 7.12 of the deep learning book](http://www.deeplearningbook.org/contents/regularization.html). It is a cousin of the *bagging* idea which lies at the basis of random forests.
# The Keras documentation presents an example on how to use Dropout layers https://keras.io/getting-started/sequential-model-guide/.
model <- keras_model_sequential()
# 50% dropout after each hidden layer, as in the Keras getting-started guide.
model %>%
    layer_dense(64, input_shape=c(20), activation='relu') %>%
    layer_dropout(.5) %>%
    layer_dense(64, activation='relu') %>%
    layer_dropout(.5) %>%
    layer_dense(1, activation='sigmoid')
model
# ## Batch Normalization
# **Batch normalization** is the most modern and powerful regularization method for deep nets. It was developed by [<NAME> & <NAME>](https://arxiv.org/abs/1502.03167) in 2015 and is based on a simple, yet universally applicable paradigm: Standardize your inputs!
# In backpropagation all layers are trained simultaneously. In particular, the input for higher layers is unstable for a long time, since it comes from lower hidden layers that are themselves subject to the training process. That is, we experience an **internal covariate shift**.
# The most immediate approach is to standardize the inputs before activations are computed. However, this has to be done in a way that is compatible with backpropagation, as the following [example](https://arxiv.org/abs/1502.03167) shows:
# Suppose that a neuron receives an input from a lower layer, where a bias $b$ was added to the corresponding output $u$, i.e., $x = u + b$. Moreover, suppose we have training data $\{u_1, \ldots, u_N\}$ resulting in $\{x_1, \ldots, x_N\}$ after adding the bias. Naïve normalization would replace the input $u + b$ of a neuron by
# $$ u + b - \frac1N \sum_{i \le N} (u_i + b) = u - \frac1N \sum_{i \le N}{u_i}$$
# Then, after a gradient step, the bias would be updated as $b + \Delta b$.
# This causes a peculiar behavior: Since we subtract again the mean value with the new bias, the change of $b$ by $\Delta b$ did not have any effect on the output of the layer and the bias $b$ would escape to infinity.
# Batch normalization avoids this pitfall by normalizing the data within every mini-batch and provide additional scaling and bias parameter $\gamma$ and $\beta$ that are part of the backpropagation:
# <img src="images/batchnorm.PNG" alt="Drawing" style="width: 500px;"/>
# <NAME> & <NAME>: [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167).
# We stress that the normalization is done *separately over each input* to a neuron. That means, for a neuron with input $\{x^{(1)}, \ldots x^{(d)}\}$ batch normalization introduces parameters $\gamma^{(1)}, \ldots, \gamma^{(d)}$ and $\beta^{(1)}, \ldots, \beta^{(d)}$.
# In order to perform backpropagation, it is pivotal that all operations in batch normalization are differentiable. We verify this here for the derivatives with respect to the new parameters $\gamma$ and $\beta$:
# $$\frac{\partial \ell}{\partial \gamma} = \sum_{i \le m} \frac{\partial \ell}{\partial y_i} \widehat{x_i}\quad\text{ and }\quad\frac{\partial \ell}{\partial \beta} = \sum_{i \le m} \frac{\partial \ell}{\partial y_i}.$$
#
# During production the sample mean and sample variance over the mini-batches are replaced by the mean and variance over the entire training set.
# Batch normalization offers two crucial benefits for training deep nets:
#
# 1. The normalization stabilizes the training process, thereby allowing for higher learning rates. Hence, we can train faster.
# 2. There is less need for other regularization methods, because computing means and variances separately on each mini-batch introduces randomness.
# Here is an example of how to add batch normalization in Keras. Note that there is [no clear consensus](https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md) whether batch normalization should be applied before or after the activation function.
model <- keras_model_sequential()
# use_bias=F: batch normalization's learned beta makes a dense bias redundant.
model %>%
    layer_dense(64, input_shape=c(20), use_bias=F) %>%
    layer_batch_normalization() %>%
    layer_activation_relu() %>%
    layer_dense(64, activation='relu') %>%
    layer_dense(1, activation='sigmoid')
# # Embeddings
# So far, we have explored how to apply deep learning to continuous input data. However, also for problems involving categorical input data, deep learning has become an indispensable tool in the context of generating **embeddings**.
# When working with categorical data, embeddings associate with each discrete category a vector in a euclidean vector space $\mathbb R^d$ for a suitable dimension $d$. The most primitive form is **one-hot embedding**, where $d$ equals the number of categories and the $i$th discrete outcome is mapped to the $i$th standard unit vector.
# The idea is to apply neural networks to learn embeddings that are defined in lower dimension and where semantically close concepts are also close in Euclidean distance. We illustrate this idea first in the context of natural language processing at the hand of **Word2Vec** introduced by [<NAME>, <NAME>, <NAME> & <NAME>](https://arxiv.org/abs/1301.3781) in 2013. Then, in the problem classes, we apply embeddings to recommend movies.
# ### Word2Vec
# Before we move to the details on how to generate word embeddings. We look at the beautiful visualization provided at http://projector.tensorflow.org/.
# The goal of a versatile word-vector representation is to map semantically related words to nearby points in euclidean space. In particular, we would like to be able to perform **word-vector arithmetics** such as
# $$\mathsf{king} - \mathsf{man} = \mathsf{queen} - \mathsf{woman}$$
# The idea of Word2Vec rests on the observation that semantically similar words should occur in similar contexts. Therefore, we translate the embedding problem into a supervised learning setting with the aim of inferring the word from a context missing one word. This is nicely illustrated in the [tensorflow tutorial on word embeddings](https://www.tensorflow.org/tutorials/representation/word2vec).
# <img src="images/skipgram.png" alt="skipgram" style="width: 700px;"/>
#
#
# https://www.tensorflow.org/tutorials/representation/word2vec
# For instance, considering the example from the [tutorial above](https://www.tensorflow.org/tutorials/representation/word2vec), from the sentence
#
# ``the quick brown fox jumped over the lazy dog``
#
# we could extract the training pairs
#
# ``(quick, the), (quick, brown), (brown, quick), (brown, fox), ...``
# To the $k$th word in a vocabulary, we associate two vectors $v_k, v_k' \in \mathbb{R}^d$ corresponding to the situations where the word appears in the context and in the missing position, respectively. Given that the $k$th word is in the context, we use the scalar product $v_\ell' v_k^\top$ as a feature for logistic regression. To these positive training examples we add negative training examples that are sampled at random.
# We will meet word embeddings again, when discussing [recurrent neural networks](./rnn.ipynb).
# ### Embeddings outside NLP
# Word2Vec is one of the most glamorous success stories of embeddings. However, this does not imply that the use of embeddings is restricted to the domain of NLP -- on the contrary, whenever dealing with a large amount of categorical data, embeddings are a powerful tool to exploit hidden correlations. For instance, this also applies to movie recommendation.
# In another context, in a cooperation with [Google Cloud](https://cloud.google.com), the insurance company [Axa](https://www.axa.com/) developed an MLP for insurance pricing:
# https://cloud.google.com/blog/products/gcp/using-machine-learning-for-insurance-pricing-optimization.
# <img src="images/axa.PNG" alt="MLP for insurance" style="width: 1000px;"/>
#
#
# https://cloud.google.com/blog/products/gcp/using-machine-learning-for-insurance-pricing-optimization
| notebooks/reg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# NOTE(review): absolute cluster path — not portable outside the original environment.
medicare = pd.read_csv("/netapp2/home/se197/data/CMS/Data/medicare.csv")
# +
# Hold out BWH as the external validation site; everything else trains.
train_set = medicare[medicare.Hospital != 'BWH'] # MGH; n = 204014
validation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither; n = 115726
import numpy as np
# Split each cohort at the median EHR-continuity score (Cal_MPEC_R0),
# computed over the FULL dataset so train and validation share one cut point.
fifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50)
train_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
train_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
validation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
validation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
# +
predictor_variable = [
'Co_CAD_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0', 'Co_AFib_R0',
'Co_Hypertension_R0', 'Co_Hyperlipidemia_R0', 'Co_Atherosclerosis_R0',
'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0', 'Co_OthStroke_R0',
'Co_TIA_R0', 'Co_COPD_R0', 'Co_Asthma_R0', 'Co_Pneumonia_R0', 'Co_Alcoholabuse_R0',
'Co_Drugabuse_R0', 'Co_Epilepsy_R0', 'Co_Cancer_R0', 'Co_MorbidObesity_R0',
'Co_Dementia_R0', 'Co_Depression_R0', 'Co_Bipolar_R0', 'Co_Psychosis_R0',
'Co_Personalitydisorder_R0', 'Co_Adjustmentdisorder_R0', 'Co_Anxiety_R0',
'Co_Generalizedanxiety_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PUD_R0',
'Co_UpperGIbleed_R0', 'Co_LowerGIbleed_R0', 'Co_Urogenitalbleed_R0',
'Co_Othbleed_R0', 'Co_PVD_R0', 'Co_LiverDisease_R0', 'Co_MRI_R0',
'Co_ESRD_R0', 'Co_Obesity_R0', 'Co_Sepsis_R0', 'Co_Osteoarthritis_R0',
'Co_RA_R0', 'Co_NeuroPain_R0', 'Co_NeckPain_R0', 'Co_OthArthritis_R0',
'Co_Osteoporosis_R0', 'Co_Fibromyalgia_R0', 'Co_Migraine_R0', 'Co_Headache_R0',
'Co_OthPain_R0', 'Co_GeneralizedPain_R0', 'Co_PainDisorder_R0',
'Co_Falls_R0', 'Co_CoagulationDisorder_R0', 'Co_WhiteBloodCell_R0', 'Co_Parkinson_R0',
'Co_Anemia_R0', 'Co_UrinaryIncontinence_R0', 'Co_DecubitusUlcer_R0',
'Co_Oxygen_R0', 'Co_Mammography_R0', 'Co_PapTest_R0', 'Co_PSATest_R0',
'Co_Colonoscopy_R0', 'Co_FecalOccultTest_R0', 'Co_FluShot_R0', 'Co_PneumococcalVaccine_R0', 'Co_RenalDysfunction_R0', 'Co_Valvular_R0', 'Co_Hosp_Prior30Days_R0',
'Co_RX_Antibiotic_R0', 'Co_RX_Corticosteroid_R0', 'Co_RX_Aspirin_R0', 'Co_RX_Dipyridamole_R0',
'Co_RX_Clopidogrel_R0', 'Co_RX_Prasugrel_R0', 'Co_RX_Cilostazol_R0', 'Co_RX_Ticlopidine_R0',
'Co_RX_Ticagrelor_R0', 'Co_RX_OthAntiplatelet_R0', 'Co_RX_NSAIDs_R0',
'Co_RX_Opioid_R0', 'Co_RX_Antidepressant_R0', 'Co_RX_AAntipsychotic_R0', 'Co_RX_TAntipsychotic_R0',
'Co_RX_Anticonvulsant_R0', 'Co_RX_PPI_R0', 'Co_RX_H2Receptor_R0', 'Co_RX_OthGastro_R0',
'Co_RX_ACE_R0', 'Co_RX_ARB_R0', 'Co_RX_BBlocker_R0', 'Co_RX_CCB_R0', 'Co_RX_Thiazide_R0',
'Co_RX_Loop_R0', 'Co_RX_Potassium_R0', 'Co_RX_Nitrates_R0', 'Co_RX_Aliskiren_R0',
'Co_RX_OthAntihypertensive_R0', 'Co_RX_Antiarrhythmic_R0', 'Co_RX_OthAnticoagulant_R0',
'Co_RX_Insulin_R0', 'Co_RX_Noninsulin_R0', 'Co_RX_Digoxin_R0', 'Co_RX_Statin_R0',
'Co_RX_Lipid_R0', 'Co_RX_Lithium_R0', 'Co_RX_Benzo_R0', 'Co_RX_ZDrugs_R0',
'Co_RX_OthAnxiolytic_R0', 'Co_RX_Barbiturate_R0', 'Co_RX_Dementia_R0', 'Co_RX_Hormone_R0',
'Co_RX_Osteoporosis_R0', 'Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0',
'Co_N_MDVisit_R0', 'Co_RX_AnyAspirin_R0', 'Co_RX_AspirinMono_R0', 'Co_RX_ClopidogrelMono_R0',
'Co_RX_AspirinClopidogrel_R0', 'Co_RX_DM_R0', 'Co_RX_Antipsychotic_R0'
]
# Five positional folds per cohort; the outcome cells below align to each
# fold via its pandas row index.
co_train_gpop = train_set[predictor_variable]
co_train_gpop_split = np.array_split(co_train_gpop, 5)
co_train_high = train_set_high[predictor_variable]
co_train_high_split = np.array_split(co_train_high, 5)
co_train_low = train_set_low[predictor_variable]
co_train_low_split = np.array_split(co_train_low, 5)
co_validation_gpop = validation_set[predictor_variable]
co_validation_gpop_split = np.array_split(co_validation_gpop, 5)
co_validation_high = validation_set_high[predictor_variable]
co_validation_high_split = np.array_split(co_validation_high, 5)
co_validation_low = validation_set_low[predictor_variable]
co_validation_low_split = np.array_split(co_validation_low, 5)
# -
co_train_gpop
# +
def _outcome_splits(outcome, covariate_splits):
    """Align an outcome Series with each covariate fold via the fold's row index."""
    return [outcome[part.index] for part in covariate_splits]

# Death outcome per cohort, aligned with the five covariate folds above.
# (Replaces six copy-pasted append loops with one helper.)
out_train_death_gpop = train_set['ehr_claims_death']
out_train_death_gpop_split = _outcome_splits(out_train_death_gpop, co_train_gpop_split)

out_train_death_high = train_set_high['ehr_claims_death']
out_train_death_high_split = _outcome_splits(out_train_death_high, co_train_high_split)

out_train_death_low = train_set_low['ehr_claims_death']
out_train_death_low_split = _outcome_splits(out_train_death_low, co_train_low_split)

out_validation_death_gpop = validation_set['ehr_claims_death']
out_validation_death_gpop_split = _outcome_splits(out_validation_death_gpop, co_validation_gpop_split)

out_validation_death_high = validation_set_high['ehr_claims_death']
out_validation_death_high_split = _outcome_splits(out_validation_death_high, co_validation_high_split)

out_validation_death_low = validation_set_low['ehr_claims_death']
out_validation_death_low_split = _outcome_splits(out_validation_death_low, co_validation_low_split)
# +
def rf(X_train, y_train, Or_X, Or_y):
    """Grid-search a random forest over a small hyper-parameter grid.

    Model selection is done by GridSearchCV1, which scores candidates on the
    supplied held-out folds rather than sklearn's internal test split.

    :param X_train: (DataFrame) training covariates
    :param y_train: (Series) training outcome
    :param Or_X: (list) held-out covariate folds, one per CV split
    :param Or_y: (list) held-out outcome folds, aligned with Or_X
    :return: the fitted RandomForestClassifier with the lowest mean log loss
    """
    from sklearn.ensemble import RandomForestClassifier
    # Removed: unused GridSearchCV import, unused *_grid locals, and the
    # commented-out sklearn GridSearchCV call.
    param_grid = {'bootstrap': [True],
                  'max_depth': [6, 10],
                  'min_samples_leaf': [3, 5],
                  'min_samples_split': [4, 6],
                  'n_estimators': [100, 350]
                  }
    forest_clf = RandomForestClassifier()
    best_clf = GridSearchCV1(clf = forest_clf,
                             param_grid = param_grid,
                             cv = 5,
                             n_jobs = 10,
                             X = X_train,
                             y = y_train,
                             Or_X = Or_X,
                             Or_y = Or_y)
    return best_clf
def GridSearchCV1(clf, param_grid, cv, n_jobs, X, y, Or_X, Or_y):
    """Custom grid search: train on KFold splits of X/y, score on Or_X/Or_y folds.

    For each hyper-parameter combination, fits one forest per KFold training
    split of (X, y) and evaluates log loss on the externally supplied
    validation fold with the same position (Or_X[i], Or_y[i]). The candidate
    with the lowest mean log loss wins; as in the original implementation,
    the model returned is the one fitted on that candidate's LAST fold.

    :param clf: unused (kept for signature compatibility with callers)
    :param param_grid: unused (the grid is hard-coded below, as before)
    :param cv: unused (a fresh 5-fold KFold is always constructed)
    :param n_jobs: (int) parallelism passed to RandomForestClassifier
    :param X: (DataFrame) training covariates
    :param y: (Series) training outcome
    :param Or_X: (list) validation covariate folds, positionally aligned
    :param Or_y: (list) validation outcome folds, positionally aligned
    :return: the best fitted RandomForestClassifier
    """
    from sklearn.model_selection import KFold
    from sklearn.ensemble import RandomForestClassifier
    import sklearn
    import numpy

    folds = KFold(n_splits = 5, random_state = 1, shuffle = True)
    best_mean_loss = float('inf')  # tracker was misleadingly named max_value before
    best_clf = 0
    cur_iter = 0
    for max_depth in [6, 10]:
        for min_samples_leaf in [3, 5]:
            for min_samples_split in [4, 6]:
                for n_estimators in [100, 350]:
                    losses = []
                    fold_idx = 0  # was `iter`, which shadowed the builtin
                    for train_index, test_index in folds.split(X):
                        cur_iter = cur_iter + 1
                        X_train, y_train = X.iloc[train_index], y.iloc[train_index]
                        X_val, y_val = Or_X[fold_idx], Or_y[fold_idx]
                        fold_idx = fold_idx + 1
                        model = RandomForestClassifier(max_depth = max_depth,
                                                       min_samples_leaf = min_samples_leaf,
                                                       min_samples_split = min_samples_split,
                                                       n_estimators = n_estimators,
                                                       n_jobs = n_jobs).fit(X_train, y_train)
                        prob = model.predict_proba(X_val)[:, 1]
                        losses.append(sklearn.metrics.log_loss(y_val, prob))
                    mean_loss = numpy.mean(losses)
                    if mean_loss < best_mean_loss:
                        best_mean_loss = mean_loss
                        best_clf = model  # model from this candidate's last fold
    print(cur_iter)
    return best_clf
# -
def scores(X, y, clf=None):
    """Print accuracy, F1, macro-F2, AUC and log loss for a fitted classifier.

    :param X: covariates to score
    :param y: true outcomes
    :param clf: fitted classifier; defaults to the module-level ``best_clf``
        for backward compatibility with the original global-based usage
    """
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import fbeta_score
    from sklearn.metrics import roc_auc_score
    from sklearn.metrics import log_loss
    if clf is None:
        # NOTE(review): relies on a global `best_clf` assigned later in the
        # notebook; pass clf explicitly to avoid cell-order dependence.
        clf = best_clf
    pred = clf.predict(X)
    prob = clf.predict_proba(X)[:, 1]  # hoisted: predict_proba was called twice
    actual = y
    print(accuracy_score(actual, pred))
    #, file = open('death_smote_rf_ehr.out', 'a')
    print(f1_score(actual, pred))
    print(fbeta_score(actual, pred, average = 'macro', beta = 2))
    print(roc_auc_score(actual, prob))
    print(log_loss(actual, prob))
def cross_val(X,y,Or_X, Or_y, val_X, val_y):
    """Run 5-fold cross-validation of the global `rf` trainer and print mean metrics.

    For fold i the model is trained on the KFold training split of (X, y)
    (typically SMOTE-resampled data) but evaluated on the matching *original*
    (non-resampled) fold held in Or_X[i] / Or_y[i].  val_X / val_y are passed
    straight through to `rf` as its tuning set.

    Prints (in order): mean accuracy, macro-F1, macro-F2, ROC-AUC, log-loss.
    """
    from sklearn.model_selection import KFold
    from sklearn.metrics import fbeta_score
    import sklearn
    import numpy as np
    cv = KFold(n_splits=5, random_state=1, shuffle=True)
    # Renamed from `log_loss`: the original rebound the imported
    # sklearn.metrics.log_loss function name to this list.
    log_losses = []
    auc = []
    accuracy = []
    f1 = []
    f2 = []
    # enumerate replaces the original hand-maintained `iter` counter
    # (which also shadowed the builtin `iter`).
    for fold, (train_index, _test_index) in enumerate(cv.split(X)):
        # Train on the (resampled) fold, evaluate on the matching original fold.
        X_train, X_test = X.iloc[train_index], Or_X[fold]
        y_train, y_test = y.iloc[train_index], Or_y[fold]
        model = rf(X_train, y_train, val_X, val_y)
        prob = model.predict_proba(X_test)[:,1]  # P(class == 1) per sample
        pred = np.round(prob)                    # hard labels at the 0.5 threshold
        log_losses.append(sklearn.metrics.log_loss(y_test, prob))
        auc.append(sklearn.metrics.roc_auc_score(y_test, prob))
        accuracy.append(sklearn.metrics.accuracy_score(y_test, pred))
        f1.append(sklearn.metrics.f1_score(y_test, pred, average = 'macro'))
        f2.append(fbeta_score(y_test, pred, average = 'macro', beta = 2))
    print(np.mean(accuracy))
    print(np.mean(f1))
    print(np.mean(f2))
    print(np.mean(auc))
    print(np.mean(log_losses))
# co_train_gpop_sm,out_train_hemorrhage_gpop_sm, co_validation_gpop_split, out_validation_hemorrhage_gpop_split
# Sanity check: peek at the 5th fold of the general-population training split.
co_train_gpop_split[4]
# +
# Oversample the minority (death) class with SMOTE before cross-validation.
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state = 42)
co_train_gpop_sm,out_train_death_gpop_sm = sm.fit_resample(co_train_gpop,out_train_death_gpop)
#best_clf = rf(co_train_gpop_sm, out_train_death_gpop_sm, co_train_gpop_split, out_train_death_gpop_split)
# NOTE(review): here the *validation* folds are the evaluation set and the
# *training* folds the tuning set — the opposite order of the later cells; confirm intended.
cross_val(co_train_gpop_sm, out_train_death_gpop_sm, co_validation_gpop_split, out_validation_death_gpop_split, co_train_gpop_split, out_train_death_gpop_split)
# -
# #### General Population
# +
# General population: SMOTE-resample, grid-search a random forest, cross-validate.
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state = 42)
co_train_gpop_sm,out_train_death_gpop_sm = sm.fit_resample(co_train_gpop,out_train_death_gpop)
best_clf = rf(co_train_gpop_sm, out_train_death_gpop_sm, co_train_gpop_split, out_train_death_gpop_split)
# +
cross_val(co_train_gpop_sm, out_train_death_gpop_sm, co_train_gpop_split, out_train_death_gpop_split, co_validation_gpop_split, out_validation_death_gpop_split)
# NOTE(review): each print below opens the output file in append mode and never
# closes it, leaking a file handle per call; consider a `with open(...)` block.
print("", file = open('death_smote_rf_ehr.out', 'a'))
scores(co_train_gpop, out_train_death_gpop)
print("", file = open('death_smote_rf_ehr.out', 'a'))
scores(co_validation_gpop, out_validation_death_gpop)
# -
# # Low Continuity
# +
# Low-continuity cohort: same SMOTE → grid-search → cross-validate pipeline.
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state = 42)
co_train_low_sm,out_train_death_low_sm = sm.fit_resample(co_train_low,out_train_death_low)
best_clf = rf(co_train_low_sm, out_train_death_low_sm, co_train_low_split, out_train_death_low_split)
cross_val(co_train_low_sm, out_train_death_low_sm, co_train_low_split, out_train_death_low_split, co_validation_low_split, out_validation_death_low_split)
print("")
scores(co_train_low, out_train_death_low)
print("")
scores(co_validation_low, out_validation_death_low)
# -
# # High Continuity
# +
# High-continuity cohort: same SMOTE → grid-search → cross-validate pipeline.
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state = 42)
co_train_high_sm,out_train_death_high_sm = sm.fit_resample(co_train_high,out_train_death_high)
best_clf = rf(co_train_high_sm, out_train_death_high_sm, co_train_high_split, out_train_death_high_split)
cross_val(co_train_high_sm, out_train_death_high_sm, co_train_high_split, out_train_death_high_split, co_validation_high_split, out_validation_death_high_split)
print("")
scores(co_train_high, out_train_death_high)
print("")
scores(co_validation_high, out_validation_death_high)
# -
| EHR_Only/RF/Death_SMOTE_Custom_GridSearch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np

# Create a one-dimensional array.
array = np.arange(20)
array
# -
array.shape
array[3]
# NumPy arrays have a fixed dtype: the original cell assigned the string
# 'Numpy' here, which raises ValueError on an integer array.  Assign a
# compatible numeric value instead.
array[3] = 100
# Create a two-dimensional array.
array = np.arange(20).reshape(4, 5)
array
array[3][4]
array.shape
# Create arrays of three (or more) dimensions.
array = np.arange(27).reshape(3,3,3)
array
array.shape
np.arange(10, 35, 3)
# Other NumPy constructors.
np.zeros((2,4))
np.ones((3,4))
np.empty((2,3))
np.full((2,2), 3)
np.eye(3,3)
np.linspace(0, 10, num=4)
# Convert from a Python list.
array = np.array([4,5,6])
array
# Renamed from `list` so the builtin list type is not shadowed.
number_list = [4,5,6]
number_list
type(number_list)
type(array)
array = np.array([(1,2,3),(4,5,6)])
array.shape
# Use the random module.
np.random.random((2,2))
| cerate_numpy_method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from autumn.tools.curve.tanh import tanh_based_scaleup
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from summer.utils import ref_times_to_dti
from autumn.tools.plots.utils import REF_DATE
# Manually adjusted parameters to approximate proportion complying to physical distancing recommendation
# tanh_based_scaleup(shape, inflection_time, start, end) — presumably a sigmoid
# rising from ~0.165 to ~0.241 around t=595; TODO confirm parameter order.
fit = tanh_based_scaleup(0.1, 595, 0.165, 0.241)
# Survey observation times — assumed to be integer days since REF_DATE (see
# ref_times_to_dti below) — and the observed compliance proportions.
times = (579, 580, 581, 582, 583, 589, 590, 591, 592, 593, 594, 595, 601, 602, 603, 604, 605, 606, 607, 608, 610, 611)
data = (0.166282, 0.163876, 0.165184, 0.165816, 0.164541, 0.169811, 0.174684, 0.186215, 0.195701, 0.194186, 0.198492, 0.196989, 0.198243, 0.201465, 0.210588, 0.215973, 0.218785, 0.231221, 0.240678, 0.242152, 0.240936, 0.241212)
# Evaluate the fitted curve on a slightly wider window than the data.
fit_times = range(570, 620)
fig = plt.figure(figsize=(8, 6))
date_form = DateFormatter("%d %b")
plt.style.use("ggplot")
axis = fig.add_subplot()
# Fitted curve (black line) over the scattered survey observations.
axis.plot(ref_times_to_dti(REF_DATE, fit_times), [fit(time) for time in fit_times], color="k")
axis.scatter(ref_times_to_dti(REF_DATE, times), data)
axis.set_ylabel("Physical distancing compliance")
axis.xaxis.set_tick_params(rotation=45)
axis.xaxis.set_major_formatter(date_form)
| notebooks/user/jtrauer/vic_microdistancing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ## Forced SIR model using R and simecol
library(deSolve)
library(simecol)
library(reshape2)
# SIR model with demography and a seasonally forced transmission rate
# beta(t) = beta0 * (1 + beta1 * sin(omega * t)).
sirforcedode <- new("odeModel",
                    main = function(time, init, parms, ...){
                      with(as.list(c(init,parms)),{
                        # ODEs
                        N <- S+I+R
                        dS <- mu*N-beta(beta0,beta1,omega,time)*S*I/N-mu*S
                        dI <- beta(beta0,beta1,omega,time)*S*I/N-gamma*I-mu*I
                        dR <- gamma*I-mu*R
                        list(c(dS,dI,dR))
                      })},
                    equations = list(
                      beta = function(beta0,beta1,omega,time){beta0*(1+beta1*sin(omega*time))}
                    ),
                    # Rates are per day: ~1-year sinusoidal forcing, 7-day infectious
                    # period, 70-year life expectancy.
                    parms = c(beta0=10./7,beta1=0.05,omega=2*pi/365,gamma=1./7,mu=1./(70*365)),
                    times = c(from=0,to=100*365,by=1),
                    init = c(S=99999,I=1,R=0),
                    solver = "lsoda"
)
# Simulate until equilibrium
sirforcedode <- sim(sirforcedode)
# Reset initial values
# Restart from the state reached after 100 years (discards the transient).
init(sirforcedode) <- unlist(out(sirforcedode)[100*365,2:4])
# Look at 10 years
times(sirforcedode) <- c(from=0,to=10*365,by=1)
# Simulate
sirforcedode <- sim(sirforcedode)
sirforced_out <- out(sirforcedode)
# Long format (time, variable, value) for ggplot.
sirforced_out_long <- melt(sirforced_out,"time")
| notebooks/sirforced/r.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''CBAS-Full'': conda)'
# language: python
# name: python38164bitcbasfullconda6b117a9c142448dcacfad6a91392d3ed
# ---
# # Plotting data in DB
#
# * data from TsDB
# * from start to March-20th
# * pull data
# * re-create charts with this slightly different DF
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.figure_factory as FF
import plotly.offline as offline
from datetime import datetime
import glob
import os.path
import pymysql
import sqlconfig # From sqlconfig.py
import pandas as pd
import sqlalchemy
import psycopg2
from tqdm import tqdm
print("Import Complete")
# ### SQL setup
# create engine for CBAS db
passwd = sqlconfig.passwd # From sqlconfig.py
user = sqlconfig.user # From sqlconfig.py
DB = 'cbas' #name of databases to activate
user
# NOTE(review): credentials are concatenated into the URL unescaped — a password
# containing '@' or ':' would break this; consider sqlalchemy.engine.URL.create.
engine = sqlalchemy.create_engine('postgresql+psycopg2://'+user+':'+passwd+'@172.16.31.10/'+DB)
# Just going to try pulling everything to see what we have....
query= '''
SELECT *
FROM cbasdef
'''
# +
# Full table pull, indexed by timestamp.
CBAS= pd.read_sql(query,engine,index_col=["timestamp"])
# -
CBAS
# What sensors do we have?
CBAS['sensor'].unique()
# Okay, so I want to pull 5 devices, (leave protoCBAS-B)
# Lets try pulling data into a list of Dfs like they were with CSVs
# One query per device; only BEEM-A is date-bounded here.
query1= '''
SELECT *
FROM cbasdef
WHERE sensor = 'BEEM-A'
AND timestamp BETWEEN '2020-01-06 00:00:00' and '2020-03-20 11:59:00'
'''
query2= '''
SELECT *
FROM cbasdef
WHERE sensor = 'BEEM-C'
'''
query3= '''
SELECT *
FROM cbasdef
WHERE sensor = 'BEEM-D'
'''
query4= '''
SELECT *
FROM cbasdef
WHERE sensor = 'Moe'
'''
query5= '''
SELECT *
FROM cbasdef
WHERE sensor = 'protoCBAS-G'
'''
# NOTE(review): despite the name, `path` holds SQL query strings, not file paths
# (the name survives from the earlier CSV-based workflow).
path = [query1,query2,query3,query4,query5]
#place query in CBAStest df
# Run each query and collect one timestamp-indexed DataFrame per device.
dfs = [pd.read_sql(f,engine,index_col=["timestamp"])for f in path]
dfs
import plotly.graph_objs as go
import plotly.figure_factory as FF
import plotly.offline as offline
from plotly.subplots import make_subplots
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.express as px
init_notebook_mode(connected=True)
# Shared marker style; maxdisplayed caps the markers drawn per trace.
mmarker = dict(symbol = "circle-open",
               size = 5,
               maxdisplayed = 300)
# One CO2 trace per device DataFrame, named after its sensor column.
fig = go.Figure([go.Scatter(x=d.index, y=d['RCO2'], name=d["sensor"].iloc[0],hoverinfo= "x+y+text+name",mode="markers+lines",marker = mmarker)for d in dfs])
fig.show(renderer="notebook_connected")
# Okay, now we prob want this in as little queries as possible...
# * device can be a variable
# * maybye date range as well
#
devices = "'BEEM-A','BEEM-C','BEEM-D','Moe','protoCBAS-G'"
devices
# NOTE(review): the query is assembled by string concatenation; fine while
# `devices` is hard-coded, but parameterize it if the list ever comes from
# user input (SQL injection risk).
queryA= '''
SELECT *
FROM cbasdef
WHERE sensor IN ( '''
queryB = ''' )
AND timestamp BETWEEN '2020-02-06 00:00:00' and '2020-03-20 11:59:00'
'''
fullquery = queryA+devices+queryB
fullquery
# Single combined pull for all five devices.
CBAS= pd.read_sql(fullquery,engine,index_col=["timestamp"])
CBAS.sensor.unique()
CBAS.groupby(CBAS['sensor']).count() # count of how many rows in each group
# Issue here is getting these into individual DFs...
| test/Notebooks/Sample plot from SQL .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
# +
from os import path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ydata_synthetic.synthesizers import ModelParameters
from ydata_synthetic.preprocessing.timeseries import processed_stock
from ydata_synthetic.synthesizers.timeseries import TimeGAN
# +
#Specific to TimeGANs
# Each training sample is a window of seq_len time steps over n_seq features.
seq_len=24
n_seq = 6
hidden_dim=24
gamma=1
noise_dim = 32
dim = 128
batch_size = 128
log_step = 100
learning_rate = 5e-4
gan_args = ModelParameters(batch_size=batch_size,
                           lr=learning_rate,
                           noise_dim=noise_dim,
                           layers_dim=dim)
# -
stock_data = processed_stock(path='../data/stock_data.csv', seq_len=seq_len) # normalized & sliced into seq_len windows
print(len(stock_data), stock_data[0].shape)
stock_data[1]
synth = TimeGAN(model_parameters=gan_args, hidden_dim=24, seq_len=seq_len, n_seq=n_seq, gamma=1)
# Long-running training step (10k iterations).
synth.train(stock_data, train_steps=10000)
# synth.save("../output/synthesizer_stock.pkl")
# Draw as many synthetic windows as there are real ones.
synth_data = synth.sample(len(stock_data))
synth_data.shape
# +
#Reshaping the data
cols = ['Open','High','Low','Close','Adj Close','Volume']
#Plotting some generated samples. Both Synthetic and Original data are still standartized with values between [0,1]
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(15, 10))
axes=axes.flatten()
# NOTE(review): `time` is assigned but never used in this cell.
time = list(range(1,25))
# Pick one random window and overlay real vs synthetic for each feature.
obs = np.random.randint(len(stock_data))
for j, col in enumerate(cols):
    df = pd.DataFrame({'Real': stock_data[obs][:, j],
                   'Synthetic': synth_data[obs][:, j]})
    df.plot(ax=axes[j],
            title = col,
            secondary_y='Synthetic data', style=['-', '--'])
fig.tight_layout()
# +
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.gridspec as gridspec

# Compare the distribution of real vs synthetic windows in 2-D embeddings.
sample_size = 250
idx = np.random.permutation(len(stock_data))[:sample_size]

real_sample = np.asarray(stock_data)[idx]
synthetic_sample = np.asarray(synth_data)[idx]

#for the purpose of comparision we need the data to be 2-Dimensional. For that reason we are going to use only two componentes for both the PCA and TSNE.
# BUG FIX: the two assignments below were swapped in the original cell, so PCA
# was fitted on the *synthetic* sample even though the method (and the comment
# further down) requires fitting on the real data only.
stock_data_reduced = real_sample.reshape(-1, seq_len)
synth_data_reduced = np.asarray(synthetic_sample).reshape(-1,seq_len)

n_components = 2
pca = PCA(n_components=n_components)
tsne = TSNE(n_components=n_components, n_iter=300)

#The fit of the methods must be done only using the real sequential data
pca.fit(stock_data_reduced)

pca_real = pd.DataFrame(pca.transform(stock_data_reduced))
pca_synth = pd.DataFrame(pca.transform(synth_data_reduced))

# t-SNE has no separate transform: embed real and synthetic rows jointly
# (first sample_size*... rows are real, the rest synthetic).
data_reduced = np.concatenate((stock_data_reduced, synth_data_reduced), axis=0)
tsne_results = pd.DataFrame(tsne.fit_transform(data_reduced))

fig = plt.figure(constrained_layout=True, figsize=(20,10))
spec = gridspec.GridSpec(ncols=2, nrows=1, figure=fig)

# PCA scatter plot (left panel).
ax = fig.add_subplot(spec[0,0])
ax.set_title('PCA results',
             fontsize=20,
             color='red',
             pad=10)
plt.scatter(pca_real.iloc[:, 0].values, pca_real.iloc[:,1].values,
            c='black', alpha=0.2, label='Original')
plt.scatter(pca_synth.iloc[:,0], pca_synth.iloc[:,1],
            c='red', alpha=0.2, label='Synthetic')
ax.legend()

# TSNE scatter plot (right panel).
ax2 = fig.add_subplot(spec[0,1])
ax2.set_title('TSNE results',
              fontsize=20,
              color='red',
              pad=10)
plt.scatter(tsne_results.iloc[:sample_size, 0].values, tsne_results.iloc[:sample_size,1].values,
            c='black', alpha=0.2, label='Original')
plt.scatter(tsne_results.iloc[sample_size:,0], tsne_results.iloc[sample_size:,1],
            c='red', alpha=0.2, label='Synthetic')
ax2.legend()

fig.suptitle('Validating synthetic vs real data diversity and distributions',
             fontsize=16,
             color='grey')
| notebooks/test_TimeGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scikit-Plot for Quick Machine Learning Evaluation
# by <NAME> 12/9/18
#
# This is the notebook for the [Scikit-Plot Youtube Tutorial](https://www.youtube.com/watch?v=w5UPXd3B7Jw). You may read this on your own or follow the video.
# ## By the end of the tutorial you will be able to:
#
# * Apply [Scikit-Plot](https://scikit-plot.readthedocs.io/en/stable/) to quickly evaluate your machine learning models.
#
# Data Source: [Absenteeism At Work Dataset from UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Absenteeism+at+work)
# +
# Import Libraries
import pandas as pd
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
from os import chdir
# %matplotlib inline
# -
# Load Data
# NOTE(review): hard-coded Windows drive root — this only runs on the author's
# machine; consider a configurable data directory.
chdir('D:\\')
absentees = pd.read_csv('Absenteeism_at_work.csv',sep=';')
# Inspect the Data
absentees.head()
# Check the info.
absentees.info()
# See the class and counts for Disciplinary Failure (0 = success, 1 = failure).
absentees['Disciplinary failure'].value_counts().plot.bar(title = 'Disciplinary Failure Counts')
plt.xlabel('Classes')
plt.ylabel('Totals')
# # Classifier Evaluation
#
# **Note**: No feature engineering or EDA was applied before classifying. This is because the purpose of this notebook is just to showcase Scikit-Plot. When building a real classifier **always** perform EDA and feature engineering first!
# Set up feature matrix X and class vector y.
X = absentees[['Age','Distance from Residence to Work','Work load Average/day ','Weight']]
y = absentees['Disciplinary failure']
# Split data into training and test.
X_train, X_test, y_train, y_test = train_test_split(X, y , test_size = 0.2, random_state = 1)
# Initiate classifier and fit to data
classifier = RandomForestClassifier(n_estimators = 100)
classifier.fit(X_train, y_train)
# Apply test data to the classifier and get prediction probabilities
# (scikit-plot expects per-class probabilities, not hard labels).
y_pred = classifier.predict_proba(X_test)
# Plot ROC curve with scikit-plot
skplt.metrics.plot_roc(y_test,y_pred)
plt.show()
# Now to plot the precision recall plot with scikit-plot
skplt.metrics.plot_precision_recall(y_test, y_pred)
plt.show()
# # Clustering Evaluation
# Data to cluster
data_to_cluster = absentees[['Transportation expense','Age']]
# Long way to plot the elbow plot.
# Within-cluster sum of squares for k = 1..29, fitted manually.
WCSS = []
for i in range(1,30):
    kmeans = KMeans(n_clusters=i, random_state=1)
    kmeans.fit(data_to_cluster)
    WCSS.append(kmeans.inertia_)
plt.plot(range(1,30),WCSS)
plt.title('Elbow Plot')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()
# Cluster data above
# No n_clusters given, so this uses scikit-learn's default (8 clusters).
kmeans = KMeans(random_state = 1)
clusters = kmeans.fit_predict(data_to_cluster)
# Plot elbow curve with scikit-plot.
skplt.cluster.plot_elbow_curve(clf=kmeans,X=data_to_cluster, cluster_ranges=range(1, 30))
plt.show()
# Plot silhouette plot for alternate cluster evaluation.
skplt.metrics.plot_silhouette(data_to_cluster,clusters)
plt.show()
# # Practice Exercises
#
# * Apply Scikit-Plot to speed up your Machine Learning Model evaluation.
#
# # Summary
#
# * Scikit-Plot significantly speeds up the model evaluation process.
#
# ## Scikit-Plot Links
#
# * [Github](https://github.com/reiinakano/scikit-plot)
# * [Documentation](https://scikit-plot.readthedocs.io/en/stable/)
# # Thanks for watching/reading! Let me know if you found this helpful by hitting the like button, subscribing, or leaving a comment!
#
# Links for contact in notebook and description
# * [Video Tutorial](https://www.youtube.com/watch?v=w5UPXd3B7Jw)
# * [GitHub](http://github.com/johndeJesus22)
# * [Twitter](https://twitter.com/johnnydata22)
# * [LinkedIn](https://www.linkedin.com/in/jdejesus22/)
| Scikit-Plot for Quick Machine Learning Evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import unicode_literals, print_function, division
# from io import open
import unicodedata
from pathlib import Path
from random import randint
from string import ascii_letters
import torch
print(f"PyTorch version: {torch.__version__}")
# -
# Prep
# Vocabulary for one-hot encoding: a-zA-Z plus space and common name punctuation.
all_letters: str = ascii_letters + " .;:'"
# Width of every one-hot character vector built from `all_letters`.
n_letters: int = len(all_letters)
print(n_letters)
# +
def find_files(glob_pattern):
    """Return the paths under the current directory matching *glob_pattern*.

    Replaces the original `find_files = lambda ...` binding (PEP 8 E731:
    don't assign a lambda to a name — use a def).
    """
    return [f for f in Path().glob(glob_pattern)]

for p in find_files("**/data/names/*.txt"): print(p.name, p.name[:p.name.index(p.suffix)])
# +
def unicode_to_ascii(s):
    """Strip combining marks from *s* and drop any character not in `all_letters`."""
    decomposed = unicodedata.normalize("NFD", s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != "Mn" and ch in all_letters]
    return "".join(kept)

print(unicode_to_ascii('Ślusàrski'))
# +
# Map each language (file stem) to its list of ASCII-normalized names.
cat_lines = {}
fp_gen = find_files("**/data/names/*.txt")
for p in fp_gen:
    with p.open(encoding = "utf-8") as f:
        lines = f.read().strip().split("\n")
    # File stem, e.g. "Italian" from "Italian.txt".
    _cat = p.name[:p.name.index(p.suffix)]
    cat_lines[_cat] = list(map(unicode_to_ascii, lines))

# Categories to list
all_cats = list(cat_lines.keys())
n_cats = len(all_cats)

print(f"Category count: {len(cat_lines)}")
print(cat_lines["Italian"][:5])
# -
# ### Turn names into Tensors
# +
def letter_to_index(Char: str) -> int:
    """Index of *Char* in the `all_letters` alphabet (-1 when absent)."""
    return all_letters.find(Char)

def letter_to_tensor(Char: str) -> torch.tensor:
    """One-hot <1 x n_letters> tensor for a single character."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letter_to_index(Char)] = 1
    return one_hot

def line_to_tensor(Line: str) -> torch.tensor:
    """One-hot <line_length x 1 x n_letters> tensor for a whole string."""
    encoded = torch.zeros(len(Line), 1, n_letters)
    for position, character in enumerate(Line):
        encoded[position][0][letter_to_index(character)] = 1
    return encoded

# Tests
print(letter_to_tensor("J"))
print(line_to_tensor("Jones").size())
# -
# ### Create Recurrent Neural Network (RNN)
# +
nn = torch.nn

class RNN(nn.Module):
    """Simple recurrent neural network (RNN).

    Model notes:
      * Two linear layers, both fed the concatenated (input, hidden) vector:
        i2h produces the next hidden state, i2b the raw output scores.
      * LogSoftmax on the output (pairs with nn.NLLLoss).
    """

    # NOTE: the original declared __slots__ here; removed because nn.Module
    # instances always carry a __dict__ (the base class defines no slots), so
    # it had no memory effect — and input_size/output_size were never assigned.

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2b = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim = 1)

    def forward(self, input, hidden):
        """One time step: return (log-probabilities, next hidden state)."""
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2b(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        """Zero hidden state for the start of a sequence."""
        return torch.zeros(1, self.hidden_size)
# +
# Init RNN
n_hidden: int = 128
rnn: RNN = RNN(n_letters, n_hidden, n_cats)

# BUG FIX: the original called letter_to_tensor("Albert"); for a multi-character
# string all_letters.find() returns -1, silently one-hot-encoding the *last*
# alphabet character.  Encode the whole line and feed its first time step.
input_rnn = line_to_tensor("Albert")
hidden_rnn = torch.zeros(1, n_hidden)

# input_rnn[0] is already a <1 x n_letters> batch, so no unsqueeze is needed.
# https://pytorch.org/tutorials/beginner/former_torchies/nnft_tutorial.html
output, next_hidden = rnn(input_rnn[0], hidden_rnn)
print(output)
# -
# ## Training the Model
#
# ### Preparation
# +
def category_from_output(output: torch.tensor) -> tuple:
    """Decode a network output row into (category name, category index)."""
    _scores, indices = output.topk(1)
    best = indices[0].item()
    return all_cats[best], best

# Test
print(category_from_output(output))
# +
N_SAMPLES: int = 10

def random_choice(L: list):
    """Return a uniformly random element of *L*."""
    last_index = len(L) - 1
    return L[randint(0, last_index)]
def random_training_example() -> tuple:
    """Sample one (category, line, category tensor, line tensor) training pair."""
    chosen_cat = random_choice(all_cats)
    chosen_line = random_choice(cat_lines[chosen_cat])
    cat_tensor = torch.tensor([all_cats.index(chosen_cat)], dtype = torch.long)
    line_tensor = line_to_tensor(chosen_line)
    return chosen_cat, chosen_line, cat_tensor, line_tensor

# Show a few random samples.
for i in range(N_SAMPLES):
    category, line, cat_tensor, line_tensor = random_training_example()
    print(f"Category: {category}, Line: {line}")
# -
# ## Training the Model
#
# ### Training Network
# Negative log-likelihood loss — matches the LogSoftmax output layer of the RNN.
criterion = nn.NLLLoss()
| notebook-samples/nlp-pytorch/.ipynb_checkpoints/classify_names_w_rnn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
# MACS2 narrowPeak output has no header, so columns are positional
# (chrom, start, end, name, score, strand, signal, -log10 p, -log10 q, summit).
peaks = pd.read_csv("~/Hsf1/ChIP-Seq/macs2/ChIP_1M_peaks.narrowPeak",
                    delimiter='\t',
                    header=None)
peaks
# The .xls file is tab-separated text with '#'-prefixed comment lines.
peaks = pd.read_csv("~/Hsf1/ChIP-Seq/macs2/ChIP_1M_peaks.xls",
                   delimiter='\t',
                   comment='#')
peaks
# Both sorts are ascending (the default), so the strongest peaks appear last.
peaks.sort_values(by="-log10(qvalue)")
peaks.sort_values(by="fold_enrichment")
| 04_chip_seq_peaks/01_chip_seq_peaks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Collision Avoidance - Live Demo
#
# In this notebook we'll use the model we trained to detect whether the robot is ``free`` or ``blocked`` to enable a collision avoidance behavior on the robot.
#
# ## Load the trained model
#
# We'll assumed that you've already downloaded the ``best_model.pth`` to your workstation as instructed in the training notebook. Now, you should upload this model into this notebook's
# directory by using the Jupyter Lab upload tool. Once that's finished there should be a file named ``best_model.pth`` in this notebook's directory.
#
# > Please make sure the file has uploaded fully before calling the next cell
#
# Execute the code below to initialize the PyTorch model. This should look very familiar from the training notebook.
# +
import torch
import torchvision
# Same architecture as in the training notebook: AlexNet with a 2-class head
# (free vs blocked).
model = torchvision.models.alexnet(pretrained=False)
model.classifier[6] = torch.nn.Linear(model.classifier[6].in_features, 2)
# -
# Next, load the trained weights from the ``best_model.pth`` file that you uploaded
model.load_state_dict(torch.load('best_model.pth'))
# Currently, the model weights are located on the CPU memory execute the code below to transfer to the GPU device.
device = torch.device('cuda')
model = model.to(device)
# ### Create the preprocessing function
#
# We have now loaded our model, but there's a slight issue. The format that we trained our model doesnt *exactly* match the format of the camera. To do that,
# we need to do some *preprocessing*. This involves the following steps
#
# 1. Convert from BGR to RGB
# 2. Convert from HWC layout to CHW layout
# 3. Normalize using same parameters as we did during training (our camera provides values in [0, 255] range and training loaded images in [0, 1] range so we need to scale by 255.0
# 4. Transfer the data from CPU memory to GPU memory
# 5. Add a batch dimension
# +
import cv2
import numpy as np
# ImageNet channel statistics rescaled to the camera's [0, 255] range.
mean = 255.0 * np.array([0.485, 0.456, 0.406])
stdev = 255.0 * np.array([0.229, 0.224, 0.225])

normalize = torchvision.transforms.Normalize(mean, stdev)

def preprocess(camera_value):
    """Convert a BGR uint8 camera frame into a normalized 1xCxHxW tensor on `device`."""
    global device, normalize
    rgb = cv2.cvtColor(camera_value, cv2.COLOR_BGR2RGB)
    chw = rgb.transpose((2, 0, 1))
    tensor = normalize(torch.from_numpy(chw).float())
    tensor = tensor.to(device)
    return tensor[None, ...]
# -
# Great! We've now defined our pre-processing function which can convert images from the camera format to the neural network input format.
#
# Now, let's start and display our camera. You should be pretty familiar with this by now. We'll also create a slider that will display the
# probability that the robot is blocked.
# +
import traitlets
from IPython.display import display
import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg
camera = Camera.instance(width=224, height=224)
image = widgets.Image(format='jpeg', width=224, height=224)
blocked_slider = widgets.FloatSlider(description='blocked', min=0.0, max=1.0, orientation='vertical')
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
display(widgets.HBox([image, blocked_slider]))
# -
# We'll also create our robot instance which we'll need to drive the motors.
# +
from jetbot import Robot
robot = Robot()
# -
# Next, we'll create a function that will get called whenever the camera's value changes. This function will do the following steps
#
# 1. Pre-process the camera image
# 2. Execute the neural network
# 3. While the neural network output indicates we're blocked, we'll turn left, otherwise we go forward.
# When running the code below, keep either the L or R button on the VS-C3 controller pressed — the robot may suddenly start driving.
#
# Once you have confirmed that the camera image is being recognized correctly, release the controller button.
# +
import torch.nn.functional as F
import time
def update(change):
    """Camera callback: classify the newest frame and steer the robot."""
    global blocked_slider, robot
    frame = preprocess(change['new'])
    logits = model(frame)
    # softmax normalizes the output vector so it sums to 1 (a probability distribution)
    probabilities = F.softmax(logits, dim=1)
    prob_blocked = float(probabilities.flatten()[0])
    blocked_slider.value = prob_blocked
    if prob_blocked < 0.5:
        robot.forward(0.1)
    else:
        robot.left(0.1)
    time.sleep(0.001)

update({'new': camera.value})  # call once to initialize
# -
# Cool! We've created our neural network execution function, but now we need to attach it to the camera for processing.
#
# We accomplish that with the ``observe`` function.
#
# > WARNING: This code will move the robot!! Please make sure your robot has clearance. The collision avoidance should work, but the neural
# > network is only as good as the data it's trained on!
camera.observe(update, names='value') # this attaches the 'update' function to the 'value' traitlet of our camera
# Awesome! If your robot is plugged in it should now be generating new commands with each new camera frame. Perhaps start by placing your robot on the ground and seeing what it does when it reaches an obstacle.
#
# If you want to stop this behavior, you can unattach this callback by executing the code below.
# Detach the classifier callback, let in-flight frames finish, then halt the motors.
camera.unobserve(update, names='value')
# SYNTAX FIX: the original fused `time.sleep(0.5)robot.stop()` onto one line,
# which is a SyntaxError; they must be separate statements.
time.sleep(0.5)
robot.stop()
# Perhaps you want the robot to run without streaming video to the browser. You can unlink the camera as below.
camera_link.unlink() # don't stream to browser (will still run camera)
# To continue streaming call the following.
camera_link.link() # stream to browser (wont run camera)
# ### Conclusion
#
# That's it for this live demo! Hopefully you had some fun and your robot avoided collisions intelligently!
#
# If your robot wasn't avoiding collisions very well, try to spot where it fails. The beauty is that we can collect more data for these failure scenarios
# and the robot should get even better :)
| notebooks/collision_avoidance/live_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time-Constraint NFV Profiling (or Benchmarking)
#
# This file builds a first playground to start with the TC profiling work.
#
#
# global plotting setup
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
import numpy as np
import seaborn
# # Model
#
# We want to model complex network service chain that consists of multiple VNFs.
#
# ## Service
#
# * A service is a directed graph of functions
# * We don't consider multiple forwarding graphs
# * We don't care about the links
# * A service can have multiple `metrics` aka. `ServiceMetrics` that specify how the computed performance values of the VNFs are combined to represent the service's total performance (f: service_graph -> performance value)
#
# ## VNF
#
# * A VNF is a single network function
# * A VNF is a single VDU
# * A VNF has a resource configuration: `cpu`, `mem`, `blkio`
# * A service can have multiple `metrics` aka. `VnfMetrics` that specify its performance for a given configuration (f: configuration -> performance value)
#
# +
class Service(object):
    """
    Represents an abstract network service.
    Holds a DAG that represents the service chain
    and references the involved VNFs.

    NOTE(review): still a stub — the service graph and the ServiceMetrics
    described in the markdown cell above are not implemented yet.
    """
    pass
class VNF(object):
    """A single network function (one VDU) holding named performance metrics."""

    def __init__(self, name):
        self.name = name
        self._metrics = dict()

    def add_metric(self, m):
        """Register metric *m* under its own name (replaces any previous one)."""
        self._metrics[m.name] = m

    def calc(self, m_name, cpu, mem, blkio):
        """Evaluate the metric *m_name* for the given resource configuration."""
        metric = self._metrics.get(m_name)
        return metric.calc(cpu, mem, blkio)
class ThroughputVnfMetric(object):
    """Synthetic throughput model: (cpu² + 2·mem + 0.5·blkio) scaled by `parameter`."""

    def __init__(self, name, parameter):
        self.name = name
        self.parameter = parameter

    def calc(self, cpu, mem, blkio):
        base = cpu ** 2 + (2 * mem + 0.5 * blkio)
        return base * self.parameter
# Two VNFs with the same metric name but different scale parameters.
# NOTE(review): both metrics are named "t1" — presumably intentional so both
# VNFs answer calc("t1", ...), but confirm "t2" wasn't meant for m2.
m1 = ThroughputVnfMetric("t1", 1.0)
m2 = ThroughputVnfMetric("t1", 0.3)

v1 = VNF("vnf1")
v1. add_metric(m1)
v2 = VNF("vnf2")
v2. add_metric(m2)

# test
# Plot each VNF's modeled throughput as cpu=mem=blkio grows from 0 to 99.
x1 = range(0, 100)
plt.plot(x1, [v1.calc("t1", i , i , i) for i in x1], label=v1.name)
plt.plot(x1, [v2.calc("t1", i , i , i) for i in x1], label=v2.name)
plt.title("VNF performance")
plt.legend()
plt.show()
# -
| prototyping/tc_profiling_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is an example notebook.
#
# Some information about the current environment
# +
import os
import sys
# Show which interpreter/environment this notebook is actually running in.
print (os.getcwd())
print (sys.version)
print (sys.executable)
print (sys.path)
# -
# The below will use our package however first we need to ensure it is available (otherwise you get an error about the module not being found). You can either run setup.py as discussed in the readme to install the package or modify the path to include the src folder.
# Explicitly set path so don't need to run setup.py - if we have multiple copies of the code we would otherwise need
# to setup a separate environment for each to ensure the code pointers are correct.
# Prepends ../src (relative to the notebook's CWD) to the import path.
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), os.pardir, 'src')))
# Use our package
from examplepackage import examplemodule
examplemodule.hello_world()
| notebooks/eda/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Entity Extraction and Document Classification
# ## 1. Setup
#
# To prepare your environment, you need to install some packages and enter credentials for the Watson services.
# ## 1.1 Install the necessary packages
#
# You need the latest versions of these packages:
# Watson Developer Cloud: a client library for Watson services.
# NLTK: leading platform for building Python programs to work with human language data.
# ### Install the Watson Developer Cloud package:
# !pip install watson-developer-cloud==1.5
# ### Install NLTK:
# !pip install --upgrade nltk
# ### Install IBM Cloud Object Storage Client:
# !pip install ibm-cos-sdk
# ### Now restart the kernel by choosing Kernel > Restart.
# ## 1.2 Import packages and libraries
# Import the packages and libraries that you'll use:
# +
import json
import watson_developer_cloud
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
import Features, EntitiesOptions, KeywordsOptions
import ibm_boto3
from botocore.client import Config
import re
import nltk
import datetime
from nltk import word_tokenize,sent_tokenize,ne_chunk
import numpy as np
import unicodedata
# -
# ## 2. Configuration
# Add configurable items of the notebook below
# ### 2.1 Add your service credentials from IBM Cloud for the Watson services
# You must create a Watson Natural Language Understanding service on IBM Cloud. Create a service for Natural Language Understanding (NLU). Insert the username and password values for your NLU in the following cell. Do not change the values of the version fields.
# Run the cell.
# Watson NLU client; the API version is pinned — do not change it.
# Fill in username/password with your own service credentials.
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-03-23',
    username="",
    password="")
# ### 2.2 Add your service credentials for Object Storage
# You must create Object Storage service on IBM Cloud. To access data in a file in Object Storage, you need the Object Storage authentication credentials. Insert the Object Storage authentication credentials as credentials_1 in the following cell after removing the current contents in the cell.
# +
# @hidden_cell
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
credentials_1 = {
    'IBM_API_KEY_ID': '',
    'IAM_SERVICE_ID': '',
    'ENDPOINT': 'https://s3.eu-geo.objectstorage.service.networklayer.com',
    'IBM_AUTH_ENDPOINT': 'https://iam.eu-gb.bluemix.net/oidc/token',
    'BUCKET': '',
    'FILE': 'form-doc-1.txt'
}
# -
# ### 2.3 Global Variables
# Add global variables.
# File names of the sample document and the two JSON configuration files
# stored in the Object Storage bucket above.
sampleText='form-doc-1.txt'
ConfigFileName_Entity='config_entity_extract.txt'
ConfigFileName_Classify= 'config_legaldocs.txt'
# ### 2.4 Configure and download required NLTK packages
# Download the 'punkt' and 'averaged_perceptron_tagger' NLTK packages for POS tagging usage.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# ## 3. Persistence and Storage
# ### 3.1 Configure Object Storage Client
# +
# S3-compatible client for IBM Cloud Object Storage, authenticated via IAM
# with the credentials_1 values defined above.
cos = ibm_boto3.client('s3',
                       ibm_api_key_id=credentials_1['IBM_API_KEY_ID'],
                       ibm_service_instance_id=credentials_1['IAM_SERVICE_ID'],
                       ibm_auth_endpoint=credentials_1['IBM_AUTH_ENDPOINT'],
                       config=Config(signature_version='oauth'),
                       endpoint_url=credentials_1['ENDPOINT'])
def get_file(filename):
    """Return the streaming body of *filename* from Cloud Object Storage."""
    response = cos.get_object(Bucket=credentials_1['BUCKET'], Key=filename)
    return response['Body']
def load_string(fileobject):
    """Read *fileobject* fully and return its raw contents.

    For COS streaming bodies this yields ``bytes``; callers decode as needed.
    """
    return fileobject.read()
def put_file(filename, filecontents):
    """Upload *filecontents* to Cloud Object Storage under *filename*.

    Returns the raw COS response dict.
    """
    return cos.put_object(Bucket=credentials_1['BUCKET'],
                          Key=filename,
                          Body=filecontents)
# -
# ## 4. Input Data
# Read the data file for entity extraction from Object Store
# Read the configuration file for augmented entity-value pairs from Object Store.
# The sample document comes back from COS as bytes; decode to str before
# passing it to NLU/NLTK.
text_file= load_string(get_file(sampleText))
if isinstance(text_file, bytes):
    text_file = text_file.decode('utf-8')
print(text_file)
# Entity-extraction configuration (JSON string).
config_entity = load_string(get_file(ConfigFileName_Entity)).decode('utf-8')
print(config_entity)
# Document-classification configuration (JSON string).
config_class = load_string(get_file(ConfigFileName_Classify)).decode('utf-8')
print(config_class)
# ## 5. Entity Extraction
# Extract required entities present in the document and augment the response to NLU's results
# ### 5.1 Entities Extracted by Watson NLU
def analyze_using_NLU(analysistext):
    """Run Watson NLU keyword extraction over *analysistext*.

    Returns the plain keyword strings from the service response.
    """
    nlu_result = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(keywords=KeywordsOptions()))
    return [keyword['text'] for keyword in nlu_result['keywords']]
# ### 5.2 Extract Entity-Value
# Custom entity extraction utility functions for augmenting the results of the Watson NLU API call
# +
def POS_tagging(text):
    """Tokenize *text* (newlines collapsed to spaces) and return NLTK POS tags.

    Returns a list of (token, pos_tag) pairs.
    """
    flattened = re.sub(r'\n', ' ', text)
    tokens = nltk.word_tokenize(flattened)
    return nltk.tag.pos_tag(tokens)
# Module-level accumulator shared by text_extract(): maps each tag to its
# extracted value, merging results across successive calls.
entval= dict()
def text_extract(reg, tag,text):
    """ Use Chunking to extract text from sentence
    """
    # Parse the POS-tagged text with chunk grammar *reg*; every subtree
    # labelled 'Chunk' is a candidate phrase.
    entities = list()
    chunkParser= nltk.RegexpParser(reg)
    chunked= chunkParser.parse(POS_tagging(text))
    #print(chunked)
    for subtree in chunked.subtrees():
        if subtree.label() == 'Chunk':
            #print(subtree.leaves())
            entities.append(subtree.leaves())
    #print(entities)
    # If any token of a candidate phrase contains the tag text, map the tag
    # to the first proper noun (NNP) of that phrase via find_NNP().
    for i in range(len(entities)):
        for j in range(len(entities[i])):
            #print(entities[i][j][0].lower())
            if tag.strip().lower() in entities[i][j][0].lower():
                #print(entities[i])
                entval.update({tag: find_NNP(entities[i],tag)})
    # Returns the shared accumulator, so earlier tags remain present.
    return entval
def find_NNP(ent, tag):
    """Return the first proper-noun (NNP) token in *ent* that is not *tag*.

    *ent* is a list of (token, pos_tag) pairs. Returns None when no such
    token exists.
    """
    for token_pair in ent:
        if tag not in token_pair and token_pair[1] == 'NNP':
            return token_pair[0]
    return None
def checkValid(date):
    """Return 1 if *date* is a valid DD/MM/YYYY date string, else 0.

    Leading/trailing whitespace is ignored. Invalid inputs are reported on
    stdout (behaviour kept from the original) rather than raising.
    """
    try:
        datetime.datetime.strptime(date.strip(), "%d/%m/%Y")
        return 1
    except ValueError as err:
        print(err)
        return 0
def date_extract(reg, tag, text, stage_name):
    """Find a date that follows *tag* in *text* and validate it.

    *reg* is a regex matching the date itself; the search lowercases both
    tag and text. Returns a dict mapping the lowercased tag to the
    validated date string (empty when nothing matched or validation
    failed). *stage_name* is unused but kept for interface compatibility.

    Fixes over the original:
    - guards against IndexError when the regex finds no match;
    - removes the tag as a *prefix* instead of ``str.strip(tag)``, which
      stripped any characters of the tag from both ends and could corrupt
      the date string;
    - returns the result dict instead of only printing it.
    """
    found = dict()
    prefix = tag.lower()
    dates = re.findall(prefix + ' ' + reg, text.lower())
    print(dates)
    if not dates:
        print(found)
        return found
    candidate = dates[0][len(prefix):].strip()
    if checkValid(candidate) == 1:
        found[prefix] = candidate
    print(found)
    return found
def amt_extract(reg, tag, text):
    """Print all matches of *reg* in *text* (amount-extraction stub).

    *tag* is currently unused; kept for interface compatibility with the
    other ``*_extract`` helpers. The original also built an empty dict
    that was never used — removed.
    """
    matches = re.findall(reg, text)
    print(matches)
# Module-level accumulator: entities_required() appends into this list on
# every call, so results accumulate across calls (kept for compatibility).
entities_req= list()
def entities_required(text, step, types):
    """Collect the 'tag' of every entry in *step* whose 'type' equals *types*.

    Appends into the module-level ``entities_req`` list and returns it.
    *text* is unused but kept for interface compatibility.

    Fix: the original parsed the global ``config_entity`` JSON into a
    local variable that was never used; that dead work (and its hidden
    global dependency) is removed.
    """
    for entry in step:
        if entry['type'] == types:
            entities_req.append(str(entry['tag']))
    return entities_req
# entlist= list()
def extract_entities(config,text):
    """ Extracts entity-value pairs
    """
    # *config* is a JSON string; its 'configuration.class.stages' entries
    # drive which regex/tag pairs are fed to text_extract().
    configjson= json.loads(config)
    #print(configjson)
    #print(configjson['configuration']['class'][0]['steps'][0]['entity'][0]['tag'])
    classes=configjson['configuration']['class']
    #for i in range(len(classes)):
    stages= classes['stages']
    for j in range(len(stages)):
        if stages[j]['name']=='Intro':
            steps= stages[j]['steps']
            for k in range(len(steps)):
                if steps[k]['type'] == 'text':
                    #temp=entities_required(text,steps,steps[k]['type'])
                    #print(temp)
                    ent = text_extract(steps[k]['regex'],steps[k]['tag'],text)
                #elif steps[k]['type'] == 'date':
                    #dates= date_extract(steps[k]['regex1'],steps[k]['tag'],text, stages[j]['name'])
        elif stages[j]['name']=='Parties to Contract':
            steps= stages[j]['steps']
            for k in range(len(steps)):
                if steps[k]['type'] == 'text':
                    #temp=entities_required(text,steps,steps[k]['type'])
                    ent = text_extract(steps[k]['regex'],steps[k]['tag'],text)
    #print(ent)
    # NOTE(review): if no stage named 'Intro' or 'Parties to Contract' has a
    # 'text' step, `ent` is never assigned and this raises NameError.
    # text_extract returns its shared accumulator, so `ent` holds all tags
    # gathered so far.
    return ent
# +
# End-to-end entity extraction over the sample document.
extract_entities(config_entity, text_file)
# -
# ## 6. Document Classification
# Classify documents based on entities extracted from the previous step
def entities_required_classification(text, config):
    """Parse the classification *config* (JSON string) into per-stage entity lists.

    Returns a list with one element per stage; each element is a list of
    [entity_text, doctype] pairs for that stage. *text* is unused but kept
    for interface compatibility.
    """
    parsed = json.loads(config)
    stage_list = parsed['configuration']['classification']['stages']
    return [
        [[entity['text'], stage['doctype']] for entity in stage['entities']]
        for stage in stage_list
    ]
#entities_required_classification(text2,config1)
def classify_text(text, entities,config):
    """ Classify type of document from list of entities(NLU + Configuration file)
    """
    e= dict()
    # Per-stage [entity_text, doctype] pairs from the classification config.
    entities_req= entities_required_classification(text,config)
    for i in range(len(entities_req)):
        temp= list()
        for j in range(len(entities_req[i])):
            entities_req[i][j][0]= entities_req[i][j][0].strip()
            entities_req[i][j][0]= entities_req[i][j][0].lower()
            temp.append(entities_req[i][j][0])
        # NOTE(review): the NLU service is called once per stage inside this
        # loop — expensive; the result does not depend on i and could be
        # hoisted.
        res= analyze_using_NLU(text)
        #temp= temp + res
        #print text
        #text= text.decode('utf-8')
        # A stage matches when ALL its configured entity strings appear in
        # the text and ANY NLU keyword appears in the text.
        if all(str(x) in text.lower() for x in temp) and any(str(y) in text.lower() for y in res):
            # NOTE(review): `j` here is the last index of the inner loop
            # above; every pair of a stage carries the same doctype, so this
            # returns that stage's doctype.
            return entities_req[i][j][1]
def doc_classify(text, config, config1):
    """Classify *text* using NLU keywords plus configured entity tags.

    *config* drives entity extraction, *config1* drives classification.
    Returns the doctype chosen by classify_text (or None).
    """
    entities = analyze_using_NLU(text)
    extracted = extract_entities(config, text)
    entities.extend(extracted.keys())
    #print(entities)
    # Normalise before classification: lowercase, strip, de-duplicate.
    entities = {e.lower().strip() for e in entities}
    return classify_text(text, entities, config1)
# End-to-end run: classify the sample document using both config files.
doc_classify(text_file,config_entity,config_class)
| notebooks/Entity Extraction and Document Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import time
from psi.protocol import rsa
from psi.datastructure import bloom_filter
def run_protocol(client_set, server_set):
    """Run the RSA blind-signature PSI protocol and return the intersection."""
    ## BASE: server key generation and client-side blinding material
    server = rsa.Server()
    client = rsa.Client(server.public_key)
    random_factors = client.random_factors(len(client_set))
    ## SETUP: server signs its own set and publishes it as a Bloom filter
    signed_server_set = server.sign_set(server_set)
    # must encode to bytes
    signed_server_set = [str(item).encode() for item in signed_server_set]
    bf = bloom_filter.build_from(signed_server_set)
    ## ONLINE: blind the client set, have the server sign it, unblind,
    ## then probe the Bloom filter for the intersection
    A = client.blind_set(client_set, random_factors)
    B = server.sign_set(A)
    unblinded_client_set = client.unblind_set(B, random_factors)
    # must encode to bytes
    unblinded_client_set = [str(item).encode() for item in unblinded_client_set]
    return client.intersect(client_set, unblinded_client_set, bf)
# -
# Small demo: intersect a 10-element client set with a 1000-element server
# set and report the wall-clock time of the whole protocol.
listA = list(range(10))
listB = list(range(1000))
start = time.time()
result = run_protocol(listA, listB)
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time * 1000) + "[ms]")
print(result[0:5])
| psi_ex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
# #### Install NumPy Using PIP
# `pip` is the package installer for Python, used in the Terminal to download and install modules from the web. `pip` will already be installed on your computer if you are using `Version 2.7.9+` for Python 2 or `Version 3.4+` for Python 3. To check what version of Python you are using, open up the Terminal and simply type `python` and hit Return. The Version number will appear in the output:
#
# `Python 2.7.11 (v2.7.11:6d1b6a68f775, Dec 5 2015, 12:54:16)` <br>`[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin`<br>`Type "help", "copyright", "credits" or "license" for more information.`
#
# In the Terminal, run `pip install numpy` to install the latest available Version of `NumPy`:
# !pip install numpy
# +
from IPython.display import display, HTML
# Inject the plot.ly documentation fonts and stylesheet into this notebook.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
# Publish this notebook as a page of the plot.ly documentation site.
publisher.publish(
    'Install-Numpy.ipynb', 'numpy/install-numpy/', 'Install NumPy | plotly',
    'How to install NumPy using pip.',
    title = 'Install NumPy | plotly',
    name = 'Install NumPy',
    has_thumbnail='true', thumbnail='thumbnail/numpy-logo.jpg',
    language='numpy', page_type='example_index', redirect_from='numpy/download-numpy/',
    display_as='getting-started', order=1)
# -
| _posts/numpy/install-numpy/Install-Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
plt.rcParams['figure.dpi'] = 150
# +
from sklearn.datasets import make_blobs
plt.rcParams['figure.dpi'] = 150
# create dataset: 150 2-D points in 3 well-separated Gaussian blobs
# (random_state fixed for reproducibility)
X, y = make_blobs(
    n_samples=150, n_features=2,
    centers=3, cluster_std=0.5,
    shuffle=True, random_state=0
)
# plot
plt.scatter(
    X[:, 0], X[:, 1],
    edgecolor='black', s=50
)
plt.show()
# -
# -
from sklearn.cluster import KMeans

# Elbow method: record the within-cluster sum of squares (inertia) for
# k = 1..9 and plot it against k.
wss = []
index = []
for k in range(1, 10):
    km = KMeans(n_clusters=k, init='random', n_init=10,
                max_iter=10000, tol=1e-04, random_state=0)
    y_km = km.fit_predict(X)
    index.append(k)
    wss.append(km.inertia_)
index, wss
plt.plot(index, wss, marker="+")
# Final model with the k suggested by the elbow plot (k = 3).
km = KMeans(
    n_clusters=3, init='random',
    n_init=10, max_iter=10000,
    tol=1e-04, random_state=0
)
y_km = km.fit_predict(X)
plt.scatter(X[:,0], X[:,1], c=y_km, s=50, cmap=plt.cm.Paired, alpha=0.4)
plt.scatter(km.cluster_centers_[:, 0],km.cluster_centers_[:, 1],
            s=250, marker='*', label='centroids',
            edgecolor='black',
            c=np.arange(0,3),cmap=plt.cm.Paired,)
# +
from ipywidgets import *
# Interactive view: re-fit k-means with a chosen cluster count and a capped
# number of iterations (n_init=1 so each slider step shows a single run).
def func(iteration_step,clusters):
    km = KMeans(
        n_clusters=clusters, init='random',
        n_init=1, max_iter=iteration_step,
        tol=1e-04, random_state=0
    )
    y_km = km.fit_predict(X)
    plt.scatter(X[:,0], X[:,1], c=y_km, cmap=plt.cm.Paired, alpha=0.4)
    plt.scatter(km.cluster_centers_[:, 0],km.cluster_centers_[:, 1],
                s=250, marker='*', label='centroids',
                edgecolor='black',
                c=np.arange(0,clusters),cmap=plt.cm.Paired,)
interact(func,iteration_step=IntSlider(min=1, max=23, step=1,continuous_update=False),
         clusters=IntSlider(min=1, max=23, step=1,continuous_update=False));
# -
# -
| exercises/minimal_clustering&classification/clustering_kmeans_BLOBS_sumofsquare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="V1yAtcJaYmm3" colab_type="text"
# | Name | Description | Date
# | :- |-------------: | :-:
# |<font color=red>__<NAME>__</font>| __Regularized polynomial regression with linear and random sampling - LOOP__. | __On 11th of August 2019__
# + [markdown] id="yacSm-TJYlza" colab_type="text"
# # Ridge/LASSO polynomial regression with linear and random sampling
# * Input variable space is constructed using random sampling/cluster pick/uniform sampling
# * Linear fit is often inadequate but higher-order polynomial fits often leads to overfitting i.e. learns spurious, flawed relationships between input and output
# * Ridge and LASSO regression are used with varying model complexity (degree of polynomial)
# * Model score is obtained on a test set and average score over a # of runs is compared for linear and random sampling
# + [markdown] id="Benpwr0kYlzb" colab_type="text"
# ### Import libraries
# + id="WRCm69nPYlzc" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="VZDfHQ8-Ylze" colab_type="code" colab={}
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
# + [markdown] id="7eM42qjYYlzg" colab_type="text"
# ### Global variables for the program
# + id="qkozS7N8Ylzh" colab_type="code" colab={}
N_points = 41 # Number of points for constructing function
x_min = 1 # Min of the range of x (feature)
x_max = 10 # Max of the range of x (feature)
noise_mean = 0 # Mean of the Gaussian noise adder
noise_sd = 5 # Std.Dev of the Gaussian noise adder
ridge_alpha = tuple([10**(x) for x in range(-4,0,1) ]) # Alpha (regularization strength) of ridge regression
lasso_eps = 0.001 # LassoCV path length (eps = alpha_min / alpha_max)
lasso_nalpha=20 # Number of alphas along the LassoCV regularization path
lasso_iter=2000 # Max iterations for LassoCV
degree_min = 2 # Lowest polynomial degree to sweep
degree_max = 8 # Highest polynomial degree to sweep
# + [markdown] id="PfBhRKZeYlzj" colab_type="text"
# ### Generate feature and output vector following a non-linear function
#
# The ground truth (originating) function is as follows:
#
# $$ y = f(x) = x^2 \sin(x)\, e^{-0.1x} + \psi(x) $$
#
# $$ \text{where the noise term is Gaussian:}\quad \psi(x) \sim {\frac {1}{\sqrt {2\pi \sigma ^{2}}}}\, e^{-{\frac {(x-\mu )^{2}}{2\sigma ^{2}}}} $$
# + id="NIt1gVDlYlzm" colab_type="code" colab={}
def func(x):
    """Ground-truth function: x^2 * sin(x) * exp(-x / x_max)."""
    result = x**2*np.sin(x)*np.exp(-(1/x_max)*x)
    return (result)
# One fixed Gaussian noise vector, drawn once and reused for every run of
# the experiment loop below.
noise_x = np.random.normal(loc=noise_mean,scale=noise_sd,size=N_points)
# + id="kzIOIkvHYlzo" colab_type="code" colab={} outputId="aa144c28-8f9e-4f9d-ecf4-182521739e3a"
# Per-run statistics of the test scores: spread and mean for the linearly
# sampled and the randomly sampled variants, plus one score table per run.
var_linear =[]
var_random =[]
mean_linear =[]
mean_random =[]
dfs = []
for i in range(50):
    #x_smooth = np.array(np.linspace(x_min,x_max,1001))
    # Linearly spaced sample points
    X=np.array(np.linspace(x_min,x_max,N_points))
    # Samples drawn from uniform random distribution
    X_sample = x_min+np.random.rand(N_points)*(x_max-x_min)
    #noise_x = np.random.normal(loc=noise_mean,scale=noise_sd,size=N_points)
    # NOTE(review): noise_x is drawn once outside this loop (the re-draw is
    # commented out), so every run reuses the same noise realisation.
    y = func(X)+noise_x
    y_sampled = func(X_sample)+noise_x
    df = pd.DataFrame(data=X,columns=['X'])
    df['Ideal y']=df['X'].apply(func)
    df['y']=y
    df['X_sampled']=X_sample
    df['y_sampled']=y_sampled
    # --- Modeling with linearly spaced samples ---
    X_train, X_test, y_train, y_test = train_test_split(df['X'], df['y'], test_size=0.33)
    X_train=X_train.values.reshape(-1,1)
    X_test=X_test.values.reshape(-1,1)
    linear_sample_score = []
    poly_degree = []
    for degree in range(degree_min,degree_max+1):
        model = make_pipeline(PolynomialFeatures(degree), RidgeCV(alphas=ridge_alpha,normalize=True,
                                                                  cv=5))
        #model = make_pipeline(PolynomialFeatures(degree), LassoCV(eps=lasso_eps,n_alphas=lasso_nalpha,
        #max_iter=lasso_iter,normalize=True,cv=5))
        #model = make_pipeline(PolynomialFeatures(degree), LinearRegression(normalize=True))
        model.fit(X_train, y_train)
        y_pred = np.array(model.predict(X_train))
        test_pred = np.array(model.predict(X_test))
        # NOTE(review): RMSE and test_pred are computed but never used below.
        RMSE=np.sqrt(np.sum(np.square(y_pred-y_train)))
        test_score = model.score(X_test,y_test)
        linear_sample_score.append(test_score)
        poly_degree.append(degree)
    var_linear.append(np.std(np.array(linear_sample_score)))
    mean_linear.append(np.mean(np.array(linear_sample_score)))
    # Modeling with randomly sampled data set
    X_train, X_test, y_train, y_test = train_test_split(df['X_sampled'], df['y_sampled'], test_size=0.33)
    X_train=X_train.values.reshape(-1,1)
    X_test=X_test.values.reshape(-1,1)
    random_sample_score = []
    poly_degree = []
    for degree in range(degree_min,degree_max+1):
        model = make_pipeline(PolynomialFeatures(degree),RidgeCV(alphas=ridge_alpha,normalize=True,
                                                                 cv=5))
        #model = make_pipeline(PolynomialFeatures(degree), LassoCV(eps=lasso_eps,n_alphas=lasso_nalpha,
        #max_iter=lasso_iter,normalize=True,cv=5))
        #model = make_pipeline(PolynomialFeatures(degree), LinearRegression(normalize=True))
        model.fit(X_train, y_train)
        y_pred = np.array(model.predict(X_train))
        test_pred = np.array(model.predict(X_test))
        RMSE=np.sqrt(np.sum(np.square(y_pred-y_train)))
        test_score = model.score(X_test,y_test)
        random_sample_score.append(test_score)
        poly_degree.append(degree)
    var_random.append(np.std(np.array(random_sample_score)))
    mean_random.append(np.mean(np.array(random_sample_score)))
    # One row per polynomial degree with both sampling methods' test scores.
    df_score = pd.DataFrame(data={'degree':[d for d in range(degree_min,degree_max+1)],
                                  'Linear sample score':linear_sample_score,
                                  'Random sample score':random_sample_score})
    dfs.append(df_score)
    #print(df_score)
    #print("\n")
    print ("Run # {} finished".format(i+1))
# + [markdown] id="hQ8X_PodYlzr" colab_type="text"
# max_var = max(np.max(var_linear),np.max(var_random))
# min_mean = min(np.min(mean_linear),np.min(mean_random))
#
# plt.figure()
# plt.xlim((0.0,max_var+0.05))
# plt.ylim((0.0,max_var+0.05))
# plt.xlabel("Variation of linearly sampled score")
# plt.ylabel("Variation of randomly sampled score")
# plt.scatter(var_linear,var_random)
#
# plt.figure()
# plt.xlim((min_mean-0.05,1.0))
# plt.ylim((min_mean-0.05,1.0))
# plt.xlabel("Mean of linearly sampled score")
# plt.ylabel("Mean of randomly sampled score")
# plt.scatter(mean_linear,mean_random)
# + id="1DAX3qhnYlzr" colab_type="code" colab={}
# Stack the 50 per-run score tables; row index repeats 0..(degree span - 1).
df1=pd.concat(dfs)
# + id="AEepqTGrYlzt" colab_type="code" colab={} outputId="f3374175-ba00-477f-b7fd-6edba4762e25"
rand = []
lin = []
for i in range(degree_max+1-degree_min):
    # df1.loc[i] selects all rows with repeated index i, i.e. the scores of
    # degree (degree_min + i) across all runs; take their mean.
    rand.append(df1.loc[i]['Random sample score'].mean())
    lin.append(df1.loc[i]['Linear sample score'].mean())
plt.figure(figsize=(8,5))
plt.plot(range(degree_min, degree_max+1),lin)
plt.plot(range(degree_min, degree_max+1),rand)
plt.xlabel("Model complexity (degree of polynomial)")
plt.ylabel("Model score on test set")
plt.legend(['Linear sampling method','Random sampling method'])
plt.grid(True)
| Function Approximation by Neural Network/Regularized polynomial regression with linear and random sampling - LOOP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple Regression Example
#
# 
# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Seed NumPy once for reproducible random draws (the original seeded twice
# with the same value, which was redundant).
np.random.seed(101)
n_features = 10     # number of input features
n_dense_neuros = 3  # number of neurons in the dense layer
# -
# ## Create a random data
# Random "true" slope and intercept for the reference line.
_w_,_b_ = np.random.rand(2)
POINTS = 1000      # number of data points
MARGIN = 0.00001   # gradient-descent learning rate used below
# +
def noising():
    """Return POINTS uniform noise samples in [-1.5, 1.5)."""
    return np.random.uniform(-1.5, 1.5, POINTS)
def teach_me(x):
    """Reference line: _w_ * x + _b_."""
    return _w_ * x + _b_
# NOTE(review): labels are generated independently of data (both are a
# noisy linspace), not via teach_me(data) — confirm this is intentional.
data = np.linspace(0,10,POINTS) + noising()
labels = np.linspace(0,10,POINTS) + noising()
# -
plt.plot(data, labels, '*')
plt.plot(data, teach_me(data), 'r')
# TF1 graph-mode fitting: variables start from the reference slope/intercept.
W = tf.Variable(_w_)
b = tf.Variable(_b_)
# Build the full-batch sum-of-squared-errors symbolically over all points.
error = 0
for d,l in zip(data, labels):
    l_hat = W*d + b
    error += (l-l_hat)**2
optimizer = tf.train.GradientDescentOptimizer(learning_rate=MARGIN)
train = optimizer.minimize(error)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    training_steps = 1000
    for s in range(training_steps):
        sess.run(train)
    # Materialize the fitted parameters as plain floats.
    final_slope, final_intersept = sess.run([W,b])
# ## Testing
# +
# Plot the fitted line (green) over the data and the reference line (red).
data_test = np.linspace(-1,11,POINTS)
label_test = final_slope * data_test + final_intersept
plt.plot(data, labels, '*')
plt.plot(data, teach_me(data), 'r')
plt.plot(data_test, label_test, 'g')
# -
| TensorFlowUDemy/Solve regression problem using tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-foodworks]
# language: python
# name: conda-env-.conda-foodworks-py
# ---
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
# Sample metadata keyed by sample id (tab-separated, first column = index).
filtered_df = pd.read_csv("../results/filtered.sampleids.txt", sep="\t", index_col=0)
filtered_df.head()
# Load up the sample names in data
df_samples = pd.read_csv("../data/taxonomy_clr_s.txt", sep='\t', index_col=0)
df_samples.head()
# Keep only the taxonomy columns whose sample id appears in filtered_df.
filtered_samples = df_samples.iloc[:, [_ in filtered_df.index for _ in df_samples.columns]]
filtered_samples.shape
# Taxa with positive mean abundance, ranked by variance / median as
# candidates for prediction targets.
above_average = filtered_samples[filtered_samples.mean(axis=1) > 0.]
above_average.var(axis=1).sort_values(ascending=False).head(n=10)
filtered_samples.var(axis=1).sort_values(ascending=False).head(n=5)
filtered_samples.median(axis=1).sort_values(ascending=False).head(n=5)
# +
# Species to Predict
# k__Bacteria;p__Bacteroidetes;c__Bacteroidia;o__Bacteroidales;f__Rikenellaceae;g__Alistipes;s__Alistipes obesi
# k__Bacteria;p__Firmicutes;c__Clostridia;o__Clostridiales;f__Ruminococcaceae;g__Ruminiclostridium;s__[Eubacterium] siraeum
# k__Bacteria;p__Bacteroidetes;c__Bacteroidia;o__Bacteroidales;f__Barnesiellaceae;g__Barnesiella;s__Barnesiella intestinihominis
# k__Bacteria;p__Bacteroidetes;c__Bacteroidia;o__Bacteroidales;f__Bacteroidaceae;g__Bacteroides;s__Bacteroides coprocola
# k__Bacteria;p__Actinobacteria;c__Actinobacteria;o__Bifidobacteriales;f__Bifidobacteriaceae;g__Bifidobacterium;s__Bifidobacterium longum
# -
filtered_samples.shape
def get_sample_for_day_plus_1(sample_name, filtered_df):
    """Return the row for the same subject on the following study day.

    Looks up *sample_name* in *filtered_df*, then selects rows with the
    same 'UserName' and 'StudyDayNo' + 1. Returns that one-row DataFrame,
    or an empty ``pd.Series()`` when there is not exactly one such row.
    """
    anchor = filtered_df.loc[sample_name, :]
    same_user = filtered_df['UserName'] == anchor['UserName']
    next_day = filtered_df['StudyDayNo'] == anchor['StudyDayNo'] + 1
    candidates = filtered_df[same_user & next_day]
    if len(candidates) == 1:
        return candidates
    return pd.Series()
# Sanity check: find the next-day sample for one known sample id.
get_sample_for_day_plus_1('MCT.f.0002', filtered_df)
# +
species_to_predict = ['k__Bacteria;p__Bacteroidetes;c__Bacteroidia;o__Bacteroidales;f__Rikenellaceae;g__Alistipes;s__Alistipes obesi']
# Boolean mask over filtered_df's columns: which metadata columns to carry
# into the prediction rows.
filtered_sample_map = np.array([0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype='bool')
# -
def yield_rows_for_network(filtered_samples, filtered_df, filtered_sample_map=pd.Series()):
    # For each sample that has a next-day sample, yield one combined row:
    # today's taxa abundances (+ optionally masked metadata) concatenated
    # with tomorrow's abundances prefixed 'dayplus1;'.
    # NOTE(review): the pd.Series() default is a mutable default argument;
    # it is only read via .any() here, so it is harmless in practice.
    seen = set()
    for sample in filtered_samples.columns:
        if not sample in seen:
            seen.add(sample)
            next_sample_row = get_sample_for_day_plus_1(sample, filtered_df)
            if not next_sample_row.empty:
                next_sample = next_sample_row.iloc[0]
                if not next_sample.name in seen:
                    # Grab the sample info
                    sample_info = filtered_df.loc[sample, :]
                    if filtered_sample_map.any():
                        sample_info_filtered = sample_info[filtered_sample_map]
                        new_row = filtered_samples[sample].append(sample_info_filtered)
                    else:
                        new_row = filtered_samples[sample].append(sample_info)
                    # NOTE(review): Series.append is removed in pandas 2.x;
                    # would need pd.concat under a modern pandas.
                    next_row = filtered_samples.loc[:, next_sample.name].copy()
                    next_row.index = ['dayplus1;' + _ for _ in next_row.index]
                    new_row = new_row.append(next_row)
                    new_row.name = sample
                    yield new_row
# Assemble the full prediction table (one row per usable sample).
df_predict = pd.concat(yield_rows_for_network(filtered_samples, filtered_df, filtered_sample_map=filtered_sample_map), axis=1).T
df_predict.shape
# +
import itertools

def yield_blacklist(df_predict):
    """Yield directed column pairs that the network-learning step must not use.

    For every unordered column pair both directions are considered; a
    direction is emitted when its target is not a 'dayplus1;' column, or
    when its source is a 'dayplus1;' column.
    """
    for left, right in itertools.combinations(df_predict.columns, 2):
        for edge in ((left, right), (right, left)):
            source, target = edge
            if 'dayplus1;' not in target:
                yield edge
            elif 'dayplus1;' in source:
                yield edge
# -
# Full-size blacklist and prediction table, written for the downstream
# network-learning step.
df_blacklist = pd.DataFrame(yield_blacklist(df_predict), columns=['from', 'to'])
df_blacklist.head()
df_blacklist.to_csv("../results/blacklist.all.txt", sep='\t', index=False)
df_predict.to_csv("../results/prediction.all.txt", sep='\t')
# +
# Reduced variant restricted to the 10 highest-variance above-average taxa.
df_small_data = filtered_samples.loc[above_average.var(axis=1).sort_values(ascending=False).head(n=10).index, :]
df_small_predict = pd.concat(yield_rows_for_network(df_small_data, filtered_df, filtered_sample_map=filtered_sample_map), axis=1).T
df_small_blacklist = pd.DataFrame(yield_blacklist(df_small_predict), columns=['from', 'to'])
df_small_blacklist.to_csv("../results/blacklist.small.txt", sep='\t', index=False)
df_small_predict.to_csv("../results/prediction.small.txt", sep='\t')
# -
# -
| notebooks/Species to Predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hidden Linear Function Problem
#
# In this notebook we consider a problem from paper [1] and build a quantum circuit in Cirq that solves it.
# ## The problem
#
# Consider $A \in \mathbb{F}_2^{n \times n}$ - upper-triangular binary matrix, $b \in \mathbb{F}_2^n$ - binary vector.
#
# Define a function $q : \mathbb{F}_2^n \to \mathbb{Z}_4 $:
#
# $$q(x) = (2 x^T A x + b^T x) ~\text{mod}~ 4, $$
#
# Also define
#
# $$\mathcal{L}_q = \Big\{x \in \mathbb{F}_2^n : q(x \oplus y) = (q(x) + q(y)) ~\text{mod}~ 4 ~~ \forall y \in \mathbb{F}_2^n \Big\}.$$
#
# Turns out that restriction of $q$ on $\mathcal{L}_q$ is a linear function, i.e. there exists such $z \in \mathbb{F}_2^n$, that
#
# $$q(x) = 2 z^T x \quad \forall x \in \mathcal{L}_q.$$
#
# Our task is, given $A$ and $b$, to find $z$. There may be multiple answers - we need to find any such answer.
# ## Preparation and bruteforce solution
#
# For small values of $n$ we can solve this problem with a trivial bruteforce solution. First, we need to build $\mathcal{L}_q$ by checking, for all $2^n$ binary vectors, which of them belong to it (by definition). Then we need to try all possible $z \in \mathbb{F}_2^n$, and for each of them and for each $x \in \mathcal{L}_q$ check whether $q(x) = 2 z^T x$.
#
# Below we implement a class which represents instance of a problem and solves it with a bruteforce solution.
import numpy as np
import cirq
class HiddenLinearFunctionProblem:
    """An instance of the hidden linear function problem.

    Defined by a strictly upper-triangular binary matrix A and a binary
    vector b; q(x) = (2 x^T A x + b^T x) mod 4.
    """

    def __init__(self, A, b):
        self.n = A.shape[0]
        assert A.shape == (self.n, self.n)
        assert b.shape == (self.n, )
        # Everything on or below the diagonal must be zero.
        for row in range(self.n):
            for col in range(row + 1):
                assert A[row][col] == 0, 'A[i][j] can be 1 only if i<j'
        self.A = A
        self.b = b

    def q(self, x):
        """Evaluate q(x) = (2 x^T A x + b^T x) mod 4 for a binary vector x."""
        assert x.shape == (self.n, )
        return (2 * (x @ self.A @ x) + (self.b @ x)) % 4

    def bruteforce_solve(self):
        """Enumerate all 2^n binary vectors to build self.L and self.all_zs."""
        all_vectors = [np.array([(m >> i) % 2 for i in range(self.n)])
                       for m in range(2 ** self.n)]

        def in_L(x):
            # x is in L_q iff q(x XOR y) == q(x) + q(y) (mod 4) for all y.
            return all(self.q((x + y) % 2) == (self.q(x) + self.q(y)) % 4
                       for y in all_vectors)

        self.L = [x for x in all_vectors if in_L(x)]
        self.all_zs = [z for z in all_vectors if self.is_z(z)]

    # Whether given vector z is solution to this problem.
    def is_z(self, z):
        assert z.shape == (self.n, )
        assert self.L is not None
        return all(self.q(x) == 2 * ((z @ x) % 2) for x in self.L)
# For testing, we need to generate an instance of a problem. We can generate random $A$ and $b$. However, for some $A$ and $b$ the problem is trivial - that is, $\mathcal{L}_q = \{0\}$ and therefore any $z$ is a solution. In fact, the product of $|\mathcal{L}_q|$ and the number of solutions is always equal to $2^n$ (see proof in [1]), so we want a problem with large $\mathcal{L}_q$.
#
# Code below can be used to generate random problem with given size of $\mathcal{L}_q$.
# +
def random_problem(n, seed=None):
    """Generate a random problem instance of size *n* (optionally seeded)."""
    if seed is not None:
        np.random.seed(seed)
    A = np.random.randint(0, 2, size=(n, n))
    # Zero out the diagonal and everything below it so A is strictly
    # upper-triangular, as the problem definition requires.
    for row in range(n):
        for col in range(row + 1):
            A[row][col] = 0
    b = np.random.randint(0, 2, size=n)
    return HiddenLinearFunctionProblem(A, b)
def find_interesting_problem(n, min_L_size):
    """Searches for a non-trivial problem of size n.

    Tries up to 1000 random problems and returns the first one whose
    L_q has at least min_L_size elements and whose matrix A is not
    all-zero; returns None if no such problem is found.
    """
    for _ in range(1000):
        candidate = random_problem(n)
        candidate.bruteforce_solve()
        if len(candidate.L) >= min_L_size and np.max(candidate.A) != 0:
            return candidate
    return None
# Search for an n=10 problem whose hidden-linear set L_q has at least 4 elements.
problem = find_interesting_problem(10, 4)
print(len(problem.L), len(problem.all_zs))
# -
# We found a problem with $n=10$ and $|\mathcal{L}_q|=16$, so only 64 of 1024 possible vectors are solutions. So, chance of randomly guessing a solution is $\frac{1}{16}$.
# Fixed problem instance with n=10 used for reproducible demonstrations below.
# A is strictly upper triangular; b is a binary vector of length 10.
A = np.array([[0, 1, 1, 0, 0, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
              [0, 0, 0, 0, 0, 0, 1, 1, 0, 1],
              [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
              [0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
              [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
              [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
b = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 1])
problem_10_64 = HiddenLinearFunctionProblem(A, b)
# Solve by brute force and report |L_q| and the number of solutions.
problem_10_64.bruteforce_solve()
print(len(problem_10_64.L), len(problem_10_64.all_zs))
# ## Solution with a quantum circuit
# As shown in [1], given problem can be solved by a quantum circuit, which implements operator $H ^ {\otimes n} U_q H ^ {\otimes n}$, where
#
# $$U_q = \prod_{1 \le i < j \le n} CZ_{ij}^{A_{ij}} \cdot \bigotimes_{j=1}^{n} S_j^{b_j} .$$
#
# We need to apply this operator to $| 0^n \rangle$ and measure the result - result is guaranteed to be one of the solutions. Moreover, we can get any solution with equal probability.
#
# Let's implement code which would generate such circuit and simulate it.
#
# Note that:
#
# * We use Cirq S gate, whose matrix is $\left(\begin{smallmatrix}1 & 0\\0 & i\end{smallmatrix}\right)$. In the paper [1] matrix of S gate is defined as $\left(\begin{smallmatrix}1 & 0\\0 & -i\end{smallmatrix}\right)$. But for this problem it doesn't matter.
#
# * We reorder CZ gates in such a way so they take less moments. This is a problem of minimal [edge coloring](https://en.wikipedia.org/wiki/Edge_coloring), and we solve it here with a simple greedy algorithm. We can do that because CZ gates commute (because their matrices are diagonal).
#
# * All gates are Clifford gates, so we can use Clifford simulator.
# +
# Given adjacency matrix A, returns list of lists of edges, such that
# edges in each list do not have a common vertex.
def graph_coloring(A):
    """Greedily partitions the edges of graph A into vertex-disjoint groups.

    This is a simple greedy edge coloring: each pass collects as many
    not-yet-used edges as possible whose endpoints are all distinct.

    Args:
        A: binary adjacency matrix (the code reads all (i, j) entries;
           for the problems here only the upper triangle is populated).

    Returns:
        List of lists of (i, j) edge tuples; within each inner list no two
        edges share a vertex.
    """
    A = np.copy(A)
    n = A.shape[0]
    ans = []
    while np.max(A) != 0:
        edges_group = []
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement.
        used = np.zeros(n, dtype=bool)
        for i in range(n):
            for j in range(n):
                if A[i][j] == 1 and not used[i] and not used[j]:
                    edges_group.append((i, j))
                    A[i][j] = 0
                    used[i] = used[j] = True
        ans.append(edges_group)
    return ans
def generate_circuit_for_problem(problem):
    """Generates the circuit H^n U_q H^n followed by per-qubit measurements."""
    qubits = cirq.LineQubit.range(problem.n)
    circuit = cirq.Circuit()

    # Hadamard gates at the beginning.
    circuit += cirq.Moment([cirq.H(qubits[i]) for i in range(problem.n)])

    # Controlled-Z gates encoding the matrix A. graph_coloring groups the
    # (commuting) CZ gates so that each group packs into as few moments as
    # possible.
    for layer in graph_coloring(problem.A):
        for i, j in layer:
            circuit += cirq.CZ(qubits[i], qubits[j])

    # S gates encoding the vector b, all in a single moment.
    S_moment = cirq.Moment()
    for i in range(problem.n):
        if problem.b[i] == 1:
            S_moment += cirq.S.on(qubits[i])
    circuit += S_moment

    # Hadamard gates at the end.
    circuit += cirq.Moment([cirq.H(qubits[i]) for i in range(problem.n)])

    # Measurements, keyed by qubit index as a string.
    circuit += cirq.Moment([cirq.measure(qubits[i], key=str(i)) for i in range(problem.n)])

    return circuit
def solve_problem(problem, print_circuit=False):
    """Simulates the solver circuit once and returns the measured z vector.

    All gates in the circuit are Clifford gates, so the Clifford simulator
    can be used. The measured vector is guaranteed to be a solution.
    """
    circuit = generate_circuit_for_problem(problem)
    if print_circuit:
        print(circuit)
    simulator = cirq.CliffordSimulator()
    measurements = simulator.simulate(circuit).measurements
    return np.array([measurements[str(k)][0] for k in range(problem.n)])
# Demonstrate the circuit (printed) on the fixed n=10 problem.
solve_problem(problem_10_64, print_circuit=True)
# -
# Now let's test this algorithm. Let's solve it with a circuit 100 times and each time check that measurement result is indeed an answer to the problem.
# +
def test_problem(problem):
    """Runs the quantum solver 100 times, asserting each result is a valid z."""
    problem.bruteforce_solve()
    for _ in range(100):
        measured_z = solve_problem(problem)
        assert problem.is_z(measured_z)


test_problem(problem_10_64)
print('OK')
# -
# Let's repeat that for 10 other problems with $n=8$ and chance of random guessing at most $\frac{1}{4}$.
# Repeat the check on 10 freshly generated problems with n=8.
for _ in range(10):
    test_problem(find_interesting_problem(8, 4))
print('OK')
# Now, let's run our algorithm on a problem with $n=200$.
# %%time
# Solve a large (n=200) instance; brute force is infeasible here, but the
# Clifford simulation completes quickly.
problem = random_problem(200, seed=0)
solve_problem(problem, print_circuit=False)
# ## Why is this problem interesting?
#
# ### 1. It's a problem without an oracle
#
# This problem is similar to a problem solved by [Bernstein–Vazirani algorithm](https://en.wikipedia.org/wiki/Bernstein%E2%80%93Vazirani_algorithm). It also finds coefficients of unknown linear function. But in Bernstein-Vazirani algorithm this function is represented by an oracle. In this problem, the linear function is "hidden" in inputs $A$ and $b$.
#
# ### 2. Quantum circuits have advantage over classical when solving this problem
#
# According to [Gottesman–Knill theorem](https://en.wikipedia.org/wiki/Gottesman%E2%80%93Knill_theorem), this problem can be solved in polynomial time on a classical computer, because it can be solved by simulating a Clifford circuit. So, it might look like quantum computers aren't better than classical ones at solving this problem.
#
# However, if we apply certain restrictions on matrix $A$, the circuit will have fixed depth (i.e. number of Moments). Namely, if the matrix $A$ is an adjacency matrix of a "grid" graph (whose edges can be colored in 4 colors), all CZ gates will fit in 4 moments, and overall we will have only 8 moments - and this doesn't depend on $n$.
#
# But for classical circuits it can be proven (see [1]) that even if we restrict matrix $A$ in the same way, the depth of classical circuit (with gates of bounded fan-in) must grow as $n$ grows (in fact, it grows as $\log(n)$).
# ## References
#
#
# [1] [Quantum advantage with shallow circuits](https://arxiv.org/pdf/1704.00690.pdf) by <NAME>, <NAME> and <NAME>.
| 2D Hidden Linear Function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kmjohnson3/Intro-to-MRI/blob/master/AdvancedNoteBooks/VarNetToyExample.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="JtPieDbTAH6L"
# # Overview
# This code is meant as a toy/teaching example roughly following the paper:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Learning a variational network for reconstruction of accelerated MRI data. Magnetic Resonance in Medicine. 2018;79(6):3055–3071. [link](https://onlinelibrary.wiley.com/doi/epdf/10.1002/mrm.26977)
#
# Major differences include:
# * Uses synthetic data using random shapes and fake 4 channel sensitivity maps
# * Written in PyTorch (>1.8) with native complex support
# * The networks are slightly smaller and modified to train in ~10 minutes rather than days
#
# + id="mwwwsFsHQGGf"
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import random
import torch
import torch.nn as nn
import torch.fft
import numpy as np
import skimage
import skimage.draw
# + [markdown] id="NHnHvWtfRopr"
# Below are the MRI encoding operators. In MRI, the encoding operator is just a Fourier transform of the image multiplied by a sensitivity map. The sensitivity map is essentially a smooth complex function that details how each RF receiver (~antenna) detects the signal. We can detail this in matrix form as:
#
# $Ex = FMx = d + \epsilon$
#
# where:
# * $E$ is the encoding operator ($N_m$ x $N_xN_y$)
# * $F$ is the Fourier transform operator ($N_m$ x $N_xN_yN_c$)
# * $M$ is a sensitivity matrix, a stack of diagonal matrices ($N_xN_yN_c$ x $N_xN_y$)
# * $N_m$ is the number of measurements
# * $N_x$ is the image size in $x$
# * $N_y$ is the image size in $y$
# * $N_c$ is the number of coils ( 4 in this case)
#
# We won't store these matrices but use operators to perform them.
# + id="Ebk2D0F3PQ7h"
def sense_adjoint(maps, data):
    """Adjoint SENSE operator: centered inverse FFT, then coil combination.

    Transforms multi-coil k-space data to image space and combines the
    coil images weighted by the conjugate sensitivity maps (summed over
    the coil axis, which is dim=-3).
    """
    image = torch.fft.ifftshift(data, dim=(-2, -1))
    image = torch.fft.ifft2(image, dim=(-2, -1))
    image = torch.fft.fftshift(image, dim=(-2, -1))
    combined = torch.conj(maps) * image
    return torch.sum(combined, dim=-3, keepdim=True)
def sense(maps, image):
    """Forward SENSE operator: coil-weight the image, then centered 2D FFT."""
    coil_images = maps * image
    kspace = torch.fft.ifftshift(coil_images, dim=(-2, -1))
    kspace = torch.fft.fft2(kspace, dim=(-2, -1))
    return torch.fft.fftshift(kspace, dim=(-2, -1))
# + [markdown] id="Thm4SbwUluhP"
# # Image generation
# We will use fake data. Specifically, a 2D image with a number of circles generated at random. We are using the scikit-image function random_shapes [here](https://scikit-image.org/docs/dev/api/skimage.draw.html#skimage.draw.random_shapes). However, in the scikit code the random generator is reinitialized every call. Below is just a copy of this with the randomness removed. It still relies on the core libraries in scikit.
# + id="wF2JjRwEPxpz"
def random_shapes(image_shape,
                  max_shapes,
                  min_shapes=1,
                  min_size=2,
                  max_size=None,
                  multichannel=False,
                  num_channels=1,
                  shape=None,
                  intensity_range=None,
                  allow_overlap=False,
                  num_trials=100,
                  random_seed=None,
                  *,
                  channel_axis=-1):
    """Generate an image with random shapes, labeled with bounding boxes.

    This is a copy of :func:`skimage.draw.random_shapes` with the
    re-seeding of the random generator removed, so repeated calls draw
    from numpy's global random state and produce different images.

    Shapes have random (row, col) starting coordinates and random sizes
    bounded by `min_size` and `max_size`. A randomly generated shape may not
    fit the image; the algorithm retries up to `num_trials` times, so fewer
    shapes than requested may be generated.

    Parameters
    ----------
    image_shape : tuple
        The number of rows and columns of the image to generate.
    max_shapes : int
        The maximum number of shapes to (attempt to) fit into the image.
    min_shapes : int, optional
        The minimum number of shapes to (attempt to) fit into the image.
    min_size, max_size : int, optional
        The minimum / maximum dimension of each shape.
    multichannel : bool, optional
        Deprecated; specify `channel_axis` instead.
    num_channels : int, optional
        Number of channels in the generated image.
    shape : {rectangle, circle, triangle, ellipse, None} str, optional
        The name of the shape to generate, or `None` to pick random ones.
    intensity_range : {tuple of tuples of uint8, tuple of uint8}, optional
        The range of values to sample pixel values from; (min, max) for
        grayscale, ((min, max),) or per-channel tuples for multichannel.
        If None, set to (0, 254), reserving intensity 255 for background.
    allow_overlap : bool, optional
        If `True`, allow shapes to overlap.
    num_trials : int, optional
        How often to attempt to fit a shape before skipping it.
    random_seed : int, optional
        Unused in this copy (the global numpy random state is used).
    channel_axis : int or None, optional
        If None, the image is grayscale; otherwise the axis of the array
        that corresponds to channels.

    Returns
    -------
    image : uint8 array
        An image with the fitted shapes.
    labels : list
        One (category, ((r0, r1), (c0, c1))) tuple per fitted shape.
    """
    # Fix: warn() was called below but never imported, which raised a
    # NameError whenever a shape could not be fitted.
    from warnings import warn

    if min_size > image_shape[0] or min_size > image_shape[1]:
        raise ValueError('Minimum dimension must be less than ncols and nrows')
    max_size = max_size or max(image_shape[0], image_shape[1])

    if channel_axis is None:
        num_channels = 1

    if intensity_range is None:
        intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
    else:
        tmp = (intensity_range, ) if num_channels == 1 else intensity_range
        for intensity_pair in tmp:
            for intensity in intensity_pair:
                if not (0 <= intensity <= 255):
                    msg = 'Intensity range must lie within (0, 255) interval'
                    raise ValueError(msg)

    # Intentionally uses np.random's global state (no re-seeding) so that
    # successive calls produce different images.
    user_shape = shape
    image_shape = (image_shape[0], image_shape[1], num_channels)
    image = np.full(image_shape, 255, dtype=np.uint8)
    filled = np.zeros(image_shape, dtype=bool)
    labels = []

    num_shapes = np.random.randint(min_shapes, max_shapes + 1)
    colors = skimage.draw._random_shapes._generate_random_colors(
        num_shapes, num_channels, intensity_range, np.random)
    shape = (min_size, max_size)
    for shape_idx in range(num_shapes):
        if user_shape is None:
            shape_generator = np.random.choice(
                skimage.draw._random_shapes.SHAPE_CHOICES)
        else:
            shape_generator = skimage.draw._random_shapes.SHAPE_GENERATORS[user_shape]
        for _ in range(num_trials):
            # Pick start coordinates.
            column = np.random.randint(max(1, image_shape[1] - min_size))
            row = np.random.randint(max(1, image_shape[0] - min_size))
            point = (row, column)
            try:
                indices, label = shape_generator(point, image_shape, shape, np.random)
            except ArithmeticError:
                # Couldn't fit the shape, try new coordinates.
                continue
            # Check if there is an overlap where the mask is nonzero.
            if allow_overlap or not filled[indices].any():
                image[indices] = colors[shape_idx]
                filled[indices] = True
                labels.append(label)
                break
        else:
            # for/else: no trial succeeded for this shape.
            warn('Could not fit any shapes to image, '
                 'consider reducing the minimum dimension')

    if channel_axis is None:
        image = np.squeeze(image, axis=2)
    else:
        image = np.moveaxis(image, -1, channel_axis)
    image = np.squeeze(image)
    return image, labels
# + [markdown] id="txeO3qb0rWhB"
# # Data generator
# From the images, we will generate data using simple simulated sensitivity maps. This is a very simple data generator. You can change the image size and examples per epoch, which are just made up.
# + id="JjfwKOojPhm4"
class ToyDataGenerator(torch.utils.data.Dataset):
    """Generates synthetic multi-coil MRI data from random circle images."""

    def __init__(self, image_size=(64, 64), examples_per_epoch=1000, channels=4):
        """
        Args:
            image_size: (rows, cols) of the generated images.
            examples_per_epoch: value reported by __len__.
            channels: number of simulated receive coils (1 gives a uniform map).
        """
        self.image_size = image_size
        self.examples_per_epoch = examples_per_epoch
        self.get_maps(channels)

    def get_maps(self, channels):
        """Builds smooth, unit-power sensitivity maps (shape: coils x H x W)."""
        if channels == 1:
            self.maps = torch.ones((1,) + self.image_size, dtype=torch.complex64)
        else:
            # Fix: the second linspace previously used image_size[0] for both
            # axes, producing wrongly shaped maps for non-square images.
            # (Any channels value other than 1 yields these 4 ramp maps.)
            [x, y] = torch.meshgrid(torch.linspace(0, 1, self.image_size[0]),
                                    torch.linspace(0, 1, self.image_size[1]))
            self.maps = torch.stack((x, y, 1.0 - x, 1.0 - y), dim=0).type(torch.complex64)
            # Normalize so the summed coil power is 1 at every pixel.
            self.maps /= torch.sqrt(torch.sum(torch.abs(self.maps) ** 2, dim=0, keepdim=True))

    def __len__(self):
        return self.examples_per_epoch

    def __getitem__(self, idx):
        import math

        # Images of random circles (idx is ignored; data is random each call).
        image, labels = random_shapes(image_shape=self.image_size,
                                      max_shapes=10,
                                      min_shapes=1,
                                      min_size=2,
                                      max_size=None,
                                      multichannel=False,
                                      allow_overlap=True,
                                      shape='circle')
        # Convert background to 0 rather than 255.
        image = 255 - image
        # Convert to tensor and scale to [0, 1].
        image = torch.tensor(image).unsqueeze(0).unsqueeze(0)
        image = image / torch.max(torch.abs(image))
        # Add a linear phase ramp of random magnitude and direction.
        a = 2.0 * math.pi * torch.rand(1)
        theta = torch.tensor([math.cos(a), -math.sin(a), 0.0,
                              math.sin(a), math.cos(a), 0.0]).view(-1, 2, 3)
        phase = torch.nn.functional.affine_grid(theta, image.size())
        image = image[0] * torch.exp(1j * phase[..., 0])
        # Generate k-space data using the forward encoding operator.
        data = sense(self.maps, image)
        # Add complex Gaussian noise.
        data = data + 1e-4 * (torch.randn(data.shape) + 1j * torch.randn(data.shape))
        return self.maps, data, image
# + [markdown] id="i5sCzz-ztnxV"
# # Neural Network Components
#
# These are some of the components to run this. We need:
# * Complex convolutions with kernel normalization
# * Radial basis function activation
# * The denoising CNN (VarNet)
# * The unrolled network
#
# + id="vZsZty8oP8kg"
class RunningAverage:
    """Accumulates a weighted running mean of scalar values."""

    def __init__(self):
        # Start from an empty accumulator.
        self.reset()

    def reset(self):
        """Forgets everything seen so far."""
        self.count = 0
        self.sum = 0

    def update(self, value, n=1):
        """Folds in `value`, counted `n` times."""
        self.sum += value * n
        self.count += n

    def avg(self):
        """Mean of all values folded in since the last reset."""
        return self.sum / self.count
class RadialBasisActivation(nn.Module):
    """Learned radial-basis (Gaussian) activation for complex tensors.

    Real and imaginary parts are each projected into a bank of channels via
    a learned 1x1 convolution, passed through a Gaussian bump, and
    recombined with a second learned 1x1 convolution.
    """

    def __init__(self, in_channels=1, channels=48):
        super(RadialBasisActivation, self).__init__()
        # 1x1 convs learn per-channel scale/bias into and out of the basis.
        self.scale_in = nn.Conv2d(in_channels=in_channels, out_channels=channels, kernel_size=1)
        self.scale_out = nn.Conv2d(in_channels=channels, out_channels=in_channels, kernel_size=1, bias=False)

    def forward(self, input):
        # Gaussian bump applied independently to the real and imaginary parts.
        bumps_real = torch.exp(-self.scale_in(input.real) ** 2)
        bumps_imag = torch.exp(-self.scale_in(input.imag) ** 2)
        # Learned linear recombination back to the input channel count.
        return self.scale_out(bumps_real) + 1j * self.scale_out(bumps_imag)
class ComplexReLu(nn.Module):
    """Magnitude-based ReLU for complex tensors (phase-preserving)."""

    def forward(self, input):
        magnitude = torch.abs(input)
        # relu(|z|) == |z| since magnitudes are non-negative; the epsilon in
        # the denominator guards against division by zero.
        gate = torch.nn.functional.relu(magnitude).type(torch.complex64) / (magnitude + 1e-6)
        return gate * input
def apply_complex(fr, fi, input, dtype=torch.complex64):
    """Applies a pair of real-valued layers (fr, fi) as one complex layer.

    Implements (fr + i*fi)(a + i*b) = (fr(a) - fi(b)) + i*(fr(b) + fi(a)).
    """
    real_part = (fr(input.real) - fi(input.imag)).type(dtype)
    imag_part = (fr(input.imag) + fi(input.real)).type(dtype)
    return real_part + 1j * imag_part
class ComplexConv2d(nn.Module):
    """2D convolution for complex tensors built from two real convolutions."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                 dilation=1, groups=1, bias=True):
        super(ComplexConv2d, self).__init__()
        conv_args = (in_channels, out_channels, kernel_size, stride, padding,
                     dilation, groups, bias)
        self.conv_r = nn.Conv2d(*conv_args)  # acts as the real-part kernel
        self.conv_i = nn.Conv2d(*conv_args)  # acts as the imaginary-part kernel

    def forward(self, input):
        return apply_complex(self.conv_r, self.conv_i, input)
class NormalizedComplexConv2d(nn.Module):
    """Complex 2D convolution whose kernels are mean-centered and
    energy-normalized (divided by the sum of squared weights) on every
    forward pass."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                 dilation=1, groups=1, bias=True):
        super(NormalizedComplexConv2d, self).__init__()
        self.conv_r = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        self.conv_i = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        # NOTE(review): self.norm is never used in forward(); kept so the
        # module's state/attributes are unchanged.
        self.norm = nn.LayerNorm((kernel_size, kernel_size), elementwise_affine=False)
        self.kernel_size = kernel_size

    def forward(self, input):
        # Center and normalize each kernel before applying it.
        wr = self.conv_r.weight - torch.mean(self.conv_r.weight)
        wi = self.conv_i.weight - torch.mean(self.conv_i.weight)
        wr = wr / torch.sum(wr ** 2)
        wi = wi / torch.sum(wi ** 2)
        cr, ci = self.conv_r, self.conv_i

        def conv_like_r(x):
            return torch.nn.functional.conv2d(
                x, wr, cr.bias, cr.stride, cr.padding, cr.dilation, cr.groups)

        def conv_like_i(x):
            return torch.nn.functional.conv2d(
                x, wi, ci.bias, ci.stride, ci.padding, ci.dilation, ci.groups)

        # Complex multiplication expanded into real/imag parts.
        real = conv_like_r(input.real) - conv_like_i(input.imag)
        imag = conv_like_r(input.imag) + conv_like_i(input.real)
        return real + 1j * imag
class ComplexConvTranspose2d(nn.Module):
    """Transposed 2D convolution for complex tensors (two real transposed convs)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros'):
        super(ComplexConvTranspose2d, self).__init__()
        conv_args = (in_channels, out_channels, kernel_size, stride, padding,
                     output_padding, groups, bias, dilation, padding_mode)
        self.conv_r = nn.ConvTranspose2d(*conv_args)
        self.conv_i = nn.ConvTranspose2d(*conv_args)

    def forward(self, input):
        return apply_complex(self.conv_r, self.conv_i, input)
class NormalizedComplexConvTranspose2d(nn.Module):
    """Complex transposed convolution with mean-centered, energy-normalized
    kernels (divided by the sum of squared weights on every forward pass)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1, padding_mode='zeros'):
        super(NormalizedComplexConvTranspose2d, self).__init__()
        self.conv_r = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride,
                                         padding, output_padding, groups, bias, dilation, padding_mode)
        self.conv_i = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride,
                                         padding, output_padding, groups, bias, dilation, padding_mode)
        # NOTE(review): self.norm is never used in forward(); kept so the
        # module's state/attributes are unchanged.
        self.norm = nn.LayerNorm((kernel_size, kernel_size), elementwise_affine=False)

    def forward(self, input):
        # Center and normalize each kernel before applying it.
        wr = self.conv_r.weight - torch.mean(self.conv_r.weight)
        wi = self.conv_i.weight - torch.mean(self.conv_i.weight)
        wr = wr / torch.sum(wr ** 2)
        wi = wi / torch.sum(wi ** 2)
        cr, ci = self.conv_r, self.conv_i

        def tconv_r(x):
            return torch.nn.functional.conv_transpose2d(
                x, wr, cr.bias, cr.stride, cr.padding, cr.output_padding, cr.groups, cr.dilation)

        def tconv_i(x):
            return torch.nn.functional.conv_transpose2d(
                x, wi, ci.bias, ci.stride, ci.padding, ci.output_padding, ci.groups, ci.dilation)

        # Complex multiplication expanded into real/imag parts.
        real = tconv_r(input.real) - tconv_i(input.imag)
        imag = tconv_r(input.imag) + tconv_i(input.real)
        return real + 1j * imag
class VarNet(nn.Module):
    """One variational-network regularizer block.

    Runs `channels` parallel branches of (encode -> radial-basis activation
    -> decode) over the input image and averages their outputs.
    """

    def __init__(self, channels=9, kernel_size=5):
        super(VarNet, self).__init__()
        self.encoding_layers = nn.ModuleList()
        self.decoding_layers = nn.ModuleList()
        self.activation_layers = nn.ModuleList()
        self.channels = channels
        for _ in range(channels):
            self.encoding_layers.append(
                NormalizedComplexConv2d(1, 1, kernel_size=kernel_size,
                                        padding=kernel_size // 2, bias=False))
            self.decoding_layers.append(
                NormalizedComplexConvTranspose2d(1, 1, kernel_size=kernel_size,
                                                 padding=kernel_size // 2, bias=False))
            self.activation_layers.append(RadialBasisActivation(in_channels=1, channels=24))

    def forward(self, image):
        # Average the encode/activate/decode output of every branch.
        accumulated = torch.zeros_like(image)
        for activate, encode, decode in zip(self.activation_layers,
                                            self.encoding_layers,
                                            self.decoding_layers):
            accumulated += decode(activate(encode(image))) / self.channels
        return accumulated
class UnrolledNetwork(nn.Module):
    """Unrolled gradient-descent MRI reconstruction with learned regularizers.

    Each of `inner_iter` unrolled iterations applies one data-consistency
    gradient step with a learned step size, followed by a denoising /
    regularization update.
    """

    def __init__(self, scale_init=1.0, inner_iter=10, denoiser='VarNet', *args, **kwargs):
        """
        Args:
            scale_init: initial value for the learned step sizes.
            inner_iter: number of unrolled iterations.
            denoiser: the string 'VarNet' to build one VarNet per iteration,
                or an nn.Module applied (shared) at every iteration.
        """
        super(UnrolledNetwork, self).__init__()
        self.inner_iter = inner_iter
        # One learnable step size per unrolled iteration.
        self.scale_layers = nn.Parameter(scale_init * torch.ones([inner_iter]),
                                         requires_grad=True)
        # Fix: strings must be compared with '==', not 'is' (identity of
        # string literals is implementation-dependent; CPython emits a
        # SyntaxWarning for it).
        if denoiser == 'VarNet':
            self.varnets = nn.ModuleList()
            for i in range(self.inner_iter):
                self.varnets.append(VarNet())
            self.denoiser = None
        else:
            self.denoiser = denoiser

    def forward(self, maps, kspace, mask):
        """Reconstructs an image from undersampled multi-coil k-space data."""
        # Initialize with the zero-filled adjoint reconstruction.
        image = sense_adjoint(maps, kspace * mask)
        for i in range(self.inner_iter):
            image_old = image
            # E x
            Ex = sense(maps, image_old)
            # E x - d (restricted to sampled k-space locations)
            Ex -= kspace * mask
            # Gradient step: image = image - scale * E^H (E x - d)
            image = image_old - self.scale_layers[i] * sense_adjoint(maps, Ex)
            if self.denoiser is None:
                # Residual regularization with this iteration's VarNet.
                image = image + self.varnets[i](image_old)
            else:
                image = self.denoiser(image)
        return image
# + [markdown] id="ZR7unVl4ukOS"
# # Training
# This will train the network using a made up mask.
# + colab={"base_uri": "https://localhost:8080/", "height": 228} id="eILZl32JQEAX" outputId="cad08275-77c5-4b00-ca6c-f03365882f87"
BATCH_SIZE = 10

# Set seed so that code runs everytime
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)

'''
We will generate fake data for this, the data is circles, triangles, and squares
'''
# Validation loader called first due to random nature
data_val = ToyDataGenerator()
loader_val = DataLoader(dataset=data_val, batch_size=BATCH_SIZE, shuffle=False, drop_last=True)

# precalc validation data to avoid randomness
vload = iter(loader_val)
vmaps, vdata, image_truth = next(vload)

# Train loader
data_train = ToyDataGenerator()
loader_train = DataLoader(dataset=data_train, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)

'''
Add a sampling mask. This will remove a number of points to simulate undersampling
'''
# Random binary mask keeping roughly half of k-space ("faction" is a typo
# for "fraction"), plus a fully sampled 10x10 block at the k-space center.
mask = torch.rand((1, 1, vmaps.shape[-2], vmaps.shape[-1]))
sampling_faction = 0.5
mask[mask <= sampling_faction] = 0.0
mask[mask > sampling_faction] = 1.0
mask = 1.0 - mask
mask[:, :, (mask.shape[2]//2-5):(mask.shape[2]//2+5), (mask.shape[3]//2-5):(mask.shape[3]//2+5)] = 1.0

'''
Use mean square error loss. We need a custom function since this is complex
'''
# Mean absolute error on the complex difference (magnitude of residual).
def loss_fcn(truth, input):
    loss = torch.mean(torch.abs(truth - input))
    return loss

'''
Define the network. You can also pass a nn.Module as a denoiser
'''
#model = UnrolledNetwork( inner_iter=10, denoiser=nn.Identity())
model = UnrolledNetwork(inner_iter=10)
model.cuda()

'''
Define an optimizer.
'''
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

train_loss_avg = []
val_loss_avg = []
for epoch in range(25):
    train_avg = RunningAverage()
    val_avg = RunningAverage()

    model.train()
    for idx, (maps, data, image_true) in enumerate(loader_train):
        # Zero gradient
        optimizer.zero_grad()
        maps2 = maps.clone().cuda()
        data2 = data.clone().cuda()
        mask2 = mask.clone().cuda()
        # Get a truth
        truth = sense_adjoint(maps2, data2)
        # Recon undersampled data with network
        image = model(maps2, data2, mask2)
        # Calculate loss
        loss = loss_fcn(truth, image)
        train_avg.update(loss.item())
        # Backpropogation
        loss.backward()
        optimizer.step()

    # Compute validation loss for simplicity this is just for one batch
    # NOTE(review): no torch.no_grad() here, so the validation forward pass
    # still builds a graph — confirm this is intentional.
    model.eval()
    maps2 = vmaps.clone().cuda()
    data2 = vdata.clone().cuda()
    mask2 = mask.clone().cuda()
    # Get the validation data
    truth = sense_adjoint(maps2, data2)
    image = model(maps2, data2, mask2)
    loss = loss_fcn(truth, image)
    val_avg.update(loss.item())

    val_loss_avg.append(val_avg.avg())
    train_loss_avg.append(train_avg.avg())

    '''
    Some display code.
    '''
    # Live-update the notebook output each epoch with the loss curves and
    # current reconstructions.
    from IPython.display import clear_output
    clear_output()
    print(f'{epoch} Train = {train_avg.avg()} Val = {loss.item()}')
    plt.figure(figsize=(8, 3))

    zero_fill = sense_adjoint(maps2, data2*mask2)
    im_sl = zero_fill[0, 0].detach().cpu().numpy()
    plt.subplot(133)
    plt.imshow(np.abs(im_sl), cmap='gray')
    plt.title('Zero Fill Recon')
    plt.axis('off')

    im_sl = image[0, 0].detach().cpu().numpy()
    plt.subplot(132)
    plt.imshow(np.abs(im_sl), cmap='gray')
    plt.title('VarNet Recon')
    plt.axis('off')

    plt.subplot(131)
    plt.plot(np.array(train_loss_avg), label='Train Loss')
    plt.plot(np.array(val_loss_avg), label='Val Loss')
    plt.ylim(0.0, np.max(np.array(val_loss_avg)))
    plt.legend()
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 995} id="asuf4fs3zhWM" outputId="73f854d9-13db-471b-f0be-f8f72caa193d"
# Side-by-side comparison of the last validation batch: zero-filled adjoint,
# network output, and the fully sampled ("truth") reconstruction.
plt.figure(figsize=(10, 5))

# Check the images
zero_fill = sense_adjoint(maps2, data2*mask2)
im_sl = zero_fill[0, 0].detach().cpu().numpy()
plt.subplot(131)
plt.imshow(np.abs(im_sl), cmap='gray')
plt.title('Zero filled reconstruction')
plt.axis('off')

# Check the images
im_sl = image[0, 0].detach().cpu().numpy()
plt.subplot(132)
plt.imshow(np.abs(im_sl), cmap='gray')
plt.title('Neural network images')
plt.axis('off')

# Check the images
im_sl = truth[0, 0].detach().cpu().numpy()
plt.subplot(133)
plt.imshow(np.abs(im_sl), cmap='gray')
plt.title('Truth image')
plt.axis('off')
plt.show()

# Sweep real-valued inputs through each learned activation of the first
# unrolled block and plot the magnitude response.
plt.figure()
x = np.linspace(-10, 10, 2000)
vin = torch.tensor(x).view(-1, 1, 1, 1).type(torch.complex64).cuda()
for alayer in model.varnets[0].activation_layers:
    vout = alayer(vin)
    plt.plot(x, np.squeeze(np.abs(vout.detach().cpu().numpy())))
plt.title('Activation functions')
plt.show()

# Real part of the 9 encoding kernels (VarNet default channels=9) of the
# first unrolled block.
plt.figure()
for f in range(9):
    plt.subplot(3, 3, f+1)
    plt.imshow(np.squeeze(model.varnets[0].encoding_layers[f].conv_r.weight[0, :, :].detach().cpu().numpy()))
    plt.axis('off')
plt.suptitle('Kernels first layer', fontsize=18)
plt.show()

# Same kernels for the last unrolled block.
plt.figure()
for f in range(9):
    plt.subplot(3, 3, f+1)
    plt.imshow(np.squeeze(model.varnets[-1].encoding_layers[f].conv_r.weight[0, :, :].detach().cpu().numpy()))
    plt.axis('off')
plt.suptitle('Kernels last layer', fontsize=18)
plt.show()
| AdvancedNoteBooks/VarNetToyExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
from IPython.display import display, display_markdown
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import scipy.stats as stats
from sklearn import preprocessing
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.metrics import accuracy_score
# + [markdown] hide_input=false
# # Prediction of Tanzanian water pumps functional status
#
# This study will focus on identifying the functional status (functional, needs repair or non-functional) of Tanzanian water pumps. The possible explanatory variables will be location, construction year, funder, type of extraction, water quality and quantity, population using it, management organization and payment methods.
#
# I picked up this challenge from the [DrivenData](https://www.drivendata.org/) competitions list because it shows a direct and practical application of how statistical analysis can help improve services and products quality. And as an engineer, those goals will be definitely the basis of any data science case I will have to solve. Moreover, as lots of possible explanatory variables are available, this will give me the chance to apply advance tools I learned during the [Data Analysis and Interpretation online Specialization](https://www.coursera.org/specializations/data-analysis).
#
# Predicting accurately the water pumps functional status will help planning maintenance earlier. That in turn will increase the availability of the water point and thus the quality of life for the people depending on those water supplies.
# -
# # Methods
#
# ## Sample
#
# The database contains 74,250 records of water points information from the Tanzania Ministry of Water. The records were made between October 2002 and December 2013. Unfortunately there are no explanation on the techniques used to collect those data.
# +
# Load the DrivenData training/test splits; the first CSV column holds the
# waterpoint id and becomes the index.
training_data = pd.read_csv('training_set_values.csv', index_col=0)
training_label = pd.read_csv('training_set_labels.csv', index_col=0)
test_data = pd.read_csv('test_set_values.csv', index_col=0)
# Merge test data and training data to apply same data management operations on them.
# pd.concat replaces the deprecated DataFrame.append (removed in pandas 2.0).
data = pd.concat([training_data, test_data]).sort_index()
# + [markdown] hide_input=false
# ## Measures
#
# The functional status of the water points are categorized in three groups: functional, functional needs repair and non functional.
#
# The potential predictors will be:
#
# - The amount of water available; missing data are coded as 0, they will be replaced by the mean value to suppress minimum amount of data.
# - The organization having funded the well
#
# From the various actors, the following categories will be created :
#
# 'organisation' : ('bank', 'msf', 'wwf', 'unicef', 'unisef', 'oxfam', 'oxfarm', 'rotary club', 'lion's club', 'care', 'without', 'action contre la faim', 'rain', 'red cross', 'blue angels', 'fundat', 'foundation'),
# 'church' : ('church', 'churc', 'rcchurch', 'roman', 'missionsry', 'lutheran', 'islamic', 'islam', 'vision'),
# 'private' : ('consulting', 'engineer', 'private', 'ltd', 'co.ltd', 'contractor', 'enterp', 'enterpr', 'company', 'contract'),
# 'community' : ('village', 'community', 'communit', 'district', 'council', 'commu', 'villigers', 'villagers'),
# 'government' : ('government', 'gov', 'govt', 'gover', 'gove', 'governme', 'ministry'),
# 'other' : ('0', 'nan', 'known', 'other', 'unknown'),
# 'danida' : ('danida', 'danid'),
# 'foreign government' : ('netherlands', 'germany', 'european')
#
# Then the 9 most frequent funders will be kept and the others will be gathered in the `other` category.
#
# - The installer of the well; the grouping technique applied on the funders will be applied on the installer categories.
# - The GPS coordinates (height, longitude and latitude); missing data are coded as 0, they will be coded as NaN except for the height for which the missing data will be replaced by the mean values to keep a maximum of records for the analysis.
# - The geographic water basin
# - The geographic region
# - The population around the well; missing data are coded as 0, they will be coded as NaN.
# - Organization of public meeting at the water point; dichotomous variable (True/False)
# - The operator of the water point
# - The management of the water point
# - Does the waterpoint receive a construction permit?
# - Year the waterpoint was constructed; missing data are coded as 0, they will be replaced by the median value to avoid discarding a lot of records in the analysis.
# - The kind of extraction the water point uses
# - How payment are handled?
# - The quality of the water
# - The quantity of the water
# - The source of the water
# - The type of water point
#
# As the Python package `sklearn` cannot handle non-binary categorical variables, those variables will be expanded into as many new dichotomous variables as there are categories. Therefore the number of potential explanatory variables will be huge. So as a preprocessing step, a random forest test will be carried out to select only the variables having a substantial effect.
# + hide_input=false
# Several numeric fields use 0 as a missing-value sentinel.  Impute those
# (mean or median) so fewer rows are dropped at model-fitting time.
for source_col, target_col, strategy in (('amount_tsh', 'water_amount', 'mean'),
                                         ('construction_year', 'construction_year', 'median'),
                                         ('gps_height', 'height', 'mean')):
    imp = preprocessing.Imputer(missing_values=0, strategy=strategy)
    column = data[source_col].values.reshape(-1, 1)
    imp.fit(column)
    data[target_col] = imp.transform(column).ravel()
# GPS coordinates: recode the 0 sentinels as NaN instead of imputing them
for field in ('longitude', 'latitude'):
    data[field] = data[field].map(lambda x: x if x else pd.np.nan)
def group_installer(data):
    """Collapse free-text funder/installer names into broad categories.

    Each entry is lowercased and scanned word by word (words are further
    split on '/'); the first keyword match assigns its category.  Entries
    outside the 10 most frequent resulting groups are folded into 'other'.
    """
    categories = {
        'organisation': ('bank', 'msf', 'wwf', 'unicef', 'unisef', 'oxfam', 'oxfarm', 'club', 'care', 'without', 'faim', 'rain', 'red', 'angels', 'fundat', 'foundation'),
        'church': ('church', 'churc', 'rcchurch', 'roman', 'missionsry', 'lutheran', 'islamic', 'islam', 'vision'),
        'private': ('consulting', 'engineer', 'private', 'ltd', 'co.ltd', 'contractor', 'enterp', 'enterpr', 'company', 'contract'),
        'community': ('village', 'community', 'communit', 'district', 'council', 'commu', 'villigers', 'villagers'),
        'government': ('government', 'gov', 'govt', 'gover', 'gove', 'governme', 'ministry'),
        # 'unknown'-like answers mean the same as 'other' for interpretation
        'other': ('0', 'nan', 'known', 'other', 'unknown'),
        'danida': ('danida', 'danid'),
        'foreign government': ('netherlands', 'germany', 'european'),
    }

    def classify(entry):
        # First keyword hit wins; unmatched entries keep their (lowercased) text.
        for word in entry.split():
            for piece in word.split('/'):
                for label, keywords in categories.items():
                    if piece in keywords:
                        return label
        return entry

    lowered = data.map(lambda value: str(value).lower())
    grouped = lowered.map(classify)
    top10 = set(grouped.value_counts().index[:10])
    return grouped.map(lambda value: value if value in top10 else 'other')
# Collapse the free-text installer/funder columns into broad categories.
data['installer'] = group_installer(data.installer)
data['funder'] = group_installer(data.funder)
# Keep the training rows, attach the target label, and drop incomplete rows.
# NOTE(review): .iloc is positional while training_data.index holds waterpoint
# ids from the CSV index column — .loc may be intended here; TODO confirm.
clean_data = (data.iloc[training_data.index]
              .join(training_label['status_group'])
              .dropna())
# Create two columns one collapsing 'functional' and 'functional needs repair'
# and the other one collapsing 'non functional' and 'functional needs repair'
clean_data['functional'] = clean_data['status_group'].map({'functional' : 1,
                                                           'functional needs repair' : 1,
                                                           'non functional' : 0})
clean_data['no_repairs'] = clean_data['status_group'].map({'functional' : 1,
                                                           'functional needs repair' : 0,
                                                           'non functional' : 0})
# +
# Extract predictors and convert categorical variables in dichotomic variables
# (sklearn estimators of this era cannot consume string categories directly)
predictors_name = ['water_amount', 'height', 'longitude', 'latitude',
                   'basin', 'region', 'population', 'public_meeting', 'management_group',
                   'permit', 'construction_year', 'extraction_type_class', 'payment_type',
                   'quality_group', 'quantity_group', 'source_type', 'waterpoint_type_group',
                   'installer', 'funder']
categorical_predictors = ('basin', 'region', 'management_group', 'extraction_type_class',
                          'payment_type', 'quality_group', 'quantity_group',
                          'source_type', 'waterpoint_type_group', 'installer', 'funder')
process_data = pd.DataFrame()
for name in predictors_name:
    if name in categorical_predictors:
        # One-hot encode: one 0/1 column per observed category value
        classes = data[name].unique()
        deployed_categories = preprocessing.label_binarize(data[name], classes=classes)
        # Avoid class name collision
        classe_names = list()
        for c in classes:
            if c in process_data.columns:
                classe_names.append('_'.join((c, name)))
            else:
                classe_names.append(c)
        tmp_df = pd.DataFrame(deployed_categories,
                              columns=classe_names,
                              index=data.index)
        process_data = process_data.join(tmp_df)
    else:
        process_data[name] = data[name]
predictors_columns = process_data.columns
# NOTE(review): .iloc is positional while training_data.index holds waterpoint
# ids — .loc may be intended; TODO confirm before reuse.
deployed_data = (process_data.iloc[training_data.index]
                 .join(training_label['status_group'])
                 .dropna())
# Create two columns one collapsing 'functional' and 'functional needs repair'
# and the other one collapsing 'non functional' and 'functional needs repair'
deployed_data['functional'] = deployed_data['status_group'].map({'functional' : 1,
                                                                 'functional needs repair' : 1,
                                                                 'non functional' : 0})
deployed_data['no_repairs'] = deployed_data['status_group'].map({'functional' : 1,
                                                                 'functional needs repair' : 0,
                                                                 'non functional' : 0})
predictors = deployed_data[predictors_columns]
# -
# ## Analyzes
#
# The distributions of the response and explanatory variables will be evaluated by looking at the frequency tables for categorical variables and by calculating statistical values (mean, standard deviation, minimum and maximum) for quantitative variables.
#
# The response variable being categorical, bivariate associations will be visualized using bar charts after collapsing categories if needed. And the possible bivariate associations will be tested using Chi-Square test.
#
# The random forest method will be applied to identify the best subset of predictors. The DrivenData competition has split the database in a training set containing 80% of the records and 20% are kept for testing by submission on the website. As multiple submissions are allowed for the competition, the accuracy of the model will be tested by submitting the prediction carried out on the test data.
# # Results
#
# ## Explanatory variable selection
#
# First a random tree test was performed to limit the number of explanatory variables. From that first analysis (see the table below), the following explanatory variables are kept:
# - The gps coordinates - longitude, latitude and height - of the waterpoint
# - The quantity of water available
# - The population size next to the waterpoint
# - The year of construction
# - If a permit was issued or not for the waterpoint
# - The type of extraction
# - The water point type
# - The payment methods
#
# Although gps coordinates are important, the administration division (like geographic region) has low importance. It seems also that the way the water point was funded and installed and how it is managed are not of great importance. Some natural guesses like the quantity, the population living around and the year of construction come forward in the random forest test.
# + hide_input=true
# fit an Extra Trees model to the data and look at the first 15 important fields
model = ExtraTreesClassifier()
model.fit(predictors, deployed_data['status_group'])
# display the relative importance of each attribute (top 15, descending).
# The unused seaborn palette previously created here has been removed.
display(pd.Series(model.feature_importances_, index=predictors.columns, name='importance')
        .sort_values(ascending=False)
        .to_frame()
        .iloc[:15])
display_markdown("> Table 1 : The 15 most important features in the dataset.", raw=True)
# + hide_input=false
# Rebuild the predictor matrix restricted to the 10 variables retained by the
# feature-importance screening; categorical fields are one-hot encoded as above.
predictors_name = ['height', 'longitude', 'latitude', 'population',
                   'permit', 'construction_year', 'extraction_type_class', 'payment_type',
                   'quantity_group', 'waterpoint_type_group']
categorical_predictors = ('extraction_type_class', 'payment_type', 'quantity_group',
                          'waterpoint_type_group')
process_data = pd.DataFrame()
for name in predictors_name:
    if name in categorical_predictors:
        classes = data[name].unique()
        deployed_categories = preprocessing.label_binarize(data[name], classes=classes)
        # Avoid class name collision ('other' appears in several fields)
        classe_names = list()
        for c in classes:
            if c in process_data.columns or c == 'other':
                classe_names.append('_'.join((c, name)))
            else:
                classe_names.append(c)
        tmp_df = pd.DataFrame(deployed_categories,
                              columns=classe_names,
                              index=data.index)
        process_data = process_data.join(tmp_df)
    else:
        process_data[name] = data[name]
predictors_columns = process_data.columns
# NOTE(review): .iloc is positional while training_data.index holds waterpoint
# ids — .loc may be intended; TODO confirm before reuse.
deployed_data = (process_data.iloc[training_data.index]
                 .join(training_label['status_group'])
                 .dropna())
# Create two columns one collapsing 'functional' and 'functional needs repair'
# and the other one collapsing 'non functional' and 'functional needs repair'
deployed_data['functional'] = deployed_data['status_group'].map({'functional' : 1,
                                                                 'functional needs repair' : 1,
                                                                 'non functional' : 0})
deployed_data['no_repairs'] = deployed_data['status_group'].map({'functional' : 1,
                                                                 'functional needs repair' : 0,
                                                                 'non functional' : 0})
predictors = deployed_data[predictors_columns]
# -
# ## Descriptive Statistics
#
# In the training data set, 54.3% (N=32259) of the water point are functional, 7.3% (N=4317) need repair and 38.4% (N=22824) are non functional.
#
# For those water points, the quantity of water available is *enough* for 55.9% (N=41522), *insufficient* for 25.4% (N=18896) and *dry* for 10.5% (N=7782). The quantity is unknown for 1.3% of the data (N=975).
#
# The majority of the point are communal standpipes (58.2%, N=43239). The second most important type is hand pump type (29.5%, N=21884).
#
# The methods used to extract the water are mostly gravity (44.8%, N=33263) and hand pumps (27.7%, N=20612).
#
# To get water, people are usually never paying (42.7%, N=31712). For the points for which people pay, they are doing so on bucket basis (15.2%, N=11266) or by recurrent payment; monthly for 14% (N=10397) or annually for 6.1% (N=4570). The payment method is unknown for 13.7% of the cases (N=10149).
#
# The majority of the water points were constructed with a permit (65.4%, N=48606). But 29.4% (N=21851) were not built having one. And the permit status is unknown for 5.1% of the water points (N=3793).
#
# The distribution of the quantitative variables are presented in the table below.
# +
# Summary statistics of the quantitative fields, ignoring the 0 sentinels.
pd.set_option('display.float_format', lambda v: '{:.5g}'.format(v))
quantitative_var = {}
for column in ('gps_height', 'latitude', 'longitude', 'construction_year', 'population'):
    # Human-friendly label: 'gps_height' is just called 'height' in the report.
    label = 'height' if column == 'gps_height' else ' '.join(column.split('_'))
    non_missing = (training_data[column]
                   .map(lambda v: v if abs(v) > 1e-8 else pd.np.nan)
                   .dropna())
    quantitative_var[label] = non_missing.describe()
(pd.DataFrame(quantitative_var)
 .loc[['count', 'mean', 'std', 'min', 'max']]
 .T)
# -
# ## Bivariate analyzes
#
# The figures below show the mean value of the *functional* variable (0 = non functional, 1 otherwise) for the different categorical variables.
#
# Using post hoc chi-square tests, the major conclusions drawn are :
# - Water points working with gravity have significantly more chance to be functional (max(p-value) = 1.4 < 0.05/21). And non-mentioned extraction are the more likely to be non functional.
# - Water points type *cattle trough* and *improved spring* have no significant differences. And they are the two types having the highest probability to be functional. No conclusion can be drawn for the *dam* type as only 5 functional points are reported. The waterpoints of type *other* are the most likely to be non functional.
# - Water points for which people are paying annually are the most likely to be functional. And the one free of charges or of unknown payment method are not significantly different and both have 50% chances to be non functional.
# - *Dry* water points are most likely to be non functional. And those with *enough* or *seasonal* water are not significantly different and are the more likely to be functional.
# - Water points having a construction permit have a significantly more chance to be functional than those not having a permit (p-value = 1.4e-26).
# + hide_input=true
# Bar charts of the functional proportion for each categorical predictor.
fig, axes = plt.subplots(3, 2,
                         sharey=True,
                         gridspec_kw=dict(hspace=0.285),
                         figsize=(10, 16.5))
axes = axes.ravel()
for i, field in enumerate(('extraction_type_class', 'waterpoint_type_group', 'payment_type',
                           'quantity_group', 'permit')):
    field_name = ' '.join(field.split('_'))
    var_analysis = clean_data[['status_group', 'functional', 'no_repairs', field]]
    ax = sns.barplot(x=field, y='functional', data=var_analysis, ci=None, ax=axes[i])
    ax.set_xlabel(field_name)
    if i % 2 == 0:
        ax.set_ylabel('functional vs non functional')
    else:
        ax.set_ylabel('')
    # Wrap long category labels over several lines and tilt them for readability
    lbls = ['\n'.join(l.get_text().split()) for l in ax.get_xticklabels()]
    if len(lbls) > 5:
        ax.set_xticklabels(lbls, rotation=60)
# Only 5 fields are plotted on a 3x2 grid: hide the unused sixth axis
axes[5].set_visible(False)
fig.suptitle('Functional waterpoint proportion per categorical fields', fontsize=14)
plt.subplots_adjust(top=0.97)
plt.show();
# -
# To visualize the influence of the quantitative variables on the functional status
# of the water points, the quantitative variables have been collapsed in two bins; the median value being the separation.
#
# Using chi-square test, all variables have a significant relationship with the response variable.
# Waterpoints with higher altitude are more likely to be functional (p-value = 2e-57). Those more in the eastern side of Tanzania have a lesser chance to be functional (p-value = 0.003). The water points constructed after 2000 are in better functional condition (p-value = 0). And those sustaining higher population tend to be less functional (p-value = 2.5e-13).
# + hide_input=true
# Functional proportion after collapsing each quantitative field into two
# bins split at the median (qcut with 2 quantiles).
fig, axes = plt.subplots(2, 2,
                         sharey=True,
                         gridspec_kw=dict(hspace=0.12),
                         figsize=(10, 11))
axes = axes.ravel()
for i, field in enumerate(('gps_height', 'longitude', 'construction_year', 'population')):
    if field == 'gps_height':
        field_name = 'height'
    else:
        field_name = ' '.join(field.split('_'))
    var_analysis = clean_data[['status_group', 'functional', 'no_repairs']]
    # Zeros are missing-value sentinels: recode as NaN and drop those rows
    clean_field = clean_data[field].map(lambda x: x if abs(x)>1e-8 else pd.np.nan)
    var_analysis = var_analysis.join(clean_field).dropna()
    var_analysis[field+'grp2'] = pd.qcut(var_analysis[field],
                                         2,
                                         labels=["50th%tile",
                                                 "100th%tile"])
    ax = sns.barplot(x=field+'grp2', y='functional', data=var_analysis, ci=None, ax=axes[i])
    ax.set_xlabel(field_name)
    if i % 2 == 0:
        ax.set_ylabel('functional vs non functional')
    else:
        ax.set_ylabel('')
fig.suptitle('Functional waterpoint proportion per quantitative field quartile', fontsize=14)
plt.subplots_adjust(top=0.95)
plt.show();
# -
# ## Random Forest Test
#
# With the subset of explanatory variables selected, we can split the data to estimate the number of trees needed to stabilize the accuracy. By taking 60% of the available data as training set, the accuracy of the random forest test stabilizes for a number of trees superior to 23 as shown in the figure below.
# + hide_input=true
# Accuracy as a function of the forest size (1..30 trees) on a 60/40 split.
pd.np.random.seed(12345)
pred_train, pred_test, tar_train, tar_test = train_test_split(predictors,
                                                              deployed_data['status_group'],
                                                              test_size=.4)
trees = range(1, 31)
accuracy = pd.np.zeros(len(trees))
for slot, n_trees in enumerate(trees):
    forest = RandomForestClassifier(n_estimators=n_trees)
    forest = forest.fit(pred_train, tar_train)
    accuracy[slot] = accuracy_score(tar_test, forest.predict(pred_test))
plt.plot(trees, accuracy)
plt.xlabel("Number of trees")
plt.ylabel("Accuracy score")
plt.show();
# -
# So I run a random forest test with 25 trees with all training data and submitted on DrivenData.org the resulting prediction. I got an accuracy score of 76.86%.
# +
# Fit the final 25-tree forest on all training data and build the submission.
model = RandomForestClassifier(n_estimators=25)
model = model.fit(predictors, deployed_data['status_group'])
# NOTE(review): .iloc is positional while test_data.index holds waterpoint ids;
# .loc may be intended — kept as-is to preserve the submitted behavior.
clean_test_data = process_data.iloc[test_data.index].dropna()
# Reuse the already-computed frame instead of recomputing the dropna().
predictions = model.predict(clean_test_data)
pred = pd.Series(predictions, index=clean_test_data.index, name='status_group')
# Rows dropped for missing values still need a prediction: draw a random
# status following the observed training-set class proportions.
kept_ids = set(clean_test_data.index)  # O(1) membership instead of O(n) scans
missing_index = [i for i in test_data.index if i not in kept_ids]
data_list = list()
pd.np.random.seed(12345)
for rnd in pd.np.random.rand(len(missing_index)):
    if rnd < 0.072677:
        data_list.append('functional needs repair')
    elif rnd < 0.384242 + 0.072677:
        data_list.append('non functional')
    else:
        data_list.append('functional')
fill = pd.Series(data_list, index=missing_index)
# pd.concat replaces the deprecated Series.append (removed in pandas 2.0).
pred = pd.concat([pred, fill])
to_file = pred[test_data.index]
to_file.to_csv('randomForest.csv', index_label='id', header=('status_group',))
# -
# # Conclusion
#
# This project used random forest test to identify the variables influencing the most the functional status of Tanzanian water pumps from N=74250 water points characteristics recorded between October 2002 and December 2013 by the Tanzanian Ministry of Water. There are around 55% of pumps working properly, 7% in needs of repair and 38% non functional.
#
# Applying the random forest test, the number of potential explanatory variables was reduced from 20 to 10 by looking at the importance of each features. The most influential variables are the gps coordinates (longitude, latitude and height). Then comes the quantity of water available, the population living around the pumps, the type of extraction and the year of construction.
#
# The random forest test using 25 trees had an accuracy score of 76.9% when tested against the DrivenData test set. The optimal number of trees was found by optimizing the accuracy score with the number of trees after dividing the provided data in two groups; 60% to train the method and 40% to test it. As the best score obtained was around 78.9%, it can be said that the model will predict fairly well on new datasets.
#
# From the feature importance calculation, it can be concluded that an improved water repair policy should focus on dispatching teams unevenly across the country, as the gps coordinates greatly influence the water pumps status. And the primary target should be based on the population size living around the waterpoint and its year of construction.
#
# Although lots of parameters have been recorded for this analysis, it is possible that a factor non considered here is important and is confounding other factors reported here.
#
# From the analysis, the funder and the installer do not seem to have a big impact on the functional status. But as those two categories contain a wide variety of answers (some containing spelling mistakes or abbreviations), a deeper analysis of those two categories should be carried out to gather in meaningful categories the various actors. Right now some doubts remain on a potential confounder effect. Some parameters statistically important (population, height and construction year) have lots of missing data. In this study, the missing data of those variables were filled by their mean or their median values to avoid dropping to many records. Trying to fulfill the missing data will help improving the accuracy.
# Therefore, adding additional records and fulfilling the missing value should be the priority of any additional effort to improve the predictive algorithm.
#
#
# + [markdown] hide_input=false
# > The Jupyter notebook used to generate this final report is available there: https://github.com/fcollonval/coursera_data_visualization/blob/master/WaterPumpsPrediction.ipynb.
# -
# !jupyter nbconvert --to html --template full_nice WaterPumpsPrediction.ipynb
| WaterPumpsPrediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# +
import logging
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import foolbox
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from lib.dataset_utils import *
from lib.mnist_model import *
from lib.adv_model import *
from lib.dknn_attack_v2 import DKNNAttackV2
from lib.cwl2_attack import CWL2Attack
from lib.dknn import DKNNL2
from lib.knn_defense import *
from lib.utils import *
from lib.lip_model import *
# -
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# +
# Set all random seeds
seed = 2020
np.random.seed(seed)
torch.manual_seed(seed)
# Prefer GPU; later cells call .cuda() directly and assume one is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# -
# Load MNIST with a 10% validation split carved out of the training set
(x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_mnist_all(
    '/data', val_size=0.1, shuffle=True, seed=seed)
# +
# model_name = 'mnist_basic.h5'
# net = BasicModel()
# Adversarially-trained MNIST checkpoint: BasicModel wrapped in PGDL2Model.
model_name = 'adv_mnist_exp6.h5'
basic_net = BasicModel()
# Attack configuration passed to PGDL2Model; the exact semantics of each key
# are defined in lib.adv_model (presumably PGD with 40 steps of size 0.01
# inside an epsilon=0.3 ball, cross-entropy loss — confirm there).
config = {'epsilon': 0.3,
          'num_steps': 40,
          'step_size': 0.01,
          'random_start': True,
          'loss_func': 'xent'}
net = PGDL2Model(basic_net, config)
# +
# Set up model directory
# save_dir = os.path.join(os.getcwd(), 'saved_models/mnist/')
save_dir = os.path.join(os.getcwd(), 'saved_models/')
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
net = net.to(device)
# if device == 'cuda':
#     net = torch.nn.DataParallel(net)
#     cudnn.benchmark = True
# Restore the pretrained weights, then evaluate using the wrapped basic
# network only (the PGD wrapper is just a training-time shell).
net.load_state_dict(torch.load(model_path))
# net = net.module
net = net.basic_net
net.eval()
# -
# Build the kNN-based defense on three conv-layer representations (k=50).
layers = ['conv1', 'conv2', 'conv3']
knn = CVPR_Defense(net, x_train, y_train, layers,
                   k=50, num_classes=10)
# Clean accuracy; `ind` keeps indices of correctly-classified test points,
# reused below to attack only points the defense gets right.
with torch.no_grad():
    y_pred = knn.get_output(x_test)
ind = np.where(y_pred.argmax(1) == y_test.numpy())[0]
print((y_pred.argmax(1) == y_test.numpy()).sum() / y_test.size(0))
# +
def attack_batch(attack, x, y, init_mode, init_mode_k, batch_size):
    """Run `attack` over `x`/`y` in mini-batches and return the adversarial x.

    `attack` is called with fixed optimization hyper-parameters; `init_mode`
    and `init_mode_k` select its initialization strategy.
    """
    x_adv = torch.zeros_like(x)
    total_num = x.size(0)
    # Ceil division so a final partial batch is also attacked (floor division
    # previously left the remainder rows as all-zero images).
    num_batches = (total_num + batch_size - 1) // batch_size
    for i in range(num_batches):
        begin = i * batch_size
        end = min((i + 1) * batch_size, total_num)
        x_adv[begin:end] = attack(
            x[begin:end], y[begin:end], 2, m=200,
            init_mode=init_mode, init_mode_k=init_mode_k,
            binary_search_steps=10, max_iterations=1000, learning_rate=1e-1,
            initial_const=1e-2, max_linf=None, random_start=True,
            thres_steps=200, check_adv_steps=200, verbose=False)
    return x_adv
num = 100  # number of correctly-classified test points to attack
def full_eval(knn):
    """Attack `num` correctly-classified test points under several attack
    initializations and keep, per point, the smallest successful L2
    perturbation; prints clean accuracy then adversarial accuracy and mean
    distance.  Points never successfully attacked keep the 1e9 sentinel.
    """
    with torch.no_grad():
        y_pred = knn.get_output(x_test)
    ind = np.where(y_pred.argmax(1) == y_test.numpy())[0]
    print((y_pred.argmax(1) == y_test.numpy()).sum() / y_test.size(0))
    dist_all = np.zeros(num) + 1e9  # best (smallest) L2 distance per point
    attack = CVPR_Attack(knn)
    # First attempt: init_mode=1
    x_adv = attack_batch(
        attack, x_test[ind][:num].cuda(), y_test[ind][:num], 1, 1, 100)
    with torch.no_grad():
        y_pred = knn.get_output(x_adv)
    ind_adv = y_pred.argmax(1) != y_test[ind][:num].numpy()
    dist = (x_adv.cpu() - x_test[ind][:num]).view(
        num, -1).norm(2, 1).numpy()
    for i in range(num):
        if ind_adv[i] and (dist[i] < dist_all[i]):
            dist_all[i] = dist[i]
    # Retry with init_mode=2 and k = 1..5, keeping the best distance per point
    for k in range(1, 6):
        x_adv = attack_batch(
            attack, x_test[ind][:num].cuda(), y_test[ind][:num], 2, k, 100)
        with torch.no_grad():
            y_pred = knn.get_output(x_adv)
        ind_adv = y_pred.argmax(1) != y_test[ind][:num].numpy()
        dist = (x_adv.cpu() - x_test[ind][:num]).view(
            num, -1).norm(2, 1).numpy()
        for i in range(num):
            if ind_adv[i] and (dist[i] < dist_all[i]):
                dist_all[i] = dist[i]
    # Points still at the sentinel were never successfully attacked
    adv_acc = (dist_all == 1e9).mean()
    print('adv accuracy: %.4f, mean dist: %.4f' % (
        adv_acc, dist_all[dist_all < 1e9].mean()))
    return dist_all
# -
# Time a full evaluation of the CVPR defense
start = time.time()
dist = full_eval(knn)
print(time.time() - start)
# NOTE(review): `dknn` is not defined anywhere in this cell sequence —
# presumably a DKNNL2 model built in an earlier session; rerunning this
# notebook top-to-bottom would fail here. TODO confirm.
attack = DKNNAttackV2(dknn)
num = 100
x_adv = attack_batch(
    attack, x_test[ind][:num].cuda(), y_test[ind][:num], 1, 1, 100)
with torch.no_grad():
    y_pred = knn.get_output(x_adv)
ind_adv = np.where(y_pred.argmax(1) != y_test[ind][:num].numpy())[0]
# Accuracy on adversarial inputs and mean L2 distance of successful attacks
adv_acc = (y_pred.argmax(1) == y_test[ind][:num].numpy()).sum() \
    / y_pred.shape[0]
dist = (x_adv.cpu() - x_test[ind][:num]).view(
    num, -1).norm(2, 1)[ind_adv].mean()
print('adv accuracy: %.4f, mean dist: %.4f' % (adv_acc, dist.item()))
# ---
# Single verbose CVPR_Attack run over the first `num` correctly-classified points
attack = CVPR_Attack(knn)
num = 100
x_adv = attack(
    x_test[ind][:num].cuda(), y_test[ind][:num], 2, m=100,
    init_mode=1, init_mode_k=1,
    binary_search_steps=10, max_iterations=1000, learning_rate=1e-1,
    initial_const=1e-2, max_linf=None, random_start=True,
    thres_steps=1000, check_adv_steps=1000, verbose=True)
# Stray notebook remnant: bare literal with no effect (likely a pasted output)
2.6985
| test_scripts/test_knn_defense.ipynb |