code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="eqZsGncVOEWu" executionInfo={"status": "ok", "timestamp": 1637835730277, "user_tz": -60, "elapsed": 9845, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhW7B0gq37_0vNPNMwXzEXtWim28IjSIiqwX5JTfw=s64", "userId": "10511128109520355558"}} outputId="6c3eecf8-8cc7-4304-f53e-378e05166062"
# !pip install torch torchvision
# !pip install wavio
# !pip install sounddevice
# + colab={"base_uri": "https://localhost:8080/"} id="1EZUukZ-OFiw" executionInfo={"status": "ok", "timestamp": 1637835730278, "user_tz": -60, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhW7B0gq37_0vNPNMwXzEXtWim28IjSIiqwX5JTfw=s64", "userId": "10511128109520355558"}} outputId="d6ede881-4abe-436a-dad8-d35e15cdc0a5"
# Mount Google Drive so the Kitchen20 dataset and project code are reachable from Colab.
from google.colab import drive
drive.mount('/content/drive')
# !ls "/content/drive/My Drive/IMT Atlantique/Projet 3A /master/kitchen20"
# %cd /content/drive/My Drive/IMT Atlantique/Projet 3A /master/kitchen20
# + colab={"base_uri": "https://localhost:8080/"} id="aCx1FR42NWFS" outputId="3441396f-b1a2-4f8f-dfbb-9715f4125a65"
from envnet import EnvNet
from kitchen20 import Kitchen20
from torch.utils.data import DataLoader
import torch.nn as nn
import utils as U
import torch
# Model: EnvNet audio classifier with 20 outputs (one per Kitchen20 class).
model = EnvNet(20, True)
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
# Dataset
batchSize = 32
inputLength = 48000  # raw audio samples per training example
# Waveform augmentation pipeline applied on the fly by the dataset.
transforms = []
transforms += [U.random_scale(1.25)] # Strong augment
transforms += [U.padding(inputLength // 2)] # Padding
transforms += [U.random_crop(inputLength)] # Random crop
transforms += [U.normalize(float(2 ** 16 / 2))] # 16 bit signed
transforms += [U.random_flip()] # Random +-
# Training split: folds 1-8 of Kitchen20 at 44.1 kHz.
trainData = Kitchen20(root='../',
                      transforms=transforms,
                      folds=[1,2,3,4,5,6,7,8],
                      overwrite=False,
                      audio_rate=44100,
                      use_bc_learning=False)
trainIter = DataLoader(trainData, batch_size=batchSize,
                       shuffle=True, num_workers=2)
# Validation pipeline: longer window and no random_scale augmentation.
inputLength = 64000
transforms = []
transforms += [U.padding(inputLength // 2)] # Padding
transforms += [U.random_crop(inputLength)] # Random crop
transforms += [U.normalize(float(2 ** 16 / 2))] # 16 bit signed
transforms += [U.random_flip()] # Random +-
# Validation split: fold 9 only.
valData = Kitchen20(root='../',
                    transforms=transforms,
                    folds=[9,],
                    audio_rate=44100,
                    overwrite=False,
                    use_bc_learning=False)
valIter = DataLoader(valData, batch_size=batchSize,
                     shuffle=True, num_workers=2)
# Train for 600 epochs, evaluating on the validation fold after each one.
for epoch in range(600):
    tAcc = tLoss = 0
    vAcc = vLoss = 0
    model.train()  # fix: set training mode once per epoch, not once per batch
    for x, y in trainIter:  # Train epoch
        x = x[:, None, None, :]  # (B, T) -> (B, 1, 1, T) for the 2-D conv stack
        x = x.to('cuda:0')
        y = y.to('cuda:0')
        # Forward pass: Compute predicted y by passing x to the model
        y_pred = model(x)
        y_pred = y_pred[:, :, 0, 0]  # drop spatial singleton dims -> (B, classes)
        # Compute and print loss
        loss = criterion(y_pred, y.long())
        acc = (y_pred.argmax(dim=1).long() == y.long()).sum()
        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        tLoss += loss.item()
        tAcc += acc.item()/len(trainData)
    model.eval()
    # fix: validation must not build autograd graphs (wastes memory otherwise)
    with torch.no_grad():
        for x, y in valIter:  # Test epoch
            x = x[:, None, None, :]
            x = x.to('cuda:0')
            y = y.to('cuda:0')
            # Forward pass: Compute predicted y by passing x to the model
            y_pred = model(x)
            y_pred = y_pred[:, :, 0, 0]
            loss = criterion(y_pred, y.long())
            acc = (y_pred.argmax(dim=1).long() == y.long()).sum()
            vLoss += loss.item()
            vAcc += acc.item()/len(valData)
    # loss = loss / len(dataset)
    # acc = acc / float(len(dataset))
    print('epoch {} -- train: {}/{} -- val:{}/{}'.format(
        epoch, tAcc, tLoss, vAcc, vLoss))
# + id="MxKZ3Y8RIdaz"
testData = Kitchen20(root='../',
transforms=transforms,
folds=[10,],
audio_rate=44100,
overwrite=False,
use_bc_learning=False)
testIter = DataLoader(testData, batch_size=1,
shuffle=True, num_workers=2)
# + id="FwloAMnYlJ5J"
# Evaluate the trained model on the held-out test fold.
testAcc = 0
model.eval()  # fix: set eval mode once instead of once per batch
with torch.no_grad():  # fix: no autograd graph needed at test time
    for x, y in testIter:  # Test epoch
        x = x[:, None, None, :]  # (B, T) -> (B, 1, 1, T)
        x = x.to('cuda:0')
        y = y.to('cuda:0')
        # Forward pass: Compute predicted y by passing x to the model
        y_test = model(x)
        y_test = y_test[:, :, 0, 0]  # -> (B, classes)
        print(y_test)
        print(y_test.argmax(dim=1))
        #loss = criterion(y_pred, y.long())
        acc = (y_test.argmax(dim=1).long() == y.long()).sum()
        #vLoss += loss.item()
        testAcc += acc.item()/len(testData)
# + id="fO9eJD5Tl1ME"
testAcc
# + id="kublTWzRleO2"
len(testData)
# + id="RIJ06p5iYCGx"
y
# + id="SAZ5b5Z2aTkE"
y_pred.argmax(dim=1).long()
# + id="-XF6yvzuawKz"
import numpy as np
data = np.load('../audio/44100.npz', allow_pickle=True)
lst = data.files
for item in lst:
print(item)
print(data[item])
# + id="zVtBXwoA4nmp"
len(trainData)
# + id="-QrG5y6w5Iw6"
acc.item()
# + id="M67bcbIPoBrw"
for i in range(10):
print(len(data[data.files[i]].item()['sounds']))
# + id="vD0lVA_uoCY4"
| Old-k20-model/kitchen20/training_script.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 作業
# - 新增一個欄位 `customized_age_grp`,把 `age` 分為 (0, 10], (10, 20], (20, 30], (30, 50], (50, 100] 這五組,
# '(' 表示不包含, ']' 表示包含
# - Hints: 執行 ??pd.cut(),了解提供其中 bins 這個參數的使用方式
# # [作業目標]
# - 請同學試著查詢 pandas.cut 這個函數還有哪些參數, 藉由改動參數以達成目標
# - 藉由查詢與改動參數的過程, 熟悉查詢函數的方法與理解參數性質, 並了解數值的離散化的調整工具
# # [作業重點]
# - 仿照 In[3], In[4] 的語法, 並設定 pd.cut 的參數以指定間距
# +
# 載入套件
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Initial "ages" data: 16 sample ages.
ages = pd.DataFrame({"age": [18,22,25,27,7,21,23,37,30,61,45,41,9,18,80,100]})
# #### Equal-width binning
# Add column "equal_width_age": split ages into 4 equal-width intervals.
ages["equal_width_age"] = pd.cut(ages["age"], 4)
# Count how many rows fall into each equal-width bin.
ages["equal_width_age"].value_counts() # every bin spans the same value range
# #### Equal-frequency binning
# Add column "equal_freq_age": split ages into 4 equal-frequency quantile bins.
ages["equal_freq_age"] = pd.qcut(ages["age"], 4)
# Count how many rows fall into each equal-frequency bin.
ages["equal_freq_age"].value_counts() # every bin holds the same number of rows
# ### Homework
# Bin ages into (0,10], (10,20], (20,30], (30,50], (50,100] via explicit edges.
bin_cut = [0,10,20,30,50,100]
ages["customized_age_grp"] = pd.cut(ages["age"], bins=bin_cut)
ages["customized_age_grp"].value_counts()
| 2nd-ML100Days/homework/D-012/Day_012_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import sys
import math
import numpy as np
import pandas as pd
import scipy.stats as stat
from itertools import groupby
from datetime import timedelta,datetime
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import time
R = 6.371*10**6  # Earth's mean radius in metres, used by the spherical-geometry helpers below
# +
## 1. projection: distorted distance
def unique(list1):
    """Return the distinct elements of *list1*, keeping first-seen order.

    Uses list membership (not a set) so unhashable elements also work.
    """
    deduped = []
    for element in list1:
        if element in deduped:
            continue
        deduped.append(element)
    return deduped
def cartesian(lat,lon):
    """Convert latitude/longitude in degrees to 3-D Cartesian (x, y, z).

    Points lie on a sphere of radius R (module-level Earth radius, metres).
    Accepts scalars or numpy arrays.
    """
    # Degrees -> radians.
    lat_rad = lat/180*math.pi
    lon_rad = lon/180*math.pi
    # Height above the equatorial plane and radius of the latitude circle.
    vertical = R*np.sin(lat_rad)
    planar = R*np.cos(lat_rad)
    return planar*np.cos(lon_rad), planar*np.sin(lon_rad), vertical
def great_circle_dist(lat1,lon1,lat2,lon2):
    """Great-circle distance in metres between two points given in degrees.

    Works element-wise on numpy arrays as well as on scalars; the cosine of
    the central angle is clamped to [-1, 1] to guard against rounding error.
    """
    phi1 = lat1/180*math.pi
    lam1 = lon1/180*math.pi
    phi2 = lat2/180*math.pi
    lam2 = lon2/180*math.pi
    # Spherical law of cosines.
    cos_angle = np.cos(phi1)*np.cos(phi2)*np.cos(lam1-lam2)+np.sin(phi1)*np.sin(phi2)
    if isinstance(cos_angle,np.ndarray):
        cos_angle = np.clip(cos_angle,-1,1)
    else:
        cos_angle = max(-1,min(1,cos_angle))
    return np.arccos(cos_angle)*R
# -
def LatLong2XY(Lat,Lon):
    """Project latitude/longitude arrays (degrees) onto a local 2-D plane.

    The bounding box of the points is mapped to a trapezoid whose side
    lengths account for meridian convergence; returns an (n, 2) array of
    x/y coordinates in metres.
    """
    lam = Lat/180*math.pi
    phi = Lon/180*math.pi
    lam_lo = min(lam)
    lam_hi = max(lam)
    phi_lo = min(phi)
    phi_hi = max(phi)
    earth_r = 6.371*10**6
    # Trapezoid edges: south-north extent and east-west extents at the
    # northern (d2) and southern (d3) latitude bounds.
    d1 = (lam_hi-lam_lo)*earth_r
    d2 = (phi_hi-phi_lo)*earth_r*math.sin(math.pi/2-lam_hi)
    d3 = (phi_hi-phi_lo)*earth_r*math.sin(math.pi/2-lam_lo)
    # Normalised position of each point within the bounding box.
    w1 = (lam-lam_lo)/(lam_hi-lam_lo)
    w2 = (phi-phi_lo)/(phi_hi-phi_lo)
    x = np.array(w1*(d3-d2)/2+w2*(d3*(1-w1)+d2*w1))
    y = np.array(w1*d1*math.sin(math.acos((d3-d2)/(2*d1))))
    return np.column_stack((x,y))
## helsinki and san francisco
# Experiment: measure how the planar projection distorts distances as the
# bounding box is stretched from SFO towards Helsinki in 100 steps.
lat0 = 37.61    # SFO airport
lon0 = -122.40
lat1 = 60.32    # Helsinki airport
lon1 = 24.95
d1_vec = []  # projected SFO -> Palo Alto distances per step
d2_vec = []  # projected SFO -> Golden Gate Bridge distances per step
d3_vec = []  # projected SFO -> Bay Bridge distances per step
for i in range(100):
    # Golden Gate Bridge (37.82, -122.48) with a growing far corner.
    lat = np.array([lat0,lat0+(lat1-lat0)/100*(i+1),37.82])
    lon = np.array([lon0,lon0+(lon1-lon0)/100*(i+1),-122.48])
    d2 = great_circle_dist(lat[0],lon[0],lat[-1],lon[-1])
    trapezoid = LatLong2XY(lat,lon)
    temp = np.sqrt((trapezoid[-1,0]-trapezoid[0,0])**2+(trapezoid[-1,1]-trapezoid[0,1])**2)
    d2_vec.append(temp)
    # Downtown Palo Alto (37.45, -122.16).
    lat = np.array([lat0,lat0+(lat1-lat0)/100*(i+1),37.45])
    lon = np.array([lon0,lon0+(lon1-lon0)/100*(i+1),-122.16])
    d1 = great_circle_dist(lat[0],lon[0],lat[-1],lon[-1])
    trapezoid = LatLong2XY(lat,lon)
    temp = np.sqrt((trapezoid[-1,0]-trapezoid[0,0])**2+(trapezoid[-1,1]-trapezoid[0,1])**2)
    d1_vec.append(temp)
    # Bay Bridge (37.79, -122.36).
    lat = np.array([lat0,lat0+(lat1-lat0)/100*(i+1),37.79])
    lon = np.array([lon0,lon0+(lon1-lon0)/100*(i+1),-122.36])
    d3 = great_circle_dist(lat[0],lon[0],lat[-1],lon[-1])
    trapezoid = LatLong2XY(lat,lon)
    temp = np.sqrt((trapezoid[-1,0]-trapezoid[0,0])**2+(trapezoid[-1,1]-trapezoid[0,1])**2)
    d3_vec.append(temp)
d3_vec[-1]  # notebook cell: show the most distorted Bay Bridge distance
# +
# Plot projected vs great-circle distances for the three landmark pairs,
# one subplot per pair; saved as distance.pdf.
plt.figure(figsize=(7,14))
plt.subplot(3, 1, 1)
plt.plot(np.arange(1,101),d2_vec,label = "projected distance")
plt.plot(np.arange(1,101),np.ones(100)*d2,"r--",label = "great circle distance")
plt.xlabel('Destination Latitude/Longitude')
plt.xticks(np.arange(101,step=20), ('37/-122', '41.6/-92.6', '46.2/-63.2', '50.8/-33.8', '55.4/-4.4','60/25'))
plt.ylabel('Distance between SFO and Golden Gate Bridge(m)')
plt.legend(loc='lower left', borderaxespad=0.3)
plt.subplot(3, 1, 2)
plt.plot(np.arange(1,101),d1_vec,label = "projected distance")
plt.plot(np.arange(1,101),np.ones(100)*d1,"r--",label = "great circle distance")
plt.xlabel('Destination Latitude/Longitude')
plt.xticks(np.arange(101,step=20), ('37/-122', '41.6/-92.6', '46.2/-63.2', '50.8/-33.8', '55.4/-4.4','60/25'))
plt.ylabel('Distance between SFO and Downtown Palo Alto(m)')
plt.legend(loc='lower left', borderaxespad=0.3)
plt.subplot(3, 1, 3)
plt.plot(np.arange(1,101),d3_vec,label = "projected distance")
plt.plot(np.arange(1,101),np.ones(100)*d3,"r--",label = "great circle distance")
plt.xlabel('Destination Latitude/Longitude')
plt.xticks(np.arange(101,step=20), ('37/-122', '41.6/-92.6', '46.2/-63.2', '50.8/-33.8', '55.4/-4.4','60/25'))
plt.ylabel('Distance between SFO and Bay Bridge(m)')
plt.legend(loc='upper left', borderaxespad=0.3)
plt.savefig("distance.pdf")
# -
d1_vec  # notebook cell: inspect the Palo Alto series
[d2,d1,d3]  # notebook cell: the three reference great-circle distances
# +
def shortest_dist_to_great_circle(lat,lon,lat_start,lon_start,lat_end,lon_end):
    """Distance (metres) from each (lat, lon) point to the great circle
    through (lat_start, lon_start) and (lat_end, lon_end), all in degrees.

    Returns zeros when start and end coincide (the great circle is
    undefined in that case).
    """
    if abs(lat_start-lat_end)<1e-6 and abs(lon_start-lon_end)<1e-6:
        return np.zeros(len(lat))
    else:
        # Cartesian coordinates of the query points and the circle endpoints.
        x,y,z = cartesian(lat,lon)
        x_start,y_start,z_start = cartesian(lat_start,lon_start)
        x_end,y_end,z_end = cartesian(lat_end,lon_end)
        # Unit normal of the plane containing the great circle
        # (1e-6 guards against division by zero for near-parallel vectors).
        cross_product = np.cross(np.array([x_start,y_start,z_start]),np.array([x_end,y_end,z_end]))
        N = cross_product/(np.linalg.norm(cross_product)+1e-6)
        C = np.array([x,y,z])/R
        temp = np.dot(N,C)
        # Clamp to [-1, 1] before arccos to absorb rounding error.
        if isinstance(temp,np.ndarray):
            temp[temp>1]=1
            temp[temp<-1]=-1
        else:
            if temp>1:
                temp=1
            if temp<-1:
                temp=-1
        NOC = np.arccos(temp)
        # Angle from the plane (not its normal) times the sphere radius.
        d = abs(math.pi/2-NOC)*R
        return d
def pairwise_great_circle_dist(latlon_array):
    """All pairwise great-circle distances between rows of an (n, 2)
    latitude/longitude array, as a flat list (upper-triangle order)."""
    n_points = np.shape(latlon_array)[0]
    return [great_circle_dist(latlon_array[a,0],latlon_array[a,1],
                              latlon_array[b,0],latlon_array[b,1])
            for a in range(n_points-1)
            for b in range(a+1,n_points)]
def ExistKnot(mat,r,w):
    """Decide whether the trajectory segment *mat* needs a knot (turn point).

    A knot is required when some observation lies more than *w* metres from
    the great circle through the segment's endpoints.  (*r* is accepted for
    interface compatibility but not used here.)
    Returns (1, index-of-farthest-point) or (0, None).
    """
    n_obs = mat.shape[0]
    if n_obs <= 1:
        return 0, None
    # Deviation of every point from the endpoint-to-endpoint great circle.
    deviations = shortest_dist_to_great_circle(
        mat[:,2], mat[:,3],
        mat[0,2], mat[0,3],
        mat[n_obs-1,2], mat[n_obs-1,3])
    if max(deviations) < w:
        return 0, None
    return 1, np.argmax(deviations)
def ExtractFlights(mat,itrvl,r,w,h):
    """Segment averaged GPS rows into flights and pauses.

    mat: rows of [status, t, lat, lon] averaged over itrvl-second windows
         (a 1-D row means a single observation).
    r:   radius (m) under which a whole segment counts as one pause.
    w:   max deviation (m) from a great circle before a knot is inserted.
    h:   per-step movement threshold (m) separating pause from flight.

    Returns rows of [code, lon0/lat0, t0, lon1/lat1, t1] where code 1 =
    flight, 2 = pause, 3 = single unclassified observation.
    """
    if len(mat.shape)==1:
        # Single observation: emit an unclassified (code 3) window around it.
        out = np.array([3,mat[2],mat[3],mat[1]-itrvl/2,None,None,mat[1]+itrvl/2])
    elif len(mat.shape)==2 and mat.shape[0]==1:
        out = np.array([3,mat[0,2],mat[0,3],mat[0,1]-itrvl/2,None,None,mat[0,1]+itrvl/2])
    else:
        n = mat.shape[0]
        # Append the original row index as column 4 so knot positions found
        # in sub-matrices can be mapped back to positions in mat.
        mat = np.hstack((mat,np.arange(n).reshape((n,1))))
        if n>1 and max(pairwise_great_circle_dist(mat[:,2:4]))<r:
            # Everything stayed within radius r: one long pause.
            m_lon = (mat[0,2]+mat[n-1,2])/2
            m_lat = (mat[0,3]+mat[n-1,3])/2
            out = np.array([2,m_lon,m_lat,mat[0,1]-itrvl/2,m_lon,m_lat,mat[n-1,1]+itrvl/2])
        else:
            complete = 0
            knots = [0,n-1]
            # Step-wise movement between consecutive averaged positions.
            mov = np.array([great_circle_dist(mat[i,2],mat[i,3],mat[i+1,2],mat[i+1,3]) for i in range(n-1)])
            pause_index = np.arange(0,n-1)[mov<h]
            temp = []
            for j in range(len(pause_index)-1):
                if pause_index[j+1]-pause_index[j]==1:
                    temp.append(pause_index[j])
                    temp.append(pause_index[j+1])
            ## all the consequential numbers in between are inserted twice, but start and end are inserted once
            long_pause = np.unique(temp)[np.array([len(list(group)) for key, group in groupby(temp)])==1]
            ## pause 0,1,2, correspond to point [0,1,2,3], so the end number should plus 1
            long_pause[np.arange(1,len(long_pause),2)] = long_pause[np.arange(1,len(long_pause),2)]+1
            knots.extend(long_pause.tolist())
            knots.sort()
            knots = unique(knots)
            # Iteratively split segments at their farthest-deviation point
            # until no segment deviates more than w from its chord.
            while complete == 0:
                mat_list = []
                for i in range(len(knots)-1):
                    mat_list.append(mat[knots[i]:min(knots[i+1]+1,n-1),:])
                knot_yes = np.empty(len(mat_list))
                knot_pos = np.empty(len(mat_list))
                for i in range(len(mat_list)):
                    knot_yes[i] , knot_pos[i] = ExistKnot(mat_list[i],r,w)
                if sum(knot_yes)==0:
                    complete = 1
                else:
                    for i in range(len(mat_list)):
                        if knot_yes[i]==1:
                            knots.append(int((mat_list[i])[int(knot_pos[i]),4]))
                    knots.sort()
            # Classify every knot-to-knot piece as pause (no step >= h) or flight.
            out = []
            for j in range(len(knots)-1):
                start = knots[j]
                end = knots[j+1]
                mov = np.array([great_circle_dist(mat[i,2],mat[i,3],mat[i+1,2],mat[i+1,3]) for i in np.arange(start,end)])
                if sum(mov>=h)==0:
                    m_lon = (mat[start,2]+mat[end,2])/2
                    m_lat = (mat[start,3]+mat[end,3])/2
                    nextline = [2, m_lon,m_lat,mat[start,1],m_lon,m_lat,mat[end,1]]
                else:
                    nextline = [1, mat[start,2],mat[start,3],mat[start,1],mat[end,2],mat[end,3],mat[end,1]]
                out.append(nextline)
            out = np.array(out)
    return out
def GPS2MobMat(filelist,itrvl=10,accuracylim=51, r=None, w=None,h=None):
    """Build a mobility matrix (flights/pauses/gaps) from raw GPS csv files.

    filelist:    paths of csv files with columns [timestamp(ms), ?, lat, lon, ..., accuracy].
    itrvl:       averaging window in seconds.
    accuracylim: rows with accuracy >= this are discarded.
    r, w, h:     segmentation parameters (see ExtractFlights); defaulted from
                 itrvl and the mean reported accuracy when None.

    Returns rows of [code, lon0/lat0, t0, lon1/lat1, t1] with code 1 =
    flight, 2 = pause, 3 = unclassified, produced by ExtractFlights.
    """
    if r is None:
        r = itrvl
        #r = np.sqrt(itrvl)
    if h is None:
        h = r
    sys.stdout.write("Read in all GPS csv files..." + '\n')
    # fix: DataFrame.append was deprecated and removed from pandas; build the
    # frame with a single pd.concat instead of repeated appends.
    frames = [pd.read_csv(path) for path in filelist]
    data = pd.concat(frames) if frames else pd.DataFrame()
    data = data[data.accuracy<accuracylim]
    if w is None:
        w = np.mean(data.accuracy)
        #w = np.mean(data.accuracy)+itrvl
    t_start = np.array(data.timestamp)[0]/1000
    t_end = np.array(data.timestamp)[-1]/1000
    # Pre-allocate one row per itrvl-second window plus slack.
    avgmat = np.empty([int(np.ceil((t_end-t_start)/itrvl))+2,4])
    sys.stdout.write("Collapse data within " +str(itrvl)+" second intervals..."+'\n')
    IDam = 0
    count = 0
    # nextline accumulates [status, window-centre time, lat-sum, lon-sum].
    nextline=[1,t_start+itrvl/2,data.iloc[0,2],data.iloc[0,3]]
    numitrvl=1
    for i in np.arange(1,data.shape[0]):
        if data.iloc[i,0]/1000 < t_start+itrvl:
            # Still inside the current window: accumulate coordinates.
            nextline[2]=nextline[2]+data.iloc[i,2]
            nextline[3]=nextline[3]+data.iloc[i,3]
            numitrvl=numitrvl+1
        else:
            # Close the current window with averaged coordinates.
            nextline[2]=nextline[2]/numitrvl
            nextline[3]=nextline[3]/numitrvl
            avgmat[IDam,:]=nextline
            count=count+1
            IDam=IDam+1
            # Emit a code-4 row describing any fully-missing windows.
            nummiss=int(np.floor((data.iloc[i,0]/1000-(t_start+itrvl))/itrvl))
            if nummiss>0:
                avgmat[IDam,:] = [4,t_start+itrvl,t_start+itrvl*(nummiss+1),None]
                count=count+1
                IDam=IDam+1
            t_start=t_start+itrvl*(nummiss+1)
            # Open the next window with the current observation.
            nextline[0]=1
            nextline[1]=t_start+itrvl/2
            nextline[2]=data.iloc[i,2]
            nextline[3]=data.iloc[i,3]
            numitrvl=1
    avgmat = avgmat[0:count,:]
    outmat = np.zeros(7)  # placeholder first row, deleted at the end
    curind = 0
    sys.stdout.write("Extract flights and pauses ..."+'\n')
    # Segment each run of observed windows (between code-4 gaps) separately.
    for i in range(avgmat.shape[0]):
        if avgmat[i,0]==4:
            temp = ExtractFlights(avgmat[np.arange(curind,i),:],itrvl,r,w,h)
            outmat = np.vstack((outmat,temp))
            curind=i+1
    if curind<avgmat.shape[0]:
        temp = ExtractFlights(avgmat[np.arange(curind,avgmat.shape[0]),:],itrvl,r,w,h)
        outmat = np.vstack((outmat,temp))
    mobmat = np.delete(outmat,0,0)
    return mobmat
def InferMobMat(mobmat,itrvl=10,r=None):
    """Clean a raw mobility matrix in place and return it.

    Reclassifies unclassified (code 3) rows as pause (2) or flight (1),
    merges consecutive pauses, bridges short spatial gaps, and appends a
    trailing "observed" indicator column (1 for real rows, 0 for pauses
    inserted to fill gaps).
    """
    ## infer those unclassified pieces
    sys.stdout.write("Infer unclassified windows ..."+'\n')
    if r is None:
        r = itrvl
        #r = np.sqrt(itrvl)
    # Views into mobmat's columns; edits below mutate mobmat row by row.
    code = mobmat[:,0]
    x0 = mobmat[:,1]; y0 = mobmat[:,2]; t0 = mobmat[:,3]
    x1 = mobmat[:,4]; y1 = mobmat[:,5]; t1 = mobmat[:,6]
    for i in range(len(code)):
        if code[i]==3 and i==0:
            # No predecessor to compare against: treat as a pause.
            code[i]=2
            x1[i] = x0[i]
            y1[i] = y0[i]
        if code[i]==3 and i>0:
            d = great_circle_dist(x0[i],y0[i],x1[i-1],y1[i-1])
            if t0[i]-t1[i-1]<=itrvl*3:
                # Close in time to the previous row: pause if close in space,
                # otherwise a short flight interpolated from the previous row.
                if d<r:
                    code[i]=2
                    x1[i] = x0[i]
                    y1[i] = y0[i]
                else:
                    code[i]=1
                    s_x = x0[i]-itrvl/2/(t0[i]-t1[i-1])*(x0[i]-x1[i-1])
                    s_y = y0[i]-itrvl/2/(t0[i]-t1[i-1])*(y0[i]-y1[i-1])
                    e_x = x0[i]+itrvl/2/(t0[i]-t1[i-1])*(x0[i]-x1[i-1])
                    e_y = y0[i]+itrvl/2/(t0[i]-t1[i-1])*(y0[i]-y1[i-1])
                    x0[i] = s_x; x1[i]=e_x
                    y0[i] = s_y; y1[i]=e_y
            if t0[i]-t1[i-1]>itrvl*3:
                # Far from the previous row in time: compare with the NEXT row instead.
                if (i+1)<len(code):
                    f = great_circle_dist(x0[i],y0[i],x0[i+1],y0[i+1])
                    if t0[i+1]-t1[i]<=itrvl*3:
                        if f<r:
                            code[i]=2
                            x1[i] = x0[i]
                            y1[i] = y0[i]
                        else:
                            code[i]=1
                            s_x = x0[i]-itrvl/2/(t0[i+1]-t1[i])*(x0[i+1]-x0[i])
                            s_y = y0[i]-itrvl/2/(t0[i+1]-t1[i])*(y0[i+1]-y0[i])
                            e_x = x0[i]+itrvl/2/(t0[i+1]-t1[i])*(x0[i+1]-x0[i])
                            e_y = y0[i]+itrvl/2/(t0[i+1]-t1[i])*(y0[i+1]-y0[i])
                            x0[i] = s_x; x1[i]=e_x
                            y0[i] = s_y; y1[i]=e_y
                    else:
                        code[i]=2
                        x1[i] = x0[i]
                        y1[i] = y0[i]
                else:
                    # Last row: default to pause.
                    code[i]=2
                    x1[i] = x0[i]
                    y1[i] = y0[i]
        mobmat[i,:] = [code[i],x0[i],y0[i],t0[i],x1[i],y1[i],t1[i]]
    ## merge consecutive pauses
    sys.stdout.write("Merge consecutive pauses and bridge gaps ..."+'\n')
    k = []
    for j in np.arange(1,len(code)):
        if code[j]==2 and code[j-1]==2 and t0[j]==t1[j-1]:
            k.append(j-1)
            k.append(j)
    ## all the consequential numbers in between are inserted twice, but start and end are inserted once
    # rk holds run boundaries: (start, end) indices of each pause run.
    rk = np.unique(k)[np.array([len(list(group)) for key, group in groupby(k)])==1]
    for j in range(int(len(rk)/2)):
        start = rk[2*j]
        end = rk[2*j+1]
        # Replace the run with a single pause at the mean location; mark the
        # absorbed rows with code 5 for deletion below.
        mx = np.mean(x0[np.arange(start,end+1)])
        my = np.mean(y0[np.arange(start,end+1)])
        mobmat[start,:] = [2,mx,my,t0[start],mx,my,t1[end]]
        mobmat[np.arange(start+1,end+1),0]=5
    mobmat = mobmat[mobmat[:,0]!=5,:]
    ## check missing intervals, if starting and ending point are close, make them same
    new_pauses = []
    for j in np.arange(1,mobmat.shape[0]):
        if mobmat[j,3] > mobmat[j-1,6]:
            d = great_circle_dist(mobmat[j,1],mobmat[j,2],mobmat[j-1,4],mobmat[j-1,5])
            if d<10:
                # Endpoints within 10 m: snap them together and fill the gap
                # with an inserted pause (observed flag 0).
                if mobmat[j,0]==2 and mobmat[j-1,0]==2:
                    initial_x = mobmat[j-1,4]
                    initial_y = mobmat[j-1,5]
                    mobmat[j,1] = mobmat[j,4] = mobmat[j-1,1] = mobmat[j-1,4] = initial_x
                    mobmat[j,2] = mobmat[j,5] = mobmat[j-1,2] = mobmat[j-1,5] = initial_y
                if mobmat[j,0]==1 and mobmat[j-1,0]==2:
                    mobmat[j,1] = mobmat[j-1,4]
                    mobmat[j,2] = mobmat[j-1,5]
                if mobmat[j,0]==2 and mobmat[j-1,0]==1:
                    mobmat[j-1,4] = mobmat[j,1]
                    mobmat[j-1,5] = mobmat[j,2]
                if mobmat[j,0]==1 and mobmat[j-1,0]==1:
                    mean_x = (mobmat[j,1] + mobmat[j-1,4])/2
                    mean_y = (mobmat[j,2] + mobmat[j-1,5])/2
                    mobmat[j-1,4] = mobmat[j,1] = mean_x
                    mobmat[j-1,5] = mobmat[j,2] = mean_y
                new_pauses.append([2,mobmat[j,1],mobmat[j,2],mobmat[j-1,6],mobmat[j,1],mobmat[j,2],mobmat[j,3],0])
    new_pauses = np.array(new_pauses)
    ## connect flights and pauses
    for j in np.arange(1,mobmat.shape[0]):
        # code product == 2 means exactly one of the pair is a flight (1*2).
        if mobmat[j,0]*mobmat[j-1,0]==2 and mobmat[j,3]==mobmat[j-1,6]:
            if mobmat[j,0]==1:
                mobmat[j,1] = mobmat[j-1,4]
                mobmat[j,2] = mobmat[j-1,5]
            if mobmat[j-1,0]==1:
                mobmat[j-1,4] = mobmat[j,1]
                mobmat[j-1,5] = mobmat[j,2]
    # Observed-data indicator column (1 = real observation).
    mobmat = np.hstack((mobmat,np.ones(mobmat.shape[0]).reshape(mobmat.shape[0],1)))
    ### check if new pauses are empty
    if len(new_pauses)>0:
        mobmat = np.vstack((mobmat,new_pauses))
        mobmat = mobmat[mobmat[:,3].argsort()].astype(float)
    return mobmat
def locate_home(MobMat):
    """Estimate home coordinates as the most frequently visited significant
    place among pauses occurring between 19:00 and 09:00 local time.

    NOTE(review): depends on num_sig_places(), which is defined elsewhere
    in this project (not visible in this file section).
    """
    # Pause rows only (code 2).
    ObsTraj = MobMat[MobMat[:,0]==2,:]
    # Local hour of each pause's midpoint timestamp.
    hours = [datetime.fromtimestamp((ObsTraj[i,3]+ObsTraj[i,6])/2).hour for i in range(ObsTraj.shape[0])]
    hours = np.array(hours)
    # Night-time pauses (>= 19:00 or <= 09:00).
    home_pauses = ObsTraj[((hours>=19)+(hours<=9))*ObsTraj[:,0]==2,:]
    # Cluster with a 20 m radius and take the most-visited cluster centre.
    loc_x,loc_y,num_xy,t_xy = num_sig_places(home_pauses,20)
    home_index = num_xy.index(max(num_xy))
    home_x, home_y = loc_x[home_index],loc_y[home_index]
    return home_x,home_y
def K0(x1,x2):
    """Kernel similarity between two (time, coordinate) inputs.

    Combines daily- (86400 s) and weekly- (604800 s) periodic decaying
    kernels on the time component with an exponential kernel on the
    spatial component.
    NOTE(review): relies on module-level hyper-parameters l1, l2, l3,
    a1, a2, b1, b2, b3 defined elsewhere in the project.
    """
    k1 = np.exp(-abs(x1[0]-x2[0])/l1)*np.exp(-(np.sin(abs(x1[0]-x2[0])/86400*math.pi))**2/a1)
    k2 = np.exp(-abs(x1[0]-x2[0])/l2)*np.exp(-(np.sin(abs(x1[0]-x2[0])/604800*math.pi))**2/a2)
    k3 = np.exp(-abs(x1[1]-x2[1])/l3)
    return b1*k1+b2*k2+b3*k3
## similarity matrix between bv's
def update_K(bv,t,K,X):
    """Grow the basis-vector kernel matrix K by one row and one column for
    input X[t]; returns the (d+1, d+1) matrix (or [1] on the first step)."""
    if t==0:
        return np.array([1])
    d = np.shape(K)[0]
    row = np.ones(d)
    column = np.ones([d+1,1])
    one_dim = X.ndim==1
    for i in range(d):
        # Kernel similarity between the new input and the i-th basis vector.
        if one_dim:
            sim = K0(X[t],X[bv[i]])
        else:
            sim = K0(X[t,:],X[bv[i],:])
        row[i] = column[i,0] = sim
    return np.hstack([np.vstack([K,row]),column])
## similarity vector between the t'th input with all bv's, t starts from 0 here
## similarity vector between the t'th input with all bv's, t starts from 0 here
def update_k(bv,t,X):
    """Kernel similarities between input X[t] and every current basis
    vector; returns [0] when the basis set is still empty."""
    if len(bv)==0:
        return np.array([0])
    if X.ndim==1:
        sims = [K0(X[t],X[idx]) for idx in bv]
    else:
        sims = [K0(X[t,:],X[idx,:]) for idx in bv]
    return np.array(sims)
def update_e_hat(Q,k):
    """Projection coefficients e = Q·k; placeholder [0] while Q is empty."""
    if np.shape(Q)[0]==0:
        return np.array([0])
    return np.dot(Q,k)
def update_gamma(k,e_hat):
    """Novelty of the current input: gamma = 1 - <k, e_hat>."""
    return 1-np.inner(k,e_hat)
def update_q(t,k,alpha,sigmax,Y):
    """Scaled residual of the t-th target under the current model."""
    if t==0:
        residual = Y[t]
    else:
        residual = Y[t]-np.dot(k,alpha)
    return residual/sigmax
def update_s_hat(C,k,e_hat):
    """Update direction without basis growth: s = C·k + e_hat."""
    return e_hat+np.dot(C,k)
def update_eta(gamma,sigmax):
    """Damping factor 1 / (1 + gamma*r), with r = -1/sigmax."""
    neg_inv = -1/sigmax
    return 1/(1+gamma*neg_inv)
def update_alpha_hat(alpha,q,eta,s_hat):
    """Take a damped step of size q*eta along s_hat from alpha."""
    step = q*eta
    return alpha+step*s_hat
def update_c_hat(C,sigmax,eta,s_hat):
    """Rank-one covariance update without basis growth."""
    r = -1/sigmax
    rank_one = np.outer(s_hat,s_hat)
    return C+r*eta*rank_one
def update_s(C,k):
    """Extend C·k with a trailing 1 for the newly added basis vector;
    bare [1] when C is still empty."""
    if np.shape(C)[0]==0:
        return np.array([1])
    return np.append(np.dot(C,k),1)
def update_alpha(alpha,q,s):
    """Grow alpha by one zero entry, then step by q along s."""
    extended = np.append(alpha,0)
    return extended + q*s
def update_c(C,sigmax,s):
    """Grow C by one zero row/column, then apply the rank-one update
    r * s sᵀ with r = -1/sigmax."""
    d = np.shape(C)[0]
    if d==0:
        padded = np.array([0])
    else:
        padded = np.hstack([np.vstack([C,np.zeros(d)]),np.zeros([d+1,1])])
    r = -1/sigmax
    return padded+r*np.outer(s,s)
def update_Q(Q,gamma,e_hat):
    """Grow the inverse-kernel matrix Q by one row/column using the
    Sherman-Morrison-style rank-one correction (e_hat, -1)/gamma."""
    d = np.shape(Q)[0]
    if d==0:
        return np.array([1])
    ext = np.append(e_hat,-1)
    padded = np.hstack([np.vstack([Q,np.zeros(d)]),np.zeros([d+1,1])])
    return padded + 1/gamma*np.outer(ext,ext)
def update_alpha_vec(alpha,Q,C):
    """Shrink alpha after deleting the last basis vector, redistributing
    its weight across the remaining entries."""
    last = len(alpha)-1
    scale = alpha[last]/(C[last,last]+Q[last,last])
    return alpha[:last]-scale*(Q[last,:last]+C[last,:last])
def update_c_mat(C,Q):
    """Shrink the covariance matrix C after deleting the last basis vector."""
    last = np.shape(C)[0]-1
    keep = C[:last,:last]
    q_corr = np.outer(Q[last,:last],Q[last,:last])/Q[last,last]
    mixed = Q[last,:last]+C[last,:last]
    mixed_corr = np.outer(mixed,mixed)/(Q[last,last]+C[last,last])
    return keep+q_corr-mixed_corr
def update_q_mat(Q):
    """Shrink the inverse-kernel matrix Q after deleting the last basis vector."""
    last = np.shape(Q)[0]-1
    correction = np.outer(Q[last,:last],Q[last,:last])/Q[last,last]
    return Q[:last,:last]-correction
def update_s_mat(k_mat,s_mat,index,Q):
    """Recompute the S matrix after a basis vector has been pruned.

    k_mat, s_mat: (d+1, d+1) kernel and extended-S matrices, reordered by
                  *index* so the pruned vector is last.
    Q:            the already-shrunk (d, d) inverse-kernel matrix.

    Bug fix: the original referenced an undefined global ``d``; the reduced
    basis size is recovered from Q, which is (d, d) at this point.
    """
    d = np.shape(Q)[0]
    k_mat = (k_mat[index,:])[:,index]
    s_mat = (s_mat[index,:])[:,index]
    step1 = k_mat-k_mat.dot(s_mat).dot(k_mat)
    # Keep only the retained d basis vectors.
    step2 = (step1[:d,:])[:,:d]
    step3 = Q - Q.dot(step2).dot(Q)
    return step3
def SOGP(X,Y,sigma2,tol,d,Q=[],C=[],alpha=[],bv=[]):
    """Sparse Online Gaussian Process regression (Csató & Opper style).

    X, Y:   inputs (1-D or 2-D array) and targets, processed one at a time.
    sigma2: observation noise variance.
    tol:    novelty threshold; inputs with gamma < tol do not grow the basis.
    d:      maximum number of basis vectors kept.
    Q, C, alpha, bv: optional warm-start state.
    NOTE(review): the mutable default arguments are safe here because they
    are only rebound, never mutated in place.

    Returns {'bv', 'alpha', 'Q', 'C'} — the final basis-vector state.
    """
    n = len(Y)
    I = 0 ## an indicator shows if it is the first time that the number of bvs hits d
    for i in range(n):
        # Similarity of input i to the current basis set.
        k = update_k(bv,i,X)
        if np.shape(C)[0]==0:
            sigmax = 1+sigma2
        else:
            sigmax = 1+sigma2+k.dot(C).dot(k)
        q = update_q(i,k,alpha,sigmax,Y)
        r = -1/sigmax
        e_hat = update_e_hat(Q,k)
        gamma = update_gamma(k,e_hat)
        if gamma<tol:
            # Input is well-explained by existing basis vectors:
            # update alpha/C without growing the basis.
            s = update_s_hat(C,k,e_hat)
            eta = update_eta(gamma,sigmax)
            alpha = update_alpha_hat(alpha,q,eta,s)
            C = update_c_hat(C,sigmax,eta,s)
        else:
            # Novel input: grow alpha, C, Q and add i as a basis vector.
            s = update_s(C,k)
            alpha = update_alpha(alpha,q,s)
            C = update_c(C,sigmax,s)
            Q = update_Q(Q,gamma,e_hat)
            bv = np.array(np.append(bv,i),dtype=int)
            if len(bv)>=d:
                I = I + 1
                if I==1:
                    # First time at capacity: build the full kernel matrix K
                    # among the basis vectors and the S matrix.
                    K = np.zeros([d,d])
                    if X.ndim==1:
                        for i in range(d):
                            for j in range(d):
                                K[i,j] = K0(X[bv[i]],X[bv[j]])
                    else:
                        for i in range(d):
                            for j in range(d):
                                K[i,j] = K0(X[bv[i],:],X[bv[j],:])
                    S = np.linalg.inv(np.linalg.inv(C)+K)
            if len(bv)>d:
                # Over capacity: score each basis vector and prune the one
                # whose removal costs the least (minimum eps).
                alpha_vec = update_alpha_vec(alpha,Q,C)
                c_mat = update_c_mat(C,Q)
                q_mat = update_q_mat(Q)
                s_mat = np.hstack([np.vstack([S,np.zeros(d)]),np.zeros([d+1,1])])
                s_mat[d,d] = 1/sigma2
                k_mat = update_K(bv,i,K,X)
                eps = np.zeros(d)
                for j in range(d):
                    eps[j] = alpha_vec[j]/(q_mat[j,j]+c_mat[j,j])-s_mat[j,j]/q_mat[j,j]+np.log(1+c_mat[j,j]/q_mat[j,j])
                loc = np.where(eps == np.min(eps))[0][0]
                bv = np.array(np.delete(bv,loc),dtype=int)
                # Reorder so the pruned vector is last, then shrink all state.
                if loc==0:
                    index = np.append(np.arange(1,d+1),0)
                else:
                    index = np.append(np.append(np.arange(0,loc),np.arange(loc+1,d+1)),loc)
                alpha = update_alpha_vec(alpha[index],(Q[index,:])[:,index],(C[index,:])[:,index])
                C = update_c_mat((C[index,:])[:,index],(Q[index,:])[:,index])
                Q = update_q_mat((Q[index,:])[:,index])
                S = update_s_mat(k_mat,s_mat,index,Q)
                K = (k_mat[index[:d],:])[:,index[:d]]
    output = {'bv':bv,'alpha':alpha,'Q':Q,'C':C}
    return output
def BV_select(MobMat,sigma2,tol,d):
    """Select representative rows of a mobility matrix via SOGP.

    Runs four independent SOGP passes — (time, x) -> y and (time, y) -> x,
    each separately over flight rows (code 1) and pause rows (code 2) —
    and takes the union of the selected basis-vector indices.

    Returns {'BV_set': selected rows, 'BV_index': their row indices}.
    """
    orig_order = np.arange(MobMat.shape[0])
    flight_index = MobMat[:,0]==1
    pause_index = MobMat[:,0]==2
    # Midpoints of each row's start/end coordinates and times.
    mean_x = (MobMat[:,1]+MobMat[:,4])/2
    mean_y = (MobMat[:,2]+MobMat[:,5])/2
    mean_t = (MobMat[:,3]+MobMat[:,6])/2
    # Pass 1: flights, predicting y from (t, x).
    X = np.transpose(np.vstack((mean_t,mean_x)))[flight_index]
    Y = mean_y[flight_index]
    result1 = SOGP(X,Y,sigma2,tol,d)['bv']
    index = orig_order[flight_index][result1]
    # Pass 2: pauses, predicting y from (t, x).
    X = np.transpose(np.vstack((mean_t,mean_x)))[pause_index]
    Y = mean_y[pause_index]
    result2 = SOGP(X,Y,sigma2,tol,d)['bv']
    index = np.append(index,orig_order[pause_index][result2])
    # Pass 3: flights, predicting x from (t, y).
    X = np.transpose(np.vstack((mean_t,mean_y)))[flight_index]
    Y = mean_x[flight_index]
    result3 = SOGP(X,Y,sigma2,tol,d)['bv']
    index = np.append(index,orig_order[flight_index][result3])
    # Pass 4: pauses, predicting x from (t, y).
    X = np.transpose(np.vstack((mean_t,mean_y)))[pause_index]
    Y = mean_x[pause_index]
    result4 = SOGP(X,Y,sigma2,tol,d)['bv']
    index = np.append(index,orig_order[pause_index][result4])
    index = np.unique(index)
    BV_set = MobMat[index,:]
    return {'BV_set':BV_set,'BV_index':index}
def create_tables(MobMat, BV_set):
    """Split BV_set into flight/pause tables and collect missing intervals.

    A missing interval exists wherever consecutive MobMat rows do not touch
    in time; each such gap is recorded as
    [x_end, y_end, t_end, x_next, y_next, t_next, code_before, code_after].
    Returns (flight_table, pause_table, mis_table).
    """
    n_rows = np.shape(MobMat)[0]
    # Flights have code 1, pauses code 2 (column 0).
    flight_table = BV_set[BV_set[:,0]==1,:]
    pause_table = BV_set[BV_set[:,0]==2,:]
    # Placeholder first row, removed after the scan.
    mis_table = np.zeros(8)
    for row in range(n_rows-1):
        if MobMat[row+1,3]!=MobMat[row,6]:
            ## also record if it's flight/pause before and after the missing interval
            gap = np.array([MobMat[row,4],MobMat[row,5],MobMat[row,6],
                            MobMat[row+1,1],MobMat[row+1,2],MobMat[row+1,3],
                            MobMat[row,0],MobMat[row+1,0]])
            mis_table = np.vstack((mis_table,gap))
    mis_table = np.delete(mis_table,0,0)
    return flight_table, pause_table, mis_table
def K1(method,current_t,current_x,current_y,BV_set):
    """Kernel similarity between the current point and every basis vector.

    method: "TL" = time-only kernel, "GL" = location-only kernel,
            "GLC" = combined time+location kernel.
    NOTE(review): relies on module-level hyper-parameters l1, l2, a1, a2,
    b1, b2, b3 and spatial bandwidth g defined elsewhere in the project.
    """
    # Midpoints of each basis vector's start/end coordinates and times.
    mean_x = ((BV_set[:,1] + BV_set[:,4])/2).astype(float)
    mean_y = ((BV_set[:,2] + BV_set[:,5])/2).astype(float)
    mean_t = ((BV_set[:,3] + BV_set[:,6])/2).astype(float)
    if method=="TL":
        # Decaying kernels with daily (86400 s) and weekly (604800 s) periodicity.
        k1 = np.exp(-abs(current_t-mean_t)/l1)*np.exp(-(np.sin(abs(current_t-mean_t)/86400*math.pi))**2/a1)
        k2 = np.exp(-abs(current_t-mean_t)/l2)*np.exp(-(np.sin(abs(current_t-mean_t)/604800*math.pi))**2/a2)
        return b1/(b1+b2)*k1+b2/(b1+b2)*k2
    if method=="GL":
        d = great_circle_dist(current_x,current_y,mean_x,mean_y)
        return np.exp(-d/g)
    if method=="GLC":
        k1 = np.exp(-abs(current_t-mean_t)/l1)*np.exp(-(np.sin(abs(current_t-mean_t)/86400*math.pi))**2/a1)
        k2 = np.exp(-abs(current_t-mean_t)/l2)*np.exp(-(np.sin(abs(current_t-mean_t)/604800*math.pi))**2/a2)
        d = great_circle_dist(current_x,current_y,mean_x,mean_y)
        k3 = np.exp(-d/g)
        return b1*k1+b2*k2+b3*k3
def I_flight(method,current_t,current_x,current_y,dest_t,dest_x,dest_y,BV_set,z):
    """Draw z Bernoulli indicators of whether the next imputed move is a
    flight (1) rather than a pause (0).

    The base probability p0 compares kernel similarity to the top flight
    vs top pause basis vectors, then is boosted by the speed required to
    reach the destination in the remaining time.
    NOTE(review): depends on module-level constant `num` (number of top
    similarities averaged), defined elsewhere in the project.
    """
    K = K1(method,current_t,current_x,current_y,BV_set)
    flight_K = K[BV_set[:,0]==1]
    pause_K = K[BV_set[:,0]==2]
    # Descending similarities; average the top `num` of each class.
    sorted_flight = np.sort(flight_K)[::-1]
    sorted_pause = np.sort(pause_K)[::-1]
    p0 = np.mean(sorted_flight[0:num])/(np.mean(sorted_flight[0:num])+np.mean(sorted_pause[0:num])+1e-8)
    # Speed needed to reach the destination on time (epsilon avoids /0).
    d_dest = great_circle_dist(current_x,current_y,dest_x,dest_y)
    v_dest = d_dest/(dest_t-current_t+0.0001)
    ## design an exponential function here to adjust the probability based on the speed needed
    ## p = p0*exp(|v-2|+/s) v=2--p=p0 v=14--p=1
    if p0 < 1e-5:
        p0 = 1e-5
    if p0 > 1-1e-5:
        p0 = 1-1e-5
    s = -12/np.log(p0)
    p1 = min(1,p0*np.exp(min(max(0,v_dest-2)/s,1e2)))
    out = stat.bernoulli.rvs(p1,size=z)
    return out
def adjust_direction(delta_x,delta_y,start_x,start_y,end_x,end_y,old_x,old_y):
    """Smooth an imputed step by blending it with the previous direction.

    Blends (delta_x, delta_y) with the normalised previous step
    (old_x, old_y) using a random weight in [0, 4), rescales back to the
    original step length, and flips the sign if the result points away
    from the destination (end - start).
    """
    prev_norm = np.sqrt(old_x**2+old_y**2)+0.001
    k = np.random.uniform(low=0, high=4) ## this is another parameter which controls the smoothness
    # Blend the sampled step with the previous direction.
    blended_x = delta_x + k*old_x/prev_norm
    blended_y = delta_y + k*old_y/prev_norm
    # Rescale the blend back to the original step length.
    step_norm = np.sqrt(delta_x**2 + delta_y**2)
    blend_norm = np.sqrt(blended_x**2 + blended_y**2)
    norm_x = blended_x*step_norm/blend_norm
    norm_y = blended_y*step_norm/blend_norm
    # Keep the step pointing towards the destination.
    toward_dest = np.inner(np.array([end_x-start_x,end_y-start_y]),np.array([norm_x,norm_y]))
    if toward_dest < 0:
        return -norm_x, -norm_y
    return norm_x, norm_y
def multiplier(t_diff):
    """Oversampling factor used when imputing a gap; currently a constant
    independent of the gap length *t_diff*."""
    return 5
def checkbound(current_x,current_y,start_x,start_y,end_x,end_y):
    """Return 1 if (current_x, current_y) lies strictly inside the bounding
    box of the start/end points padded by 0.01 on every side, else 0."""
    lo_x, hi_x = min(start_x,end_x), max(start_x,end_x)
    lo_y, hi_y = min(start_y,end_y), max(start_y,end_y)
    inside_x = lo_x-0.01 < current_x < hi_x+0.01
    inside_y = lo_y-0.01 < current_y < hi_y+0.01
    return 1 if inside_x and inside_y else 0
def ImputeGPS(MobMat,BV_set,method,switch):
sys.stdout.write("Imputing missing trajectories..." + '\n')
flight_table, pause_table, mis_table = create_tables(MobMat, BV_set)
imp_x0 = np.array([]); imp_x1 = np.array([])
imp_y0 = np.array([]); imp_y1 = np.array([])
imp_t0 = np.array([]); imp_t1 = np.array([])
imp_s = np.array([])
for i in range(mis_table.shape[0]):
#print(i)
delta_x_f = 0
delta_y_f = 0
delta_x_b = 0
delta_y_b = 0
mis_t0 = mis_table[i,2]; mis_t1 = mis_table[i,5]
d_diff = great_circle_dist(mis_table[i,0],mis_table[i,1],mis_table[i,3],mis_table[i,4])
t_diff = mis_table[i,5] - mis_table[i,2]
## if a person remains at the same place at the begining and end of missing, just assume he satys there all the time
if mis_table[i,0]==mis_table[i,3] and mis_table[i,1]==mis_table[i,4]:
imp_s = np.append(imp_s,2)
imp_x0 = np.append(imp_x0, mis_table[i,0])
imp_x1 = np.append(imp_x1, mis_table[i,3])
imp_y0 = np.append(imp_y0, mis_table[i,1])
imp_y1 = np.append(imp_y1, mis_table[i,4])
imp_t0 = np.append(imp_t0, mis_table[i,2])
imp_t1 = np.append(imp_t1, mis_table[i,5])
else:
## solve the problem that a person has a trajectory like flight/pause/flight/pause/flight...
## we want it more like flght/flight/flight/pause/pause/pause/flight/flight...
## start from two ends, we make it harder to change the current pause/flight status by drawing multiple random
## variables form bin(p0) and require them to be all 0/1
## "switch" is the number of random variables
start_t = mis_table[i,2]; end_t = mis_table[i,5]
start_x = mis_table[i,0]; end_x = mis_table[i,3]
start_y = mis_table[i,1]; end_y = mis_table[i,4]
start_s = mis_table[i,6]; end_s = mis_table[i,7]
counter = 0
while start_t < end_t:
if abs(start_x-end_x)+abs(start_y-end_y)>0 and end_t-start_t<30: ## avoid extreme high speed
#print(1)
imp_s = np.append(imp_s,1)
imp_t0 = np.append(imp_t0,start_t)
imp_t1 = np.append(imp_t1,end_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,end_y)
start_t = end_t
## should check the missing legnth first, if it's less than 12 hours, do the following, otherewise,
## insert home location at night most visited places in the interval as known
elif start_x==end_x and start_y==end_y:
imp_s = np.append(imp_s,2)
imp_t0 = np.append(imp_t0,start_t)
imp_t1 = np.append(imp_t1,end_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,end_y)
start_t = end_t
else:
if counter % 2 == 0:
direction = 'forward'
else:
direction = 'backward'
if direction == 'forward':
direction =''
I0 = I_flight(method,start_t,start_x,start_y,end_t,end_x,end_y,BV_set,switch)
if (sum(I0==1)==switch and start_s==2) or (sum(I0==0)<switch and start_s==1):
#print(2)
weight = K1(method,start_t,start_x,start_y,flight_table)
normalize_w = (weight+1e-5)/sum(weight+1e-5)
flight_index = np.random.choice(flight_table.shape[0], p=normalize_w)
delta_x = flight_table[flight_index,4]-flight_table[flight_index,1]
delta_y = flight_table[flight_index,5]-flight_table[flight_index,2]
delta_t = flight_table[flight_index,6]-flight_table[flight_index,3]
if(start_t + delta_t > end_t):
temp = delta_t
delta_t = end_t-start_t
delta_x = delta_x*delta_t/temp
delta_y = delta_y*delta_t/temp
delta_x,delta_y = adjust_direction(delta_x,delta_y,start_x,start_y,end_x,end_y,delta_x_f,delta_y_f)
delta_x_f,delta_y_f = delta_x,delta_y
try_t = start_t + delta_t
try_x = (end_t-try_t)/(end_t-start_t+1e-5)*(start_x+delta_x)+(try_t-start_t+1e-5)/(end_t-start_t)*end_x
try_y = (end_t-try_t)/(end_t-start_t+1e-5)*(start_y+delta_y)+(try_t-start_t+1e-5)/(end_t-start_t)*end_y
mov1 = great_circle_dist(try_x,try_y,start_x,start_y)
mov2 = great_circle_dist(end_x,end_y,start_x,start_y)
check1 = checkbound(try_x,try_y,mis_table[i,0],mis_table[i,1],mis_table[i,3],mis_table[i,4])
check2 = (mov1<mov2)*1
if end_t>start_t and check1==1 and check2==1:
imp_s = np.append(imp_s,1)
imp_t0 = np.append(imp_t0,start_t)
current_t = start_t + delta_t
imp_t1 = np.append(imp_t1,current_t)
imp_x0 = np.append(imp_x0,start_x)
current_x = (end_t-current_t)/(end_t-start_t)*(start_x+delta_x)+(current_t-start_t)/(end_t-start_t)*end_x
imp_x1 = np.append(imp_x1,current_x)
imp_y0 = np.append(imp_y0,start_y)
current_y = (end_t-current_t)/(end_t-start_t)*(start_y+delta_y)+(current_t-start_t)/(end_t-start_t)*end_y
imp_y1 = np.append(imp_y1,current_y)
start_x = current_x; start_y = current_y; start_t = current_t; start_s=1
counter = counter+1
if end_t>start_t and check2==0:
sp = mov1/delta_t
t_need = mov2/sp
imp_s = np.append(imp_s,1)
imp_t0 = np.append(imp_t0,start_t)
current_t = start_t + t_need
imp_t1 = np.append(imp_t1,current_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,end_y)
start_x = end_x; start_y = end_y; start_t = current_t; start_s=1
counter = counter+1
else:
#print(3)
weight = K1(method,start_t,start_x,start_y,pause_table)
normalize_w = (weight+1e-5)/sum(weight+1e-5)
pause_index = np.random.choice(pause_table.shape[0], p=normalize_w)
delta_t = (pause_table[pause_index,6]-pause_table[pause_index,3])*multiplier(end_t-start_t)
if start_t + delta_t < end_t:
imp_s = np.append(imp_s,2)
imp_t0 = np.append(imp_t0,start_t)
current_t = start_t + delta_t
imp_t1 = np.append(imp_t1,current_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,start_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,start_y)
start_t = current_t
start_s = 2
counter = counter+1
else:
imp_s = np.append(imp_s,1)
imp_t0 = np.append(imp_t0,start_t)
imp_t1 = np.append(imp_t1,end_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,end_y)
start_t = end_t
if direction == 'backward':
direction = ''
I1 = I_flight(method,end_t,end_x,end_y,start_t,start_x,start_y,BV_set,switch)
if (sum(I1==1)==switch and end_s==2) or (sum(I1==0)<switch and end_s==1):
#print(4)
weight = K1(method,end_t,end_x,end_y,flight_table)
normalize_w = (weight+1e-5)/sum(weight+1e-5)
flight_index = np.random.choice(flight_table.shape[0], p=normalize_w)
delta_x = -(flight_table[flight_index,4]-flight_table[flight_index,1])
delta_y = -(flight_table[flight_index,5]-flight_table[flight_index,2])
delta_t = flight_table[flight_index,6]-flight_table[flight_index,3]
if(start_t + delta_t > end_t):
temp = delta_t
delta_t = end_t-start_t
delta_x = delta_x*delta_t/temp
delta_y = delta_y*delta_t/temp
delta_x,delta_y = adjust_direction(delta_x,delta_y,end_x,end_y,start_x,start_y,delta_x_b,delta_y_b)
delta_x_b,delta_y_b = delta_x,delta_y
try_t = end_t - delta_t
try_x = (end_t-try_t)/(end_t-start_t+1e-5)*start_x+(try_t-start_t)/(end_t-start_t+1e-5)*(end_x+delta_x)
try_y = (end_t-try_t)/(end_t-start_t+1e-5)*start_y+(try_t-start_t)/(end_t-start_t+1e-5)*(end_y+delta_y)
mov1 = great_circle_dist(try_x,try_y,end_x,end_y)
mov2 = great_circle_dist(end_x,end_y,start_x,start_y)
check1 = checkbound(try_x,try_y,mis_table[i,0],mis_table[i,1],mis_table[i,3],mis_table[i,4])
check2 = (mov1<mov2)*1
if end_t>start_t and check1==1 and check2==1:
imp_s = np.append(imp_s,1)
imp_t1 = np.append(imp_t1,end_t)
current_t = end_t - delta_t
imp_t0 = np.append(imp_t0,current_t)
imp_x1 = np.append(imp_x1,end_x)
current_x = (end_t-current_t)/(end_t-start_t)*start_x+(current_t-start_t)/(end_t-start_t)*(end_x+delta_x)
imp_x0 = np.append(imp_x0,current_x)
imp_y1 = np.append(imp_y1,end_y)
current_y = (end_t-current_t)/(end_t-start_t)*start_y+(current_t-start_t)/(end_t-start_t)*(end_y+delta_y)
imp_y0 = np.append(imp_y0,current_y)
end_x = current_x; end_y = current_y; end_t = current_t; end_s = 1
counter = counter+1
if end_t>start_t and check2==0:
sp = mov1/delta_t
t_need = mov2/sp
imp_s = np.append(imp_s,1)
imp_t1 = np.append(imp_t1,end_t)
current_t = end_t - t_need
imp_t0 = np.append(imp_t0,current_t)
imp_x1 = np.append(imp_x1,end_x)
imp_x0 = np.append(imp_x0,start_x)
imp_y1 = np.append(imp_y1,end_y)
imp_y0 = np.append(imp_y0,start_y)
end_x = start_x; end_y = start_y; end_t = current_t; end_s = 1
counter = counter+1
else:
#print(5)
weight = K1(method,end_t,end_x,end_y,pause_table)
normalize_w = (weight+1e-5)/sum(weight+1e-5)
pause_index = np.random.choice(pause_table.shape[0], p=normalize_w)
delta_t = (pause_table[pause_index,6]-pause_table[pause_index,3])*multiplier(end_t-start_t)
if start_t + delta_t < end_t:
imp_s = np.append(imp_s,2)
imp_t1 = np.append(imp_t1,end_t)
current_t = end_t - delta_t
imp_t0 = np.append(imp_t0,current_t)
imp_x0 = np.append(imp_x0,end_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,end_y)
imp_y1 = np.append(imp_y1,end_y)
end_t = current_t
end_s = 2
counter = counter+1
else:
imp_s = np.append(imp_s,1)
imp_t1 = np.append(imp_t1,end_t)
imp_t0 = np.append(imp_t0,start_t)
imp_x0 = np.append(imp_x0,start_x)
imp_x1 = np.append(imp_x1,end_x)
imp_y0 = np.append(imp_y0,start_y)
imp_y1 = np.append(imp_y1,end_y)
end_t = start_t
imp_table=np.stack([imp_s,imp_x0,imp_y0,imp_t0,imp_x1,imp_y1,imp_t1], axis=1)
imp_table = imp_table[imp_table[:,3].argsort()].astype(float)
return imp_table
def Imp2traj(imp_table,MobMat,itrvl=10,r=None,w=None,h=None):
  """Tidy the imputed rows and merge them back with the observed trajectory.

  For each missing interval of MobMat, collapses consecutive imputed rows of
  the same state (1 = flight, 2 = pause) and thins multi-row flight chains
  down to their change points via repeated calls to ExistKnot.  The result
  is stacked with MobMat, sorted by start time, and rows with non-positive
  duration are dropped.

  Parameters:
    imp_table: rows of [state, x0, y0, t0, x1, y1, t1] as produced by
      ImputeGPS (see the np.stack at the end of that function).
    MobMat: observed mobility matrix with the same leading column layout.
    itrvl: preprocessing interval length in seconds.
    r, w, h: knot-detection radii; default to sqrt(itrvl) when None.

  Returns:
    The combined, time-sorted trajectory; imputed rows carry a trailing 0
    flag column (MobMat rows are assumed to already carry a matching
    column — TODO confirm MobMat's width here).
  """
  sys.stdout.write("Tidying up the trajectories..." + '\n')
  if r is None:
    #r = itrvl
    r = np.sqrt(itrvl)
  if h is None:
    h = r
  if w is None:
    w = r
  # Rebuild the missing-interval table from gaps in MobMat: a gap exists
  # wherever the end time of row i differs from the start time of row i+1.
  mis_table = np.zeros(8)
  for i in range(np.shape(MobMat)[0]-1):
    if MobMat[i+1,3]!=MobMat[i,6]:
      ## also record if it's flight/pause before and after the missing interval
      mov = np.array([MobMat[i,4],MobMat[i,5],MobMat[i,6],MobMat[i+1,1],MobMat[i+1,2],MobMat[i+1,3],MobMat[i,0],MobMat[i+1,0]])
      mis_table = np.vstack((mis_table,mov))
  mis_table = np.delete(mis_table,0,0)  # drop the all-zero seed row
  traj = []
  for k in range(mis_table.shape[0]):
    # Imputed rows that fall entirely inside the k-th missing interval.
    index = (imp_table[:,3]>=mis_table[k,2])*(imp_table[:,6]<=mis_table[k,5])
    temp = imp_table[index,:]
    a = 0
    b = 1
    # Two-pointer scan: [a, b) grows over a maximal run of identical state.
    while a < temp.shape[0]:
      if b < temp.shape[0]:
        if temp[b,0] == temp[a,0]:
          b = b + 1
      if b==temp.shape[0] or temp[min(b,temp.shape[0]-1),0]!=temp[a,0]:
        start = a
        end = b-1
        a = b
        b = b+1
        if temp[start,0]==2:
          # A run of pauses collapses into a single pause row.
          traj.append([2,temp[start,1],temp[start,2],temp[start,3],temp[end,4],temp[end,5],temp[end,6]])
        elif end == start:
          # A lone flight row passes through unchanged.
          traj.append([1,temp[start,1],temp[start,2],temp[start,3],temp[end,4],temp[end,5],temp[end,6]])
        else:
          # Multi-row flight run: build the vertex list (start point plus all
          # end points) with a trailing row-index column, then split it at
          # detected knots until no segment contains another knot.
          mat = np.vstack((temp[start,1:4],temp[np.arange(start,end+1),4:7]))
          mat = np.append(mat,np.arange(0,mat.shape[0]).reshape(mat.shape[0],1),1)
          complete = 0
          knots = [0,mat.shape[0]-1]
          while complete == 0:
            mat_list = []
            for i in range(len(knots)-1):
              # NOTE(review): the upper bound min(knots[i+1]+1, mat.shape[0]-1)
              # can clip the last vertex of the final segment — confirm whether
              # mat.shape[0] (not -1) was intended.
              mat_list.append(mat[knots[i]:min(knots[i+1]+1,mat.shape[0]-1),:])
            knot_yes = np.empty(len(mat_list))
            knot_pos = np.empty(len(mat_list))
            for i in range(len(mat_list)):
              knot_yes[i] , knot_pos[i] = ExistKnot(mat_list[i],r,w)
            if sum(knot_yes)==0:
              complete = 1
            else:
              for i in range(len(mat_list)):
                if knot_yes[i]==1:
                  knots.append(int((mat_list[i])[int(knot_pos[i]),3]))
              knots.sort()
          out = []  # NOTE(review): unused local
          # Emit one flight row per consecutive pair of knots.
          for j in range(len(knots)-1):
            traj.append([1,mat[knots[j],0],mat[knots[j],1],mat[knots[j],2],mat[knots[j+1],0],mat[knots[j+1],1],mat[knots[j+1],2]])
  traj = np.array(traj)
  # Flag column: 0 marks imputed rows in the combined output.
  traj = np.hstack((traj,np.zeros((traj.shape[0],1))))
  full_traj = np.vstack((traj,MobMat))
  float_traj = full_traj[full_traj[:,3].argsort()].astype(float)
  # Drop degenerate rows whose duration (t1 - t0) is not positive.
  final_traj = float_traj[float_traj[:,6]-float_traj[:,3]>0,:]
  return(final_traj)
def num_sig_places(data,dist):
  """Greedily cluster trajectory rows into significant places.

  Each row of `data` is assigned to the nearest existing cluster center,
  or opens a new cluster when the nearest center is farther than `dist`.
  Centers are maintained as the running mean of their members.

  Parameters:
    data: 2-D array whose columns 1/2 hold coordinates and whose columns
      3/6 hold each row's start/end timestamps.
    dist: distance threshold (same units as great_circle_dist) beyond
      which a row starts a new cluster.

  Returns:
    loc_x, loc_y: per-cluster center coordinates (running means).
    num_xy: number of rows merged into each cluster.
    t_xy: total dwell time (sum of t1 - t0) accumulated per cluster.
  """
  loc_x = []; loc_y = []; num_xy=[]; t_xy = []
  for i in range(data.shape[0]):
    x = data[i,1]
    y = data[i,2]
    duration = data[i,6]-data[i,3]
    # Distance from this row to every existing cluster center.
    d = [great_circle_dist(x,y,loc_x[j],loc_y[j]) for j in range(len(loc_x))]
    if len(d)==0 or min(d)>dist:
      # No cluster is close enough (or none exist yet): open a new one.
      # This unifies the previously duplicated first-row / far-row branches.
      loc_x.append(x)
      loc_y.append(y)
      num_xy.append(1)
      t_xy.append(duration)
    else:
      # Merge into the nearest cluster and update its running mean center.
      index = d.index(min(d))
      loc_x[index] = (loc_x[index]*num_xy[index]+x)/(num_xy[index]+1)
      loc_y[index] = (loc_y[index]*num_xy[index]+y)/(num_xy[index]+1)
      num_xy[index] = num_xy[index] + 1
      t_xy[index] = t_xy[index]+duration
  return loc_x,loc_y,num_xy,t_xy
# -
gps_path = "C:/Users/glius/Downloads/abdominal_data/e84ot6lw/gps"
# List the raw GPS files, stripping the 2-character "."-prefixed marker
# from hidden entries (same effect as the original in-place loop).
file_list = os.listdir(gps_path)
file_list = [name[2:] if name[0] == "." else name for name in file_list]
file_path = np.array([gps_path + "/" + name for name in file_list])
len(file_path)
# Hyperparameters for the imputation pipeline.  Of these, only sigma2, tol,
# d and switch are used in this part of the file (BV_select / ImputeGPS
# calls below); the rest are presumably kernel/weight parameters consumed
# elsewhere — TODO confirm against the function definitions.
l1 = 60*60*24*10  # 10 days in seconds
l2 = 60*60*24*30  # 30 days in seconds
l3 = 0.002
g = 200
a1 = 5
a2 = 1
b1 = 0.3
b2 = 0.2
b3 = 0.5
d = 500        # passed to BV_select below
sigma2 = 0.01  # passed to BV_select below
tol = 0.05     # passed to BV_select below
num = 10
switch = 3     # number of Bernoulli draws used by ImputeGPS's I_flight check
# Benchmark: grow the input one week (24*7 hourly files) at a time and time
# (a) preprocessing into a mobility matrix and (b) BV selection + imputation.
preprocess_t = []  # wall-clock seconds for GPS2MobMat + InferMobMat
compute_t = []     # mean wall-clock seconds for BV_select + ImputeGPS
for i in range(5):
  index = np.arange(0,24*7*(i+1))
  start_time1 = time.time()
  obs = GPS2MobMat(file_path[index],itrvl=10,accuracylim=51, r=None, w=None,h=None)
  MobMat = InferMobMat(obs,itrvl=10,r=None)
  preprocess_t.append(time.time() - start_time1)
  temp_t = np.zeros(5)
  # NOTE(review): temp_t has 5 slots but only 2 are filled, so np.mean is
  # dragged toward 0 by the zero entries — confirm whether range(5) or
  # np.zeros(2) was intended.
  for j in range(2):
    start_time2 = time.time()
    BV_set = BV_select(MobMat,sigma2,tol,d)["BV_set"]
    imp_table= ImputeGPS(MobMat,BV_set,"GLC",switch)
    temp_t[j] = time.time() - start_time2
  compute_t.append(np.mean(temp_t))
compute_t
preprocess_t
# Hard-coded timing results (seconds) recorded from an earlier 20-week run;
# these deliberately overwrite the values measured in the loop above.
compute_t = [5.243689393997192,
 13.94641079902649,
 25.331879949569704,
 37.00141706466675,
 45.2741819858551,
 56.242164850234985,
 66.67971558570862,
 76.38969874382019,
 87.24460935592651,
 98.77756476402283,
 108.99606876373291,
 121.2070599079132,
 133.85473561286926,
 146.8013765335083,
 160.8309898853302,
 169.48622207641603,
 184.88059425354004,
 198.271435546875,
 211.11526865959166,
 218.58722925186157]
old_t = [0.882,2.924,6.792, 11.994, 21.464, 29.314 ,42.542 ,49.352, 64.252, 84.656, 88.664,
         113.550, 157.490, 185.094, 194.932, 230.410, 289.628, 307.910, 344.132, 388.406]
np.save("new_t",compute_t)
# Reference timings of the older method on the same (old_t1, 20 weeks) and
# an extended (old_t2, 35 weeks) workload.
old_t1 = [0.882,2.924,6.792, 11.994, 21.464, 29.314 ,42.542 ,49.352, 64.252, 84.656, 88.664,
         113.550, 157.490, 185.094, 194.932, 230.410, 289.628, 307.910, 344.132, 388.406]
old_t2 = [1.0918,3.6704,8.2914,14.5872,24.8864,35.1690,50.8976,58.7258,77.6838,100.8472,119.5306,150.7366,180.1588,225.8426,
         274.2410, 305.4606, 355.6484, 427.0330, 473.9676, 516.1018, 556.3406, 591.4720, 649.6008, 691.4536, 760.8352,
         822.7716, 870.9528, 949.2512, 1033.0986, 1132.9568, 1232.7476, 1343.8812, 1465.5870, 1700.4200, 1840.3500]
# Per-week increments of the cumulative times (equivalent to np.diff(a)).
a = np.array(compute_t)
b = a[np.arange(1,20)]- a[np.arange(0,19)]
b
[np.mean(b),np.std(b)]
# Extrapolate 15 more weeks by sampling increments ~ N(mean(b), std(b)).
latest = compute_t[-1]
for i in range(15):
  t = np.random.normal(np.mean(b),np.std(b),1)[0]
  latest = latest + t
  compute_t.append(latest)
np.mean(np.array(old_t2)[np.arange(20)]/np.array(old_t1))
# Convert to minutes.  1.2584... presumably rescales the new timings by the
# mean old_t2/old_t1 ratio computed on the line above — TODO confirm.
a = np.array(compute_t)*1.2584553857802412/60
b = np.array(old_t2)/60
c = np.concatenate(([a[0]],a[1:]-a[:-1]))  # per-week minutes, new method
# NOTE(review): this rebinds d, clobbering the d=500 hyperparameter above.
d = np.concatenate(([b[0]],b[1:]-b[:-1]))  # per-week minutes, old method
# +
# Two-panel cost comparison: per-week time (left) and cumulative time
# (right), Liu-Onnela (new) vs Barnett-Onnela (old).
plt.figure(figsize=(8,3))
plt.subplot(1, 2, 1)
plt.plot(np.arange(1,36),c,label = "Liu-Onnela.")
# NOTE(review): this panel plots b (cumulative old-method minutes) against
# the per-week series c — the per-week series d looks intended; confirm.
plt.plot(np.arange(1,36),b,"r--",label = "Barnett-Onnela.")
plt.xlabel('Number of weeks')
plt.ylabel('Computational time per week in minutes')
#plt.xticks([2,4,6,8,10,12,14,16,18,20])
plt.legend(loc='upper left', borderaxespad=0.3)
plt.subplot(1, 2, 2)
plt.plot(np.arange(1,36),a,label = "Liu-Onnela.")
plt.plot(np.arange(1,36),b,"r--",label = "Barnett-Onnela.")
plt.xlabel('Number of weeks')
plt.ylabel('Computational time in minutes')
#plt.xticks([2,4,6,8,10,12,14,16,18,20])
plt.legend(loc='upper left', borderaxespad=0.3)
plt.savefig("compute_t.pdf")
# -
# Single-panel version of the cumulative-time comparison; overwrites the
# same output file ("compute_t.pdf") as the two-panel figure above.
plt.figure(figsize=(6,4))
plt.plot(np.arange(1,36),a,label = "Liu-Onnela.")
plt.plot(np.arange(1,36),b,"r--",label = "Barnett-Onnela.")
plt.xlabel('Number of weeks')
plt.ylabel('Computational time in minutes')
#plt.xticks([2,4,6,8,10,12,14,16,18,20])
plt.legend(loc='upper left', borderaxespad=0.3)
plt.savefig("compute_t.pdf")
# Load the complete ground-truth trace and the subsampled observed trace.
fulldata = pd.read_csv("C:/Users/glius/Google Drive/Thesis/paper 1/rawdata.csv")
fulldata.timestamp = fulldata.timestamp  # NOTE(review): no-op; a scaling step (e.g. *1000) may have been removed — confirm
fulldata.head(10)
fulldata = np.array(fulldata)
obsdata = pd.read_csv("C:/Users/glius/Google Drive/Thesis/paper 1/obsdata.csv")
obsdata.timestamp = obsdata.timestamp*1000  # seconds -> milliseconds, as the averaging loop divides by 1000
obsdata.head(10)
data = obsdata
# Preprocessing radii: here r defaults to itrvl (the sqrt variant is
# commented out, unlike Imp2traj's default).
itrvl = 10
r=None; w=None; h=None
if r is None:
  r = itrvl
  #r = np.sqrt(itrvl)
if h is None:
  h = r
if w is None:
  w = np.mean(data.accuracy)
# +
# Average raw GPS samples into itrvl-second bins (state 1 rows) and insert
# explicit gap markers (state 4 rows) where samples are missing, then run
# flight/pause extraction on each gap-free stretch.
t_start = np.array(data.timestamp)[0]/1000
t_end = np.array(data.timestamp)[-1]/1000
avgmat = np.empty([int(np.ceil((t_end-t_start)/itrvl))+2,4])
IDam = 0   # next row index to write in avgmat
count = 0  # number of rows actually written
nextline=[1,t_start+itrvl/2,data.iloc[0,1],data.iloc[0,2]]
numitrvl=1
for i in np.arange(1,data.shape[0]):
  if data.iloc[i,0]/1000 < t_start+itrvl:
    # Sample falls in the current bin: accumulate for the running mean.
    nextline[2]=nextline[2]+data.iloc[i,1]
    nextline[3]=nextline[3]+data.iloc[i,2]
    numitrvl=numitrvl+1
  else:
    # Close the current bin (finish the mean) and emit it.
    nextline[2]=nextline[2]/numitrvl
    nextline[3]=nextline[3]/numitrvl
    avgmat[IDam,:]=nextline
    count=count+1
    IDam=IDam+1
    # Emit a single state-4 row covering any skipped whole bins.
    nummiss=int(np.floor((data.iloc[i,0]/1000-(t_start+itrvl))/itrvl))
    if nummiss>0:
      avgmat[IDam,:] = [4,t_start+itrvl,t_start+itrvl*(nummiss+1),None]
      count=count+1
      IDam=IDam+1
    t_start=t_start+itrvl*(nummiss+1)
    nextline[0]=1
    nextline[1]=t_start+itrvl/2
    nextline[2]=data.iloc[i,1]
    nextline[3]=data.iloc[i,2]
    numitrvl=1
avgmat = avgmat[0:count,:]
ID1 = avgmat[:,0]==1  # NOTE(review): unused below
outmat = np.zeros(7)  # seed row, deleted after stacking
curind = 0
sys.stdout.write("Extract flights and pauses ..."+'\n')
for i in range(avgmat.shape[0]):
  if avgmat[i,0]==4:
    #print(curind,i)
    # Extract flights/pauses from the gap-free stretch before this marker.
    temp = ExtractFlights(avgmat[np.arange(curind,i),:],itrvl,r,w,h)
    outmat = np.vstack((outmat,temp))
    curind=i+1
if curind<avgmat.shape[0]:
  #print(np.arange(curind,avgmat.shape[0]))
  # Trailing stretch after the final gap marker.
  temp = ExtractFlights(avgmat[np.arange(curind,avgmat.shape[0]),:],itrvl,r,w,h)
  outmat = np.vstack((outmat,temp))
obs = np.delete(outmat,0,0)
MobMat = InferMobMat(obs,itrvl=10,r=None)
# -
# Run imputation and split observed/full/imputed trajectories into three
# days.  1554697680 is presumably the study's start epoch (seconds) —
# TODO confirm against the raw data.
BV_set = BV_select(MobMat,sigma2,tol,d)["BV_set"]
imp_table= ImputeGPS(MobMat,BV_set,"GLC",switch)
traj = Imp2traj(imp_table,MobMat)
day1_obs = MobMat[MobMat[:,3]<1554697680+24*60*60,:]
day2_obs = MobMat[(MobMat[:,3]>=1554697680+24*60*60)*(MobMat[:,3]<1554697680+48*60*60),:]
day3_obs = MobMat[MobMat[:,3]>=1554697680+48*60*60,:]
day1_full = fulldata[fulldata[:,0]<1554697680+24*60*60,:]
day2_full = fulldata[(fulldata[:,0]>=1554697680+24*60*60)*(fulldata[:,0]<1554697680+48*60*60),:]
day3_full = fulldata[fulldata[:,0]>=1554697680+48*60*60,:]
day1_imp = traj[traj[:,3]<1554697680+24*60*60,:]
day2_imp = traj[(traj[:,3]>=1554697680+24*60*60)*(traj[:,3]<1554697680+48*60*60),:]
day3_imp = traj[traj[:,3]>=1554697680+48*60*60,:]
# Persist the per-day splits for the figures below.
np.save('day1_obs.npy',day1_obs)
np.save('day1_full.npy',day1_full)
np.save('day1_imp.npy',day1_imp)
np.save('day2_obs.npy',day2_obs)
np.save('day2_full.npy',day2_full)
np.save('day2_imp.npy',day2_imp)
np.save('day3_obs.npy',day3_obs)
np.save('day3_full.npy',day3_full)
np.save('day3_imp.npy',day3_imp)
# +
# Day 1: observed (left), imputed (middle), full ground truth (right).
# State 1 rows are drawn as line segments (flights), state 2 as '+' (pauses).
plt.figure(figsize=(11,3))
plt.subplot(1, 3, 1)
for i in range(np.shape(day1_obs)[0]):
  if day1_obs[i,0]==1:
    plt.plot([day1_obs[i,1],day1_obs[i,4]], [day1_obs[i,2], day1_obs[i,5]], 'k-', lw=1)
  if day1_obs[i,0]==2:
    plt.plot(day1_obs[i,1],day1_obs[i,2],"r+",ms=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 2)
for i in range(np.shape(day1_imp)[0]):
  if day1_imp[i,0]==1:
    plt.plot([day1_imp[i,1],day1_imp[i,4]], [day1_imp[i,2], day1_imp[i,5]], 'k-', lw=1)
  if day1_imp[i,0]==2:
    plt.plot(day1_imp[i,1],day1_imp[i,2],"r+",ms=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 3)
for i in range(np.shape(day1_full)[0]-1):
  plt.plot([day1_full[i,1],day1_full[i+1,1]], [day1_full[i,2], day1_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085,step=0.01))
plt.tight_layout()
# -
# +
# Day 2: observed / imputed / full, same layout and conventions as day 1.
plt.figure(figsize=(11,3))
plt.subplot(1, 3, 1)
for i in range(np.shape(day2_obs)[0]):
  if day2_obs[i,0]==1:
    plt.plot([day2_obs[i,1],day2_obs[i,4]], [day2_obs[i,2], day2_obs[i,5]], 'k-', lw=1)
  if day2_obs[i,0]==2:
    plt.plot(day2_obs[i,1],day2_obs[i,2],"r+",ms=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 2)
for i in range(np.shape(day2_imp)[0]):
  if day2_imp[i,0]==1:
    plt.plot([day2_imp[i,1],day2_imp[i,4]], [day2_imp[i,2], day2_imp[i,5]], 'k-', lw=1)
  if day2_imp[i,0]==2:
    plt.plot(day2_imp[i,1],day2_imp[i,2],"r+",ms=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 3)
for i in range(np.shape(day2_full)[0]-1):
  plt.plot([day2_full[i,1],day2_full[i+1,1]], [day2_full[i,2], day2_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.tight_layout()
# -
# +
# Day 3: observed / imputed / full.  Note the observed panel uses default-
# color '+' markers at ms=10, unlike the other days' "r+" at ms=1.
plt.figure(figsize=(12,2.5))
plt.subplot(1, 3, 1)
for i in range(np.shape(day3_obs)[0]):
  if day3_obs[i,0]==1:
    plt.plot([day3_obs[i,1],day3_obs[i,4]], [day3_obs[i,2], day3_obs[i,5]], 'k-', lw=1)
  if day3_obs[i,0]==2:
    plt.plot(day3_obs[i,1],day3_obs[i,2],"+",ms=10)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 2)
for i in range(np.shape(day3_imp)[0]):
  if day3_imp[i,0]==1:
    plt.plot([day3_imp[i,1],day3_imp[i,4]], [day3_imp[i,2], day3_imp[i,5]], 'k-', lw=1)
  if day3_imp[i,0]==2:
    plt.plot(day3_imp[i,1],day3_imp[i,2],"r+",ms=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.subplot(1, 3, 3)
for i in range(np.shape(day3_full)[0]-1):
  plt.plot([day3_full[i,1],day3_full[i+1,1]], [day3_full[i,2], day3_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.tight_layout()
# -
# +
# Publication figure (real data): 3x3 grid, rows = days 1-3, columns =
# observed / imputed / full.  Panels are lettered (a)-(i); saved as
# real_traj.pdf.
plt.figure(figsize=(11,8.5))
plt.subplot(3, 3, 1)
for i in range(np.shape(day1_obs)[0]):
  if day1_obs[i,0]==1:
    plt.plot([day1_obs[i,1],day1_obs[i,4]], [day1_obs[i,2], day1_obs[i,5]], 'k-', lw=1)
  if day1_obs[i,0]==2:
    plt.plot(day1_obs[i,1],day1_obs[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.text(42.32,-71.08,'(a)',fontsize = 16)
plt.ylabel('longitude')
# Proxy artists for a single shared flight/pause legend.
custom_lines = [Line2D([], [], color="black", lw=1,label = "flight"),
                Line2D([], [], color="r", linestyle = "None", marker = "+",markersize = 10, label="pause")]
plt.legend(handles=custom_lines, loc = "upper left")
plt.subplot(3, 3, 2)
for i in range(np.shape(day1_imp)[0]):
  if day1_imp[i,0]==1:
    plt.plot([day1_imp[i,1],day1_imp[i,4]], [day1_imp[i,2], day1_imp[i,5]], 'k-', lw=1)
  if day1_imp[i,0]==2:
    plt.plot(day1_imp[i,1],day1_imp[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.text(42.32,-71.08,'(b)',fontsize = 16)
plt.subplot(3, 3, 3)
for i in range(np.shape(day1_full)[0]-1):
  plt.plot([day1_full[i,1],day1_full[i+1,1]], [day1_full[i,2], day1_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085,step=0.01))
plt.text(42.32,-71.08,'(c)',fontsize = 16)
plt.subplot(3, 3, 4)
for i in range(np.shape(day2_obs)[0]):
  if day2_obs[i,0]==1:
    plt.plot([day2_obs[i,1],day2_obs[i,4]], [day2_obs[i,2], day2_obs[i,5]], 'k-', lw=1)
  if day2_obs[i,0]==2:
    plt.plot(day2_obs[i,1],day2_obs[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.ylabel('longitude')
plt.text(42.32,-71.08,'(d)',fontsize = 16)
plt.subplot(3, 3, 5)
for i in range(np.shape(day2_imp)[0]):
  if day2_imp[i,0]==1:
    plt.plot([day2_imp[i,1],day2_imp[i,4]], [day2_imp[i,2], day2_imp[i,5]], 'k-', lw=1)
  if day2_imp[i,0]==2:
    plt.plot(day2_imp[i,1],day2_imp[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.text(42.32,-71.08,'(e)',fontsize = 16)
plt.subplot(3, 3, 6)
for i in range(np.shape(day2_full)[0]-1):
  plt.plot([day2_full[i,1],day2_full[i+1,1]], [day2_full[i,2], day2_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.text(42.32,-71.08,'(f)',fontsize = 16)
plt.subplot(3, 3, 7)
for i in range(np.shape(day3_obs)[0]):
  if day3_obs[i,0]==1:
    plt.plot([day3_obs[i,1],day3_obs[i,4]], [day3_obs[i,2], day3_obs[i,5]], 'k-', lw=1)
  if day3_obs[i,0]==2:
    plt.plot(day3_obs[i,1],day3_obs[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.xlabel('latitude')
plt.ylabel('longitude')
plt.text(42.32,-71.08,'(g)',fontsize = 16)
plt.subplot(3, 3, 8)
for i in range(np.shape(day3_imp)[0]):
  if day3_imp[i,0]==1:
    plt.plot([day3_imp[i,1],day3_imp[i,4]], [day3_imp[i,2], day3_imp[i,5]], 'k-', lw=1)
  if day3_imp[i,0]==2:
    plt.plot(day3_imp[i,1],day3_imp[i,2],"r+",ms=5)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.xlabel('latitude')
plt.text(42.32,-71.08,'(h)',fontsize = 16)
plt.subplot(3, 3, 9)
for i in range(np.shape(day3_full)[0]-1):
  plt.plot([day3_full[i,1],day3_full[i+1,1]], [day3_full[i,2], day3_full[i+1,2]], 'k-', lw=1)
plt.xticks(np.arange(42.33, 42.38, step=0.01))
plt.yticks(np.arange(-71.125, -71.085, step=0.01))
plt.xlabel('latitude')
plt.text(42.32,-71.08,'(i)',fontsize = 16)
plt.tight_layout()
plt.savefig("real_traj.pdf")
# +
# Load three simulated ground-truth days and map the simulator's local
# meters-like coordinates into lat/lon around (42, -71); the divisors are
# presumably meters-per-0.1-degree at that latitude — TODO confirm.
# dayN_full0 keeps a 20-second-subsampled copy for plotting; timestamps are
# shifted so day N starts at 1554697680 + (N-1)*86400.
day1_full = np.array(pd.read_csv("day1_full.csv"))
day1_full[:,1] = day1_full[:,1]/11119.5*0.1+42
day1_full[:,2] = day1_full[:,2]/8263.3*0.1-71
day1_full0 = day1_full[np.arange(0,86400,step=20),:]
day1_full[:,0] = day1_full[:,0] + 1554697680
day2_full = np.array(pd.read_csv("day2_full.csv"))
day2_full[:,1] = day2_full[:,1]/11119.5*0.1+42
day2_full[:,2] = day2_full[:,2]/8263.3*0.1-71
day2_full0 = day2_full[np.arange(0,86400,step=20),:]
day2_full[:,0] = day2_full[:,0] + 1554697680 + 86400
day3_full = np.array(pd.read_csv("day3_full.csv"))
day3_full[:,1] = day3_full[:,1]/11119.5*0.1+42
day3_full[:,2] = day3_full[:,2]/8263.3*0.1-71
day3_full0 = day3_full[np.arange(0,86400,step=20),:]
day3_full[:,0] = day3_full[:,0] + 1554697680 + 86400*2
# -
# Simulate an observation pattern: keep the first 100 rows, then a 120-row
# (2-minute) burst every 90 minutes starting at a random offset.
all_data = np.vstack((day1_full,day2_full,day3_full))
data = all_data[:100,:]
for i in np.arange(np.random.randint(200,1800,1)[0],all_data.shape[0],90*60):
  # NOTE(review): np.arange(i,i+120) can run past the end of all_data on the
  # final burst and raise IndexError — confirm the data length guarantees
  # i+120 <= all_data.shape[0].
  data = np.vstack((data,all_data[np.arange(i,i+120),:]))
data[:,0] = data[:,0]*1000  # seconds -> milliseconds for the averaging loop
data[1:,0] - data[:-1,0]
data = pd.DataFrame(data, columns=['timestamp','latitude','longitude','accuracy'])
itrvl = 10
r=None; w=None; h=None
if r is None:
  r = itrvl
  #r = np.sqrt(itrvl)
if h is None:
  h = r
if w is None:
  w = np.mean(data.accuracy)
# +
# Inline copy of the interval-averaging + flight/pause-extraction step used
# on the real data above, applied to the simulated observations.
t_start = np.array(data.timestamp)[0]/1000
t_end = np.array(data.timestamp)[-1]/1000
avgmat = np.empty([int(np.ceil((t_end-t_start)/itrvl))+2,4])
IDam = 0   # next row index to write in avgmat
count = 0  # number of rows actually written
nextline=[1,t_start+itrvl/2,data.iloc[0,1],data.iloc[0,2]]
numitrvl=1
for i in np.arange(1,data.shape[0]):
  if data.iloc[i,0]/1000 < t_start+itrvl:
    # Sample falls in the current bin: accumulate for the running mean.
    nextline[2]=nextline[2]+data.iloc[i,1]
    nextline[3]=nextline[3]+data.iloc[i,2]
    numitrvl=numitrvl+1
  else:
    nextline[2]=nextline[2]/numitrvl
    nextline[3]=nextline[3]/numitrvl
    avgmat[IDam,:]=nextline
    count=count+1
    IDam=IDam+1
    # Emit a single state-4 gap row covering any skipped whole bins.
    nummiss=int(np.floor((data.iloc[i,0]/1000-(t_start+itrvl))/itrvl))
    if nummiss>0:
      avgmat[IDam,:] = [4,t_start+itrvl,t_start+itrvl*(nummiss+1),None]
      count=count+1
      IDam=IDam+1
    t_start=t_start+itrvl*(nummiss+1)
    nextline[0]=1
    nextline[1]=t_start+itrvl/2
    nextline[2]=data.iloc[i,1]
    nextline[3]=data.iloc[i,2]
    numitrvl=1
avgmat = avgmat[0:count,:]
ID1 = avgmat[:,0]==1  # NOTE(review): unused below
outmat = np.zeros(7)  # seed row, deleted after stacking
curind = 0
sys.stdout.write("Extract flights and pauses ..."+'\n')
for i in range(avgmat.shape[0]):
  if avgmat[i,0]==4:
    #print(curind,i)
    temp = ExtractFlights(avgmat[np.arange(curind,i),:],itrvl,r,w,h)
    outmat = np.vstack((outmat,temp))
    curind=i+1
if curind<avgmat.shape[0]:
  #print(np.arange(curind,avgmat.shape[0]))
  temp = ExtractFlights(avgmat[np.arange(curind,avgmat.shape[0]),:],itrvl,r,w,h)
  outmat = np.vstack((outmat,temp))
obs = np.delete(outmat,0,0)
MobMat = InferMobMat(obs,itrvl=10,r=None)
# -
BV_set = BV_select(MobMat,sigma2,tol,d)["BV_set"]
# NOTE(review): this call passes MobMat itself as the BV_set argument (and
# switch=2, not the global switch=3) — confirm BV_set was not intended here.
imp_table= ImputeGPS(MobMat,MobMat,"GLC",2)
traj = Imp2traj(imp_table,MobMat)
# Per-day imputed splits; the 600-second margin trims boundary artifacts.
day1_imp = traj[traj[:,6]<1554697680+86400-600,:]
day2_imp = traj[(traj[:,3]>=1554697680+86400)*(traj[:,6]<1554697680+86400*2-600),:]
day3_imp = traj[traj[:,3]>=1554697680+86400*2,:]
# Quick-look plots; the 10-row trim at each end drops edge segments.
for i in np.arange(10,np.shape(day1_imp)[0]-10):
  if day1_imp[i,0]==1:
    plt.plot([day1_imp[i,1],day1_imp[i,4]], [day1_imp[i,2], day1_imp[i,5]], 'k-', lw=1)
  if day1_imp[i,0]==2:
    plt.plot(day1_imp[i,1],day1_imp[i,2],"r+",ms=5)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
for i in np.arange(10,np.shape(day2_imp)[0]-10):
  if day2_imp[i,0]==1:
    plt.plot([day2_imp[i,1],day2_imp[i,4]], [day2_imp[i,2], day2_imp[i,5]], 'k-', lw=1)
  if day2_imp[i,0]==2:
    plt.plot(day2_imp[i,1],day2_imp[i,2],"r+",ms=5)
plt.title("Day 2, imputed")
for i in np.arange(10,np.shape(day3_imp)[0]-10):
  if day3_imp[i,0]==1:
    plt.plot([day3_imp[i,1],day3_imp[i,4]], [day3_imp[i,2], day3_imp[i,5]], 'k-', lw=1)
  if day3_imp[i,0]==2:
    plt.plot(day3_imp[i,1],day3_imp[i,2],"r+",ms=5)
plt.title("Day 3, imputed")
# +
# Von Mises simulation: load its observed trace, shift timestamps to the
# same study-start epoch, and rerun the averaging + extraction pipeline.
obsdata = pd.read_csv("C:/Users/glius/Google Drive/Thesis/paper 1/vonmises_obs.csv")
obsdata.timestamp = obsdata.timestamp*1000 + 1554697680000  # ms, epoch-shifted
data = obsdata
itrvl = 10
r=None; w=None; h=None
if r is None:
  r = itrvl
  #r = np.sqrt(itrvl)
if h is None:
  h = r
if w is None:
  w = np.mean(data.accuracy)
t_start = np.array(data.timestamp)[0]/1000
t_end = np.array(data.timestamp)[-1]/1000
avgmat = np.empty([int(np.ceil((t_end-t_start)/itrvl))+2,4])
IDam = 0
count = 0
nextline=[1,t_start+itrvl/2,data.iloc[0,1],data.iloc[0,2]]
numitrvl=1
# Average samples into itrvl-second bins; state-4 rows mark gaps (same
# logic as the two copies of this loop above).
for i in np.arange(1,data.shape[0]):
  if data.iloc[i,0]/1000 < t_start+itrvl:
    nextline[2]=nextline[2]+data.iloc[i,1]
    nextline[3]=nextline[3]+data.iloc[i,2]
    numitrvl=numitrvl+1
  else:
    nextline[2]=nextline[2]/numitrvl
    nextline[3]=nextline[3]/numitrvl
    avgmat[IDam,:]=nextline
    count=count+1
    IDam=IDam+1
    nummiss=int(np.floor((data.iloc[i,0]/1000-(t_start+itrvl))/itrvl))
    if nummiss>0:
      avgmat[IDam,:] = [4,t_start+itrvl,t_start+itrvl*(nummiss+1),None]
      count=count+1
      IDam=IDam+1
    t_start=t_start+itrvl*(nummiss+1)
    nextline[0]=1
    nextline[1]=t_start+itrvl/2
    nextline[2]=data.iloc[i,1]
    nextline[3]=data.iloc[i,2]
    numitrvl=1
avgmat = avgmat[0:count,:]
ID1 = avgmat[:,0]==1  # NOTE(review): unused below
outmat = np.zeros(7)
curind = 0
sys.stdout.write("Extract flights and pauses ..."+'\n')
for i in range(avgmat.shape[0]):
  if avgmat[i,0]==4:
    #print(curind,i)
    temp = ExtractFlights(avgmat[np.arange(curind,i),:],itrvl,r,w,h)
    outmat = np.vstack((outmat,temp))
    curind=i+1
if curind<avgmat.shape[0]:
  #print(np.arange(curind,avgmat.shape[0]))
  temp = ExtractFlights(avgmat[np.arange(curind,avgmat.shape[0]),:],itrvl,r,w,h)
  outmat = np.vstack((outmat,temp))
obs = np.delete(outmat,0,0)
MobMat = InferMobMat(obs,itrvl=10,r=None)
# Per-day observed splits (day 2 uses the end time in its upper bound).
day1_obs = MobMat[MobMat[:,3]<1554697680+86400,:]
day2_obs = MobMat[(MobMat[:,3]>=1554697680+86400)*(MobMat[:,6]<1554697680+86400*2),:]
day3_obs = MobMat[MobMat[:,3]>=1554697680+86400*2,:]
# -
# Persist the von Mises per-day splits (observed, 20s-subsampled truth,
# imputed) for the simulation figure below.
np.save('day1_obs_vonmise.npy',day1_obs)
np.save('day1_full_vonmise.npy',day1_full0)
np.save('day1_imp_vonmise.npy',day1_imp)
np.save('day2_obs_vonmise.npy',day2_obs)
np.save('day2_full_vonmise.npy',day2_full0)
np.save('day2_imp_vonmise.npy',day2_imp)
np.save('day3_obs_vonmise.npy',day3_obs)
np.save('day3_full_vonmise.npy',day3_full0)
np.save('day3_imp_vonmise.npy',day3_imp)
# +
# Publication figure (simulated data): 3x3 grid, rows = days 1-3, columns =
# observed / imputed / subsampled truth, panels lettered (a)-(i); saved as
# sim_traj.pdf.  Imputed panels trim 10 rows at each end as above.
plt.figure(figsize=(11,8.5))
plt.subplot(3, 3, 1)
for i in range(np.shape(day1_obs)[0]):
  if day1_obs[i,0]==1:
    plt.plot([day1_obs[i,1],day1_obs[i,4]], [day1_obs[i,2], day1_obs[i,5]], 'k-', lw=1)
  if day1_obs[i,0]==2:
    plt.plot(day1_obs[i,1],day1_obs[i,2],"r+",ms=5)
plt.text(41.79,-70.88,'(a)',fontsize = 16)
plt.ylabel('longitude')
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
# Proxy artists for a single shared flight/pause legend.
custom_lines = [Line2D([], [], color="black", lw=1,label = "flight"),
                Line2D([], [], color="r", linestyle = "None", marker = "+",markersize = 10, label="pause")]
plt.legend(handles=custom_lines, loc = "upper left")
plt.subplot(3, 3, 2)
for i in np.arange(10,np.shape(day1_imp)[0]-10):
  if day1_imp[i,0]==1:
    plt.plot([day1_imp[i,1],day1_imp[i,4]], [day1_imp[i,2], day1_imp[i,5]], 'k-', lw=1)
  if day1_imp[i,0]==2:
    plt.plot(day1_imp[i,1],day1_imp[i,2],"r+",ms=5)
plt.text(41.79,-70.88,'(b)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 3)
for i in range(np.shape(day1_full0)[0]-1):
  plt.plot([day1_full0[i,1],day1_full0[i+1,1]], [day1_full0[i,2], day1_full0[i+1,2]], 'k-', lw=1)
plt.text(41.79,-70.88,'(c)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 4)
for i in range(np.shape(day2_obs)[0]):
  if day2_obs[i,0]==1:
    plt.plot([day2_obs[i,1],day2_obs[i,4]], [day2_obs[i,2], day2_obs[i,5]], 'k-', lw=1)
  if day2_obs[i,0]==2:
    plt.plot(day2_obs[i,1],day2_obs[i,2],"r+",ms=5)
plt.ylabel('longitude')
plt.text(41.79,-70.88,'(d)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 5)
for i in np.arange(10,np.shape(day2_imp)[0]-10):
  if day2_imp[i,0]==1:
    plt.plot([day2_imp[i,1],day2_imp[i,4]], [day2_imp[i,2], day2_imp[i,5]], 'k-', lw=1)
  if day2_imp[i,0]==2:
    plt.plot(day2_imp[i,1],day2_imp[i,2],"r+",ms=5)
plt.text(41.79,-70.88,'(e)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 6)
for i in range(np.shape(day2_full0)[0]-1):
  plt.plot([day2_full0[i,1],day2_full0[i+1,1]], [day2_full0[i,2], day2_full0[i+1,2]], 'k-', lw=1)
plt.text(41.79,-70.88,'(f)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 7)
for i in range(np.shape(day3_obs)[0]):
  if day3_obs[i,0]==1:
    plt.plot([day3_obs[i,1],day3_obs[i,4]], [day3_obs[i,2], day3_obs[i,5]], 'k-', lw=1)
  if day3_obs[i,0]==2:
    plt.plot(day3_obs[i,1],day3_obs[i,2],"r+",ms=5)
plt.xlabel('latitude')
plt.ylabel('longitude')
plt.text(41.79,-70.88,'(g)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 8)
for i in np.arange(10,np.shape(day3_imp)[0]-10):
  if day3_imp[i,0]==1:
    plt.plot([day3_imp[i,1],day3_imp[i,4]], [day3_imp[i,2], day3_imp[i,5]], 'k-', lw=1)
  if day3_imp[i,0]==2:
    plt.plot(day3_imp[i,1],day3_imp[i,2],"r+",ms=5)
plt.xlabel('latitude')
plt.text(41.79,-70.88,'(h)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.subplot(3, 3, 9)
for i in range(np.shape(day3_full0)[0]-1):
  plt.plot([day3_full0[i,1],day3_full0[i+1,1]], [day3_full0[i,2], day3_full0[i+1,2]], 'k-', lw=1)
plt.xlabel('latitude')
plt.text(41.79,-70.88,'(i)',fontsize = 16)
plt.xticks(np.arange(41.82, 42.03, step=0.04))
plt.yticks(np.arange(-71.11, -70.89, step=0.03))
plt.tight_layout()
plt.savefig("sim_traj.pdf")
# -
# | simulations.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 06 - Model Deployment
#
# The purpose of this notebook is to execute a CI/CD routine to test and deploy the trained model to `Vertex AI` as an `Endpoint` for online prediction serving. The notebook covers the following steps:
# 1. Run the test steps locally.
# 2. Execute the model deployment `CI/CD` steps using `Cloud Build`.
#
#
# ## Setup
# ### Import libraries
# +
import os
import logging

# Surface INFO-level messages from the deployment utilities in the notebook.
logging.getLogger().setLevel(logging.INFO)
# -
# ### Setup Google Cloud project
# +
PROJECT = '[your-project-id]' # Change to your project id.
REGION = 'us-central1' # Change to your region.
if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]":
    # Get your GCP project id from gcloud.
    # NOTE(review): `shell_output` is only defined when the magic line below
    # is executed in a notebook; in a plain .py run this raises NameError.
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT = shell_output[0]
print("Project ID:", PROJECT)
print("Region:", REGION)
# -
# ### Set configurations
# +
# Display names used to locate the model and endpoint in Vertex AI.
VERSION = 'v01'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'
ENDPOINT_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier'
CICD_IMAGE_NAME = 'cicd:latest'
CICD_IMAGE_URI = f"gcr.io/{PROJECT}/{CICD_IMAGE_NAME}"
# -
# ## 1. Run CI/CD steps locally
# Export the configuration so the shelled-out build scripts can read it.
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['MODEL_DISPLAY_NAME'] = MODEL_DISPLAY_NAME
os.environ['ENDPOINT_DISPLAY_NAME'] = ENDPOINT_DISPLAY_NAME
# ### Run the model artifact testing
# !py.test src/tests/model_deployment_tests.py::test_model_artifact -s
# ### Run create endpoint
# !python build/utils.py \
# --mode=create-endpoint\
# --project={PROJECT}\
# --region={REGION}\
# --endpoint-display-name={ENDPOINT_DISPLAY_NAME}
# ### Run deploy model
# !python build/utils.py \
# --mode=deploy-model\
# --project={PROJECT}\
# --region={REGION}\
# --endpoint-display-name={ENDPOINT_DISPLAY_NAME}\
# --model-display-name={MODEL_DISPLAY_NAME}
# ### Test deployed model endpoint
# !py.test src/tests/model_deployment_tests.py::test_model_endpoint
# ## 2. Execute the Model Deployment CI/CD routine in Cloud Build
#
# The CI/CD routine is defined in the [model-deployment.yaml](model-deployment.yaml) file, and consists of the following steps:
# 1. Load and test the trained model interface.
# 2. Create an endpoint in Vertex AI if it doesn't exist.
# 3. Deploy the model to the endpoint.
# 4. Test the endpoint.
# ### Build CI/CD container Image for Cloud Build
#
# This is the runtime environment where the steps of testing and deploying model will be executed.
# !echo $CICD_IMAGE_URI
# !gcloud builds submit --tag $CICD_IMAGE_URI build/. --timeout=15m
# ### Run CI/CD from model deployment using Cloud Build
REPO_URL = "https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai.git" # Change to your github repo.
BRANCH = "main"
# +
# Build the --substitutions string for Cloud Build.
# NOTE(review): the template leaves a trailing comma after the last
# substitution — gcloud tolerates it in practice, but verify.
SUBSTITUTIONS=f"""\
_REPO_URL='{REPO_URL}',\
_BRANCH={BRANCH},\
_CICD_IMAGE_URI={CICD_IMAGE_URI},\
_PROJECT={PROJECT},\
_REGION={REGION},\
_MODEL_DISPLAY_NAME={MODEL_DISPLAY_NAME},\
_ENDPOINT_DISPLAY_NAME={ENDPOINT_DISPLAY_NAME},\
"""
# !echo $SUBSTITUTIONS
# -
# !gcloud builds submit --no-source --config build/model-deployment.yaml --substitutions {SUBSTITUTIONS} --timeout=30m
| 06-model-deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## GitHub Data Analysis
#
# ## Introduction
# Every software engineer uses GitHub. As a hosting service for the open-source distributed version control tool Git, GitHub gains thousands of new repositories every hour. Thus, GitHub can also be used as a huge, dynamic data source to analyze the technology status quo and trends.
#
# In this project, we will look into several things, such as who the most popular person in a certain field is, what the current hottest project is, and how widely different programming languages are used.
#
# ### GitHub API
#
# We will use GitHub API from [here](https://developer.github.com/v3/).
# All the API calls are using HTTPS requests and it will return in JSON format.
#
# Steps to use GitHub API:
# 1. Install `pygithub` by
# `-pip install pygithub`
# 2. Generate a GitHub Personal access token required for `GitHub API`
# 3. Test your API in a local terminal using the following command. It is expected to return a dictionary containing your account info
# ##### - curl https://api.github.com/user\?access_token\={YOUR_TOKEN}
#
#
#
# ### NetworkX
# NetworkX is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks. It provides tools to work with large datasets that have network structures. With NetworkX, we can easily load and store networks in standard data formats. It can also help us generate classic networks, analyze network structure, build network models, and much more.
#
# You can install `NetworkX` by `-pip install networkx`
#
# +
import sys
from github import Github
import networkx as nx
from operator import itemgetter
# Global Variables
ACCESS_TOKEN = '05bb4eb867b152be20dd11f4fa292107c839931c'
USER = 'minrk' # Define the GitHub User Name
REPO = 'findspark' # Define the Repo name
client = Github(ACCESS_TOKEN)
graph = nx.DiGraph()
# -
# ### Set Up NetworkX Graph
#
#
# After defined the user and repo name that we are going to explore, we can then set up the NetworkX graph.
#
# We will add the repo and each user who starred the repo as nodes, and build edges between them. After this, we also add edges between users and their followers.
# +
def buildRepoRelations(REPO):
    """Seed the global ``graph`` with a repo node and its stargazers.

    Adds one '(repo)' node for the repository owned by the global USER,
    one '(user)' node per stargazer, and a 'gazes' edge from each
    stargazer to the repo.

    Args:
        REPO: Name of a repository owned by the global USER.

    Returns:
        list: PyGithub user objects who starred the repo.
    """
    user = client.get_user(USER)
    repo = user.get_repo(REPO)  # Get a specific repo
    # (Removed an unused `user.get_repos()` call that cost an extra API request.)
    stargazers = list(repo.get_stargazers())  # Users who starred this REPO
    graph.add_node(repo.name + '(repo)', type='repo', lang=repo.language, owner=user.login)
    for stargazer in stargazers:
        graph.add_node(stargazer.login + '(user)', type='user')
        graph.add_edge(stargazer.login + '(user)', repo.name + '(repo)', type='gazes')
    return stargazers
def buildUserRelations(stargazers):
    """Add 'follows' edges between stargazers already present in the graph."""
    for gazer in stargazers:
        gazer_node = gazer.login + '(user)'
        followers = gazer.get_followers()
        try:
            for fan in followers:
                fan_node = fan.login + '(user)'
                # Only connect followers who are themselves stargazers.
                if fan_node in graph:
                    graph.add_edge(fan_node, gazer_node, type='follows')
        except Exception:
            print("Encountered an error when finding follower for user: ", gazer.login)
    # Show how many API calls we are still allowed to make.
    print ("API Calls Remaining", client.rate_limiting)
# -
stargazers = buildRepoRelations(REPO)
buildUserRelations(stargazers)
# ### Find Hottest User
# In this step, we use the graph initialized above to find the hottest users. The hottest user is defined as the GitHub user followed by most of the people who starred the repo we defined previously. This can also be interpreted as those who starred this repo also follows ...
#
# +
from collections import Counter
from operator import itemgetter
def getHottestUser(stargazers):
    """Print the users most followed by fellow stargazers.

    Counts the targets of 'follows' edges in the global ``graph`` and
    prints how many distinct followed users exist plus the top 10.

    Args:
        stargazers: Unused; kept for backward compatibility with callers.
    """
    # Targets of 'follows' edges are the users being followed.
    followed = [dst for _, dst, data in graph.edges(data=True)
                if data['type'] == 'follows']
    # most_common() already yields (user, count) pairs sorted by count,
    # so no extra copy loop is needed.
    popular_users = Counter(followed).most_common()
    print("Number of popular users", len(popular_users))
    print("Top popular users:", popular_users[:10])
getHottestUser(stargazers)
# -
# The result above shows the most popular users. However, we care more about some centralities that NetworkX provided.
# #### Degree Centrality
# First, the Degree Centrality for a node v is the fraction of nodes it is connected to.
# #### Betweenness Centrality
# Also, the Betweenness Centrality compute the shortest path for nodes. It is the sum of the fraction of all-pairs shortest paths that pass through the node v.
# #### Closeness Centrality
# Lastly, the Closeness Centrality of a node u is the reciprocal of the sum of the shortest path distances from u to all n-1 other nodes. Since the sum of distances depends on the number of nodes in the graph, closeness is normalized by the sum of minimum possible distances n-1.
#
#
#
# +
def formatResult(graph, center_node='findspark(repo)'):
    """Compute centrality rankings for the graph minus its hub node.

    Args:
        graph: networkx graph of user and repo nodes.
        center_node: Node removed before computing centralities so the hub
            repo does not dominate the scores. The default preserves the
            original hard-coded 'findspark(repo)' behaviour.

    Returns:
        tuple: (degree, betweenness, closeness) centralities, each a list
        of (node, score) pairs sorted by descending score.
    """
    graph_copy = graph.copy()
    # Remove the center node: every stargazer links through it, which
    # would otherwise swamp the centrality scores.
    graph_copy.remove_node(center_node)
    dc = sorted(nx.degree_centrality(graph_copy).items(),
                key=itemgetter(1), reverse=True)
    bc = sorted(nx.betweenness_centrality(graph_copy).items(),
                key=itemgetter(1), reverse=True)
    cc = sorted(nx.closeness_centrality(graph_copy).items(),
                key=itemgetter(1), reverse=True)
    return (dc, bc, cc)
dc, bc, cc = formatResult(graph)
print ("Degree Centrality")
print (dc[:5],'\n')
print ("Betweenness Centrality")
print (bc[:5],'\n')
print ("Closeness Centrality")
print (cc[:5])
# -
# ### Find Hottest Repository
# Next, we go through each user for their starred repos and then add these repos into the network. After that, it is easy for us to get the popular repositories. Moreover, we can also get to know the language preference of one certain user.
def buildRepoNet(stargazers, limit_repo):
    """Attach each stargazer's starred repos (capped at limit_repo) to the graph."""
    for gazer in stargazers:
        print(gazer.login)
        try:
            # Slice to avoid supernodes: only the first limit_repo stars.
            for starred in gazer.get_starred()[:limit_repo]:
                repo_node = starred.name + '(repo)'
                graph.add_node(repo_node, type='repo', lang=starred.language,
                               owner=starred.owner.login)
                graph.add_edge(gazer.login + '(user)', repo_node, type='gazes')
        except Exception:  # e.g. ssl.SSLError
            print("Encountered an error fetching starred repos for", gazer.login, "Skipping.")
    print("Num nodes/edges in graph", graph.number_of_nodes(), "/", graph.number_of_edges())
    print(nx.info(graph), '\n')
# A user may have starred very many repos, making the build slow; limit_repo
# caps how many starred repos are fetched per user.
buildRepoNet(stargazers,5)
def getTopNRepos(n):
    """Print the n repositories with the highest in-degree (most gazers in graph).

    Args:
        n: Number of top repositories to print.
    """
    print("Top "+str(n)+" Popular repositories:")
    repos = []
    # in_degree_iter() was removed in networkx 2.0. in_degree() returns a
    # dict on networkx 1.x and an iterable view of (node, degree) pairs on
    # 2.x; normalise both forms.
    degrees = graph.in_degree()
    pairs = degrees.items() if isinstance(degrees, dict) else degrees
    for (v, i) in pairs:
        # NOTE(review): graph.node was removed in networkx 2.4 (use
        # graph.nodes there); kept to match the rest of this file.
        if graph.node[v]['type'] == 'repo':
            repos.append((v, i))
    repos = sorted(repos, key=lambda x: x[1], reverse=True)
    print(repos[:n])
getTopNRepos(10)
# +
def getUserPreference(username):
    """Print the repos a user has starred and the languages of those repos.

    Args:
        username: GitHub login (without the '(user)' suffix).
    """
    user_node = username + "(user)"
    # Single pass over the user's out-edges: collect starred repos and their
    # languages together instead of iterating the adjacency twice.
    starred, langs = [], set()
    for v in graph[user_node]:
        if graph[user_node][v]['type'] == 'gazes':
            starred.append(v)
            # NOTE(review): graph.node was removed in networkx 2.4; kept to
            # match the rest of this file.
            langs.add(graph.node[v]['lang'])
    # ("Respositories" typo fixed in the user-facing message.)
    print("Repositories that "+ username+" has starred")
    for v in starred:
        print(v)
    print("Programming languages "+ username+" is interested in")
    print(langs)
getUserPreference('luzhijun')
| .ipynb_checkpoints/code-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="CL9d1xURYbco" colab={"base_uri": "https://localhost:8080/"} outputId="bccb6cfb-87b8-48c6-c469-fbbfa96c20c7"
import torch
# Select the compute device: prefer the first CUDA GPU, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
    print(f'There are {torch.cuda.device_count()} GPU(s) available.')
    print('Device name:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# + id="iuA88tMfzDFK" colab={"base_uri": "https://localhost:8080/"} outputId="ff9d71fb-ccb8-43ef-8e55-8425a5f8442b"
# !pip install transformers
# + id="gaWcP-P7qfiK"
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
# from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
# + id="UTg_X25TrsrY" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} outputId="54b7775d-d6db-499d-d19e-37cb92646437"
# Upload the train file from your local drive
from google.colab import files
uploaded = files.upload()
# + id="LAo3B9uWKAe3" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 37} outputId="283bc685-f8dd-410a-9741-c26f6f069212"
uploaded = files.upload()
# + id="IAA4M2iYQZrf" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} outputId="a09d8c12-ee84-491c-d165-0703bc635ba7"
uploaded = files.upload()
# + id="7OngPGEJruwS"
# Load the SST-2 splits. Each line is '<label><sentence>' in a single column.
# NOTE(review): sep='\n' was deprecated in pandas 1.4 and errors in 2.x;
# if upgrading pandas, read the files line-by-line instead.
# load trainset
df_train = pd.read_csv("stsa.binary.train.txt", header=None, sep='\n')
# load dev set
df_dev = pd.read_csv("stsa.binary.dev.txt", header=None, sep='\n')
# load testset
df_test = pd.read_csv("stsa.binary.test.txt", header=None, sep='\n')
# separate row number = 6920
# Concatenate train and test for joint preprocessing. DataFrame.append was
# deprecated in pandas 1.4 and removed in 2.0; pd.concat is the equivalent.
df = pd.concat([df_train, df_test])
#df = pd.concat([df_train, df_dev, df_test])
# + id="FT1nf1d6Kv0X"
# Split column 0 into label (first character) and sentence (the rest).
df['sentence'] = df[0].astype(str).str[1:]
df['sentiment'] = df[0].astype(str).str[0]
df = df.drop(columns=[0])
# + id="-Y5JweA_J-4h" colab={"base_uri": "https://localhost:8080/", "height": 407} outputId="5a2511cf-d2c7-4656-b455-0e93b5fe0c23"
df
# + id="3cbkSa-wsUqk" colab={"base_uri": "https://localhost:8080/"} outputId="c4af7b32-f9d8-4c1e-d18a-b571dad9a553"
# check shape
df.shape
# + id="KDQEOcL-r3HT" colab={"base_uri": "https://localhost:8080/"} outputId="a22106b3-26d1-4438-a074-0c42abc435f7"
# convert sentiment to correct types
df.sentence = df.sentence.astype(str)
df.sentiment = df.sentiment.astype(int)
df.dtypes
# + id="flfithQXEzUp"
# progress check for apply function
# progress bar
# instantiate
tqdm.pandas(desc="Progress Bar")
# + id="JQ0PRQsaNEWe" colab={"base_uri": "https://localhost:8080/"} outputId="74e3ee7c-15fb-47e1-8248-34053d63c7e0"
# download nltk
import nltk
nltk.download('punkt')
nltk.download('stopwords')
# + id="KKP4UGJNEaQL"
import string
from nltk import word_tokenize
from nltk.corpus import stopwords
# remove ascii, digits and punctuations for one string
def preprocess(text):
    """Clean a sentence: strip digits, punctuation, stopwords and non-ASCII.

    Args:
        text: Raw sentence string.

    Returns:
        str: Space-joined tokens with digits, punctuation tokens, English
        stopwords and non-printable characters removed.
    """
    # remove digits
    text = ''.join([i for i in text if not i.isdigit()])
    # Use sets for O(1) membership tests; the original lists made every
    # token check O(n).
    punctuations = set(string.punctuation)
    stop_words = set(stopwords.words('english'))
    text = [i for i in word_tokenize(text) if i not in punctuations and i not in stop_words]
    text = ' '.join(text)
    # Keep only printable ASCII characters.
    printable = set(string.printable)
    return ''.join(filter(lambda x: x in printable, text))
# + id="l2qGDjvZeKRW"
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# calculate cosine similarity of sentences by tf-idf
def get_tf_idf_query_similarity(sentences, query):
    """Cosine similarity between each query and every sentence under TF-IDF.

    Both inputs are pandas Series of raw strings; both are cleaned with
    ``preprocess`` first. The vectorizer is fitted on ``sentences`` only.

    Args:
        sentences: Series of corpus sentences.
        query: Series of query sentences.

    Returns:
        numpy.ndarray: Matrix of shape (len(query), len(sentences)) of
        cosine similarities; np.array([0]) when ``query`` is empty,
        matching the original fallback value.
    """
    sentences = sentences.progress_apply(lambda x: preprocess(x))
    query = query.progress_apply(lambda x: preprocess(x))
    vectorizer = TfidfVectorizer()
    docs_tfidf = vectorizer.fit_transform(sentences)
    if len(query) == 0:
        return np.array([0])
    # Transform all queries in one call and compute the whole similarity
    # matrix at once instead of np.vstack-ing row by row, which re-copies
    # the accumulator on every iteration (O(n^2)).
    query_tfidf = vectorizer.transform([str(q) for q in query])
    return cosine_similarity(query_tfidf, docs_tfidf)
# + id="cUxoxlfDEupR"
#df.sentence = df.sentence.progress_apply(lambda x: preprocess(x))
# + id="bwPPybuMXbOW" colab={"base_uri": "https://localhost:8080/"} outputId="f8ed5ea6-6437-4151-ce17-a50edcbdaec2"
df.sentence
# + id="y8nVLLRtEvJr"
df.index = range(len(df))
df.sentence = df.sentence.astype(str)
# + id="QB1AZcxBE33R"
# remove items with less than four words
# df.drop(df[df.sentence.str.split().str.len() < 4].index, inplace=True)
# df.index = range(len(df))
# + id="TdAa7nHDE_zc" colab={"base_uri": "https://localhost:8080/"} outputId="43bdb5b1-2dd1-4b2c-b389-f05962b768a2"
# check number of sentiment for each class
df['sentiment'].value_counts()
# + id="Tcwx1Bcsd7bO"
# Series views used for tokenization below.
sentences = df['sentence']
labels = df['sentiment']
# + id="w1B2DA-2sRNY"
# Create sentence and label lists
sentences_raw = df.sentence.values
# We need to add special tokens at the beginning and end of each sentence for BERT to work properly
# NOTE(review): `labels` is reassigned here from a Series to a numpy array;
# only this array form is used afterwards.
labels = df.sentiment.values
# + id="qXRD8aqKsi62"
from transformers import BertTokenizer, BertForPreTraining
# import the BERT tokenizer, used to convert our text into tokens that correspond to BERT's vocabulary
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased', do_lower_case=True)
# + id="q0ihOFlRO55i"
# tokenize all sentences and see sample output
tokenized_texts = [tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sent))) for sent in sentences]
# + id="7hLiOvC7P9Cm" colab={"base_uri": "https://localhost:8080/"} outputId="084e54a6-2d42-4e28-cba8-f9860119ffbc"
# check tokenization
print("Tokenize the first sentence:")
print(tokenized_texts[0])
# + id="HYT5gWxd1_fc" colab={"base_uri": "https://localhost:8080/"} outputId="9d7bfa87-596c-42b4-e27b-13dc38e8c1bc"
len(tokenized_texts[0])
# + id="FIJmptLe6ndw"
# store length of tokenized sentences
texts_len = []
for text in tokenized_texts:
    texts_len.append(len(text))
# + id="o3V7LV6F7MQ8" colab={"base_uri": "https://localhost:8080/", "height": 572} outputId="4da9842e-f18d-451c-b354-6763e261658d"
# check distribution of tokens
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 (use histplot).
import seaborn as sns
import matplotlib.pyplot as plt
fig_dims = (10, 8)
fig, ax = plt.subplots(figsize=fig_dims)
sns.distplot(texts_len)
plt.xlim([0, 120])
plt.xlabel('Token count')
# + id="LN6kkZWl7Kkm" colab={"base_uri": "https://localhost:8080/"} outputId="c9bb206b-774e-4c6d-8673-a7fed81ab813"
# check max length
max(texts_len)
# + id="ImRTqIkKsoNl"
# Set the maximum sequence length, chosen from the token-count distribution above.
# In the original paper, the authors used a length of 512
MAX_LEN = 80
# + id="vL_sey7vnekh"
# Defining BERT tokinizer
def tokenize(sentences, tokenizer):
    """Encode sentences into fixed-length id/mask/segment int32 arrays."""
    ids, masks, segments = [], [], []
    for text in tqdm(sentences):
        # Pad/truncate every sentence to MAX_LEN with special tokens added.
        encoded = tokenizer.encode_plus(text, add_special_tokens=True, max_length=MAX_LEN, padding='max_length',
                                        return_attention_mask=True, return_token_type_ids=True, truncation=True)
        ids.append(encoded['input_ids'])
        masks.append(encoded['attention_mask'])
        segments.append(encoded['token_type_ids'])
    return np.asarray(ids, dtype='int32'), np.asarray(masks, dtype='int32'), np.asarray(segments, dtype='int32')
# + id="mmLCNoCxn3Te" colab={"base_uri": "https://localhost:8080/"} outputId="1072d84a-b704-4afe-a9df-42388f1eecfa"
# tokenize sentences
input_ids, attention_masks, input_segments = tokenize(sentences_raw, tokenizer)
# + id="GZkWupJgqQkZ" colab={"base_uri": "https://localhost:8080/"} outputId="96b43d30-f7e5-430e-aec6-0693fa638cd7"
input_ids[0]
# + id="DnTw-zJy5dgq" colab={"base_uri": "https://localhost:8080/"} outputId="1b93a940-a76b-479f-dca2-0fbf8d9fb53d"
attention_masks[0]
# + id="RAIaveJ_5dgs" colab={"base_uri": "https://localhost:8080/"} outputId="cb6c1c11-576a-413a-f30b-23099a7de60f"
input_segments[0]
# + id="f7JCVyz2tfkt"
# Fixed split at index 6920.
# NOTE(review): 6920 appears to be the size of stsa.binary.train, so this
# separates the appended train/test sets — confirm if the dataset changes.
train_inputs, validation_inputs, train_labels, validation_labels = input_ids[:6920], input_ids[6920:], labels[:6920], labels[6920:]
train_masks, validation_masks = attention_masks[:6920], attention_masks[6920:]
# Alternative: random split via train_test_split (unused).
# train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
#                                                             random_state=42, test_size=0.1)
# train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,
#                                              random_state=42, test_size=0.1)
# + id="JV9O0sUttkIc"
# Convert all of our data into torch tensors
train_inputs = torch.tensor(train_inputs)
train_labels = torch.tensor(train_labels)
train_masks = torch.tensor(train_masks)
validation_inputs = torch.tensor(validation_inputs)
validation_labels = torch.tensor(validation_labels)
validation_masks = torch.tensor(validation_masks)
# + id="pq0OH0EatnQr"
# Select a batch size for training. For fine-tuning BERT on a specific task, the authors recommend a batch size of 16 or 32
batch_size = 32
# Create an iterator of data with torch DataLoader
# train set: shuffled each epoch via RandomSampler
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# val set: deterministic order via SequentialSampler
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# + id="xJCdIRnAF5Nb"
import torch
import torch.nn as nn
from transformers import BertModel, BertConfig, BertForSequenceClassification
# + id="imL4aP-LWeWf"
# Create the BertClassfier class
class BertForSequenceClassification(nn.Module):
    """BERT model for binary sequence classification with pluggable heads.

    Pooler layer choices (pooler_type): 'fc' (fully connected) or 'cnn2d'.
    Finetune layer choices (finetune_type): 'fc', 'lstm' or 'cnn1d'.
    Hidden-state layers to pool are selected by index list, e.g. [-1, -2]
    for the last two layers (default: last layer only).
    By default BERT layers are not frozen; set freeze_bert=True to freeze them.
    """
    def __init__(self, pooler_type='fc', finetune_type='fc', pooling_layers=None, freeze_bert=False):
        """Build the BERT backbone plus the selected pooler/finetune heads.

        Args:
            pooler_type: 'fc' or 'cnn2d' pooling over the selected layers.
            finetune_type: 'fc', 'lstm' or 'cnn1d' classification head.
            pooling_layers: Hidden-state layer indices to pool, e.g. [-1, -2].
                Defaults to [-1]; a None sentinel replaces the original
                mutable default argument ([-1]) — behaviour is unchanged.
            freeze_bert: When True, BERT encoder weights are not updated.
        """
        super(BertForSequenceClassification, self).__init__()
        pooling_layers = [-1] if pooling_layers is None else pooling_layers
        # Hidden size of BERT-large, LSTM hidden size, FC hidden size, number of labels.
        self.D_in, self.H_lstm, self.H_fc, self.D_out = 1024, 256, 1024, 2
        # Instantiate BERT model; output_hidden_states exposes every layer.
        self.config = BertConfig.from_pretrained("bert-large-uncased", output_hidden_states=True)
        self.bert = BertModel.from_pretrained("bert-large-uncased", config=self.config)
        # pooler layer type
        self.pooler_type = pooler_type
        # finetune layer type
        self.finetune_type = finetune_type
        # define pooling layers ([-1, -2] etc.)
        self.bert_layers = pooling_layers
        # lstm finetune layer
        self.lstm = nn.LSTM(self.D_in, self.H_lstm, num_layers=1, bidirectional=True, batch_first=True)
        # cnn1d finetune layer
        kernel_size_cnn1d, stride_cnn1d = 4, 2
        self.cnn1d = nn.Conv1d(1, 1, kernel_size=kernel_size_cnn1d,
                               stride=stride_cnn1d)
        # fc layer sized for the cnn1d output (standard conv width formula).
        fc_cnn1d_width = ((self.D_in * len(self.bert_layers) - kernel_size_cnn1d) / stride_cnn1d) + 1
        self.fc_cnn1d_fc = nn.Linear((int)(fc_cnn1d_width), self.D_out)
        # relu
        self.relu = nn.ReLU()
        # fc1 layer: pooler projection over the concatenated layers.
        self.fc1 = nn.Linear(self.D_in*len(self.bert_layers), self.H_fc*len(self.bert_layers))
        # dropout
        self.dropout = nn.Dropout(0.1)
        # fc2 layer: final classification for the fc-fc path.
        self.fc2 = nn.Linear(self.H_fc*len(self.bert_layers), self.D_out)
        # fc layer for bilstm output (2x hidden: forward + backward states).
        self.fc_lstm = nn.Linear(self.H_lstm*2, self.D_out)
        # cnn2d pooler over the (layers x hidden) map of the [CLS] token.
        in_channels, out_channels, kernel_size, stride, padding = 1, 1, (len(self.bert_layers), 4), 4, 0
        self.cnn2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=kernel_size, stride=stride,
                               padding=padding)
        # Conv output widths along each dimension (W - K + 2P)/S + 1.
        cnn2d_width1 = ((len(self.bert_layers)-kernel_size[0]+2*padding)/stride)+1
        cnn2d_width2 = ((self.D_in-kernel_size[1]+2*padding)/stride)+1
        flatten_size = cnn2d_width1 * cnn2d_width2
        # fc layer for cnn2d pooler
        self.fc_cnn2d_fc = nn.Linear((int)(flatten_size), self.D_out)
        # fc layer for cnn1d applied after the cnn2d pooler.
        cnn2d_cnn1d_width = ((flatten_size - kernel_size_cnn1d) / stride_cnn1d) + 1
        self.cnn2d_cnn1d_fc = nn.Linear((int)(cnn2d_cnn1d_width), self.D_out)
        # lstm head for the cnn2d pooler, plus its classification layer.
        self.lstm2 = nn.LSTM((int)(flatten_size), 256, num_layers=1, bidirectional=True, batch_first=True)
        self.fc_lstm2 = nn.Linear(256*2, self.D_out)
        # batchnorm applied after the cnn2d pooler.
        self.bn = nn.BatchNorm2d(out_channels)
        # Freeze the BERT encoder layers if requested.
        if freeze_bert:
            for param in self.bert.parameters():
                param.requires_grad = False
    def fc_pooler(self, pooled_hidden_states):
        """Project pooled states through fc1 + tanh (BERT-style pooler)."""
        pooler = self.fc1(pooled_hidden_states)
        pooler = torch.tanh(pooler)
        return pooler
    def cnn2d_pooler(self, pooled_hidden_states):
        """Pool the (layers x hidden) map with Conv2d + BatchNorm + ReLU."""
        pooler = self.cnn2d(pooled_hidden_states.unsqueeze(1))
        pooler = self.bn(pooler)
        pooler = self.relu(pooler)
        return pooler
    def fc_fc_layer(self, pooled_hidden_states):
        """fc pooler followed by dropout + fc classification head."""
        pooler = self.fc_pooler(pooled_hidden_states)
        finetune_out = self.dropout(pooler)
        logits = self.fc2(finetune_out)
        return logits
    def fc_lstm_layer(self, pooled_hidden_states):
        """fc pooler followed by a BiLSTM head over the layer chunks."""
        pooler = self.fc_pooler(pooled_hidden_states)
        pooler = self.dropout(pooler)
        # Reshape to (batch, n_layers, D_in) so each pooled layer is a step.
        outputs, (ht, ct) = self.lstm(pooler.view(pooler.size()[0], -1, self.D_in))
        # Concatenate final forward and backward hidden states.
        finetune_out = torch.cat([ht[0], ht[-1]], dim=1)
        logits = self.fc_lstm(finetune_out)
        return logits
    def fc_cnn1d_layer(self, pooled_hidden_states):
        """fc pooler followed by a Conv1d head + fc classifier."""
        pooler = self.fc_pooler(pooled_hidden_states)
        pooler = self.dropout(pooler)
        finetune_out = self.cnn1d(pooler.unsqueeze(1))
        finetune_out = torch.flatten(finetune_out, start_dim=1)
        logits = self.fc_cnn1d_fc(finetune_out)
        return logits
    def cnn2d_fc_layer(self, pooled_hidden_states):
        """cnn2d pooler followed by a flatten + fc classifier."""
        pooler = self.cnn2d_pooler(pooled_hidden_states)
        pooler = torch.flatten(pooler, start_dim=1)
        logits = self.fc_cnn2d_fc(pooler.view(pooler.size()[0], -1))
        return logits
    def cnn2d_cnn1d_layer(self, pooled_hidden_states):
        """cnn2d pooler followed by a Conv1d head + fc classifier."""
        pooler = self.cnn2d_pooler(pooled_hidden_states)
        pooler = torch.flatten(pooler, start_dim=1)
        finetune_out = self.cnn1d(pooler.unsqueeze(1))
        finetune_out = torch.flatten(finetune_out, start_dim=1)
        logits = self.cnn2d_cnn1d_fc(finetune_out)
        return logits
    def cnn2d_lstm_layer(self, pooled_hidden_states):
        """cnn2d pooler followed by a BiLSTM head + fc classifier."""
        pooler = self.cnn2d_pooler(pooled_hidden_states)
        pooler = torch.flatten(pooler, start_dim=1)
        outputs, (ht, ct) = self.lstm2(pooler.view(pooler.size()[0], -1, pooler.size()[1]))
        # Concatenate final forward and backward hidden states.
        finetune_out = torch.cat([ht[0], ht[-1]], dim=1)
        finetune_out = self.dropout(finetune_out)
        logits = self.fc_lstm2(finetune_out)
        return logits
    def forward(self, input_ids, attention_mask):
        """Classify a batch of sequences.

        Args:
            input_ids: Token-id tensor of shape (batch, seq_len).
            attention_mask: Mask tensor (batch, seq_len); 1 = real token.

        Returns:
            Logits tensor of shape (batch, 2).
        """
        # Index [2] holds all hidden states (output_hidden_states=True).
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)[2]
        if self.pooler_type == 'cnn2d':
            # Zero out padded positions, stack the selected layers as a new
            # axis, then keep the [CLS] token's (layers x hidden) map.
            bert_mask = attention_mask.unsqueeze(2).unsqueeze(2)
            pooled_hidden_states = torch.stack(tuple([outputs[i] for i in self.bert_layers]), dim=2)
            pooled_hidden_states = torch.mul(pooled_hidden_states, bert_mask)
            pooled_hidden_states = pooled_hidden_states[:, 0, :, :]
        else:
            # Zero out padded positions, concatenate the selected layers
            # along the hidden dim, then keep the [CLS] token's vector.
            bert_mask = attention_mask.unsqueeze(2)
            pooled_hidden_states = torch.cat(tuple([outputs[i] for i in self.bert_layers]), dim=-1)
            pooled_hidden_states = torch.mul(pooled_hidden_states, bert_mask)
            pooled_hidden_states = pooled_hidden_states[:, 0, :]
        # Dispatch to the configured pooler/finetune combination.
        if self.pooler_type == 'fc':
            if self.finetune_type == 'fc':
                logits = self.fc_fc_layer(pooled_hidden_states)
            elif self.finetune_type == 'lstm':
                logits = self.fc_lstm_layer(pooled_hidden_states)
            elif self.finetune_type == 'cnn1d':
                logits = self.fc_cnn1d_layer(pooled_hidden_states)
        elif self.pooler_type == 'cnn2d':
            if self.finetune_type == 'fc':
                logits = self.cnn2d_fc_layer(pooled_hidden_states)
            elif self.finetune_type == 'lstm':
                logits = self.cnn2d_lstm_layer(pooled_hidden_states)
            elif self.finetune_type == 'cnn1d':
                logits = self.cnn2d_cnn1d_layer(pooled_hidden_states)
        return logits
# + id="-Wd6uyHo5dg4" colab={"base_uri": "https://localhost:8080/"} outputId="78d87f3a-1165-4b5c-a0ec-e95574a77772"
# Edit finetune_type and pooling_layers to produce the different results.
# NOTE(review): the original comment said BERT layers are frozen for this
# task, but freeze_bert=False here — confirm which was intended.
model = BertForSequenceClassification(pooler_type='fc', finetune_type='cnn1d', pooling_layers=[-1], freeze_bert=False)
model.cuda()
# + id="A-Nhs5xVvNoM"
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup
# Number of fine-tuning epochs (BERT authors recommend 2-4).
epochs = 4
# create the optimizer
optimizer = AdamW(model.parameters(),
                  lr=2e-5,
                  betas=(0.9, 0.999),
                  eps=1e-8,
                  weight_decay=0.01
                  )
# Total optimizer steps across all epochs.
total_steps = len(train_dataloader) * epochs
# Linear warmup over the first 6% of steps.
num_warmup_steps = total_steps * 0.06
# scheduler: linear warmup then linear decay to zero.
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps)
# + id="iSey9ZGBJflX"
import torch.nn.functional as F
# roc curve for logistic regression model with optimal threshold
from numpy import sqrt
from numpy import argmax
from sklearn.metrics import roc_curve
from matplotlib import pyplot
# + [markdown] id="bT7UgvUExxiO"
# # Training Function
# + colab={"base_uri": "https://localhost:8080/"} id="V0rw59dfMzUv" outputId="ff2f03ca-9d4d-43fe-a53d-a6c1cfe572b6"
# !python -m nltk.downloader punkt
# + colab={"base_uri": "https://localhost:8080/"} id="edm9bkt6Qew7" outputId="14272250-9cc8-4ff4-c673-c76a6aafa620"
pip install py-readability-metrics
# + id="eMU30Ssuppdz"
# concatenate text of sentences within corresponding indices
def concatenate_texts(sentences, indices):
    """Collect the sentences at the given indices, each ending in punctuation.

    Args:
        sentences: Iterable of sentence strings.
        indices: Collection of positions to keep.

    Returns:
        list: Selected sentences; a '.' is appended to any sentence that
        does not already end with '.', '!' or '?'.
    """
    tot_sentences = []
    end_token = ('.', '!', '?')
    for idx, sentence in enumerate(sentences):
        if idx in indices:
            # Guard the empty string (the original raised IndexError on '').
            if not sentence or sentence[-1] not in end_token:
                sentence = sentence + '.'
            tot_sentences.append(sentence)
    return tot_sentences
# + id="xD8_kg44_B9M"
import numpy as np
import time
# + id="rIwkkq60vkjs"
from sklearn.metrics import f1_score, roc_auc_score
import torch.nn.functional as F
from readability import Readability
# training function, return trained model
def train_model(model, train_dataloader, validation_dataloader, validation_labels, optimizer, scheduler, val_sentences, epoch):
    """Fine-tune `model` and evaluate it after every epoch.

    Each epoch runs one pass over `train_dataloader` (cross-entropy loss,
    gradient clipping at 1.0, optimizer + scheduler step per batch) followed
    by a full validation pass that prints accuracy/loss/ROC-AUC/F1 and, for
    the correctly vs. incorrectly classified validation sentences, tf-idf
    self-similarity matrices and readability scores.

    Args:
        model: classifier returning logits; column 1 of the softmax is used
            as the positive score, so binary classification is assumed.
        train_dataloader / validation_dataloader: yield
            (input_ids, attention_mask, labels) tuples.
        validation_labels: flat gold labels, index-aligned with the
            concatenated validation batches.
        optimizer, scheduler: stepped once per training batch.
        val_sentences: raw validation sentences, index-aligned with
            validation_labels.
        epoch: number of training epochs to run.

    Returns:
        (model, corr_cos_sims, incorr_cos_sims): the trained model plus one
        tf-idf cosine-similarity matrix per epoch for the correctly and for
        the incorrectly classified validation sentences.

    Relies on module-level globals: device, trange, nn, np, pd, F,
    concatenate_texts, get_tf_idf_query_similarity, Readability,
    roc_auc_score, f1_score.
    """
    t = []  # NOTE(review): unused — immediately shadowed by the generator variable below
    loss_fn = nn.CrossEntropyLoss()
    # create a list to store cosine similarity matrices
    corr_cos_sims = []
    incorr_cos_sims = []
    # Store loss and accuracy for plotting
    train_loss_set = []
    # Number of training epochs (authors recommend between 2 and 4)
    epochs = epoch
    # trange is a tqdm wrapper around the normal python range
    for _ in trange(epochs, desc="Epoch"):
        # Training
        # Set model to training mode (as opposed to evaluation mode)
        model.train()
        # Tracking variables
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        # Train the data for one epoch
        for step, batch in enumerate(train_dataloader):
            # Add batch to GPU
            # Unpack the inputs from our dataloader
            b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)
            # token ids / masks must be int64 for the embedding lookup
            b_input_ids = b_input_ids.clone().detach().to(torch.int64)
            b_attn_mask = b_attn_mask.clone().detach().to(torch.int64)
            # Clear out the gradients (by default they accumulate)
            model.zero_grad()
            # Forward pass
            logits = model(b_input_ids, b_attn_mask)
            # Compute loss and accumulate the loss values
            loss = loss_fn(logits, b_labels)
            train_loss_set.append(loss.item())
            # Backward pass
            loss.backward()
            # Clip the norm of the gradients to 1.0 to prevent "exploding gradients"
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            # Update parameters and the learning rate
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            # Update tracking variables
            tr_loss += loss.item()
            nb_tr_examples += b_input_ids.size(0)
            nb_tr_steps += 1
        print("Train loss: {}".format(tr_loss/nb_tr_steps))
        # Validation
        # Put model in evaluation mode to evaluate loss on the validation set
        model.eval()
        # Tracking variables
        val_accuracy = []
        val_loss = []
        y_preds = []
        y_scores = []
        # Evaluate data for one epoch
        for batch in validation_dataloader:
            # Add batch to GPU
            batch = tuple(t.to(device) for t in batch)
            # Unpack the inputs from our dataloader
            b_input_ids, b_input_mask, b_labels = batch
            # convert to torch.Long
            b_input_ids = b_input_ids.clone().detach().to(torch.int64)
            b_input_mask = b_input_mask.clone().detach().to(torch.int64)
            # Telling the model not to compute or store gradients, saving memory and speeding up validation
            with torch.no_grad():
                # Forward pass, calculate logit predictions
                logits = model(b_input_ids, b_input_mask)
            # loss
            loss = loss_fn(logits, b_labels)
            val_loss.append(loss.item())
            # get scores by softmax
            # probability of the positive class (column 1) — assumes 2 classes
            y_scores.append(F.softmax(logits,dim=1)[:,1].cpu().detach().numpy())
            # Get the predictions
            preds = torch.argmax(logits, dim=1).flatten()
            y_preds.append(preds.cpu().numpy())
            # Calculate the accuracy rate
            accuracy = (preds == b_labels).cpu().numpy().mean()
            val_accuracy.append(accuracy)
        # Compute the average accuracy and loss over the validation set.
        # NOTE(review): per-batch means are averaged, so a smaller final batch
        # is slightly over-weighted relative to full batches.
        val_loss = np.mean(val_loss)
        val_accuracy = np.mean(val_accuracy)
        # flatten y_preds and y_scores
        y_preds = [val for sublist in y_preds for val in sublist]
        y_scores = [val for sublist in y_scores for val in sublist]
        # compare val labels/preds and get their corresponding indices
        corr_idx = []
        incorr_idx = []
        bool_arr = np.equal(np.array(validation_labels), np.array(y_preds))
        for idx, val in enumerate(bool_arr):
            if val == True:
                corr_idx.append(idx)
            else:
                incorr_idx.append(idx)
        # split the validation sentences into correctly / incorrectly classified
        corr_sentences = concatenate_texts(val_sentences, corr_idx)
        incorr_sentences = concatenate_texts(val_sentences, incorr_idx)
        # calculate similarity (tf-idf self-similarity matrix for each group)
        cos_sim_corr = get_tf_idf_query_similarity(pd.Series(corr_sentences), pd.Series(corr_sentences))
        cos_sim_incorr = get_tf_idf_query_similarity(pd.Series(incorr_sentences), pd.Series(incorr_sentences))
        corr_cos_sims.append(cos_sim_corr)
        incorr_cos_sims.append(cos_sim_incorr)
        # print("\ntf-idf cosine similarity for correctly classified sentences: {}".format(cos_sim_corr))
        # print("\ntf-idf cosine similarity for incorrectly classified sentences: {}".format(cos_sim_incorr))
        # calculate readability scores
        # join without separators; concatenate_texts already guarantees each
        # sentence ends in terminal punctuation
        corr_sentences = ''.join(str(s) for s in corr_sentences)
        incorr_sentences = ''.join(str(s) for s in incorr_sentences)
        r = Readability(corr_sentences)
        dc = r.dale_chall()
        gf = r.gunning_fog()
        f = r.flesch()
        print("\nDale Chall readability score for correctly classified sentences: {}".format(dc.score))
        print("Gunning Fog readability score for correctly classified sentences: {}".format(gf.score))
        print("Flesch readability score for correctly classified sentences: {}".format(f.score))
        r = Readability(incorr_sentences)
        dc = r.dale_chall()
        gf = r.gunning_fog()
        f = r.flesch()
        print("\nDale Chall readability score for incorrectly classified sentences: {}".format(dc.score))
        print("Gunning Fog readability score for incorrectly classified sentences: {}".format(gf.score))
        print("Flesch readability score for incorrectly classified sentences: {}".format(f.score))
        # print evaluation results
        print("\nValidation Accuracy: {}".format(val_accuracy))
        print("Validation Loss: {}".format(val_loss))
        print("Validation ROC AUC: {}".format(roc_auc_score(validation_labels, y_scores)))
        print("Validation F1 score: {}".format(f1_score(validation_labels, y_preds)))
    return model, corr_cos_sims, incorr_cos_sims
# + [markdown] id="XSDrK2E6SD_A"
# # Output
# + id="SVfPWyhmYPIm" colab={"base_uri": "https://localhost:8080/"} outputId="067c540e-9aa1-4361-edd7-8e4bb635a8d3"
# pooler: cnn, finetune layer: bilstm, pooled layers: [-1, -2, -3, -4] hidden states
# NOTE(review): the comment above disagrees with the model constructed earlier
# (pooler_type='fc', finetune_type='cnn1d', pooling_layers=[-1]) — confirm which
# configuration this run actually used.
# get validation sentences from validation set indices
# assumes the first 6920 rows of df are the validation sentences — TODO confirm
val_sentences = df.sentence[:6920]
epoch = 4
model, corr_cos_sims, incorr_cos_sims = train_model(model, train_dataloader, validation_dataloader, validation_labels, optimizer, scheduler, val_sentences, epoch)
# + id="_X1JZiQbnQKl"
# report the mean of the under-diagonal similarity entries
def calc_mean_sim(corr_cos_sims, incorr_cos_sims, indice):
    """Print the mean of the strictly-lower-triangular entries of the
    similarity matrix stored at position ``indice`` in each list
    ('corr' = correctly classified, 'incorr' = incorrectly classified)."""
    for label, matrices in (('corr', corr_cos_sims), ('incorr', incorr_cos_sims)):
        matrix = matrices[indice]
        rows, cols = matrix.shape
        # indices of entries strictly below the main diagonal (k=-1)
        lower = np.tril_indices(n=rows, k=-1, m=cols)
        print(f'{label} similarity of under diagonal mean: {matrix[lower].mean()}')
# + colab={"base_uri": "https://localhost:8080/"} id="QQXeCfMux_zG" outputId="87510488-b17b-4b41-8c46-001bbb4e25b5"
# Mean under-diagonal similarity for the first stored epoch's matrices.
calc_mean_sim(corr_cos_sims, incorr_cos_sims, 0)
# + id="BctZlN-7yTxc"
# plot similarity matrices
import seaborn as sns; sns.set_theme()
def plot_sim(corr_cos_sims, incorr_cos_sims, indice):
    """Draw a heatmap for each of the two cosine-similarity matrices stored at
    position ``indice``: correctly and incorrectly classified sentences."""
    for matrices, title in ((corr_cos_sims, 'corr similarity'),
                            (incorr_cos_sims, 'incorr similarity')):
        # fresh figure per matrix; sns.heatmap draws on the current axes
        plt.subplots(figsize = (15, 12))
        heat_ax = sns.heatmap(matrices[indice])
        heat_ax.set_title(title)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="qtmVr_NBzILk" outputId="2f2e361e-b66e-4f3b-d1c2-c0213a5965ed"
# Heatmaps for the first stored epoch's similarity matrices.
plot_sim(corr_cos_sims, incorr_cos_sims, 0)
# + id="Lg-ypGb1RDC7"
# List running python processes (Colab housekeeping).
# !ps -aux|grep python
# + id="JA_m8AksjxAe"
# Kill stale kernel processes by PID to free GPU memory.
# NOTE(review): PIDs are machine/session specific — re-check with `ps` first.
# !kill -9 1919 2790 2871 2873
# + id="yCBptQOL7t_0"
from google.colab import drive
drive.mount('/content/drive')
# + id="_ZNF4Gdmgun7"
# Persist the fine-tuned weights (state_dict only) to Google Drive.
model_save_name = 'roberta_large_finetuned'
path = F"/content/drive/My Drive/Bert_saved/{model_save_name}"
torch.save(model.state_dict(), path)
# + id="Y0Z0Dxj0gvQ7"
# check parameters
for param in model.parameters():
    print(param)
# + id="1W27XZ62eDnz"
# Release cached GPU memory held by PyTorch's allocator.
torch.cuda.empty_cache()
# + id="Wglvb8UYjLBM"
# Drop the model reference so its GPU tensors can be garbage-collected.
del model
# + id="NbxoHrR3zDdT" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="0eeea5f0-07ea-4290-93f9-901709997b08"
# Confirm GPU memory was actually released.
# !nvidia-smi
# ==== end of notebook: Project.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import nose.tools
# Write your imports here
# # Data Tidying and Cleaning Lab
# ## Reading, tidying and cleaning data. Preparing data for exploration, mining, analysis and learning
# ### Problem 1. Read the dataset (2 points)
# The dataset [here](http://archive.ics.uci.edu/ml/datasets/Auto+MPG) contains information about fuel consumption in different cars.
#
# Click the "Data Folder" link and read `auto_mpg.data` into Python. You can download it, if you wish, or you can read it directly from the link.
#
# Give meaningful (and "Pythonic") column names, as per the `auto_mpg.names` file:
# 1. mpg
# 2. cylinders
# 3. displacement
# 4. horsepower
# 5. weight
# 6. acceleration
# 7. model_year
# 8. origin
# 9. car_name
# + deletable=false nbgrader={"checksum": "01dd7404c375d7c55e078528f4f2e82a", "grade": false, "grade_id": "read_data", "locked": false, "schema_version": 1, "solution": true}
# Read the whitespace-delimited UCI Auto MPG data file (no header row).
# '?' marks unknown horsepower and is handled later, in Problem 3; car names
# are double-quoted, which the csv tokenizer respects.
mpg_data = pd.read_csv(
    "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data",
    sep=r"\s+",
    header=None,
    names=["mpg", "cylinders", "displacement", "horsepower", "weight",
           "acceleration", "model_year", "origin", "car_name"],
)
# + deletable=false editable=false nbgrader={"checksum": "2ce6158989415e079a009ae021e4fa62", "grade": true, "grade_id": "read_data_tests", "locked": true, "points": 2, "schema_version": 1, "solution": false}
nose.tools.assert_is_not_none(mpg_data)
# -
# Print the first 4 rows in the dataset to get a feel of what it looks like:
# + deletable=false nbgrader={"checksum": "95870dca1942307927d17b24f8058909", "grade": false, "grade_id": "cell-80f1e6004aaafef8", "locked": false, "schema_version": 1, "solution": true}
# Peek at the first 4 rows to get a feel of what the data looks like.
print(mpg_data.head(4))
# -
# ### Problem 2. Inspect the dataset (1 point)
# Write a function which accepts a dataset and returns the number of observations and features in it, like so:
#
# ``` 10 observations on 15 features```
#
# Where 10 and 15 should be replaced with the real numbers. Test your function with the `auto_mpg` dataset.
#
# Make sure the function works with other datasets (don't worry about "1 features" or "1 observations", just leave it as it is).
# + deletable=false nbgrader={"checksum": "b1d7dcc8748a015d88620eaaa5c9f954", "grade": false, "grade_id": "get_shape_function", "locked": false, "schema_version": 1, "solution": true}
def observations_and_features(dataset):
    """
    Returns the number of observations and features in the provided dataset
    """
    # A DataFrame's shape is (rows, columns) == (observations, features).
    observations, features = dataset.shape
    return "{} observations on {} features".format(observations, features)
# + deletable=false editable=false nbgrader={"checksum": "f7dbfd9dc8c499bfd5a29ea97c4ff14f", "grade": true, "grade_id": "get_shape_function_tests", "locked": true, "points": 1, "schema_version": 1, "solution": false}
print(observations_and_features(mpg_data))
# -
# Inspect the data types for each column.
# + deletable=false nbgrader={"checksum": "2104930892a2916289f265b192e17f8f", "grade": false, "grade_id": "cell-152f652655c53f2a", "locked": false, "schema_version": 1, "solution": true}
# Inspect the data type of each column (per the lab, horsepower reads in as a
# string/object column because of the '?' placeholders — fixed in Problem 3).
print(mpg_data.dtypes)
# -
# ### Problem 3. Correct errors (1 point)
# The `horsepower` column looks strange. It's a string but it must be a floating-point number. Find out why this is so and convert it to floating-point number.
# + deletable=false nbgrader={"checksum": "752895e02c8832dd852dde6ec3f15782", "grade": false, "grade_id": "convert_to_numeric", "locked": false, "schema_version": 1, "solution": true}
# '?' marks unknown horsepower, which forced the column to dtype object;
# coerce those markers to NaN and everything else to float64.
mpg_data["horsepower"] = pd.to_numeric(mpg_data["horsepower"], errors="coerce")
# + deletable=false editable=false nbgrader={"checksum": "67c159bf5ec29e072929da20b161b75a", "grade": true, "grade_id": "convert_to_numeric_tests", "locked": true, "points": 1, "schema_version": 1, "solution": false}
nose.tools.assert_equal(mpg_data.horsepower.dtype, "float64")
# -
# ### Problem 4. Missing values: inspection (1 point)
# We saw that the `horsepower` column contained null values. Display the rows which contain those values. Assign the resulting dataframe to the `unknown_hp` variable.
# + deletable=false nbgrader={"checksum": "0753f2d418958209cba57f14e3ca1394", "grade": false, "grade_id": "unknown_hp", "locked": false, "schema_version": 1, "solution": true}
def get_unknown_hp(dataframe):
    """
    Returns the rows in the provided dataframe where the "horsepower" column is NaN
    """
    # Boolean mask of missing horsepower values selects the matching rows.
    unknown_hp = dataframe[dataframe["horsepower"].isnull()]
    return unknown_hp
# + deletable=false editable=false nbgrader={"checksum": "de0f3eb13e9fd31c82611031b77e3993", "grade": true, "grade_id": "unknown_hp_tests", "locked": true, "points": 1, "schema_version": 1, "solution": false}
cars_with_unknown_hp = get_unknown_hp(mpg_data)
print(cars_with_unknown_hp)
# -
# ### Problem 5. Missing data: correction (1 point)
# It seems like the `NaN` values are a small fraction of all values. We can try one of several things:
# * Remove them
# * Replace them (e.g. with the mean power of all cars)
# * Look up the models on the internet and try our best guess on the power
#
# The third one is probably the best but the first one will suffice since these records are too few. Remove those values. Save the dataset in the same `mpg_data` variable. Ensure there are no more `NaN`s.
# + deletable=false nbgrader={"checksum": "e6c2d5f7577105ee6e010482c29c6f94", "grade": false, "grade_id": "remove_nulls", "locked": false, "schema_version": 1, "solution": true}
# The NaN-horsepower rows are a tiny fraction of the data — drop them and
# renumber the index so downstream positional access stays clean.
mpg_data = mpg_data.dropna(subset=["horsepower"]).reset_index(drop=True)
# + deletable=false editable=false nbgrader={"checksum": "6e7e2f4e6fefe2cc58221893b5d7b3aa", "grade": true, "grade_id": "remove_nulls_test", "locked": true, "points": 1, "schema_version": 1, "solution": false}
nose.tools.assert_equal(len(get_unknown_hp(mpg_data)), 0)
# -
# ### Problem 6. Years of production (1 + 1 points)
# Display all unique model years. Assign them to the variable `model_years`.
# + deletable=false nbgrader={"checksum": "8ba2235d4a8f83ea9434fc90a1ddc80a", "grade": false, "grade_id": "model_years", "locked": false, "schema_version": 1, "solution": true}
def get_unique_model_years(dataframe):
    """
    Returns the unique values of the "model_year" column
    of the dataframe
    """
    # Series.unique preserves order of first appearance and returns an ndarray.
    model_years = dataframe["model_year"].unique()
    return model_years
# + deletable=false editable=false nbgrader={"checksum": "4c94cfe872ceb02a22837ccfe4703449", "grade": true, "grade_id": "model_years_test", "locked": true, "points": 1, "schema_version": 1, "solution": false}
model_years = get_unique_model_years(mpg_data)
print(model_years)
# -
# These don't look so good. Convert them to real years, like `70 -> 1970, 71 -> 1971`. Replace the column values in the dataframe.
# + deletable=false nbgrader={"checksum": "f147ac3f6d2a1eb4de54e68b6e9f4ad4", "grade": false, "grade_id": "model_year", "locked": false, "schema_version": 1, "solution": true}
# Convert two-digit model years to real years: 70 -> 1970, 71 -> 1971, ...
# (all cars in this dataset are from the 1970s/early 1980s).
mpg_data["model_year"] = mpg_data["model_year"] + 1900
# + deletable=false editable=false nbgrader={"checksum": "aa8901ac15f7ada7c47953896674e4ce", "grade": true, "grade_id": "model_year_test", "locked": true, "points": 1, "schema_version": 1, "solution": false}
model_years = get_unique_model_years(mpg_data)
print(model_years)
# -
# ### Problem 7. Exploration: low-power cars (1 point)
# The data looks quite good now. Let's try some exploration.
#
# Write a function to find the cars which have the smallest number of cylinders and print their model names. Return a list of car names.
# + deletable=false nbgrader={"checksum": "49553dd5a9ef2cea7c1501bff02f8827", "grade": false, "grade_id": "car_names", "locked": false, "schema_version": 1, "solution": true}
def get_model_names_smallest_cylinders(dataframe):
    """
    Returns the names of the cars with the smallest number of cylinders
    """
    # Find the minimum cylinder count, then select the matching car names.
    min_cylinders = dataframe["cylinders"].min()
    car_names = dataframe.loc[dataframe["cylinders"] == min_cylinders, "car_name"]
    return car_names
# + deletable=false editable=false nbgrader={"checksum": "5bb4f01801d149605589d8e5bdec056f", "grade": true, "grade_id": "car_names_test", "locked": true, "points": 1, "schema_version": 1, "solution": false}
car_names = get_model_names_smallest_cylinders(mpg_data)
print(car_names)
nose.tools.assert_true(car_names.shape == (4,) or car_names.shape == (4, 1))
# -
# ### Problem 8. Exploration: correlations (1 point)
# Finally, let's see some connections between variables. These are also called **correlations**.
#
# Find how to calculate correlations between different columns using `pandas`.
#
# **Hint:** The correlation function in `pandas` returns a `DataFrame` by default. You need only one value from it.
#
# Create a function which accepts a dataframe and two columns and prints the correlation coefficient between those two columns.
# + deletable=false nbgrader={"checksum": "83c635c1652bb22eae247fe4db073fd8", "grade": false, "grade_id": "correlation", "locked": false, "schema_version": 1, "solution": true}
def calculate_correlation(dataframe, first_column, second_column):
    """
    Calculates and returns the correlation coefficient between the two columns in the dataframe.
    """
    # Series.corr on the two columns returns the single (Pearson, by default)
    # correlation coefficient rather than a full correlation matrix.
    correlation = dataframe[first_column].corr(dataframe[second_column])
    return correlation
# + deletable=false editable=false nbgrader={"checksum": "95da71238c62b1ceaf48f316ce32d747", "grade": true, "grade_id": "cell-457c5946f2350991", "locked": true, "points": 1, "schema_version": 1, "solution": false}
hp_weight = calculate_correlation(mpg_data, "horsepower", "weight")
print("Horsepower:Weight correlation coefficient:", hp_weight)
nose.tools.assert_almost_equal(hp_weight, 0.864537737574, delta = 0.01)
# ==== end of notebook: 02.Data_Science/02.Data_Tidying_and_Cleaning/Data Tidying and Cleaning Lab.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Monetary Economics: Chapter 6
# ### Preliminaries
# +
# This line configures matplotlib to show figures embedded in the notebook,
# instead of opening a new window for each figure. More about that later.
# If you are using an old version of IPython, try using '%pylab inline' instead.
# %matplotlib inline
from pysolve3.model import Model
from pysolve3.utils import is_close,round_solution
import matplotlib.pyplot as plt
# -
# ### Model OPENM3
# +
def create_openm3_model():
    """Build model OPENM3 (Godley & Lavoie, 'Monetary Economics', ch. 6).

    A two-country (North 'N' / South 'S') stock-flow-consistent open economy
    with a fixed exchange rate: trade imbalances are settled by central banks
    in gold, interest rates react to changes in gold reserves (phi*), and the
    propensity to consume reacts to interest rates (iota*).
    """
    model = Model()
    model.set_var_default(0)
    # --- endogenous variables (suffix N = North, S = South) ---
    model.var('BcbN', desc='Bills held by the Central Bank in Country N')
    model.var('BcbS', desc='Bills held by the Central Bank in Country S')
    model.var('BhN', desc='Bills held by households, Country N')
    model.var('BhS', desc='Bills held by households, Country S')
    model.var('BsN', desc='Supply of government bills in Country N')
    model.var('BsS', desc='Supply of government bills in Country S')
    model.var('CN', desc='Consumption, Country N')
    model.var('CS', desc='Consumption, Country S')
    model.var('HhN', desc='Cash held by households, Country N')
    model.var('HhS', desc='Cash held by households, Country S')
    model.var('HsN', desc='Supply of cash in Country N')
    model.var('HsS', desc='Supply of cash in Country S')
    model.var('IMN', desc='Imports, Region N')
    model.var('IMS', desc='Imports, Region S')
    model.var('ORN', desc='Gold holding by Central bank in Country N')
    model.var('ORS', desc='Gold holding by Central bank in Country S')
    model.var('PgN', desc='Price of gold in Country N')
    model.var('PgS', desc='Price of gold in Country S')
    model.var('RN', desc='Interest rate on bills in Country N')
    model.var('RS', desc='Interest rate on bills in Country S')
    model.var('TN', desc='Tax payments, Country N')
    model.var('TS', desc='Tax payments, Country S')
    model.var('VN', desc='Household wealth, Country N')
    model.var('VS', desc='Household wealth, Country S')
    model.var('XN', desc='Exports, Country N')
    model.var('XS', desc='Exports, Country S')
    model.var('XR', desc='Exchange rate (units of currency S for one unit of currency N)')
    model.var('YN', desc='National income, Country N')
    model.var('YS', desc='National income, Country S')
    model.var('YDN', desc='National disposable income, Country N')
    model.var('YDS', desc='National disposable income, Country S')
    model.var('alpha1N', desc='Propensity to consume out of income, Country N')
    model.var('alpha1S', desc='Propensity to consume out of income, Country S')
    model.set_param_default(0)
    # --- parameters and exogenous policy settings ---
    model.param('alpha10N', desc='Propensity to consume out of income, Country N, exogenous')
    model.param('alpha10S', desc='Propensity to consume out of income, Country S, exogenous')
    model.param('alpha2N', desc='Propensity to consume out of wealth, Country N')
    model.param('alpha2S', desc='Propensity to consume out of wealth, Country S')
    model.param('iotaN', desc='Parameter linking the propensity to consume to the interest rate for Country N')
    model.param('iotaS', desc='Parameter linking the propensity to consume to the interest rate for Country S')
    model.param('lambda0N', desc='Parameter in asset demand function, Country N')
    model.param('lambda0S', desc='Parameter in asset demand function, Country S')
    model.param('lambda1N', desc='Parameter in asset demand function, Country N')
    model.param('lambda1S', desc='Parameter in asset demand function, Country S')
    model.param('lambda2N', desc='Parameter in asset demand function, Country N')
    model.param('lambda2S', desc='Parameter in asset demand function, Country S')
    model.param('muN', desc='Import propensity, Country N')
    model.param('muS', desc='Import propensity, Country S')
    model.param('phiN', desc='Parameter in fiscal policy reaction function, Country N')
    model.param('phiS', desc='Parameter in fiscal policy reaction function, Country S')
    model.param('thetaN', desc='Tax rate in Country N')
    model.param('thetaS', desc='Tax rate in Country S')
    model.param('GN', desc='Government expenditure, Region N')
    model.param('GS', desc='Government expenditure, Region S')
    model.param('Pgbar', desc='Price of gold, set exogenously')
    model.param('XRbar', desc='Exchange rate, set exogenously')
    # --- equations: national income and trade ---
    model.add('YN = CN + GN + XN - IMN')
    model.add('YS = CS + GS + XS - IMS')
    model.add('IMN = muN * YN')
    model.add('IMS = muS * YS')
    model.add('XN = IMS/XR')
    model.add('XS = IMN*XR')
    # --- disposable income and taxes (interest accrues on last period's bills) ---
    model.add('YDN = YN - TN + RN(-1)*BhN(-1)')
    model.add('YDS = YS - TS + RS(-1)*BhS(-1)')
    model.add('TN = thetaN * (YN + RN(-1)*BhN(-1))')
    model.add('TS = thetaS * (YS + RS(-1)*BhS(-1))')
    # --- household wealth accumulation and consumption ---
    model.add('VN - VN(-1) = YDN - CN')
    model.add('VS - VS(-1) = YDS - CS')
    model.add('CN = alpha1N*YDN + alpha2N*VN(-1)')
    model.add('CS = alpha1S*YDS + alpha2S*VS(-1)')
    # --- portfolio allocation: cash is the residual of wealth after bills ---
    model.add('HhN = VN - BhN')
    model.add('HhS = VS - BhS')
    model.add('BhN = VN*(lambda0N + lambda1N*RN - lambda2N*(YDN/VN))')
    model.add('BhS = VS*(lambda0S + lambda1S*RS - lambda2S*(YDS/VS))')
    # --- government financing and central-bank bill holdings ---
    model.add('BsN - BsN(-1) = (GN + RN(-1)*BsN(-1)) - (TN + RN(-1)*BcbN(-1))')
    model.add('BsS - BsS(-1) = (GS + RS(-1)*BsS(-1)) - (TS + RS(-1)*BcbS(-1))')
    model.add('BcbN = BsN - BhN')
    model.add('BcbS = BsS - BhS')
    # --- gold reserves absorb the gap between cash issue and bill purchases ---
    model.add('ORN - ORN(-1)= (HsN - HsN(-1) - (BcbN - BcbN(-1)))/PgN')
    model.add('ORS - ORS(-1)= (HsS - HsS(-1) - (BcbS - BcbS(-1)))/PgS')
    model.add('HsN = HhN')
    model.add('HsS = HhS')
    # --- gold price and fixed exchange rate ---
    model.add('PgN = Pgbar')
    model.add('PgS = PgN*XR')
    model.add('XR = XRbar')
    # --- interest rates react to last period's loss/gain of gold reserves ---
    model.add('RN = RN(-1) - phiN*((ORN(-1) - ORN(-2))*PgN(-1))/ORN(-1)')
    model.add('RS = RS(-1) - phiS*((ORS(-1) - ORS(-2))*PgS(-1))/ORS(-1)')
    # --- propensity to consume falls when the interest rate rises ---
    model.add('alpha1N = alpha10N - iotaN*RN(-1)')
    model.add('alpha1S = alpha10S - iotaS*RS(-1)')
    return model
# Baseline parameterisation for OPENM3 (steady-state values from the book).
openm3_parameters = {'alpha10N': 0.6125,
                     'alpha10S': 0.7125,
                     'alpha2N': 0.4,
                     'alpha2S': 0.3,
                     'iotaN': 0.5,
                     'iotaS': 0.5,
                     'lambda0N': 0.635,
                     'lambda0S': 0.67,
                     'lambda1N': 5,
                     'lambda1S': 6,
                     'lambda2N': 0.01,
                     'lambda2S': 0.07,
                     'muN': 0.18781,
                     'muS': 0.18781,
                     'phiN': 0.005,
                     'phiS': 0.005,
                     'thetaN': 0.2,
                     'thetaS': 0.2}
# Exogenous policy settings: gold price, government spending, fixed exchange rate.
openm3_exogenous = {'Pgbar': 1,
                    'GN': 20,
                    'GS': 20,
                    'XRbar': 1}
# Initial (stationary-state) values for stocks, rates and prices.
openm3_variables = {'BcbN': 11.622,
                    'BcbS': 11.622,
                    'BhN': 64.865,
                    'BhS': 64.865,
                    'BsN': 76.486,
                    'BsS': 76.486,
                    'ORN': 10,
                    'ORS': 10,
                    'VN': 86.487,
                    'VS': 86.486,
                    'HhN': 86.487 - 64.865,
                    'HhS': 86.486 - 64.865,
                    'HsN': 86.487 - 64.865,
                    'HsS': 86.486 - 64.865,
                    'RN': 0.025,
                    'RS': 0.025,
                    'PgN': 1,
                    'PgS': 1,
                    'XR': 1}
# -
# ### Scenario: Model OPENM3, increase in propensity to import of country S
# +
muS = create_openm3_model()
muS.set_values(openm3_parameters)
muS.set_values(openm3_exogenous)
muS.set_values(openm3_variables)
# run to convergence
# Give the system more time to reach a steady state
for _ in range(15):
    muS.solve(iterations=100, threshold=1e-6)
# shock the system
# Permanent increase in the South's import propensity (0.18781 -> 0.2).
muS.set_values({'muS': 0.2})
for _ in range(40):
    muS.solve(iterations=100, threshold=1e-6)
# -
# ###### Figure 6.16
# +
caption = '''
Figure 6.16 Evolution of interest rates, following an increase in the South propensity
to import, with interest rates acting on propensities to consume and reacting to changes
in gold reserves'''
# Skip the first 5 solution periods (pre-shock burn-in) when plotting.
rndata = [s['RN'] for s in muS.solutions[5:]]
rsdata = [s['RS'] for s in muS.solutions[5:]]
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(-0.008, 0.05)
axes.plot(rndata, linestyle='-', color='r')
axes.plot(rsdata, linestyle='--', color='b')
# add labels
plt.text(22, 0.044, 'South interest rate')
plt.text(32, 0.023, 'North interest rate')
# trailing ';' suppresses the text object's repr in notebook output
fig.text(0.1, -.1, caption);
# -
# ###### Figure 6.17
# +
caption = '''
Figure 6.17 Evolution of trade accounts and government balances, following
an increase in the South propensity to import, with interest rates acting
on propensities to consume and reacting to changes in gold reserves'''
tradeNdata = list()
tradeSdata = list()
govtNdata = list()
govtSdata = list()
# Trade balance = exports - imports; government balance = taxes minus spending
# and minus interest on the bills households held in the previous period (s_1).
for i in range(6, len(muS.solutions)):
    s = muS.solutions[i]
    s_1 = muS.solutions[i-1]
    tradeNdata.append(s['XN'] - s['IMN'])
    tradeSdata.append(s['XS'] - s['IMS'])
    govtNdata.append(s['TN'] - (s['GN'] + s['RN']*s_1['BhN']))
    govtSdata.append(s['TS'] - (s['GS'] + s['RS']*s_1['BhS']))
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(-1.6, 1.5)
axes.plot(tradeNdata, linestyle='-', color='k')
axes.plot(govtNdata, linestyle=':', color='r', linewidth=3)
axes.plot(tradeSdata, linestyle='--', color='g')
axes.plot(govtSdata, linestyle='-.', color='b', linewidth=2)
# add labels
plt.text(11, 0.8, 'North trade account')
plt.text(12.5, 0.2, 'North government')
plt.text(12.5, 0.1, 'account')
plt.text(33, -0.6, 'South trade account')
plt.text(29, -1.2, 'South government account')
# trailing ';' suppresses the text object's repr in notebook output
fig.text(0.1, -.1, caption);
# -
# ==== end of notebook: godley_&_lavoie/Python 3 - Chapter 6 Model OPENM3.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# CFG generating «avait un <adj> négatif pour …» as a forward negation trigger
# (Group 557). "\\w+" is emitted literally as a regex-style wildcard token —
# presumably expanded by a downstream matcher, not by generate().
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Determiner2 Adjective3 Adjective6 Adposition7 Trigger_Rule
Verb1 -> "avait"
Determiner2 -> "un"
Adjective3 -> "\\w+"
Adjective6 -> "négatif"
Adposition7 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[557]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# Same pattern as above with present-tense «a» instead of «avait» (Group 557).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Determiner2 Adjective3 Adjective6 Adposition7 Trigger_Rule
Verb1 -> "a"
Determiner2 -> "un"
Adjective3 -> "\\w+"
Adjective6 -> "négatif"
Adposition7 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[557]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «a été négatif» as a backward negation trigger (Group 561).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Auxiliary2 Adjective3 Trigger_Rule
Auxiliary1 -> "a"
Auxiliary2 -> "été"
Adjective3 -> "négatif"
Trigger_Rule -> "|backward|trigger|negated|10|Group[561]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «a été exclu/refusé/…» — backward negation trigger over exclusion verbs
# (Groups 561, 563).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Auxiliary2 Verb3 Trigger_Rule
Auxiliary1 -> "a"
Auxiliary2 -> "été"
Verb3 -> "exclu" | "refusé" | "repoussé" | "rejeté" | "éliminé" | "proscrit"
Trigger_Rule -> "|backward|trigger|negated|10|Group[561, 563]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «n'a pas eu de» — forward negation trigger (Group 569).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary2 Adverb3 Verb4 Determiner5 Trigger_Rule
Auxiliary2 -> "n'a"
Adverb3 -> "pas"
Verb4 -> "eu"
Determiner5 -> "de"
Trigger_Rule -> "|forward|trigger|negated|10|Group[569]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# Infinitive variant «avoir un <adj> négatif pour» — forward negation trigger
# (Group 571).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Determiner2 Adjective3 Adjective6 Adposition7 Trigger_Rule
Verb1 -> "avoir"
Determiner2 -> "un"
Adjective3 -> "\\w+"
Adjective6 -> "négatif"
Adposition7 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[571]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# Plural «ont été écartés/…» — backward negation trigger (Group 573).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Auxiliary2 Verb3 Trigger_Rule
Auxiliary1 -> "ont"
Auxiliary2 -> "été"
Verb3 -> "écartés" | "rejeter" | "éliminer" | "évincer" | "supprimer" | "proscrire" | "exclure" | "éloigner" | "côté" | "enlever" | "récuser" | "gouverner" | "mis à l'écart"
Trigger_Rule -> "|backward|trigger|negated|10|Group[573]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «il a continué à …» — forward termination trigger (Group 575), i.e. ends the
# scope of a preceding negation.
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Pronoun1 Auxiliary2 Verb3 Adposition4 Trigger_Rule
Pronoun1 -> "il"
Auxiliary2 -> "a"
Verb3 -> "continué" | "reconduire" | "perpétuer" | "conserver" | "suivre" | "tenir" | "se poursuivre" | "donner suite" | "se perpétuer" | "s'acharner" | "s'obstiner" | "entretenir" | "perdurer" | "opiniâtrer" | "entêter"
Adposition4 -> "à"
Trigger_Rule -> "|forward|termination|negated|10|Group[575]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «son vieux/…» — forward termination trigger (Group 576).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Determiner1 Noun2 Trigger_Rule
Determiner1 -> "son"
Noun2 -> "vieux" | "vétuste" | "usé" | "passé" | "fatigué" | "séculaire" | "éloigné" | "historique" | "usagé" | "périmé" | "vieilli" | "vieil"
Trigger_Rule -> "|forward|termination|negated|10|Group[576]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire physique» etc. — pseudo-trigger for the 'historical' context
# (Group 578): blocks a false historical reading.
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adjective2 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Adjective2 -> "physique"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire de plainte principale» etc. — pseudo-historical trigger (Group 578).
# NOTE(review): "plainte" appears twice in the Noun3 alternation (duplicate).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adposition2 Noun3 Adjective4 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Adposition2 -> "de"
Noun3 -> "plainte" | "gémissement" | "lamentation" | "protestation" | "reproche" | "plainte"
Adjective4 -> "principale"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire et physique» — pseudo-historical trigger (Group 578).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Coordinating_conjunction2 Adjective3 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Coordinating_conjunction2 -> "et"
Adjective3 -> "physique"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire et …» (bare conjunction) — pseudo-historical trigger (Group 578).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Coordinating_conjunction2 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Coordinating_conjunction2 -> "et"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire pour …» — pseudo-historical trigger (Group 578).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adposition2 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Adposition2 -> "pour"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire et examen/analyse/…» — pseudo-historical trigger (Group 578).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Coordinating_conjunction2 Noun3 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Coordinating_conjunction2 -> "et"
Noun3 -> "examen" | "analyse" | "consultation" | "observation" | "vérification" | "recherche" | "étude" | "auscultation" | "examen médical" | "autopsie" | "dépistage" | "interrogatoire"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
# «histoire de la maladie actuelle» etc. — pseudo-historical trigger (Group 578).
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adposition2 Noun4 Adjective5 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Adposition2 -> "de"
Noun4 -> "la maladie" | "la malaise" | "la mal" | "la trouble" | "l'indisposition" | "la souffrance" | "la syndrome" | "l'infirmité" | "l'incommodité" | "l'atteinte" | "la tare" | "la altération" | "la pathologie" | "la traumatisme" | "la récidive"
Adjective5 -> "actuelle"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
    print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Verb2 Trigger_Rule
Noun1 -> "histoire" | "passé" | "souvenir" | "historique"
Verb2 -> "prenant"
Trigger_Rule -> "|both|pseudo|historical|30|Group[578]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun3 Trigger_Rule
Noun3 -> "#l'histoire" | "#passé" | "#souvenir" | "#l'historique"
Trigger_Rule -> "|forward|trigger|historical|30|Group[586]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Trigger_Rule
Adverb1 -> "toutefois"
Trigger_Rule -> "|forward|termination|negated|10|Group[587]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Trigger_Rule
Noun1 -> "ho"
Trigger_Rule -> "|forward|trigger|historical|30|Group[588]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adposition1 Trigger_Rule
Adposition1 -> "hx"
Trigger_Rule -> "|forward|trigger|historical|30|Group[589]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Subordinating_conjunction1 Adjective2 Trigger_Rule
Subordinating_conjunction1 -> "si"
Adjective2 -> "négatif"
Trigger_Rule -> "|both|pseudo|conditional|30|Group[590]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Subordinating_conjunction1 Trigger_Rule
Subordinating_conjunction1 -> "si"
Trigger_Rule -> "|forward|trigger|conditional|30|Group[591]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adposition1 Pronoun2 Trigger_Rule
Adposition1 -> "en"
Pronoun2 -> "elle"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[592]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adposition1 Determiner2 Trigger_Rule
Adposition1 -> "dans"
Determiner2 -> "son"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[593]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Trigger_Rule
Noun1 -> "autrefois"
Trigger_Rule -> "|both|trigger|historical|30|Group[594]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adposition1 Determiner2 Noun3 Adposition4 Trigger_Rule
Adposition1 -> "dans"
Determiner2 -> "le"
Noun3 -> "contexte" | "situation" | "circonstance" | "cadre" | "conjoncture" | "ambiance" | "atmosphère" | "condition"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[595]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adjective1 Adposition2 Trigger_Rule
Adjective1 -> "incompatible"
Adposition2 -> "avec"
Trigger_Rule -> "|forward|trigger|negated|10|Group[596]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Trigger_Rule
Noun1 -> "indication" | "avertissement" | "prescription" | "directive" | "annotation" | "explication" | "renvoi" | "information" | "note" | "recommandation" | "critère" | "notation" | "suggestion" | "mention" | "symptôme"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[598]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Adjective2 Trigger_Rule
Auxiliary1 -> "est"
Adjective2 -> "négatif"
Trigger_Rule -> "|backward|trigger|negated|10|Group[599]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Adverb2 Trigger_Rule
Auxiliary1 -> "est"
Adverb2 -> "neg"
Trigger_Rule -> "|backward|trigger|negated|10|Group[601]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb2 Adverb3 Trigger_Rule
Verb2 -> "n'est"
Adverb3 -> "plus"
Trigger_Rule -> "|backward|trigger|negated|10|Group[603]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Adverb3 Trigger_Rule
Verb1 -> "n'est"
Adverb3 -> "pas"
Trigger_Rule -> "|forward|trigger|negated|10|Group[605]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Verb2 Trigger_Rule
Auxiliary1 -> "est"
Verb2 -> "exclu" | "refusé" | "repoussé" | "rejeté" | "éliminé" | "proscrit"
Trigger_Rule -> "|backward|trigger|negated|10|Group[607]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Verb2 Trigger_Rule
Auxiliary1 -> "est"
Verb2 -> "arrêté" | "stopper" | "enrayer" | "contenir" | "suspendre" | "juguler" | "terminer" | "finir" | "endiguer" | "cesser" | "barrer" | "empêcher" | "interrompre" | "mettre fin" | "geler"
Trigger_Rule -> "|backward|trigger|negated|10|Group[609]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Adposition2 Noun3 Adposition4 Trigger_Rule
Auxiliary1 -> "est"
Adposition2 -> "à"
Noun3 -> "exclure" | "éliminer" | "rejeter" | "proscrire" | "éloigner" | "supprimer" | "radier"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[611]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Adposition2 Noun3 Trigger_Rule
Auxiliary1 -> "est"
Adposition2 -> "à"
Noun3 -> "exclure" | "éliminer" | "rejeter" | "proscrire" | "éloigner" | "supprimer" | "radier"
Trigger_Rule -> "|backward|trigger|uncertain|30|Group[612]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adposition2 Trigger_Rule
Noun1 -> "manque" | "insuffisance" | "défaut" | "déficience" | "pénurie" | "carence" | "privation" | "lacune" | "omission" | "manquement" | "défaillance" | "rareté" | "oubli" | "faute" | "faiblesse"
Adposition2 -> "de"
Trigger_Rule -> "|forward|trigger|negated|10|Group[615]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Trigger_Rule
Verb1 -> "manquait" | "oublier" | "rater" | "fausser" | "déchoir" | "gâcher" | "omettre" | "enfreindre" | "faillir" | "être absent" | "avoir disparu" | "être en défaut" | "être dénué" | "être dépourvu" | "être disparu"
Trigger_Rule -> "|forward|trigger|negated|10|Group[617]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Ph1 Adjective2 Noun5 Trigger_Rule
Ph1 -> "il y a"
Adjective2 -> "\\> 0"
Noun5 -> "année" | "années" | "an" | "annuités" | "ans" | "annualité" | "semaine"
Trigger_Rule -> "|backward|trigger|historical|30|Group[619]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adjective2 Trigger_Rule
Noun1 -> "l'hiver" |"l'été" |"le printemps" |"Septembre" |"octobre" |"novembre" |"mai" |"dernier mars" |"juin" |"juillet" |"janvier" |"février" |"l'automne" |"décembre" |"août" |"avril"
Adjective2 -> "dernier"
Trigger_Rule -> "|backward|trigger|historical|30|Group[626]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adjective2 Adposition3 Trigger_Rule
Noun1 -> "contributeurs"
Adjective2 -> "probables"
Adposition3 -> "à"
Trigger_Rule -> "|forward|termination|negated|10|Group[642]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Adposition2 Noun3 Adposition4 Trigger_Rule
Adverb1 -> "probablement"
Adposition2 -> "en"
Noun3 -> "cas" | "situation" | "événement" | "possibilité" | "éventualité"
Adposition4 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[642]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Adposition2 Trigger_Rule
Adverb1 -> "probablement"
Adposition2 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[642]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adjective2 Adposition3 Trigger_Rule
Noun1 -> "composante" | "élément" | "ingrédient" | "constituante" | "facteur"
Adjective2 -> "probable"
Adposition3 -> "de"
Trigger_Rule -> "|forward|termination|negated|10|Group[642]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Adverb2 Trigger_Rule
Verb1 -> "reflétant" | "exprimer" | "renvoyer" | "indiquer" | "marquer" | "traduire" | "incarner"
Adverb2 -> "probablement"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[646]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adjective1 Trigger_Rule
Adjective1 -> "probable"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[647]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Adverb2 Trigger_Rule
Verb1 -> "chercher" | "scruter" | "sonder" | "consulter" | "essayer" | "explorer" | "rechercher" | "examiner" | "fouiller" | "prospecter" | "interroger" | "découvrir" | "analyser" | "aller chercher" | "considérer"
Adverb2 -> "tout"
Trigger_Rule -> "|forward|trigger|conditional|30|Group[648]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Trigger_Rule
Verb1 -> "chercher" | "scruter" | "sonder" | "consulter" | "essayer" | "explorer" | "rechercher" | "examiner" | "fouiller" | "prospecter" | "interroger" | "découvrir" | "analyser" | "aller chercher" | "considérer"
Trigger_Rule -> "|forward|trigger|conditional|30|Group[648]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Trigger_Rule
Adverb1 -> "nettement"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[650]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Noun6 Trigger_Rule
Verb1 -> "peut"
Auxiliary2 -> "être"
Verb3 -> "\\w+"
Noun6 -> "sous-estimé" | "minimisé" | "minoré" | "mésestimé" | "déconsidéré" | "méprisé" | "décrié" | "dévalué" | "décrédité" | "discrédité" | "déprécié" | "méjugé" | "dévalorisé" | "sous-évalué"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[651]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Verb2 Trigger_Rule
Auxiliary1 -> "peut"
Verb2 -> "contribuer" | "collaborer" | "concourir" | "coopérer" | "participer" | "servir" | "seconder" | "favoriser" | "agir" | "tendre" | "avoir part" | "prendre part"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[652]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Adjective2 Adposition3 Trigger_Rule
Adverb1 -> "peut-être"
Adjective2 -> "dû"
Adposition3 -> "à"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[653]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Trigger_Rule
Verb1 -> "peut"
Auxiliary2 -> "être"
Verb3 -> "démasquer" | "découvrir" | "montrer" | "révéler" | "dévoiler" | "démontrer" | "trahir" | "deviner" | "lever le masque" | "dénicher" | "déceler" | "dépister" | "débusquer" | "détecter"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[654]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Adposition4 Trigger_Rule
Verb1 -> "peut"
Auxiliary2 -> "être"
Verb3 -> "lié" | "connexe" | "relié" | "imbriqué" | "solidaire" | "analogique" | "conjoint" | "attaché" | "inhérent" | "familier" | "allier" | "rattaché" | "coordonné" | "adjoint" | "assujetti"
Adposition4 -> "à"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[654]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Trigger_Rule
Verb1 -> "peut"
Auxiliary2 -> "être"
Verb3 -> "sous-estimé" | "minimisé" | "minoré" | "mésestimé" | "déconsidéré" | "méprisé" | "décrié" | "dévalué" | "décrédité" | "discrédité" | "déprécié" | "méjugé" | "dévalorisé" | "sous-évalué"
Trigger_Rule -> "|both|pseudo|uncertain|30|Group[654]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Trigger_Rule
Verb1 -> "peut"
Auxiliary2 -> "être"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[659]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Verb2 Trigger_Rule
Auxiliary1 -> "peut"
Verb2 -> "représenter" | "symboliser" | "décrire" | "montrer" | "reproduire" | "dépeindre" | "figurer" | "dessiner" | "peindre" | "exposer" | "présenter" | "signifier" | "exhiber" | "évoquer" | "désigner"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[659]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adjective1 Verb2 Trigger_Rule
Adjective1 -> "puis-je"
Verb2 -> "avoir"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[659]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Auxiliary1 Auxiliary2 Auxiliary3 Verb4 Adposition5 Trigger_Rule
Auxiliary1 -> "peut"
Auxiliary2 -> "avoir"
Auxiliary3 -> "été"
Verb4 -> "précédé" | "devancer" | "annoncer" | "prévenir" | "distancé" | "annoncé" | "devancé" | "amené" | "préludé"
Adposition5 -> "par"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[659]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Trigger_Rule
Adverb1 -> "doucement"
Trigger_Rule -> "|forward|termination|negated|10|Group[666]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Trigger_Rule
Adverb1 -> "doux"
Trigger_Rule -> "|forward|termination|negated|10|Group[666]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Determiner2 Adjective3 Adposition6 Trigger_Rule
Verb1 -> "surveiller" | "veiller" | "inspecter" | "examiner" | "suivre" | "vérifier" | "avoir à l'oeil" | "être à l'affût" | "superviser" | "faire attention"
Determiner2 -> "le"
Adjective3 -> "\\w+"
Adposition6 -> "pour"
Trigger_Rule -> "|forward|trigger|conditional|30|Group[672]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Adjective2 Adposition5 Trigger_Rule
Verb1 -> "surveiller" | "veiller" | "inspecter" | "examiner" | "suivre" | "vérifier" | "avoir à l'oeil" | "être à l'affût" | "superviser" | "faire attention"
Adjective2 -> "\\w+"
Adposition5 -> "pour"
Trigger_Rule -> "|forward|trigger|conditional|30|Group[672]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Adposition4 Trigger_Rule
Verb1 -> "doit"
Auxiliary2 -> "être"
Verb3 -> "exclu" | "refusé" | "repoussé" | "rejeté" | "éliminé" | "proscrit"
Adposition4 -> "pour"
Trigger_Rule -> "|forward|trigger|uncertain|30|Group[678]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Verb1 Auxiliary2 Verb3 Trigger_Rule
Verb1 -> "doit"
Auxiliary2 -> "être"
Verb3 -> "exclu" | "refusé" | "repoussé" | "rejeté" | "éliminé" | "proscrit"
Trigger_Rule -> "|backward|trigger|uncertain|30|Group[679]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adverb1 Trigger_Rule
Adverb1 -> "non"
Trigger_Rule -> "|forward|trigger|negated|10|Group[680]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Adposition2 Trigger_Rule
Noun1 -> "nég"
Adposition2 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[682]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Noun1 Trigger_Rule
Noun1 -> "nég."
Trigger_Rule -> "|backward|trigger|negated|10|Group[684]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
# +
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
cfg_grammar= """
S -> Adjective1 Adposition2 Trigger_Rule
Adjective1 -> "négatif"
Adposition2 -> "pour"
Trigger_Rule -> "|forward|trigger|negated|10|Group[686]|PRE-VALIDATION"
"""
for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):
print(' '.join(sentence))
| notebooks-pre-validators/Validation_Notebook_301_400.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # School Analysis
#
# Trend 1 - School size seems to be the most important factor for student success. Charter schools appear to perform better; however, that seems to be a function of size, not budget.
#
# Trend 2 - Reading scores are higher than math scores regardless of whether it is a Charter school or a District school; they are also consistent between grades.
#
# Trend 3 - Average passing rate was the highest for the schools that have Budget Per Student less than 610, there seems to be no correlation between spending and student success.
# +
# Import Dependencies
import pandas as pd

# Read the school-level records into a data frame.
school_csv_path = "Input Files/02-Homework_04-Pandas_PyCitySchools_Resources_schools_complete.csv"
schools_df = pd.read_csv(school_csv_path)
schools_df.head()
# +
# Read the student-level records and coerce both score columns to numbers.
student_csv_path = "Input Files/02-Homework_04-Pandas_PyCitySchools_Resources_students_complete.csv"
students_df = pd.read_csv(student_csv_path)
for score_column in ("math_score", "reading_score"):
    students_df[score_column] = pd.to_numeric(students_df[score_column])
students_df.head()
# -
# # District Summary
#
#
# ### Create a high level snapshot (in table form) of the district's key metrics, including:
#
# - Total Schools
# - Total Students
# - Total Budget
# - Average Math Score
# - Average Reading Score
# - % Passing Math
# - % Passing Reading
# - Overall Passing Rate (Average of the above two)
# +
# District-wide snapshot: keep only District-type schools, then aggregate.
district_schools = schools_df.loc[schools_df["type"] == "District"]
total_schools = district_schools["type"].count()
total_students = district_schools["size"].sum()
total_budget = district_schools["budget"].sum()

# Match students to district schools by school name.
district_names = district_schools["school_name"].unique()
district_students = students_df.loc[students_df["school_name"].isin(district_names)]
avg_math_score = district_students["math_score"].mean()
avg_reading_score = district_students["reading_score"].mean()

# A score above 59 counts as passing.
student_count = district_students["student_name"].count()
passing_math = district_students.loc[district_students["math_score"] > 59, "math_score"].count()
passing_reading = district_students.loc[district_students["reading_score"] > 59, "reading_score"].count()
perc_passing_math = passing_math / student_count * 100
perc_passing_reading = passing_reading / student_count * 100
ovrall_pass_rate = (perc_passing_math + perc_passing_reading) / 2

# Two-column table: one row per metric, scores formatted to 2 decimals.
district_summary = pd.DataFrame({
    "Metric": ["Total Schools", "Total Students", "Total Budget",
               "Average Math Score", "Average Reading Score",
               "% Passing Math", "% Passing Reading", "Overall Passing Rate"],
    "Value": [total_schools, total_students, total_budget,
              f"{avg_math_score:.2f}", f"{avg_reading_score:.2f}",
              f"{perc_passing_math:.2f}", f"{perc_passing_reading:.2f}",
              f"{ovrall_pass_rate:.2f}"],
})
district_summary
# -
# # School Summary
#
#
# ### Create an overview table that summarizes key metrics about each school, including:
#
#
# - School Name
# - School Type
# - Total Students
# - Total School Budget
# - Per Student Budget
# - Average Math Score
# - Average Reading Score
# - % Passing Math
# - % Passing Reading
# - Overall Passing Rate (Average of the above two)
# +
# Per-school overview: one row per school with budget and outcome metrics.
school_summary = schools_df[["school_name", "type", "size", "budget"]].copy()
school_summary = school_summary.rename(columns={"school_name": "School Name",
                                                "type": "Type",
                                                "size": "Number of Students",
                                                "budget": "Budget"})
# Per-student spending = total budget / enrolment.
school_summary["Per Student Budget"] = schools_df["budget"] / schools_df["size"]

avg_math_scores = []
avg_reading_scores = []
perc_pass_math = []
perc_pass_reading = []
for name in school_summary["School Name"]:
    school_students = students_df.loc[students_df["school_name"] == name]
    student_count = school_students["student_name"].count()
    avg_math_scores.append(school_students["math_score"].mean())
    avg_reading_scores.append(school_students["reading_score"].mean())
    # Passing threshold: score > 59, matching the district summary above.
    # BUG FIX: the original subtracted 1 from both passing counts, which
    # undercounted every school and was inconsistent with the district
    # summary (which uses the raw counts).
    pass_math = school_students.loc[school_students["math_score"] > 59, "math_score"].count()
    perc_pass_math.append(pass_math / student_count * 100)
    pass_reading = school_students.loc[school_students["reading_score"] > 59, "reading_score"].count()
    perc_pass_reading.append(pass_reading / student_count * 100)

school_summary["Average Math Score"] = avg_math_scores
school_summary["Average Reading Score"] = avg_reading_scores
school_summary["% Passing Math"] = perc_pass_math
school_summary["% Passing Reading"] = perc_pass_reading
# Overall rate = simple average of the two passing percentages.
school_summary["Overall Passing Rate"] = (school_summary["% Passing Math"]
                                          + school_summary["% Passing Reading"]) / 2
school_summary.head(15)
# -
# # Top Performing Schools (By Passing Rate)
#
#
# ### Create a table that highlights the top 5 performing schools based on Overall Passing Rate. Include:
#
#
# - School Name
# - School Type
# - Total Students
# - Total School Budget
# - Per Student Budget
# - Average Math Score
# - Average Reading Score
# - % Passing Math
# - % Passing Reading
# - Overall Passing Rate (Average of the above two)
# Five best schools by Overall Passing Rate (descending sort).
sorted_school_summary = school_summary.sort_values(by="Overall Passing Rate", ascending=False)
sorted_school_summary.head(n=5)
# # Bottom Performing Schools (By Passing Rate)
#
#
# - Create a table that highlights the bottom 5 performing schools based on Overall Passing Rate. Include all of the same metrics as above
# Five worst schools: the default ascending sort puts the lowest rates first.
sorted_school_summary = school_summary.sort_values(by="Overall Passing Rate")
sorted_school_summary.head(n=5)
# # Math Scores by Grade
#
#
# - Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
# +
# NOTE(review): the prompt asks for averages per grade *at each school*, but
# this groups by grade only; kept as-is because the reading-score cell below
# reuses `by_grade` (including its "Student ID" column).
students_df = students_df.rename(columns={"grade": "Grade"})
by_grade = students_df.groupby(["Grade"]).mean()
math_by_grade = (by_grade
                 .rename(columns={"math_score": "Math Score"})
                 .drop(columns=["Student ID", "reading_score"]))
math_by_grade.head()
# -
# # Reading Scores by Grade
#
#
# - Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
# +
# Same table as above, but keeping the reading average instead of the math one.
reading_by_grade = (by_grade
                    .rename(columns={"reading_score": "Reading Score"})
                    .drop(columns=["Student ID", "math_score"]))
reading_by_grade.head()
# -
# # Scores by School Spending
#
#
# - Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
#
# - Average Math Score
# - Average Reading Score
# - % Passing Math
# - % Passing Reading
# - Overall Passing Rate (Average of the above two)
# +
# Bucket schools by per-student spending and average each metric per bucket.
budget_summary = school_summary.loc[:, ["Per Student Budget", "Average Math Score",
                                        "Average Reading Score", "% Passing Math",
                                        "% Passing Reading", "Overall Passing Rate"]]
budget_bins = [570, 590, 610, 630, 650, 670]
budget_names = [">570 and <590", ">590 and <610", ">610 and <630", ">630 and <650", ">650 and <670"]
budget_summary["Per Student Budget Range"] = pd.cut(budget_summary["Per Student Budget"],
                                                    budget_bins, labels=budget_names)
budget_summary = budget_summary.drop(columns=["Per Student Budget"])
budget_summary.groupby("Per Student Budget Range").mean()
# -
# # Scores by School Size
#
#
# - Repeat the above breakdown, but this time group schools based on a reasonable approximation of school size (Small, Medium, Large).
# +
# Group schools into Small / Medium / Large by enrolment and average the metrics.
size_summary = school_summary.loc[:, ["Number of Students", "Average Math Score",
                                      "Average Reading Score", "% Passing Math",
                                      "% Passing Reading", "Overall Passing Rate"]]
size_bins = [400, 2000, 3500, 5000]
size_names = ["Small", "Medium", "Large"]
size_summary["School Size"] = pd.cut(size_summary["Number of Students"],
                                     size_bins, labels=size_names)
size_summary = size_summary.drop(columns=["Number of Students"])
size_summary.groupby("School Size").mean()
# -
# # Scores by School Type
#
#
# - Repeat the above breakdown, but this time group schools based on school type (Charter vs. District).
# +
# Compare Charter vs District averages across all outcome metrics.
type_summary = school_summary.loc[:, ["Type", "Average Math Score",
                                      "Average Reading Score", "% Passing Math",
                                      "% Passing Reading", "Overall Passing Rate"]]
type_summary.groupby("Type").mean()
# -
| Academy of Py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import FactorAnalysis
import pandas as pd
import numpy as np
# PCA’s approach to data/dimension reduction is to create one or more index variables from a larger set of measured variables. It does this using a linear combination (basically a weighted average) of a set of variables. The created index variables are called <b>components</b>. components maximize the total variance
# <img src = "PCA.png">. image source = https://www.theanalysisfactor.com
# Load the iris data: 150 samples, 4 numeric features, 3 target classes.
iris = load_iris()
X = iris.data
Y = iris.target
# Shorten the feature names (e.g. "sepal length (cm)" -> "sepal length").
cols = [name[:12].strip() for name in iris.feature_names]
cols
# Perform Scaling on the Data. This means that we need to center and scale the data.
# This way the average value of each column would be 0 and the variance would be 1.
X = StandardScaler().fit_transform(X)
# +
# Project the standardized features onto the two leading principal components.
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(X)
p_Dataframe = pd.DataFrame(data=principalComponents, columns=['PC1', 'PC2'])
# -
p_Dataframe.head(n=2)
# <b>explained_variance_ratio_</b> attribute provides quantification (in percentage) of the informative value of each extracted component. Higher percentage indicates better retention
# +
print('Explained variance by each component: %s'
      % pca.explained_variance_ratio_)
# -
np.sum([0.72962445, 0.22850762])
# Attach the class labels so the projected points can be grouped by target.
new_Df = pd.concat([p_Dataframe, pd.DataFrame(Y, columns=['target'])], axis=1)
new_Df.head()
# A <b>Factor Analysis</b> is a model of the measurement of a latent variable. This latent variable cannot be directly measured with a single variable. Instead, it is seen through the relationships it causes in a set of Y variables. The new variable are called <b>factors</b>. Factors maximize the shared portion of the variance. F - the factor is causing response on 4 variables.
# <img src = "factor.png"/>image source = https://www.theanalysisfactor.com
# Fit a 4-factor model to the standardized iris features.
factor = FactorAnalysis(n_components=4).fit(X)
# +
import pandas as pd
# Loadings table: one row per factor, one column per (shortened) feature name.
loadings = pd.DataFrame(factor.components_, columns=cols)
print(loadings)
# -
# Interpret the numbers as correlation. At the intersection of each factor and feature, a positive number indicates that a positive proportion exists between the two; a negative number points out that they diverge and that one is contrary to the other.
# ### Choosing between PCA or Factor analysis
# 1. If the objective is to just reduce the dimension then use PCA
# 2. Use factor analysis if the objective to uncover hidden factors in the data
# ### PCA Application
# #### face classification with PCA
from sklearn.datasets import fetch_olivetti_faces
# 400 face images (40 subjects x 10 each), shuffled with a fixed seed.
dataset = fetch_olivetti_faces(shuffle=True, random_state=101)
print(dataset.DESCR)
# +
# Hold out the last 50 images for testing.
split = 350
X_train = dataset.data[:split, :]
X_test = dataset.data[split:, :]
Y_train = dataset.target[:split]
Y_test = dataset.target[split:]
# +
# Whitened, randomized-SVD PCA down to 25 components.
n_components = 25
pca = PCA(svd_solver='randomized', n_components=n_components, whiten=True)
pca.fit(X_train)
# -
# The resulting 25-component decomposition retains roughly 80% of the
# variance carried by the 4096 original pixel features.
print(f'Explained variance by {n_components} components: {np.sum(pca.explained_variance_ratio_)}')
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print(X_train_pca.shape, X_test_pca.shape)
# #### Building the classifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from sklearn.svm import SVC
# +
# Hyper-parameter grid for the RBF-kernel SVM: C controls regularization
# strength, gamma the kernel width.
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
# Grid search with 5-fold CV.  The `iid` keyword was deprecated in
# scikit-learn 0.22 and removed in 0.24; passing iid=False raises a
# TypeError on modern versions, so it is dropped (iid=False matches the
# current default behaviour).
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'),
                   param_grid, cv=5)
clf = clf.fit(X_train_pca, Y_train)
print("Best estimator found by grid search:")
print(clf.best_estimator_)
# +
print("Predicting classes on the test set")
Y_pred = clf.predict(X_test_pca)
print(accuracy_score(Y_test, Y_pred))
# -
| Chapter 02/Hello PCA-Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
import sys
from skimage.filters import threshold_otsu
from skimage.morphology import disk
from skimage.morphology import dilation
from PIL import Image
import pytesseract
import os
from resturant_menu import resturant_menu_expert
# Run the project's OCR menu-reading helper on sample images.
# NOTE(review): the meaning of the second argument (4) isn't visible here —
# presumably a mode or scale factor; confirm against resturant_menu.py.
resturant_menu_expert('../img/menu2.jpg' ,4)
# NOTE(review): identical to the call above — looks like an accidental re-run cell.
resturant_menu_expert('../img/menu2.jpg' ,4)
resturant_menu_expert('../img/hand.jpg' ,4)
resturant_menu_expert('../img/hand1.jpg' ,4)
# Load the rotated handwriting sample as grayscale (flag 0) and display it.
orig_img = cv2.imread('../img/hand_rot.jpg' ,0)
plt.figure(figsize=(15,15))
plt.imshow(orig_img ,cmap='gray')
resturant_menu_expert('../img/hand_rot.jpg' ,4)
| src/examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# +
class ActorCritic(nn.Module):
    """Two-headed actor-critic network built from small tanh MLPs.

    The actor head outputs the mean of a Normal policy distribution
    (squashed into [-1, 1] by a final Tanh); the critic head outputs a
    scalar state value.  A learnable per-action scale `std` (initialised
    to zero, passed through softplus in forward) sets the policy spread.

    NOTE(review): the critic torso also ends in Tanh, so predicted values
    are confined to [-1, 1] — unusual for a value head; confirm intended.
    """

    def __init__(self, state_size, action_size, seed = 0):
        # `seed` is accepted for API compatibility but is not used here.
        super().__init__()
        self.hidden = 32
        # Build the heads in a fixed order (actor, then critic) so the
        # random weight initialisation is reproducible under a seed.
        self.actor = self._mlp(state_size, action_size)
        self.critic = self._mlp(state_size, 1)
        # Per-action scale parameter; softplus keeps it strictly positive.
        self.std = nn.Parameter(torch.zeros(action_size))

    def _mlp(self, in_features, out_features):
        # Two hidden tanh layers of width `self.hidden`, tanh on the output.
        return nn.Sequential(
            nn.Linear(in_features, self.hidden),
            nn.Tanh(),
            nn.Linear(self.hidden, self.hidden),
            nn.Tanh(),
            nn.Linear(self.hidden, out_features),
            nn.Tanh(),
        )

    def forward(self, obs):
        value = self.critic(obs)
        action_mean = self.actor(obs)
        policy = torch.distributions.Normal(action_mean, F.softplus(self.std))
        return (value, policy)
actorcritic = ActorCritic(6,3).to(device)
print(actorcritic)
# -
import gym
env = gym.make('Acrobot-v1')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
# The stray expression `d[0]` was removed: `d` is never defined anywhere in
# this notebook, so evaluating it raised NameError.
| Examples/Experiment_network/.ipynb_checkpoints/ppo network-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 2 - Linear Regression
# +
import sys
sys.path.append("../")
from utils import *
np.random.seed(17)
# -
# ## RSS Visualization
# +
# Quadratic bowl z = x^2 + y^2 (the classic RSS surface), drawn twice:
# a 2-D contour map on the left and a 3-D surface on the right.
vals = np.linspace(-5, 5, 100)
grid_x, grid_y = np.meshgrid(vals, vals)
z = grid_x**2 + grid_y**2
fig = make_subplots(rows=1, cols=2, specs=[[{'type': 'scatter'}, {'type': 'scene'}]])
contour_trace = go.Contour(z=z, colorscale='Electric', showscale=False)
surface_trace = go.Surface(x=vals, y=vals, z=z, opacity=.8, colorscale='Electric',
                           contours=dict(z=dict(show=True)))
fig.add_traces(data=[contour_trace, surface_trace], rows=[1, 1], cols=[1, 2])
fig.update_layout(width=800, height=300, scene_aspectmode="cube",
                  scene=dict(camera=dict(eye=dict(x=-1.5, y=-1.5, z=.2))))
fig.write_image(f"../rss.png")
fig.show()
# -
# ## Polynomial Fitting
# +
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# Bias-variance demo: fit a degree-8 polynomial to 10 noisy resamples of a
# degree-4 ground-truth curve and animate the individual fits.
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 30)
y_ = response(x)  # noiseless ground truth
polynomial_degree = 8
frames, preds = [], []
for _ in range(10):
    # Fresh Gaussian noise each round simulates drawing a new sample.
    y = y_ + np.random.normal(scale=2, size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(polynomial_degree), LinearRegression()).fit( x.reshape(-1, 1), y).predict( x.reshape(-1, 1))
    preds.append(y_hat)
    frames.append(go.Frame(
        data=[
            go.Scatter(x=x, y=y_, mode="markers+lines", name="Real Points", marker=dict(color="black", opacity=.7)),
            go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7)),
            go.Scatter(x=x, y=y_hat, mode="markers+lines", name="Predicted Points", marker=dict(color="blue", opacity=.7))],
        layout=go.Layout(title_text=rf"$\text{{Polynomial Fitting of Degree {polynomial_degree} - Sample Noise }}\mathcal{{N}}\left(0,2\right)$",
                         xaxis={"title": r"$x$"},
                         yaxis={"title": r"$y$", "range":[-6,10]}) ))
# Aggregate over the 10 fits to show the mean prediction and a spread band.
# NOTE(review): the band is mean +/- 2*variance, not +/- 2*std — confirm intended.
mean_pred, var_pred = np.mean(preds, axis=0), np.var(preds, axis=0)
for i in range(len(frames)):
    # Prepend the aggregate traces so they render beneath the per-frame ones.
    frames[i]["data"] = (go.Scatter(x=x, y=mean_pred, mode="markers+lines", name="Mean Prediction", line=dict(dash="dash"), marker=dict(color="green", opacity=.7)),
                         go.Scatter(x=x, y=mean_pred-2*var_pred, fill=None, mode="lines", line=dict(color="lightgrey"), showlegend=False),
                         go.Scatter(x=x, y=mean_pred+2*var_pred, fill='tonexty', mode="lines", line=dict(color="lightgrey"), showlegend=False),) + frames[i]["data"]
fig = go.Figure(data=frames[0]["data"],
                frames=frames[1:],
                layout=go.Layout(
                    title=frames[0]["layout"]["title"],
                    xaxis=frames[0]["layout"]["xaxis"],
                    yaxis=frames[0]["layout"]["yaxis"],
                    updatemenus=[dict(visible=True,
                                      type="buttons",
                                      buttons=[dict(label="Play",
                                                    method="animate",
                                                    args=[None, dict(frame={"duration":1000}) ])])] ))
animation_to_gif(fig, f"../poly-deg{polynomial_degree}-diff-samples.gif", 1000)
fig.show()
| code examples/Chapter 2 - Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
# "regex101" referred to https://regex101.com (an interactive regex tester);
# as a bare name it raised NameError, so it is kept only as a comment.
# regex101
# ### RegEx
#
# https://docs.python.org/3/library/re.html
#
# Regular expressions provide a flexible way to search or match (often more complex)
# string patterns in text. A single expression, commonly called a regex, is a string
# formed according to the regular expression language. Python’s built-in re module is
# responsible for applying regular expressions to strings
#
# Functions:
#
# - `findall` Returns a list containing all matches
# - `search` Returns a Match object if there is a match anywhere in the string. If there is more than one match, only the first occurrence of the match will be returned.
# - `split` Returns a list where the string has been split at each match
# - `sub` Replaces one or many matches with a string
import re
alphanumeric = "4298fsfsDFGHv012rvv21v9"
# +
# use findall to pull out the letters only.
# NOTE: the original pattern "[A-z]" is a classic bug — in ASCII order the
# range A..z also matches "[", "\", "]", "^", "_" and "`".  "[A-Za-z]"
# matches letters only, which is what the comment promises.
re.findall("[A-Za-z]", alphanumeric)
# -
#findall using a known pattern can be used to pull pertinent information out of a text value
text = "Sian <EMAIL>"
# local part, "@", domain, ".", then a 2-4 letter TLD
pattern = r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}'
regex = re.compile(pattern, flags=re.IGNORECASE) #ignore the case of A-Z
regex.findall(text)
#using findall to split out the parts of the email address by amending the pattern with ()
# with capture groups present, findall returns (local, domain, TLD) tuples
text = "Sian <EMAIL>"
pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
regex = re.compile(pattern, flags=re.IGNORECASE) #ignore the case of A-Z
regex.findall(text)
my_string = "Kosta likes climbing. Kosta is a great TA so he also loves data"
# +
# return all occurrences of 'Kosta ' (note the trailing space) using re.findall()
re.findall("Kosta ", my_string)
# +
# use re.sub() to replace "TA" by "Triceratops Alligator"; sub() returns a new
# string, so my_string is rebound to the modified text.
my_string = re.sub("TA", "Triceratops Alligator", my_string)
# -
my_string
# search() returns the first Match object (or None); printing shows its repr.
x = re.search("ove", my_string)
print(x)
# \b = word boundary, \w+ = rest of the word, so this grabs "Triceratops".
x = re.search(r"\bT\w+", my_string)
print(x.span())
print(x.group())
multiples= "ear hand foot knee"
# Split the text on runs of whitespace.  A raw string (r'\s+') avoids the
# invalid-escape warning that '\s+' triggers on newer Pythons; the compiled
# pattern is identical.
re.split(r'\s+', multiples)
# **The Match object** has properties and methods used to retrieve information about the search, and the result:
#
# - `.span()` returns a tuple containing the start-, and end positions of the match.
# - `.string` returns the string passed into the function
# - `.group()` returns the part of the string where there was a match
# ### Special Sequences
# \A Returns a match if the specified characters are at the beginning of the string "\AThe"
# \b Returns a match where the specified characters are at the beginning or at the end of a word r"\bain"
# r"ain\b"
# \B Returns a match where the specified characters are present, but NOT at the beginning (or at the end) of a word r"\Bain"
# r"ain\B"
# \d Returns a match where the string contains digits (numbers from 0-9) "\d"
# \D Returns a match where the string DOES NOT contain digits "\D"
# \s Returns a match where the string contains a white space character "\s"
# \S Returns a match where the string DOES NOT contain a white space character "\S"
# \w Returns a match where the string contains any word characters (characters from a to Z, digits from 0-9, and the underscore _ character) "\w"
# \W Returns a match where the string DOES NOT contain any word characters "\W"
# \Z Returns a match if the specified characters are at the end of the string "Spain\Z"
strings = ["there was a dog and there was a cat",
           "if you capitalize this part of the string you will be in trouble",
           "this is the end of the string"]
# Use a special sequence to capitalize the strings above without getting into trouble.
# Raw strings below avoid the invalid-escape warnings that "\A", "\s" and "\d"
# trigger in ordinary string literals; the regex behaviour is unchanged.
for string in strings:
    # \A anchors at the very start of the string only.
    print(re.sub(r"\At", "T", string))
quotes = ["work hard all day, all days",
          "There are 3 types of people: those who can count and those who can't",
          "Nice to be nice",
          "Some people feel the rain, others just get wet",
          "could you complete the exercise? wow"
          ]
# \s consumes the whitespace before a 'w', so only mid-sentence words change.
for i in range(len(quotes)):
    quotes[i]= re.sub(r"\sw"," W", quotes[i])
quotes
# \b is a zero-width word boundary, so words starting with 'w' change too.
for quote in quotes:
    print(re.sub(r"\bw","W",quote))
# use a special sequence to find the numbers in the string
some_nums = "I have had 3 coffees this morning and I plan to drink 7 more"
re.findall(r"\d", some_nums)
# ### `+`One or more occurrences
# +
# use re.sub() together with + to fix the occurrence of too many whitespaces
spaces = "I have too many spaces"
# " +" matches one-or-more spaces; every run collapses to a single space
re.sub(" +", " ", spaces)
# -
# ### `^`- Starts with
# print all veggies that start with a
veggies = ["tomato", "potato", "apple juice",
           "pear", "asparagus are tasty", "peach"]
# ^a anchors the match at the start; \S* then takes the rest of the first word
for veg in veggies:
    print(re.findall(r"^a\S*", veg))
| code/Regex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME> - Healthcare edition
# ### Building a classifier using the [fastai](https://www.fast.ai/) library
from fastai.tabular import *
#hide
# Data lives alongside the notebook in ./covid19_ml_education.
path = Path('./covid19_ml_education')
df = pd.read_csv(path/'covid_ml.csv')
df.head(3)
# ## Independent variable
#
# This is the value we want to predict
y_col = 'urgency_of_admission'
# ## Dependent variable
#
# The values on which we can make a prediction
cat_names = ['sex', 'cough', 'fever', 'chills', 'sore_throat', 'headache', 'fatigue']
# NOTE(review): this second assignment overrides the first, silently dropping
# 'chills' and 'sore_throat' from the feature set — confirm this is intended.
cat_names = ['sex', 'cough', 'fever', 'headache', 'fatigue']
cont_names = ['age']
#hide
# Preprocessing: impute missing values, encode categoricals, normalize 'age'.
procs = [FillMissing, Categorify, Normalize]
#hide
# Rows 660-860 are reserved as a manual test slice (add_test is commented out).
test = TabularList.from_df(df.iloc[660:861].copy(), path = path, cat_names= cat_names, cont_names = cont_names)
data = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs = procs)
        .split_by_rand_pct(0.2)
        .label_from_df(cols=y_col)
#         .add_test(test)
        .databunch() )
data.show_batch(rows=5)
# ## Model
#
# Here we build our machine learning model that will learn from the dataset to classify between patients
# ### Using Focal Loss
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    """Focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Down-weights well-classified examples by the factor (1 - p_t)**gamma so
    training focuses on hard examples; optional per-class weights `alpha`.

    Args:
        gamma: focusing exponent; gamma=0 reduces to plain cross-entropy.
        alpha: None, a scalar (treated as [alpha, 1 - alpha] for two
            classes), or a list of per-class weights.
        size_average: return the mean loss over samples if True, else the sum.
    """

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1 - alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        # Flatten any spatial dimensions so every row is one prediction.
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # log p_t for the true class of each sample.  dim=1 is now explicit:
        # the implicit-dim form of log_softmax is deprecated.
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        # p_t, detached so the focusing weight is not back-propagated through.
        # (Replaces the deprecated Variable(logpt.data.exp()) wrapper, which
        # has been a no-op since PyTorch 0.4.)
        pt = logpt.detach().exp()

        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        return loss.mean() if self.size_average else loss.sum()
# -
# Two hidden layers (150, 50); track accuracy and macro-averaged F-beta.
learn = tabular_learner(data, layers = [150,50], \
                        metrics = [accuracy,FBeta("macro")])
# NOTE(review): loading '150-50-focal' before training assumes the weights
# were saved by a previous run — this fails in a fresh environment.
learn.load('150-50-focal')
learn.loss_func = FocalLoss()
#hide
learn.fit_one_cycle(5, 1e-4, wd= 0.2)
learn.save('150-50-focal')
learn.export('150-50-focal.pth')
#hide
# Manual test slice — the same rows passed to TabularList.from_df above.
testdf = df.iloc[660:861].copy()
testdf.urgency.value_counts()
testdf.head()
testdf = testdf.iloc[:,1:]
#hide
testdf.insert(0, 'predictions','')
#hide
for i in range(len(testdf)):
    row = testdf.iloc[i][1:]
    # NOTE(review): chained indexing (.predictions.iloc[i] = ...) relies on
    # writing through a view and raises SettingWithCopyWarning; .loc is safer.
    testdf.predictions.iloc[i] = str(learn.predict(row)[0])
# ### Making predictions
#
# We've taken out a test set to see how well our model works, by making predictions on them.
#
# Interestingly, all those predicted with 'High' urgency have a common trait of absence of **chills** and **sore throat**
testdf.urgency.value_counts()
testdf.predictions.value_counts()
from sklearn.metrics import classification_report
print(classification_report(testdf.predictions, testdf.urgency, labels = ["High", "Low"]))
print(classification_report(testdf.predictions, testdf.urgency, labels = ["High", "Low"]))
testdf = pd.read_csv('processed_over_test.csv')
testdf = testdf.iloc[:,1:]
testdf.head()
# Map 0/1 indicator columns to human-readable labels.
yesnomapper = {1:'Yes', 0: 'No'}
for col in testdf.columns[2:-1]:
    # The original line was missing the closing parenthesis, which made the
    # whole file a SyntaxError.
    testdf[col]= testdf[col].map(yesnomapper)
testdf['sex'] = testdf['sex'].map({1: 'male', 0:'female'})
testdf['urgency'] = testdf['urgency'].map({0:'Low', 1:'High'})
from sklearn.metrics import confusion_matrix
cm_test = confusion_matrix(testdf.urgency, testdf.predictions)
cm_test
# Hard-coded confusion matrices copied from earlier runs.
cm_test = np.array([[72, 51], [18,27]])
cm_test
cm_test2 = np.array([[94, 29],[30,15]])
# `df_cm` is not defined until the plotting cell below; evaluating it here
# raised NameError, so the stray expression is commented out.
# df_cm
# +
# Consistency fix: this cell imported seaborn as `sn` but then called
# `sns.set(...)`, a NameError unless a later cell had already been run.
# Import under the `sns` alias (matching the cells below) and use it
# throughout.
import seaborn as sns
import pandas as pd
fig, ax = plt.subplots()
fig.set_size_inches(7,5)
df_cm = pd.DataFrame(cm_test2, index = ['Actual Low','Actual High'],
                     columns = ['Predicted Low','Predicted High'])
sns.set(font_scale=1.2)
sns.heatmap(df_cm, annot=True, ax = ax)
ax.set_ylim([0,2]);
ax.set_title('Deep Model Confusion Matrix')
fig.savefig('DeepModel_CM.png')
# -
# ## Profile after focal loss
# +
import seaborn as sns
import pandas as pd
fig, ax = plt.subplots()
fig.set_size_inches(7,5)
df_cm = pd.DataFrame(cm_test, index = ['Actual Low','Actual High'],
columns = ['Predicted Low','Predicted High'])
sns.set(font_scale=1.2)
sns.heatmap(df_cm, annot=True, ax = ax)
ax.set_ylim([0,2]);
ax.set_title('Deep Model Confusion Matrix (with Focal Loss)');
fig.savefig('DeepModel_CM_Focal Loss.png')
# +
import seaborn as sns
import pandas as pd
fig, ax = plt.subplots()
fig.set_size_inches(7,5)
df_cm = pd.DataFrame(cm_test, index = ['Actual Low','Actual High'],
columns = ['Predicted Low','Predicted High'])
sns.set(font_scale=1.2)
sns.heatmap(df_cm, annot=True, ax = ax)
ax.set_ylim([0,2]);
ax.set_title('Deep Model Confusion Matrix (with Focal Loss)');
# -
testdf.head()
row = testdf.iloc[0]
round(float(learn.predict(row[1:-1])[2][0]),5)
# ## Experimental Section
#
# Trying to figure out top
# Fill the 'probability' column with the model's probability for the first class.
# NOTE(review): 'probability' is not created in the visible code — it must
# already exist on testdf (e.g. from a prior cell); confirm.
for i in range(len(testdf)):
    row = testdf.iloc[i][1:]
    # learn.predict(...)[2] is the tensor of class probabilities; [0] takes
    # the first class, rounded to 5 decimal places.
    testdf.probability.iloc[i] = round(float(learn.predict(row[1:-1])[2][0]),5)
testdf.head()
testdf.sort_values(by=['probability'],ascending = False, inplace = True)
# +
#
# The remaining "cells" in this section were free-form planning notes typed
# directly into code cells.  As bare text they are SyntaxErrors that stop
# the whole file from even parsing, so they are preserved as comments.
# cumulative lift gain
# baseline model - test 20%
# +
# Cost based affection
# Give kits only top 20%
# Profiling them:
# How you can get the probs?
# Decile?
# subsetting your group - divide 100 people into ten equal groups
# descending order of probability
# profile them: see features (prediction important features)
# top 20 vs rest 80
# Descriptive statistics (count, mean, median, average)
# How are they different? (see a big distinction top 20 top 80)
# figure out what is happening
# questions:
# lift curve
# +
#
# 1. GET PROBABILITIES
# 2. MAKE DECILES
# 3. MAKE CURVE
# 4. PROFILING (feature selection - HOW ARE THEY BEHAVING??)
# Optional:
# Work with different thresholds
# -
# Confusion matrix to risk matrix (cost what minimizes - risk utility matrix)
import scikitplot as skplt
y2 = y2.urgency
fig, ax = plt.subplots()
fig.set_size_inches(8, 4)
skplt.metrics.plot_cumulative_gain(y_true = testdf.urgency, y_probas= predicted_probas, ax=ax)
# plt.savefig('lift_curve.png')
df['decile1'] = pd.qcut(df['pred_prob'].rank(method='first'), 10, labels=np.arange(10, 0, -1))
# +
lr_predicted_probas = []
for i in range(len(lr_df)):
iprob = lr_df.iloc[i,0]
lr_predicted_probas.append([round(iprob,4), round(1 - iprob,4)])
# -
pickle_in = open('lg_predictions.pkl', 'rb')
lr_df = pickle.load(pickle_in)
plt.style.available
plt.style.use('classic')
# +
# Hand-rolled cumulative-gains plot comparing the deep model against
# logistic regression for the 'High urgency' class.
# NOTE(review): `predicted_probas` and `lr_predictions` are not defined in
# the visible code — presumably produced by earlier cells; confirm.
fig, ax = plt.subplots(figsize = (10,6))
classes = np.unique(np.array(testdf.urgency))
percentages, gains1 = skplt.metrics.cumulative_gain_curve(np.array(testdf.urgency), predicted_probas[:,0], classes[0])
percentages, gains2 = skplt.metrics.cumulative_gain_curve(np.array(lr_df.urgency), lr_predictions[:,0], classes[0])
ax.plot(percentages, gains1, lw=3, label = f'{classes[0]} Urgency - Deep Model')
ax.plot(percentages, gains2, lw=3, label = f'{classes[0]} Urgency - Logistic Regression')
ax.set_xlim([0.0,1.0])
ax.set_ylim([0.0,1.0])
# Diagonal = random-ordering baseline.
ax.plot([0,1],[0,1], 'k--', lw =2, label = 'Baseline')
ax.grid('off')
ax.set_xlabel('Percentage of sample', fontsize=20)
ax.set_ylabel('Gain', fontsize=20)
ax.legend(loc = 'lower right', fontsize = 15)
ax.set_title("Cumulative Gains Curve -'High Urgency' classification", fontsize = 20)
fig.savefig('Cumulative Gains Chart')
# -
os_data_y = testdf.urgency
os_data_y = os_data_y.to_frame()
os_data_y = os_data_y.urgency.map({'High':1, 'Low':0})
os_data_y = np.array(os_data_y)
# ## Profiling
testdf.head()
testdf['decile'] = pd.qcut(testdf.probability, q = 5, labels= False)
testdf[testdf.decile >2].fever.value_counts()
| UnivAiBlog/CoVID-49.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Now You Code 2: Is That An Email Address?
#
# Let's use Python's built-in string functions to write our own function to detect if a string is an email address.
#
# The function `isEmail(text)` should return `True` when `text` is an email address, `False` otherwise.
#
# For simplicity's sake we will define an email address to be any string with just ONE `@` symbol in it, where the `@` is not at the beginning or end of the string. So `a@b` is considered an email (even though it really isn't).
#
# The program should detect emails until you enter quit.
#
# Sample run:
# ```
# Email address detector. Type quit to exit.
# Email: <EMAIL>
# <EMAIL> ==> email
# Email: mafudge@
# mafudge@ ==> NOT EMAIL
# Email: mafudge
# mafudge ==> NOT EMAIL
# Email: @syr.edu
# @syr.edu ==> NOT EMAIL
# Email: @
# @ ==> NOT EMAIL
# Email: <EMAIL>
# <EMAIL> ==> NOT EMAIL
# Email: <EMAIL>
# <EMAIL> ==> NOT EMAIL
# ```
#
# Once again we will use the problem simplification technique to writing this program.
#
# First we will write the `isEmail(text)` function, then we will write the main program.
#
# ## Step 1: Problem Analysis for isEmail function only
#
# Inputs (function arguments): the user's input string. The function checks whether the input is a valid email based on the criteria that there is exactly one @ symbol and that it is not at the beginning or end of the string.
#
# Outputs (what is returns): The function will return false if there is an @ symbol at the beginning or end of the input as well as if there isn't an @ symbol anywhere in the input. It will return true is there is an @ symbol within the input, but not on the either end.
#
# Algorithm (Steps in Function):
# 1. define the function isEmail
# 2. if the email starts with or ends with @, return False
# 3. if @ appears inside the email, return True
# 4. if none of the above happen, return False
#
#
# +
## Step 2: Todo write the function definition for isEmail functiuon
def isEmail(email):
    """Return True when `email` contains exactly one '@' that is neither
    the first nor the last character, False otherwise."""
    has_single_at = email.count("@") == 1
    at_on_edge = email.startswith("@") or email.endswith("@")
    return has_single_at and not at_on_edge
# -
## Step 3: Write some tests, to ensure the function works, for example
## Make sure to test all cases!
# The last two labels were copy-pasted ("text=mike@") but actually tested
# different inputs; they now describe the value really being passed in.
print("WHEN text=<EMAIL> We EXPECT isEmail(text) to return True", "ACTUAL", isEmail("<EMAIL>") )
print("WHEN text=mike@ We EXPECT isEmail(text) to return False", "ACTUAL", isEmail("mike@") )
print("WHEN text=joesh We EXPECT isEmail(text) to return False", "ACTUAL", isEmail("joesh") )
print("WHEN text=joe@@gmail We EXPECT isEmail(text) to return False", "ACTUAL", isEmail("joe@@gmail") )
# ## Step 4: Problem Analysis for full Program
#
# Inputs:
# - Prompt user to input an email address
#
# Outputs:
# - If email is not valid print that it is not valid
# - If email is valid print that it is valid
# - When quit is entered print that the program has ended
#
# Algorithm (Steps in Program):
# 1. start loop
# 2. prompt user to enter email
# 3. if input is quit, end the program and print that the program has ended
# 4. if email is valid, print that it is a valid email
# 5. if email is not valid, print that it is not a valid email
#
#
# +
## Step 5: todo write code for full problem, using the isEmail function to help you solve the problem
print("IST256 Email Checker")
# Prompt repeatedly; "quit" is the sentinel that ends the session.
while True:
    email = input("Please enter an email: ")
    if email == "quit":
        break
    # isEmail returns a bool, so it can drive the branch directly.
    if isEmail(email):
        print("This is a valid email")
    else:
        print("This is not a valid email")
print("The program has ended")
# -
# ## Step 6: Questions
#
# 1. How many test cases should you have in step 3 to ensure you've tested all the cases?
# - You need four tests to ensure you have tested all cases. This is because you have to test an email with @ at the beginning, one with @ at the end, one with @ within it, and one without an @ symbol anywhere.
# 2. What kind of logic should we add to make our `isEmail` function even better, so that is detects emails more accurately?
# - We could make so that it must end with .com, .net, etc...
#
# ## Reminder of Evaluation Criteria
#
# 1. Was the problem attempted (analysis, code, and answered questions) ?
# 2. Was the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
| content/lessons/07/Now-You-Code/NYC2-Email-Address.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="LGLgP-4B2xk1" colab_type="code" colab={}
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
import zipfile
import cv2
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D ,AveragePooling2D, Flatten, Dropout
from keras.layers.core import Dense
from keras.optimizers import RMSprop,Adam,SGD
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# + id="jLy_moUs5qdO" colab_type="code" outputId="920bdee1-7ab4-4279-984a-d45dc3922dd1" colab={"base_uri": "https://localhost:8080/", "height": 34}
os.getcwd()
# + id="MXLkoPXU5yD6" colab_type="code" colab={}
# Unpack the training and test image archives.  Context managers guarantee
# the zip file handles are closed even if extraction fails part-way
# (the original open/extract/close sequence leaked the handle on error).
with zipfile.ZipFile(r'/content/Train.zip') as train_zip:
    train_zip.extractall('/content/train')
with zipfile.ZipFile(r'/content/Test.zip') as test_zip:
    test_zip.extractall('/content/test')
# + id="qxjrbYfU54HA" colab_type="code" colab={}
# Directory listings and base paths for the extracted images.
train_images=os.listdir('/content/train/Train/')
test_images = os.listdir('/content/test/Test')
filepath_train = '/content/train/Train/'
filepath_test = '/content/test/Test/'
# + id="U_8LmO0U69qf" colab_type="code" outputId="7b60268d-247c-4c93-9c17-56c7a410408c" colab={"base_uri": "https://localhost:8080/", "height": 359}
df_train = pd.read_csv('/content/train.csv')
df_train.head(10)
# + id="7ACwwtVNPvtH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="55a18740-700d-4b4d-d7d8-b3a035a3ddd2"
sample_submn = pd.read_csv('/content/sample_submission_sDO3m7O.csv')
sample_submn.head()
# + id="hvXb3AUj6z8v" colab_type="code" colab={}
images=[]
labels=[]
# Load every training image listed in the CSV, resize it to the network's
# 64x64 input size, and collect the class label from the same row.
for index, row in df_train.iterrows():
    image=cv2.imread(filepath_train+row['ID'])  # BGR uint8 array; returns None if the file is missing — TODO confirm all IDs exist on disk
    image=cv2.resize(image , (64,64))
    images.append(image)
    labels.append(row['Class'])
    #print(row['ID'])
# + id="w7qzyfofasc8" colab_type="code" colab={}
images_test=[]
outputs=[]
# Load the test images in submission order. `outputs` keeps the resized
# uint8 arrays so cv2.putText can annotate them later (images_test is
# re-bound to a normalised float array below).
for index,row in sample_submn.iterrows():
    image=cv2.imread(filepath_test+row['ID'])
    image=cv2.resize(image , (64,64))
    images_test.append(image)
    outputs.append(image)  # NOTE(review): appends the same array object as images_test, not a copy
# + id="vX_-xhO0cPiQ" colab_type="code" colab={}
images_test[0]
# + id="nOwMr80-7HyA" colab_type="code" colab={}
images[0]
# + id="eMnYNVv88YiT" colab_type="code" outputId="ee5b557f-533a-4c44-87e0-e304ba133b98" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(type(images))
print(type(images_test))
# + id="KSZTVzIkRwxF" colab_type="code" colab={}
outputs[0]
# + id="3nNeWGPA9qYy" colab_type="code" outputId="c39536b0-3988-49b2-c53b-bf553966764f" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[0])
# + id="6XlF5Vaf9vvO" colab_type="code" outputId="bc0c8f25-142c-4d53-f674-35272cfd7f2e" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[1])
# + id="tGkInGn9962-" colab_type="code" outputId="6ad1a5fd-1110-408d-816f-dcf32c09f7d3" colab={"base_uri": "https://localhost:8080/", "height": 204}
df_train.tail()
# + id="BiM2sVLT-K75" colab_type="code" outputId="2aeb90ef-a546-4c21-a3f3-f2767eb02871" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[-1])
# + id="FCrGMzPv-bOJ" colab_type="code" outputId="6a66c2f6-de82-4fac-e8dc-ac9201e32569" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[19905])
# + id="6BRRnHz4-1J4" colab_type="code" outputId="5d935173-8c9d-4466-e624-6a6222e56666" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[19904])
# + id="bRqE0e5CdXSt" colab_type="code" outputId="b689b6f7-84a4-4f3b-f7f5-a10617d7dc22" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images_test[0])
# + id="e77g6GHJdbwa" colab_type="code" outputId="e4170342-165b-4fa6-9f17-308fcab951dd" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images_test[-1])
# + id="DFo5UYq3R5gH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="931eea39-6af0-4569-ab0c-9b4f1494fc2d"
plt.imshow(outputs[0])
# + id="zf-XfMvD-4A6" colab_type="code" colab={}
images = np.array(images, dtype="float") / 255.0
images_test = np.array(images_test, dtype="float") / 255.0
labels = np.array(labels)
# + id="bjzeYxOldgpH" colab_type="code" outputId="5efbd100-9319-495c-ead1-2b5b3aef7e89" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images[0])
# + id="ZTP0_1C1eBmY" colab_type="code" outputId="6f41d899-5963-4abb-d518-76dea4c2b7b9" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images_test[0])
# + id="AI0cj7Ls_IYc" colab_type="code" colab={}
images[0]
# + id="e6z9Bzh2eGBy" colab_type="code" colab={}
images_test[0]
# + id="YuOAXfJT_KRY" colab_type="code" colab={}
(trainX, testX, trainY, testY) = train_test_split(images,labels, test_size=0.30, random_state=42)
# + id="4GbOd2DW_P7L" colab_type="code" outputId="6e87f2ad-d435-4058-92e0-8a7c89697ae3" colab={"base_uri": "https://localhost:8080/", "height": 153}
print(type(trainX))
print(trainX.shape)
print(type(trainY))
print(trainY.shape)
print(type(testX))
print(testX.shape)
print(type(testY))
print(testY.shape)
# + id="5dOPPpO6_SC8" colab_type="code" colab={}
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
# + id="By4suEdA_WsG" colab_type="code" outputId="7d33b675-fa35-4a07-9ac5-041786c9791b" colab={"base_uri": "https://localhost:8080/", "height": 34}
lb.classes_
# + id="PYzJ8mGa_WuT" colab_type="code" colab={}
# CNN: three conv stages (32 -> 64x2 -> 128x3 filters), each followed by
# batch normalisation, 2x2 max pooling and dropout, then a 512-unit dense head.
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (3,3),padding = "same", activation ='relu', input_shape = (64,64,3)))
model.add(BatchNormalization(axis=-1))  # axis=-1: normalise over the channel dimension
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64,kernel_size=(3,3), padding="same",activation="relu"))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding="same",activation="relu"))
model.add(BatchNormalization(axis=-1))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same",activation="relu"))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same",activation="relu"))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same",activation="relu"))
model.add(BatchNormalization(axis=-1))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(3,activation="softmax"))  # 3 output classes (one-hot labels from LabelBinarizer)
# + id="q_jgn9Lq_Ww5" colab_type="code" colab={}
# Training hyperparameters
INIT_LR = 0.01  # learning rate for plain SGD (no momentum/decay)
EPOCHS = 50
BS = 32  # batch size
opt=SGD(lr=INIT_LR)
# categorical_crossentropy matches the one-hot labels produced by LabelBinarizer
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# + id="k1rOMYpdhKom" colab_type="code" colab={}
aug = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.1,horizontal_flip=True, fill_mode="nearest")
# + id="6QQqkrgp_hkf" colab_type="code" outputId="6be8c47a-e806-45be-a087-c169b96d24d0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,epochs=EPOCHS)
# + id="mnPPzEwb_kQ4" colab_type="code" outputId="7e7eedca-79de-4aef-ec53-65b9ffafb37c" colab={"base_uri": "https://localhost:8080/", "height": 187}
predictions = model.predict(testX, batch_size=BS)
print(classification_report(testY.argmax(axis=1),predictions.argmax(axis=1), target_names=lb.classes_))
# + id="i6L4Hg39Artq" colab_type="code" outputId="43f80f58-b4e6-4961-aa33-10107e5d90a5" colab={"base_uri": "https://localhost:8080/", "height": 626}
# plot the training loss and accuracy
N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure(figsize=(15,10))
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy (MNIST CNN for age classification)")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
# + id="r4HB9KsyVpCS" colab_type="code" outputId="a3bc9726-ae68-4ec5-84dc-bcb21026df9e" colab={"base_uri": "https://localhost:8080/", "height": 136}
pred = model.predict(images_test)
pred
# + id="vNjvJXxtWqhN" colab_type="code" outputId="b57c8d90-8065-4ba8-8ea6-68374175b308" colab={"base_uri": "https://localhost:8080/", "height": 34}
i = pred.argmax(axis=1)
i
# + id="9hmgiZl4Wwmz" colab_type="code" outputId="44995e08-3980-4d9d-db14-ff176cab3c1d" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Going for second test example
i1 = pred.argmax(axis=1)[1]
i1
# + id="dDDCiq0FWHm_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="e93771c3-d22a-4d9e-b16e-4ec6a6cb0578"
plt.imshow(images_test[1])
# + id="lVOni6ruXDs3" colab_type="code" outputId="44fee403-d208-4218-ef82-d0bfcfa7d2b4" colab={"base_uri": "https://localhost:8080/", "height": 51}
vals = np.amax(pred, axis=1)
vals
# + id="AO3o3aFjYE86" colab_type="code" outputId="e1959111-223c-4c78-eece-687654ce44a8" colab={"base_uri": "https://localhost:8080/", "height": 34}
#going for second test example
val1 = vals[1]
val1
# + id="W4RhXNiKYP9G" colab_type="code" outputId="416493ab-6c6d-454e-9dba-c7aef692b0f0" colab={"base_uri": "https://localhost:8080/", "height": 34}
#second test example
perc_val1 = val1*100
perc_val1 = perc_val1.round(2)
perc_val1
# + id="jnEBlhPyYckI" colab_type="code" colab={}
from google.colab.patches import cv2_imshow
# + id="mCdxW0CfYiBx" colab_type="code" outputId="40dcf2a0-4ae5-48c1-d9d0-9809817afe95" colab={"base_uri": "https://localhost:8080/", "height": 34}
label1 = lb.classes_[i1]
label1
# + id="WvgW_3FrYk8d" colab_type="code" colab={}
#SEE THIS
#need to fix this
#output = images_test[0].copy()
# + id="CtfHbSd8Y04l" colab_type="code" outputId="1fdc3be2-69ae-423a-dace-ed768e192b72" colab={"base_uri": "https://localhost:8080/", "height": 317}
text = label1+": "+str(perc_val1)
#text='theri'
cv2.putText(outputs[1], text , (10,50), cv2.FONT_HERSHEY_SIMPLEX, 0.7 ,(0, 0, 255), 2)
outputs[1] = cv2.resize(outputs[1] , (300,300))
# show the output image
cv2_imshow(outputs[1])
# + id="_QAs529mY5dS" colab_type="code" outputId="78859dfb-59f0-4f94-9319-dd40f41a89c0" colab={"base_uri": "https://localhost:8080/", "height": 285}
plt.imshow(images_test[1])
# + id="_8vrrBtbZJTH" colab_type="code" colab={}
| notebooks/v5_Age_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Emotion detection using Spacy 3
#
# This notebook shows how to do emotion detection on tweet-sized texts using a transformer architecture with spaCy 3.
#
# You can run this notebook on Google Colab if you want to customize it to your own needs. Remember to choose GPU hardware.
# ## Installations and imports
# + id="RUVIl0mAm9jw"
# Installing Spacy library
# !pip install spacy==3.1.1
# !pip install spacy-transformers
# + id="5DgQgC4rnB3L"
# Downloading the spaCy Transformer model "en_core_web_trf"
# !python -m spacy download en_core_web_trf
# + id="TeGHiDDsnZge"
# Importing libraries
import pandas as pd
from datetime import datetime
import spacy
import spacy_transformers
# Storing docs in binary format
from spacy.tokens import DocBin
# -
# ## Read in the data
#
# I got the dataset from this github repository:
# https://github.com/RoozbehBandpey/ELTEA17
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="HLqydEZfmoeB" outputId="f331192d-1e0d-43f1-cf78-68d5894f2196"
# Read in dataset
jsonpath = "sentence_level_annotation.json"
df = pd.read_json(jsonpath)
df.head()
# -
# As you can see, there is a column with emotions and a column with the text. We are interested in those two.
#
# There are 6 different emotions, and I am interested in splitting the data into train and test sets, but keep the ratio across the emotions.
# + id="eWLp31WAmvr1"
# Splitting the dataset into train and test
train = df.groupby("emotion").sample(frac = 0.8, random_state = 25)
test = df.drop(train.index)
# + colab={"base_uri": "https://localhost:8080/"} id="3_eBmoEtm2QP" outputId="7c5a2977-d9b6-4aa8-f985-a9353de72c69"
# Checking the shape
print(train.shape, test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="P7loEj3XoKgH" outputId="d6b4835b-ddea-48de-eb73-d4cd90044f3d"
#Creating tuples
train['tuples'] = train.apply(lambda row : (row['text'],row['emotion']), axis=1)
train = train['tuples'].tolist()
test['tuples'] = test.apply(lambda row : (row['text'],row['emotion']), axis=1)
test = test['tuples'].tolist()
train[0]
# + colab={"base_uri": "https://localhost:8080/"} id="Ufrgx8ZzoYZ6" outputId="e011ec4e-60b1-42ce-a7e1-b1b1b93f1406"
df.emotion.value_counts()
# + id="VcNcU41Iosz3"
# User function for converting the train and test dataset into spaCy document
nlp = spacy.load("en_core_web_trf")
def document(data):
    """Convert (text, emotion) tuples into spaCy Docs with one-hot doc.cats.

    `data` is an iterable of (text, label) pairs; the returned list holds
    one Doc per pair whose .cats maps each known emotion to 1 or 0.
    """
    emotion_labels = ("joy", "sad", "dis", "sup", "fea", "ang")
    docs = []
    for doc, label in nlp.pipe(data, as_tuples=True):
        # One-hot encode the gold label over the fixed emotion set.
        for emotion in emotion_labels:
            doc.cats[emotion] = 1 if label == emotion else 0
        docs.append(doc)
    return docs
# + colab={"base_uri": "https://localhost:8080/"} id="q8UB5ndTpgPk" outputId="75ac51ab-5f8c-44af-f8d1-21ff9dd999f1"
# Calculate the time for converting into binary document for train dataset
start_time = datetime.now()
#passing the train dataset into function 'document'
train_docs = document(train)
#Creating binary document using DocBin function in spaCy
doc_bin = DocBin(docs = train_docs)
#Saving the binary document as train.spacy
doc_bin.to_disk("train.spacy")
end_time = datetime.now()
#Printing the time duration for train dataset
print('Duration: {}'.format(end_time - start_time))
# + colab={"base_uri": "https://localhost:8080/"} id="je9c4D5Bpuc-" outputId="0e5af7ab-b58c-4341-feec-aea48f2e2ba5"
# Calculate the time for converting into binary document for test dataset
start_time = datetime.now()
#passing the test dataset into function 'document'
test_docs = document(test)
doc_bin = DocBin(docs = test_docs)
doc_bin.to_disk("test.spacy")
end_time = datetime.now()
#Printing the time duration for test dataset
print('Duration: {}'.format(end_time - start_time))
# + [markdown] id="a4EauO7Er-S0"
# Go here https://spacy.io/usage/training#quickstart
#
# And download the base_config.cfg
#
# Set it to:
# - textcat
# - gpu
# - accuracy
#
# Put it here. And then change the paths to:
#
# train = "train.spacy"
#
# dev = "test.spacy"
# + colab={"base_uri": "https://localhost:8080/"} id="VnQDOxKTpyBk" outputId="54217c52-9699-4b82-de99-e4dda8b03af4"
#Converting base configuration into full config file
# !python -m spacy init fill-config ./base_config.cfg ./config.cfg
# + colab={"base_uri": "https://localhost:8080/"} id="1vDGXmnRqkla" outputId="296cbf5a-83f9-4a07-d1f1-84e810f0df92"
#Calculating the time for training the model
start_time = datetime.now()
# To train the model. Enabled GPU and storing the model output in folder called output_updated
# !python -m spacy train config.cfg --verbose --gpu-id 0 --output ./output_updated
end_time = datetime.now()
#Printing the time taken for training the model
print('Duration: {}'.format(end_time - start_time))
# + id="jkCs57V9tN6Y"
# Testing the model
# Loading the best model from output_updated folder
nlp = spacy.load("output_updated/model-best")
# + colab={"base_uri": "https://localhost:8080/"} id="CwxKXST_tRLs" outputId="2c7044e1-05b4-4126-c243-63332004f67b"
text = "Capitalism produces ecological crisis for the same reason it produces inequality: because the fundamental mechanism of capitalist growth is that capital must extract (from nature and labour) more than it gives in return."
demo = nlp(text)
a_dictionary = demo.cats
cat = max(a_dictionary, key=a_dictionary.get)
print(text)
print(cat.upper())
# + colab={"base_uri": "https://localhost:8080/"} id="zOXYoD8x-tHp" outputId="2dad9843-f49f-4953-ecd4-6eb531583a50"
a_dictionary
# + [markdown] id="yJIb1oK9-Rat"
# ## Store the stuff for faster reuse
# + colab={"base_uri": "https://localhost:8080/"} id="qZyjAhg17zuR" outputId="9e6a9b64-0a09-43f7-d5b3-faf088899393"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="-JtAlT3i94lb"
# %cp -r `ls -A | grep -v "gdrive"` /content/gdrive/MyDrive/emotions/
| emotion_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MapReduce
#
# The MapReduce programming technique was designed to analyze massive data sets across a cluster. In this Jupyter notebook, you'll get a sense for how Hadoop MapReduce works; however, this notebook will run locally rather than on a cluster.
#
# The biggest difference between Hadoop and Spark is that Spark tries to do as many calculations as possible in memory, which avoids moving data back and forth across a cluster. Hadoop writes intermediate calculations out to disk, which can be less efficient. Hadoop is an older technology than Spark and one of the cornerstone big data technologies.
#
# If you click on the Jupyter notebook logo at the top of the workspace, you'll be taken to the workspace directory. There you will see a file called "songplays.txt". This is a text file where each line represents a song that was played in the Sparkify app. The MapReduce code will count how many times each song was played. In other words, the code counts how many times the song title appears in the list.
#
#
# # MapReduce versus Hadoop MapReduce
#
# Don't get confused by the terminology! MapReduce is a programming technique. Hadoop MapReduce is a specific implementation of the programming technique.
#
# Some of the syntax will look a bit funny, so be sure to read the explanation and comments for each section. You'll learn more about the syntax in later lessons.
#
# Run each of the code cells below to see the output.
# +
# Install mrjob library. This package is for running MapReduce jobs with Python
# In Jupyter notebooks, "!" runs terminal commands from inside notebooks
# ! pip install mrjob
# +
# %%file wordcount.py
# # %%file is an Ipython magic function that saves the code cell as a file
from mrjob.job import MRJob # import the mrjob library
class MRSongCount(MRJob):
    """MapReduce job that counts how many times each song title occurs.

    Each line of the input text file holds one played song; mrjob feeds the
    line to the mapper as the value (the key slot is unused, hence ``_``).
    """

    def mapper(self, _, line):
        # Map step: emit one (title, 1) pair per played song.
        yield (line, 1)

    def reducer(self, title, counts):
        # Reduce step: mrjob groups pairs by title; summing the ones
        # gives the total number of plays for that title.
        yield (title, sum(counts))


if __name__ == "__main__":
    MRSongCount.run()
# -
# run the code as a terminal command
# ! python wordcount.py songplays.txt
# # Summary of what happens in the code.
#
# There is a list of songs in songplays.txt that looks like the following:
#
# Deep Dreams
# Data House Rock
# Deep Dreams
# Data House Rock
# Broken Networks
# Data House Rock
# etc.....
#
# During the map step, the code reads in the txt file one line at a time. The map steps outputs a set of tuples that look like this:
#
# (Deep Dreams, 1)
# (Data House Rock, 1)
# (Deep Dreams, 1)
# (Data House Rock, 1)
# (Broken Networks, 1)
# (Data House Rock, 1)
# etc.....
#
# Finally, the reduce step combines all of the values by keys and sums the values:
#
# (Deep Dreams, \[1, 1, 1, 1, 1, 1, ... \])
# (Data House Rock, \[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...\])
# (Broken Networks, \[1, 1, 1, ...\]
#
# With the output
#
# (Deep Dreams, 1131)
# (Data House Rock, 510)
# (Broken Networks, 828)
| Spark/code/mapreduce_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from funcs import *
import matplotlib.pyplot as plt
import seaborn as seabornInstance
#from sklearn.model_selection import train_test_split
#from sklearn.linear_model import LinearRegression
from sklearn import metrics
# %matplotlib inline
# -
# ### Import data
# +
#data = pd.concat([X, y_recovered, y_deaths, y_recovered_smoothed, y_deaths_smoothed], axis=1)
# +
#Number of infected for past two weeks
X = pd.read_csv('data.csv').iloc[:,1:-2].values
#Number of recovered with transformation to smooth data
y_rec_smoothed = pd.read_csv('data.csv').iloc[:,-1].values
# -
"""# ignore last two elements because they are equal to zero
y_rec_smoothed = y_rec_smoothed[:-2]
X = X[:-2,:]"""
# # Smoothing
# All different smoothing that I have tried:
# - simple exponential smoothing: smallest error: 0.19
# -
# ### Simple Exponential Smoothing
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='simple')
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='simple', with_validation=False)
X.shape
# ### Exponential Smoothing
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=1)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=1, with_validation=False)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=2)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=2, with_validation=False)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=3)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=3, with_validation=False)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=4)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=4, with_validation=False)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=5)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=5, with_validation=False)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=6)
find_best_alpha(X, y_rec_smoothed, X.shape[1], model='non-simple', K=6, with_validation=False)
# ### Gaussian Smoothing
# Find optimum K for gaussian smoothing
find_best_K(X, y_rec_smoothed, 'even')
find_best_K(X, y_rec_smoothed, 'even', with_validation=False)
# Find optimum K for gaussian smoothing, odd
find_best_K(X, y_rec_smoothed, 'odd')
find_best_K(X, y_rec_smoothed, 'odd', with_validation=False)
# ## Quadratic Regularization
X = apply_smoothing(X, 0, 'odd')
N = X.shape[1]
# To do:
# - Create matrix M
# - Create matrix X (DONE)
# - Compute X^TX
# - Compute M^TM
# - Verify M^TM value, if it coincides with the one G.O. wrote in report
# - install library, define instances, run optimizer
# +
# ----------------------------#
# GENERATE PREDICTIONS
# ----------------------------#
# Chronological split indices (only the 80% split is used below).
pct_90 = int(np.ceil(90*len(X)/100))
pct_80 = int(np.ceil(80*len(X)/100))
pct_70 = int(np.ceil(70*len(X)/100))
X_train, X_test = X[:pct_80], X[pct_80:]
y_train, y_test =y_rec_smoothed[:pct_80], y_rec_smoothed[pct_80:]
# Pick the best lag index via MAAPE, then solve the constrained QP for gamma.
index = find_best_index(X_train, y_train, X_test, y_test, 'maape', N)
P, q, G, h = generate_params(X_train, y_train, index, N)
gamma = cvxopt_solve_qp(P, q, G, h)
y_pred = X_test@gamma
# -
gamma
pd.DataFrame({'gammas': gamma}).plot()
index
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
#df
df.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# +
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('Mean Absolute percentage error:', mape(y_test, y_pred))
print('Mean Square percentage error:', mspe(y_test, y_pred))
# -
# ## Cross Validation
# ### Advancement validation
print('for each split we have the following MAPE losses: {}, \nResulting in a mean MAAPE of {}'.format(advancement_val(X, y_rec_smoothed)[0],advancement_val(X, y_rec_smoothed)[1]))
# # Find best hyperparameter $\lambda$
# this is the function we want to minimize
# we want to minimize the mean loss function MAE from our cross validation run
def f(lambda_):
    """Objective for the lambda search: mean MAE over the CV splits."""
    # NOTE(review): depends on globals `splits_X`, `splits_y` and `cross_val`
    # (presumably exported by `funcs`); neither split variable is defined in
    # this notebook — confirm before running.
    mapes, maes, y_vals, y_preds = cross_val(splits_X, splits_y, lambda_)
    return np.mean(maes)
# +
from scipy.optimize import minimize
minimize(f,1.0,method='SLSQP')
# +
from skopt import gp_minimize
from skopt.space import Real, Integer
space = [Real(10**-5, 10**0, name='learning_rate')]
res = gp_minimize(f,space)
lambda_ = res['x'][0]
# +
def plot_loss_per_lambda():
    """Plot test-set MAPE of the QP model for a range of lambda values.

    NOTE(review): relies on globals `X_4`, `y_recovered`, `pct_80` and on
    `find_best_k` (presumably from `funcs`); `X_4` and `y_recovered` are not
    defined in this notebook — confirm before running. Also note the MAPE
    values are appended as *strings* (via format), so the y-axis is
    effectively categorical.
    """
    lambdas = [-10,-1,0, 10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10]
    mapes = []
    for l in lambdas:
        # Fixed 80/20 chronological split, recomputed each iteration
        X_train = X_4[:pct_80]
        X_test = X_4[pct_80:]
        y_train = y_recovered[:pct_80]
        y_test = y_recovered[pct_80:]
        #print(X_test@gamma)
        #print(y_test)
        index = find_best_k(X_train, y_train, X_test, y_test, 'mape')
        P, q, G, h = generate_params(X_train, y_train, index,l)
        gamma = cvxopt_solve_qp(P, q, G, h)
        y_pred = X_test@gamma
        mapes.append(format(100*mape(y_test, y_pred),'.20'))
    print(mapes)
    # True when every lambda produced a distinct MAPE value
    print(len(mapes) == len(np.unique(mapes)))
    lambdas1 = ['-10','-1','0','10e-5', '10e-4', '10e-3', '10e-2', '10e-1', '1', '10']
    plt.plot(lambdas1, mapes, 'b')
    #plt.xlabel('Day')
    #plt.ylabel('Number of Daily Recovered')
    #plt.legend(['Predicted value','True value'])
    #plt.title('Baseline Prediction model for k=' + str(k))
    #plt.axvline(x=pct_80-1)
# -
plot_loss_per_lambda()
# +
def plot_gammas_per_lambda():
    """Plot the mean of the fitted gamma vector for a range of lambda values.

    NOTE(review): same unresolved dependencies as plot_loss_per_lambda
    (`X_4`, `y_recovered`, `pct_80`, `find_best_k`) — confirm before running.
    The means are appended as formatted *strings*, so the y-axis is
    effectively categorical.
    """
    lambdas = [-10, -1, 0, 10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10]
    gammas = []
    for l in lambdas:
        # Fixed 80/20 chronological split, recomputed each iteration
        X_train = X_4[:pct_80]
        X_test = X_4[pct_80:]
        y_train = y_recovered[:pct_80]
        y_test = y_recovered[pct_80:]
        #print(X_test@gamma)
        #print(y_test)
        index = find_best_k(X_train, y_train, X_test, y_test, 'mape')
        P, q, G, h = generate_params(X_train, y_train, index,l)
        gamma = cvxopt_solve_qp(P, q, G, h)
        y_pred = X_test@gamma
        gammas.append(format(np.mean(gamma), '.20f'))
    print(gammas)
    lambdas1 = ['-10','-1','0','10e-5', '10e-4', '10e-3', '10e-2', '10e-1', '1', '10']
    plt.plot(lambdas1, gammas, 'b')
    #plt.xlabel('Day')
    #plt.ylabel('Number of Daily Recovered')
    #plt.legend(['Predicted value','True value'])
    #plt.title('Baseline Prediction model for k=' + str(k))
    #plt.axvline(x=pct_80-1)
# -
plot_gammas_per_lambda()
| 2.1 [RECOVERED] Quadratic Regularization_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import gym
import math
EPISODES = 700
BATCH_SIZE = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# # Utils
class AverageMeter:
    """Tracks the latest value plus a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # most recent value passed to update()
        self.avg = 0    # running mean = sum / count
        self.sum = 0    # weighted sum of all observed values
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
class PytorchWrapper(gym.Wrapper):
    """Gym wrapper that returns observations as float tensors.

    step() also rewrites the reward to -10 on termination (a CartPole-specific
    shaping trick) and drops the info dict from the return value.
    """

    def __init__(self, env):
        super().__init__(env)

    def step(self, action):
        observation, reward, done, _ = self.env.step(action)
        if done:
            reward = -10  ## Specific to Cartpole env
        return torch.tensor(observation, dtype=torch.float), reward, done

    def reset(self):
        return torch.tensor(self.env.reset(), dtype=torch.float)
# # Policy Network - FCN
class PolicyFC(nn.Module):
    """Small fully connected Q-network: in_features -> 64 -> 32 -> 16 -> out.

    Hidden layers use LeakyReLU; the output layer is linear (raw Q-values).
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        widths = [in_features, 64, 32, 16]
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.LeakyReLU())
        layers.append(nn.Linear(widths[-1], out_features))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
# # Prioritized Experience Replay
#
# [Schaul et al.](https://arxiv.org/pdf/1511.05952.pdf) propose using a binary heap for faster search and retrieval of samples. However, in this implementation we use a list. <br />
# The samples are prioritized based on TD error. Instead of purely sampling only on TD error (greedy approach), an alpha parameter is used to interpolate between uniform random and greedy sampling.
class PrioritizedExperienceReplay:
    """
    Replay buffer that samples transitions proportionally to |TD error|^alpha.

    Implementation is adapted from: https://github.com/higgsfield/RL-Adventure
    Schaul et al. (https://arxiv.org/pdf/1511.05952.pdf) suggest a binary heap
    for speed; here plain lists plus a priority array are used for simplicity.
    """
    def __init__(self, buffer_size=100000, alpha=0.6):
        self.alpha = alpha  ## Determines how much prioritization is used (0 = uniform sampling)
        self.state = []
        self.action = []
        self.next_state = []
        self.reward = []
        self.buffer_size = buffer_size
        # |TD error| per stored transition; the small epsilon keeps every slot
        # sampleable. np.float64 replaces the np.float alias removed in NumPy 1.24.
        self.priorities = np.zeros((buffer_size,), dtype=np.float64) + 1e-5
        self.count = 0  # number of transitions currently stored

    def store(self, state, action, next_state, reward):
        """Append one transition, evicting the oldest when the buffer is full."""
        if len(self.state) == self.buffer_size:
            # Drop the oldest transition and shift priorities left so that
            # priorities[i] keeps describing transition i (the original code
            # left the priority array misaligned after eviction and capped
            # self.count at buffer_size - 1, which crashed sample_batch).
            self.state = self.state[1:]
            self.action = self.action[1:]
            self.next_state = self.next_state[1:]
            self.reward = self.reward[1:]
            self.priorities[:-1] = self.priorities[1:]
        self.state.append(state)
        self.action.append(action)
        self.next_state.append(next_state)
        self.reward.append(reward)
        # New transitions get the current maximum priority so they are
        # sampled at least once before their true TD error is known.
        max_priority = self.priorities.max()
        self.priorities[len(self.state) - 1] = max_priority
        self.count = len(self.state)

    def sample_batch(self, batch_size, beta=0.4):
        """Sample `batch_size` transitions with probability ~ priority^alpha.

        Returns (state, action, next_state, reward, idxs, weights) where
        `weights` are the importance-sampling corrections annealed by `beta`.
        """
        probs = self.priorities[:self.count] ** self.alpha
        probs /= probs.sum()
        # Sample over self.count so the probability vector always matches the
        # sampled range (the original sampled over len(self.state), which
        # raised ValueError once the buffer wrapped).
        idxs = np.random.choice(self.count, batch_size, p=probs)
        state = torch.stack(self.state)[idxs]
        action = torch.tensor(self.action, dtype=torch.long)[idxs]
        next_state = torch.stack(self.next_state)[idxs]
        reward = torch.tensor(self.reward, dtype=torch.float)[idxs]
        ## Beta parameter is used to anneal the amount of importance sampling
        total = len(self.state)
        weights = (total * probs[idxs]) ** (-beta)
        weights /= weights.max()
        weights = torch.tensor(weights, dtype=torch.float)
        return (state, action, next_state, reward, idxs, weights)

    def update_priorities(self, idxs, priorities):
        """Write back |TD error| for the transitions sampled at `idxs`."""
        for i, priority in zip(idxs, priorities):
            self.priorities[i] = abs(priority)

    def __len__(self):
        return len(self.state)
# # Double DQN Agent
class DQN:
def __init__(self, obs_size, action_size, device, gamma=0.99, lr=0.001):
self.target = PolicyFC(obs_size, action_size).to(device)
self.target.eval()
self.policy = PolicyFC(obs_size, action_size).to(device)
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr)
self.device = device
self.buffer = PrioritizedExperienceReplay()
self.gamma = gamma
self.action_size = action_size
def loss_fct(self, target, pred):
return F.smooth_l1_loss(pred, target, reduction="none")
def forward(self, policy, obs, grad=False):
obs = obs.to(self.device)
if(obs.size() == (4,)):
obs = obs.unsqueeze(0)
q_values = policy(obs)
if(not grad):
q_values = q_values.detach()
action = torch.argmax(q_values, 1)
return q_values, action
def optimize_policy(self, batch, beta):
self.optimizer.zero_grad()
state, action, next_state, reward, idxs, weights = batch
weights = weights.to(device)
action = action.unsqueeze(1).to(device)
reward = reward.to(self.device)
Q, _ = self.forward(self.policy, state, grad=True)
_, next_action = self.forward(self.policy, next_state)
next_Q, _ = self.forward(self.target, next_state)
## Target value estimation is made using both networks. Prevents overestimation
Q_target = next_Q.gather(1, next_action.unsqueeze(-1)).squeeze()
target = reward + self.gamma * Q_target
Q = Q.gather(1, action).squeeze()
loss = self.loss_fct(Q, target)
loss = loss * weights
priorities = loss + 1e-5
self.buffer.update_priorities(idxs, priorities.detach().cpu().numpy())
loss = loss.mean()
loss.backward()
self.optimizer.step()
return loss.item()
def update_target(self):
self.policy.eval()
self.target.load_state_dict(self.policy.state_dict())
torch.save(self.target.state_dict(), "DQN_Agent.bin")
self.policy.train()
def load_policy(self, path=None):
if path is None:
path = "DQN_Agent.bin"
self.target.load_state_dict(torch.load(path))
print("Successfully loaded")
def evaluate_policy(self, env):
obs = env.reset()
done = False
count = 0
while(not done):
obs = obs.unsqueeze(0)
obs = obs.to(self.device)
env.render()
with torch.no_grad():
q_values = self.target(obs)
action = torch.argmax(q_values, 1).item()
obs, reward, done = env.step(action)
print(f"{count}, {action}, {reward}")
count += 1
def get_beta(self, curr_eps, total_eps):
"""
Reduce beta as episodes trained increases.
"""
beta_start = 0.4
beta = beta_start + curr_eps * (1.0 - beta_start) / total_eps
beta = min(1.0, beta)
return beta
def get_eps(self, i, decay=100):
"""
Reduce epsilon as training progresses to reduce exlporation.
"""
epsilon_start = 1.0
epsilon_final = 0.05
eps = epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * i / decay)
return eps
def learn(self, env, episodes, batch_size):
    """Train the agent with epsilon-greedy exploration and PER replay.

    Logs per-update loss and per-episode reward to TensorBoard; every
    200 environment steps the target network is synced (delayed update).
    SummaryWriter, AverageMeter, np are imported elsewhere in the file.
    """
    writer = SummaryWriter()
    counter = 1        # global step counter, drives the delayed target update
    loss_count = 0     # x-axis index for the loss scalar
    reward_count = 0   # x-axis index for the reward scalar
    for eps in range(episodes):
        obs = env.reset()
        loss_tracker = AverageMeter()
        reward_tracker = AverageMeter()
        for t in range(1000):  # hard cap on episode length
            epsilon = self.get_eps(eps)
            if(np.random.rand() <= epsilon): ## Epsilon greedy
                action = np.random.randint(self.action_size)
            else:
                # assumes forward() accepts an unbatched observation — TODO confirm
                _, action = self.forward(self.policy, obs)
                action = action.item()
            next_obs, reward, done = env.step(action)
            self.buffer.store(obs, action, next_obs, reward)
            reward_tracker.update(reward)
            if(len(self.buffer) >= batch_size):
                batch = self.buffer.sample_batch(batch_size)
                beta = self.get_beta(eps, episodes)
                # NOTE(review): beta is passed on but optimize_policy never
                # applies it — confirm the buffer handles it.
                loss = self.optimize_policy(batch, beta)
                loss_tracker.update(loss)
                writer.add_scalar('Loss', loss, loss_count)
                loss_count += 1
            # NOTE(review): original indentation was lost; this check is
            # placed at step level — confirm it was not nested in the
            # replay-buffer branch above.
            if(counter % 200 == 0): ## Delayed update of target. Promotes exploration
                self.update_target()
            if done: break
            counter += 1
            obs = next_obs
        writer.add_scalar("Reward", reward_tracker.sum, reward_count)
        reward_count += 1
        if((eps + 1) % 10 == 0):
            print(f"Episode: {eps}/{episodes}, step: {t+1}/1000, Epsilon: {epsilon}, reward: {reward_tracker.sum}, loss: {loss_tracker.avg}")
# Build the CartPole environment, train the agent, then evaluate it.
env = gym.make('CartPole-v0')
obs_size = env.observation_space.shape[0]   # state dimensionality
action_size = env.action_space.n            # number of discrete actions
env = PytorchWrapper(env)  # defined elsewhere; presumably converts observations to tensors — confirm
agent = DQN(obs_size, action_size, device)  # DQN, device defined elsewhere in the notebook
## Load tensorboard for visualization of loss
# %load_ext tensorboard
# %tensorboard --logdir runs
agent.learn(env, EPISODES, BATCH_SIZE)  # EPISODES / BATCH_SIZE defined elsewhere
# agent.load_policy() # Load trained policy from local
agent.evaluate_policy(env) # Evaluate target policy
| Prioritized Experience Replay.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import matplotlib.pyplot as plt
import numpy as np
import tb
# +
# Model parameters for a chain of dipolar particles (dimensionless units).
eps0 = 1      # vacuum permittivity
eps_h = 1     # host-medium permittivity
c = 1         # speed of light
omega = np.linspace(1, 5, 100)   # frequency grid
# NOTE(review): dimensionally this is k^2; the wavenumber is usually
# k = sqrt(eps_h)*omega/c — confirm the intended convention.
k = eps_h * omega ** 2 / (c**2)
d = 10        # interparticle spacing
R = 1         # particle radius
# NOTE(review): a sphere's volume is (4/3)*pi*R^3 — confirm the missing 1/3
# factor is intentional.
V = 4 * np.pi * R ** 3
omega_p = 1   # plasma frequency
# Drude model for particle
alpha = 1 / (eps0 * V) * (1.0/3.0 - omega **2 / omega_p ** 2)  # inverse polarizability
# NOTE(review): radiative losses are usually ~ k^3/(6*pi*eps0) — confirm.
losses = 1j * k ** 2 / (6 * np.pi * eps0)
def A1(om, n):
    """Far-field (k^2) dipole coupling to the neighbour n sites away.

    Uses the module globals eps_h, c, eps0, d defined above.
    """
    k_n = eps_h * om ** 2 / (c**2)   # same expression as the module-level k
    r = np.abs(n * d)                # centre-to-centre distance
    return np.exp(1j * k_n * r) / (4.0 * np.pi * eps0 * r) * k_n ** 2
def A2(om, n):
    """Near-field dipole coupling to the neighbour n sites away.

    Uses the module globals eps_h, c, eps0, d defined above.
    """
    k_n = eps_h * om ** 2 / (c**2)
    r = np.abs(n * d)
    return np.exp(1j * k_n * r) / (4.0 * np.pi * eps0 * r) * (1.0 / ((n * d) ** 2) - 1j * k_n / r)
# Build a tight-binding chain with one s-orbital per dipole orientation.
x = tb.Atom('x')
x.add_orbital('s', E_x)  # NOTE(review): E_x is not defined in this chunk — confirm it exists upstream
y = tb.Atom('y')
y.add_orbital('s', E_x)
z = tb.Atom('z')
z.add_orbital('s', E_x)
# NOTE(review): ax, ay, az are not defined in this chunk — confirm upstream.
tb.Atom.orbital_sets = {'x': ax, 'y': ay,'z': az}
# NOTE(review): A1 - A2 subtracts the *function objects*, which raises
# TypeError at runtime; presumably the couplings should be evaluated at a
# frequency (or wrapped in a callable) first — confirm intent.
tb.set_tb_params(PARAMS_x_x={'ss_sigma': A1-A2},
                 PARAMS_y_y={'ss_sigma': A1-A2},
                 PARAMS_z_z={'ss_sigma': 2*A2})
# One unit cell with the three orientation "atoms" stacked along y.
xyz_file = """1
H cell
x1 0.0000000000 0.0000000000 0.0000000000
y1 0.0000000000 1.0000000000 0.0000000000
z1 0.0000000000 2.0000000000 0.0000000000
"""
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1)
h.initialize()
h.set_periodic_bc([[0, 0, 1.0]])   # periodic along z
h_l, h_0, h_r = h.get_hamiltonians()
energy = np.linspace(-3.0, 1.5, 700)
sgf_l = []   # left surface Green's functions, one per energy point
sgf_r = []   # right surface Green's functions
for E in energy:
    L, R, _, _, _ = tb.surface_greens_function(E, h_l, h_0, h_r)
    # L, R = surface_greens_function_poles_Shur(E, h_l, h_0, h_r)
    sgf_l.append(L)
    sgf_r.append(R)
# -
| docs/source/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
# 16-bit sample bitstring whose run lengths are tallied below.
DATA = "1000000101000100"
def random_test(DATA):
    """Tally runs of consecutive ones and zeros in a bitstring.

    First counts, for each window length L = 10..1, how many positions
    start a window that is all ones (resp. all zeros); then applies an
    inclusion-exclusion correction so each row reports runs of *exactly*
    that length, and prints the resulting table.
    """
    ones = [int(ch) for ch in DATA]
    zeros = [abs(1 - int(ch)) for ch in DATA]       # complement bitstring
    lengths = list(range(10, 0, -1))                # window lengths, longest first
    coeffs = list(range(-5, 15, 1))                 # correction coefficients
    run_ones = [0] * 10
    run_zeros = [0] * 10
    # Count windows (truncated at the end of the string) made entirely of
    # ones / zeros for every length.
    for j, length in enumerate(lengths):
        for start in range(len(ones)):
            if sum(ones[start:start + length]) == length:
                run_ones[j] += 1
        for start in range(len(zeros)):
            if sum(zeros[start:start + length]) == length:
                run_zeros[j] += 1
    # Correct shorter-run counts for windows contained in longer runs.
    for i in range(9):
        run_ones[i + 1] = run_ones[i + 1] - np.sum(np.multiply(coeffs[i + 7:6:-1], run_ones[0:i + 1]))
    for i in range(9):
        run_zeros[i + 1] = run_zeros[i + 1] - np.sum(np.multiply(coeffs[i + 7:6:-1], run_zeros[0:i + 1]))
    print('run length ones zeros')
    for i in range(9, -1, -1):
        print(' ',i,' ',run_ones[i],' ',run_zeros[i])
# Run the run-length tally on the sample bitstring.
random_test(DATA)
# -
| HW3/HW3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Porting IDL to Python
import numpy as np
import idlwrap
# ## Introduction
#
# With `numpy` and `scipy`, there are powerful and open-source tools available for scientific computing in python. Currently, lots of scientific projects — especially in astrophysics — still rely on the proprietary and expensive IDL programming language instead of moving forward to open and reproducible science. This guide aims to help in porting an IDL codebase to python, while taking full advantage of its powers.
#
# For help with porting specific IDL functions and routines you are invited to look at the source code of `idlwrap`, which has porting instructions in its docstrings.
#
# ###### reading this guide
#
# This guide contains code examples in both IDL and python. IDL code blocks are prefixed with `IDL>`, whereas python code starts with `>>>`. Also, IDL functions and routines are represented in uppercase.
# ## Rounding
#
#
# ###### technical background
#
# In computer hardware, floating-point numbers are represented as binary fractions. This *binary approximation* can cause confusion --- e.g. in the well-known [example](https://docs.python.org/3.6/tutorial/floatingpoint.html):
#
# ``` python
# >>> 0.1 + 0.1 + 0.1 == 0.3
# False
# ```
# The floating-point value `0.1` is not stored as *exactly* `0.1` in memory, but rather as `3602879701896397 / 2 ** 55 `, which is approximatively `0.1000000000000000055511151231257827021181583404541015625...`. These differences add together and lead to the unusual result.
#
#
# ###### rounding
#
# In IDL, `ROUND` uses *round-half-away-from-zero*, also known as *commercial rounding*. That's what you usually learn in school. It treats positive and negative values symmetrically: If positive and negative numbers are equally probable, this rounding is free of any bias.
#
#
# ``` idl
# IDL> PRINT, ROUND(-0.5), ROUND(0.5), ROUND(1.5), ROUND(2.5)
# -1 1 2 3
# ```
#
# python / numpy use *half-to-even* / *financial rounding* / *mathematical rounding*, which is the default rounding mode in the IEEE-754 standard. On machines, which represent floating-point numbers using *binary approximation*, this rounding is non-biased, whereas *round half away from zero* (like IDL's `ROUND`), would be positively biased.
#
# ``` python
# >>> round(-0.5), round(0.5), round(1.5), round(2.5)
# (0, 0, 2, 2)
# ```
# numpy's `numpy.around` function and the `ndarray.round` method round as python's built-in `round`.
#
#
# ###### porting
#
# In general, you don't have to bother which rounding method your program uses. But if you use `ROUND` when e.g. determining list indices, this could cause differences. Use `idlwrap.round` in that cases, which implements IDL's *round-half-away-from-zero* rounding.
# ## Precision
#
# <!-- Python, and most machines use the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE-754). -->
#
# Floating point numbers are stored internally with a fixed number of *bits*, or *precision*. The IEEE Standard for Binary Floating-Point for Arithmetic (IEEE-754) defines
#
# - **double precision.** python default, used in `float` / `np.float64`. IDL `DOUBLE`. Contains 53bits of precision.
# - **single precision.** IDL default, called `FLOAT`. If you really really need to, use `np.float32`
# - **half precision.** listed for completeness. Corresponds to `np.float16`.
#
# <!-- Python maps `float`s to "IEEE-754 double precision" numbers, which contain 53 bits of precision. In numpy, `float`s are called `np.float64`, which is the default for most function. You could also chose `np.float32` (single-precision) or `np.float16` (half-precision), but you do not want that. -->
#
# IDL often has multiple functions for the different data types, e.g. `FINDGEN` (`FLOAT`, 32 bit) and `DINDGEN` (`DOUBLE`, 64 bit), or `!PI` (32 bit) and `!DPI` (double, 64 bit), while most of numpy's functions accept a `dtype=...` argument.
#
# You usually do not need to think about bits in python, just use e.g. `np.zeros(...)` for both `FLTARR(...)` and `DBLARR(...)`.
#
# > Note: `INTARR(...)` could be replaced by `np.zeros(..., dtype=int)`
# ## Arrays
#
#
# ### memory order
#
# ###### general
#
# There are two different ways of storing a matrix/array in memory:
#
# - **column-major.** The matrix is stored by columns, so the first index is the most rapidly varying index when moving through the elements of the array
# - the first index moves to the next row as it changes
# - e.g. FORTRAN, **IDL**
# - access element by `[column, row]`, upper-left element is `[0,0]`
# - **row-major.** The first index is the row.
# - last index changes most rapidly as one moves through the array as stored in memory
# - e.g. C, Visual Basic, **python**
# - access element by `[row, column]`
#
#
#
# further reading:
#
# - [numpy doc](https://docs.scipy.org/doc/numpy-1.13.0/reference/internals.html#multidimensional-array-indexing-order-issues) on array indexing order
# - [IDL article](http://www.harrisgeospatial.com/Support/SelfHelpTools/HelpArticles/HelpArticles-Detail/TabId/2718/ArtMID/10220/ArticleID/19656/1799.aspx) which talks about array order (see point #5)
#
#
# <!--
# |———————> Row
# |
# |
# |
# |
# V
# column
# -->
#
#
# ###### Example 1
#
# Let's look at an example:
#
# ``` idl
# IDL> PRINT, FLTARR(2, 4) ; 2 columns
# 0.00000 0.00000
# 0.00000 0.00000
# 0.00000 0.00000
# 0.00000 0.00000
# ```
# ``` python
# >>> np.zeros((2,4)) # 4 columns
# array([[0., 0., 0., 0.],
# [0., 0., 0., 0.]])
# ```
#
# In IDL, the *first dimension* is the number of columns, the second the number of rows. You index them the same way, `[column, row]` --- to get the bottom right element:
#
#
# ```idl
# IDL> PRINT, (FLTARR(2, 4))[1,3]
# 0.00000
# ```
#
# In Python, the *first dimension* is the number of rows. Indexing works like `[row, column]`, so the bottom right element is
#
# ``` python
# >>> np.zeros((2,4))[1,3]
# 0.0
# ```
#
# Did you notice how the subset-indices are the *same* for both IDL and python in this case, even if we chose a different element?
#
#
# ###### Example 2
#
#
# ``` idl
# IDL> a = [[1,2,3,4], [5,6,7,8]]
# IDL> a
# 1 2 3 4
# 5 6 7 8
# IDL> SIZE(a)
# 2 4 2 2 8
# ; n_dimensions, columns, rows, ...
# IDL> a[3, 0]
# 4
# ```
#
# ``` python
# >>> a = np.array([[1,2,3,4], [5,6,7,8]])
# >>> a
# array([[1, 2, 3, 4],
# [5, 6, 7, 8]])
# >>> a.shape
# (2, 4) # (rows, columns)
# >>> a[0, 3] # inverse order compared to IDL!
# 4
# ```
# ### array index ranges
#
# In IDL, the index ranges are *inclusive* (they include the endpoint):
#
# ``` idl
# IDL> (FLTARR(10))[3:5]
# 0.00000 0.00000 0.00000 ; -> three elements
#
# ```
#
# While in python, the endpoint is not included:
#
# ``` python
# >>> np.zeros(10)[3:5]
# array([0., 0.]) # -> two elements
# ```
#
# This is also the case for the `FOR` statement.
#
# > *idlwrap* provides two ways around this. The first one would be to use the `subset_` function:
# >
# > ``` python
# > >>> a = np.zeros(10)
# > >>> idlwrap.subset_(a, "[3:5]")
# > array([0., 0., 0.])
# > ```
# >
# > The second way would be to wrap the array inside `subsetify_`. The resulting object (`b`) is like a numpy array, but behaves differently when a string is passed as subset:
# >
# > ``` python
# > >>> a = np.zeros(10)
# > >>> b = idlwrap.subsetify_(a) # b is like a numpy array...
# > >>> b[3:5] # python behaviour
# > array([0., 0.])
# > >>> b["3:5"] # IDL behaviour: pass indices as string
# > array([0., 0., 0.])
# > ```
# ### float indices
#
# IDL automatically floors array indices, so `a[1]` and `a[1.9]` lead to the same result:
#
# ``` idl
# IDL> a = INDGEN(3)
# IDL> a
# 0 1 2
# IDL> a[1]
# 1
# IDL> a[1.9]
# 1
# ```
#
# In python, you'll have to `int` indices, or `numpy` will throw an `IndexError`.
# ## `FOR` statement
#
# In IDL, the endpoint of the `FOR` statement is also included in the loop, while python's `range` excludes the endpoint.
#
# ###### Example 1: integer ranges
#
# ``` idl
# IDL> FOR i=4, 6 DO PRINT, i
# 4
# 5
# 6 ; -> 3 elements
# ```
#
# ``` python
# >>> for i in range(4, 6):
# >>> print(i)
# 4
# 5 # 2 elements
# ```
#
# A common way of dealing with the endpoint in python is to explicitly increment it:
#
# ``` python
# >>> for i in range(4, 6+1):
# >>> print(i)
# 4
# 5
# 6
# ```
#
# ###### Example 2: float ranges
#
# ``` IDL
# IDL> FOR i=3.5, 4.5 DO PRINT, i
# 3.50000
# 4.50000
# ```
#
# While python's built-in `range` only supports integer arguments, numpy's `arange` also allows floats:
#
# ``` python
# >>> for i in np.arange(3.5, 4.5+1):
# >>> print(i)
# 3.5
# 4.5
# ```
#
#
# ###### Example 3: endpoint not reached
#
# ``` IDL
# IDL> FOR i=3.5, 5 DO PRINT, i
# 3.50000
# 4.50000
# ```
#
# Adding an explicit `+1` to `range`/`np.arange` would add another unwanted element to the iteration:
#
# ``` python
# >>> for i in np.arange(3.5, 5+1):
# >>> print(i)
# 3.5
# 4.5
# 5.5
# ```
#
# An alternative approach would be to add a very small offset, e.g. `1e-12` to the endpoint, which leads to the expected result:
#
#
# ``` python
# >>> for i in np.arange(3.5, 5+1e-12):
# >>> print(i)
# 3.5
# 4.5
# ```
#
#
# > *idlwrap*'s `idlwrap.range_` uses `1e-12` as an offset.
#
#
# ###### Example 4: float ranges and array indices
#
# IDL automatically transforms array indices to integers, so this is perfectly valid:
#
# ``` IDL
# IDL> a = INDGEN(6)
# IDL> for i=0.0, 5, 0.7 DO print, i, a[i]
# 0.00000 0
# 0.700000 0
# 1.40000 1
# 2.10000 2
# 2.80000 2
# 3.50000 3
# 4.20000 4
# 4.90000 4
# ```
#
# In python, you'll have to `int` the indices explicitly: `a[int(i)]`.
#
# > **warning**: the following code:
# > ``` IDL
# > FOR i=0, 5, 0.7 DO print, a[i]
# > ```
# > would lead to an infinite loop printing `0`! The difference is the `i=0` (integer type) instead of `i=0.0` (float).
# ## Matrix multiplication
#
#
# IDL provides two matrix multiplication operators, `#` and `##`:
#
# ``` IDL
# IDL> a = indgen(2, 3)
# IDL> a
# 0 1
# 2 3
# 4 5
# IDL> b = indgen(3, 2)
# IDL> b
# 0 1 2
# 3 4 5
# IDL> a # b
# 10 13
# 28 40
# IDL> a ## b
# 3 4 5
# 9 14 19
# 15 24 33
# ```
#
#
#
# ``` python
# >>> a = np.arange(2*3).reshape((3, 2))
# >>> a
# array([[0, 1],
# [2, 3],
# [4, 5]])
# >>> b = np.arange(3*2).reshape((2, 3))
# >>> b
# array([[0, 1, 2],
# [3, 4, 5]])
# ```
#
# python 3.5+ has a new matrix multiplication operator `@`, which behaves like IDL's `##`:
#
# ``` python
# >>> a @ b
# array([[ 3, 4, 5],
# [ 9, 14, 19],
# [15, 24, 33]])
# ```
#
# `@` is an alias for `np.matmul`, the latter also being available in older python/`numpy` versions.
#
# To replicate the `#` operator, one would have to use `.T` to transpose the input and output:
#
# ``` python
# >>> (a.T @ b.T).T
# array([[10, 13],
# [28, 40]])
# ```
| docs/notebooks/01.Porting IDL to Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Models
#
# ***
def f(x, p):
    """Straight line: intercept p[0] plus slope p[1] times x."""
    intercept, slope = p[0], p[1]
    return intercept + slope * x
# <br>
#
# #### Analysis
#
# ***
# +
import seaborn as sns

# Load a dataset.
penguins = sns.load_dataset("penguins")
# Have a look at it.
sns.pairplot(penguins, hue="species")
# -
# Pick out two variables.
flipper = penguins[["body_mass_g", "flipper_length_mm"]].dropna()
# Scatter and fit line for just those two variables.
# NOTE(review): this plots from `penguins`, not the NaN-dropped `flipper` —
# seaborn drops missing values itself, but confirm that is the intent.
sns.regplot(x="body_mass_g", y="flipper_length_mm", data=penguins)
# <br>
#
# #### Train
#
# ***
# +
import sklearn.linear_model as lin

# Fit a simple linear regression of flipper length on body mass.
x = flipper["body_mass_g"].to_numpy()
y = flipper["flipper_length_mm"].to_numpy()
x = x.reshape(-1, 1)   # sklearn expects a 2-D feature matrix
model = lin.LinearRegression()
model.fit(x, y)
r = model.score(x, y)  # R^2 of the fit
p = [model.intercept_, model.coef_[0]]  # line parameters consumed by f(x, p)
# -
r
p
# <br>
#
# #### Predict
#
# ***
# Predict flipper length for a 4500 g penguin using the fitted parameters.
f(4500.0, p)
def predict(x):
    """Predict flipper length (mm) from body mass (g) with the fitted line."""
    return f(x, p)
predict(4500.0)
# ***
| models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bauer-Fike Eigenvalue Sensitivity Bound
#
# Copyright (C) 2019 <NAME>
#
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>
import numpy as np
import numpy.linalg as la
# In the Bauer-Fike eigenvalue sensitivity bound, an important observation is that, given a diagonalized matrix
# $$X^{- 1} A X = D$$
# that is perturbed by an additive perturbation $E$
# $$X^{- 1} (A + E) X = D + F,$$
# and if we suppose that $\mu$ is an eigenvalue of $A+E$ (and $D+F$), we have
# $$\|(\mu I - D)^{- 1}\|^{- 1} = | \mu - \lambda _k |,$$
# where $\lambda_k$ is the eigenvalue of $A$ (diagonal entry of $D$) closest to $\mu$.
#
# This notebook illustrates this latter fact. To that end, let the following be $D$:
# Diagonal matrix whose eigenvalues are 0..5.
D = np.diag(np.arange(6))
D
# A perturbed eigenvalue lying between the diagonal entries 2 and 3.
mu = 2.1
mu * np.eye(6) - D
la.inv(mu * np.eye(6) - D).round(3)
la.norm(la.inv(mu * np.eye(6) - D), 2)
# The actual norm doesn't matter--the norm of a diagonal matrix has to be the biggest (abs. value) diagonal entry:
la.norm(la.inv(mu * np.eye(6) - D), np.inf)
# Reciprocal of the resolvent norm = distance from mu to the nearest eigenvalue.
1/ la.norm(la.inv(mu * np.eye(6) - D), 2)
# Note that this matches the distance between $\mu$ and the closest entry of $D$.
| cleared-demos/eigenvalue/Bauer-Fike Eigenvalue Sensitivity Bound.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # KNN & PCA
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# %matplotlib inline
# Load the iris data set and poke at its contents.
iris = load_iris()
type(iris)  # sklearn Bunch (dict-like container)
iris.keys()
iris['DESCR'][:193]
iris['target']
iris['target_names']
iris['feature_names']
iris['data'][:10]
type(iris['data'])
iris['data'].shape
iris['target'].shape
# Hold out a test set (sklearn's default split fraction), fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(iris['data'], iris['target'], random_state=10)
X_train.shape
X_test.shape
# +
# Hand-rolled pair plot: feature j on x vs feature i+1 on y, coloured by class.
fig, ax = plt.subplots(3, 3, figsize=(15, 15))
plt.suptitle("iris_pairplot")
for i in range(3):
    for j in range(3):
        ax[i, j].scatter(X_train[:, j], X_train[:, i + 1], c=y_train, s=60)
        ax[i, j].set_xticks(())
        ax[i, j].set_yticks(())
        if i == 2:
            ax[i, j].set_xlabel(iris['feature_names'][j])
        if j == 0:
            ax[i, j].set_ylabel(iris['feature_names'][i + 1])
        if j > i:
            # upper triangle duplicates the lower one — hide it
            ax[i, j].set_visible(False)
# -
# ### Principal Component Analysis
#
# Principal Component Analysis (PCA) is a dimension-reduction tool that can be used to reduce a large set of variables to a small set that still contains most of the information in the large set.
#
# The first principal component accounts for as much of the variability in the data as possible, and each succeeding component accounts for as much of the remaining variability as possible.
#
# PCA is a dimensionality reduction or data compression method. The goal is dimension reduction and there is no guarantee that the dimensions are interpretable (a fact often not appreciated by (amateur) statisticians).
y = iris.target
X = iris.data
# Project the 4-D iris features onto the first two principal components.
pca = PCA(n_components=2)
reduced_X = pca.fit_transform(X)
# Split the projected points by class for plotting.
red_x, red_y = [], []
blue_x, blue_y = [], []
green_x, green_y = [], []
for i in range(len(reduced_X)):
    if y[i] == 0:
        red_x.append(reduced_X[i][0])
        red_y.append(reduced_X[i][1])
    elif y[i] == 1:
        blue_x.append(reduced_X[i][0])
        blue_y.append(reduced_X[i][1])
    else:
        green_x.append(reduced_X[i][0])
        green_y.append(reduced_X[i][1])
plt.scatter(red_x, red_y, c='r', marker='x')
plt.scatter(blue_x, blue_y, c='b', marker='D')
plt.scatter(green_x, green_y, c='g', marker='.')
plt.show()
# ## KNN
# 1-nearest-neighbour classifier on the raw features.
model = KNeighborsClassifier(n_neighbors=1)
model.fit(X_train, y_train)
model.score(X_test, y_test)  # accuracy on the held-out set
y_pred = model.predict(X_test)
y_pred
np.mean(y_pred == y_test)    # same accuracy, computed by hand
# i.e. our model is expected to have an accuracy of 97%!
# ### Prediction
# Classify one new measurement (note the 2-D shape sklearn expects).
X_new = np.array([[5, 2.9, 1, 0.2]])
X_new.shape
prediction = model.predict(X_new)
prediction
iris['target_names'][prediction]
# Our model predicts that this new iris belongs to the class 0, meaning its species is Setosa.
# ### References
#
# 1. ftp://statgen.ncsu.edu/pub/thorne/molevoclass/AtchleyOct19.pdf
# 2. Mastering Machine Learning with scikit-learn by <NAME>
| machinelearning/ml_python_knn_pca_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as nplin
import itertools
from coniii import *
# Fix the global RNG seed so the generated data set is reproducible.
np.random.seed(0)
def operators(s):
    """Build the Ising operator matrix for a batch of spin configurations.

    s : array of shape (n_seq, n_var) with +/-1 spins.
    Returns an array of shape (n_seq, n_var + n_var*(n_var-1)/2): the
    n_var single-spin terms s_i, followed by the pairwise products
    s_i*s_j for i < j in row-major pair order — the same column order the
    original double loop produced.

    Vectorized with np.triu_indices instead of the original O(n_var^2)
    Python loops over every sequence.
    """
    n_seq, n_var = s.shape
    i_idx, j_idx = np.triu_indices(n_var, k=1)  # pairs (i, j), i < j, row-major
    pairs = s[:, i_idx] * s[:, j_idx]
    # astype(float) matches the original, which filled a float zeros() array.
    return np.hstack((s, pairs)).astype(np.float64)
def energy_ops(ops, w):
    """Energy of each configuration: row-wise weighted sum of its operators."""
    weighted = ops * w[np.newaxis, :]
    return weighted.sum(axis=1)
def generate_seqs(n_var,n_seq,n_sample=30,g=1.0):
    """Sample n_seq spin configurations from a random Ising model.

    True couplings are drawn i.i.d. N(0, g/sqrt(n_var)); n_seq*n_sample
    uniform +/-1 candidate strings are then importance-resampled with
    Boltzmann weights exp(E).  Returns (w_true, selected_samples).
    Note: the order of the RNG calls below fixes the output for a given
    seed — do not reorder.
    """
    n_ops = n_var+int(n_var*(n_var-1)/2.0)
    #w_true = g*(np.random.rand(ops.shape[1])-0.5)/np.sqrt(float(n_var))
    w_true = np.random.normal(0.,g/np.sqrt(n_var),size=n_ops)
    samples = np.random.choice([1.0,-1.0],size=(n_seq*n_sample,n_var),replace=True)
    ops = operators(samples)
    #n_ops = ops.shape[1]
    sample_energy = energy_ops(ops,w_true)
    # Boltzmann resampling weights (note: +E, i.e. p ~ exp(E))
    p = np.exp(sample_energy)
    p /= np.sum(p)
    out_samples = np.random.choice(np.arange(n_seq*n_sample),size=n_seq,replace=True,p=p)
    return w_true,samples[out_samples] #,p[out_samples],sample_energy[out_samples]
def hopfield_model(s):
    """Hopfield-style estimate: couplings are the sample means of the operators."""
    return operators(s).mean(axis=0)
def MLE(s,s_all,max_iter=100,alpha=5e-2,cov=False):
    """Maximum-likelihood fit of Ising couplings by batch gradient ascent.

    s     : observed configurations, shape (n_seq, n_var).
    s_all : every possible configuration, used for the exact partition sum
            (feasible only for small n_var).
    cov   : unused — cov_inv is fixed to the identity below; TODO confirm
            whether a covariance preconditioner was intended.
    Returns the fitted coupling vector w.
    """
    n_seq,n_var = s.shape
    ops = operators(s)
    cov_inv = np.eye(ops.shape[1])   # identity: the dot() below is a no-op
    ops_obs = np.mean(ops,axis=0)    # data moments (observed operator averages)
    ops_model = operators(s_all)
    n_ops = ops.shape[1]
    np.random.seed(13)               # reproducible initial w
    w = np.random.rand(n_ops)-0.5
    for iterate in range(max_iter):
        # Model moments under the current w via exact enumeration over s_all.
        energies_w = energy_ops(ops_model,w)
        probs_w = np.exp(energies_w)
        probs_w /= np.sum(probs_w)
        #if iterate%10 == 0:
        #print(iterate,nplin.norm(w-w_true)) #,nplin.norm(spin_cov_w-spin_cov_obs))
        #MSE = ((w-w_true)**2).mean()
        #print(iterate,MSE)
        # Log-likelihood gradient: data moments minus model moments.
        w += alpha*cov_inv.dot(ops_obs - np.sum(ops_model*probs_w[:,np.newaxis],axis=0))
    #print('final',iterate,MSE)
    return w
def eps_machine(s,eps_scale=0.1,max_iter=100,alpha=0.1,eps_type='random'):
    """Fit Ising couplings with the 'epsilon-machine' update rule.

    eps_type 'random' draws a fresh eps each iteration (scaled down by
    max|w| when the couplings grow); any other value uses eps_scale as-is.
    Returns (-E_av, w): the negated per-iteration mean-energy trace and
    the final coupling vector.
    """
    MSE = np.zeros(max_iter)   # unused, left over from experiments
    #KL = np.zeros(max_iter)
    E_av = np.zeros(max_iter)
    n_seq,n_var = s.shape
    ops = operators(s)
    n_ops = ops.shape[1]
    cov_inv = np.eye(ops.shape[1])   # identity — the dot() below is a no-op
    np.random.seed(13)               # reproducible initial w
    w = np.random.rand(n_ops)-0.5
    w_iter = np.zeros((max_iter,n_ops))  # unused (trajectory logging disabled)
    for i in range(max_iter):
        if eps_type == 'random':
            eps_scale = np.random.rand()/np.max([1.,np.max(np.abs(w))])
        #if eps_scale == 'modified':
        # eps_scale /= np.max([1.,np.max(np.abs(w))])
        energies_w = energy_ops(ops,w)
        # Reweight the DATA configurations; note the sign: exp(-E*(1-eps)).
        probs_w = np.exp(-energies_w*(1-eps_scale))
        z_data = np.sum(probs_w)
        probs_w /= z_data
        ops_expect_w = np.sum(probs_w[:,np.newaxis]*ops,axis=0)
        E_exp = (probs_w*energies_w).sum()  # only used by the disabled KL line
        E_av[i] = energies_w.mean()
        #KL[i] = -E_exp - np.log(z_data) + np.sum(np.log(np.cosh(w*eps_scale))) + n_var*np.log(2.)
        #MSE[i] = ((w-w_true)**2).mean()
        sec_order = w*eps_scale   # second-order (self-interaction) correction
        w += alpha*cov_inv.dot((ops_expect_w - sec_order))
        #w_iter[i,:] = w
    return -E_av,w
# +
# Experiment settings: 40 spins, 2000 sampled configurations, coupling scale g.
max_iter = 100
n_var,n_seq = 40,2000
g = 0.5
w_true,seqs = generate_seqs(n_var,n_seq,g=g)
n_ops = n_var+int(n_var*(n_var-1)/2.0)
# Five estimators are compared: HF, MLE, PL, random-eps, optimal-eps.
n_method = 5
w = np.zeros((n_method,n_ops))
mse = np.zeros(n_method)
# +
## Hopfield:
w_hf = hopfield_model(seqs)
w[0,:] = w_hf
mse[0] = ((w_hf-w_true)**2).mean()
print('HF:',mse[0])
# +
## MLE: (disabled — exact enumeration of 2^40 configurations is infeasible)
#s_all = np.asarray(list(itertools.product([1.0, -1.0], repeat=n_var)))
#print('all configs size:',s_all.shape)
#w_mle = MLE(seqs,s_all,cov=False)
#w[1,:] = w_mle
#mse[1] = ((w_mle-w_true)**2).mean()
#print('MLE:',mse[1])
# +
## pseudo likelihood estimation (coniii's Pseudo solver)
np.random.seed(13)
# Define common functions
calc_e,calc_observables,mchApproximation = define_ising_helper_functions()
get_multipliers_r,calc_observables_r = define_pseudo_ising_helpers(n_var)
solver = Pseudo(n_var,calc_observables=calc_observables,
                calc_observables_r=calc_observables_r,
                get_multipliers_r=get_multipliers_r)
w_pl = solver.solve(seqs,np.zeros(n_ops))
w[2,:] = w_pl
mse[2] = ((w_pl-w_true)**2).mean()
print('PL:',mse[2])
# +
## random eps
E_av,w_random = eps_machine(seqs,eps_scale=0.1,max_iter=max_iter,eps_type='random')
w[3,:] = w_random
mse[3] = ((w_random-w_true)**2).mean()
print('random eps:',mse[3])
# +
## optimal eps: scan eps over [0.1, 0.9] and keep the run with the largest final -E_av
eps_list = np.linspace(0.1,0.9,91)
n_eps = len(eps_list)
E_av = np.zeros((n_eps,max_iter))
w_eps = np.zeros((n_eps,n_ops))
for i,eps in enumerate(eps_list):
    E_av[i,:],w_eps[i,:] = eps_machine(seqs,eps_scale=eps,max_iter=max_iter,eps_type='optimal')
    #print(eps,E_av[i,-1])
ieps = np.argmax(E_av[:,-1])
print('optimal eps:',ieps,eps_list[ieps])
w_opt = w_eps[ieps]
w[4,:] = w_eps[ieps]
mse[4] = ((w_eps[ieps]-w_true)**2).mean()
print('opt epsilon:',mse[4])
plt.plot(eps_list,E_av[:,-1])
# +
# Scatter of estimated vs true couplings per method (the diagonal = perfect recovery).
plt.plot([-0.4,0.4],[-0.4,0.4])
plt.plot(w_true,w[0],'m^',marker='^',mfc='none',markersize=5,label='HF')
plt.plot(w_true,w[1],'kv',marker='v',mfc='none',markersize=5,label='MLE')  # all zeros: the MLE cell is disabled
plt.plot(w_true,w[2],'bs',marker='s',mfc='none',markersize=5,label='PLE')
plt.plot(w_true,w[3],'go',marker='o',mfc='none',markersize=5,label='RE')
plt.plot(w_true,w[4],'ro',marker='o',markersize=5,label='OE')
plt.legend()
# -
print(mse)
# Save couplings (true + 5 estimates) and MSEs, tagged by (n_var, g, n_seq).
w_all = np.vstack((w_true[np.newaxis,:],w))
np.savetxt('w_%s_%s_%s.dat'%(n_var,g,n_seq),w_all,fmt='%f')
np.savetxt('mse_%s_%s_%s.dat'%(n_var,g,n_seq),mse,fmt='%f')
| Ref/fig2_compare_all_method/m40/eps_machine_all_methods_m40_g05_2000.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# ## 一. Predicting Movie Ratings
#
# 
#
# 以预测第3部电影第1个用户可能评的分数为例子。
#
# 首先我们用 $x_1$ 表示爱情浪漫电影类型, $x_2$ 表示动作片类型。上图左表右侧则为每部电影对于这两个分类的相关程度。我们默认 $x_0=1$ 。则第一部电影与两个类型的相关程度可以这样表示: $x^{(3)}=\left[ \begin{array}{ccc}1 \\0.99 \\0 \end{array} \right]$ 。然后用 $\theta^{(j)}$ 表示第 j 个用户对于该种类电影的评分。这里我们假设已经知道(详情下面再讲) $\theta^{(1)}=\left[ \begin{array}{ccc}0 \\5 \\0 \end{array} \right]$ ,那么我们用 $(\theta^{(j)})^Tx^{(i)}$ 即可计算出测第3部电影第1个用户可能评的分数。这里计算出是4.95。
#
#
# ### 1. 目标优化
#
# 为了对用户 j 打分状况作出最精确的预测,我们需要:
#
# $$\min_{(\theta^{(j)})}=\frac{1}{2}\sum_{i:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{k=1}^{n}{(\theta_k^{(j)})^2}$$
#
# 计算出所有的 $\theta$ 为:
#
#
# $$J(\theta^{(1)},\cdots,\theta^{(n_u)})=\min_{(\theta^{(1)},\cdots,\theta^{(n_u)})}=\frac{1}{2}\sum_{j=1}^{n_u}\sum_{i:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{j=1}^{n_u}\sum_{k=1}^{n}{(\theta_k^{(j)})^2}$$
#
#
# 与前面所学线性回归内容的思路一致,为了计算出 $J(\theta^{(1)},\cdots,\theta^{(n_u)})$,使用梯度下降法来更新参数:
#
# 更新偏置(插值):
#
# $$\theta^{(j)}_0=\theta^{(j)}_0-\alpha \sum_{i:r(i,j)=1}((\theta^{(j)})^Tx^{(i)}-y^{(i,j)})x^{(i)}_0$$
#
#
#
# 更新权重:
#
# $$\theta^{(j)}_k=\theta^{(j)}_k-\alpha \left( \sum_{i:r(i,j)=1}((\theta^{(j)})^Tx^{(i)}-y^{(i,j)})x^{(i)}_k+\lambda \theta^{(j)}_k \right),\;\;\; k \neq 0$$
#
#
#
#
# ----------------------------------------------------------------------------------------------------------------
# ## 二. Collaborative Filtering 协同过滤
#
# 前提是我们知道了 $\theta^{(j)}$ 也就是每个用户对于各个电影类型的喜爱程度。那么我们就可以根据各个用户对各部电影的评分= $(\theta^{(j)})^Tx^{(i)}$ 反推出 $x^{(i)}$ 。
#
# ### 1. 目标优化
#
#
#
# 当用户给出他们喜欢的类型,即 $\theta^{(1)},\cdots,\theta^{(n_u)}$ ,我们可以由下列式子得出 $x^{(i)}$ :
#
# $$\min_{(x^{(i)})}=\frac{1}{2}\sum_{j:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{k=1}^{n}{(x_k^{(i)})^2}$$
#
# 可得出所有的 x 则为:
#
# $$\min_{(x^{(1)},\cdots,x^{(n_m)})}=\frac{1}{2}\sum_{i=1}^{n_m}\sum_{j:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{i=1}^{n_m}\sum_{k=1}^{n}{(x_k^{(i)})^2}$$
#
# 只要我们得到 $\theta$ 或者 x ,都能互相推导出来。
#
#
# 协同过滤算法基本思想就是当我们得到其中一个数据的时候,我们推导出另一个,然后根据推导出来的再推导回去进行优化,优化后再继续推导继续优化,如此循环协同推导。
#
#
# ### 2. 协同过滤的目标优化
#
# 1. 推测用户喜好:给定$x^{(1)},\cdots,x^{(n_m)}$ ,估计$\theta^{(1)},\cdots,\theta^{(n_\mu)}$ :
# $$\min_{(\theta^{(1)},\cdots,\theta^{(n_\mu)})}=\frac{1}{2}\sum_{j=1}^{n_\mu}\sum_{i:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{j=1}^{n_\mu}\sum_{k=1}^{n}{(\theta_k^{(j)})^2}$$
# 2. 推测商品内容:给定$\theta^{(1)},\cdots,\theta^{(n_\mu)}$ ,估计$x^{(1)},\cdots,x^{(n_m)}$ :
# $$\min_{(x^{(1)},\cdots,x^{(n_m)})}=\frac{1}{2}\sum_{i=1}^{n_m}\sum_{j:r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{i=1}^{n_m}\sum_{k=1}^{n}{(x_k^{(i)})^2}$$
# 3. 协同过滤:同时优化$x^{(1)},\cdots,x^{(n_m)}$ ,估计$\theta^{(1)},\cdots,\theta^{(n_\mu)}$:
# $$\min \; J(x^{(1)},\cdots,x^{(n_m)};\theta^{(1)},\cdots,\theta^{(n_\mu)})$$
#
#
# 即:
#
# $$\min_{(x^{(1)},\cdots,x^{(n_m)};\theta^{(1)},\cdots,\theta^{(n_\mu)})}=\frac{1}{2}\sum_{(i,j):r(i,j)=1}^{}{((\theta^{(j)})^T(x^{(i)})-y^{(i,j)})^2}+\frac{\lambda}{2}\sum_{i=1}^{n_m}\sum_{k=1}^{n}{(x_k^{(i)})^2}+\frac{\lambda}{2}\sum_{j=1}^{n_u}\sum_{k=1}^{n}{(\theta_k^{(j)})^2}$$
#
# 因为正则化的原因在这里面不再有之前的 $x_0=1$,$\theta_0=0$ 。
#
#
#
# ### 3. 协同过滤算法的步骤为:
#
# 1. 随机初始化$x^{(1)},\cdots,x^{(n_m)},\theta^{(1)},\cdots,\theta^{(n_\mu)} $为一些较小值,与神经网络的参数初始化类似,为避免系统陷入僵死状态,不使用 0 值初始化。
# 2. 通过梯度下降的算法计算出$J(x^{(1)},\cdots,x^{(n_m)},\theta^{(1)},\cdots,\theta^{(n_\mu)})$,参数更新式为:
# $$x^{(i)}_k=x^{(i)}_k-\alpha \left( \sum_{j:r(i,j)=1}((\theta^{(j)})^Tx^{(i)}-y^{(i,j)})\theta^{(j)}_k+\lambda x^{(i)}_k \right)$$
# $$\theta^{(j)}_k=\theta^{(j)}_k-\alpha \left( \sum_{i:r(i,j)=1}((\theta^{(j)})^Tx^{(i)}-y^{(i,j)})x^{(i)}_k+\lambda \theta^{(j)}_k \right)$$
# 3. 如果用户的偏好向量为$\theta$,而商品的特征向量为 x ,则可以预测用户评价为 $\theta^Tx$ 。
#
# 因为协同过滤算法 $\theta$ 和 x 相互影响,因此,二者都没必要使用偏置 $\theta_0$ 和 $x_0$,即,$x \in \mathbb{R}^n$、 $\theta \in \mathbb{R}^n$ 。
#
#
#
# ----------------------------------------------------------------------------------------------------------------
# ## 三. Low Rank Matrix Factorization 低秩矩阵分解
#
#
# ### 1. 向量化
#
#
# 
#
# 还是以电影评分为例子。首先我们将用户的评分写成一个矩阵 Y 。
#
#
# 
#
#
# 更为详细的表达如上图所示。矩阵 Y 可表示为 $\Theta^TX$ 。这个算法也叫低秩矩阵分解(Low Rank Matric Factorization)。
#
#
# ### 2. 均值标准化 Mean Normalization
#
# 
#
#
#
# 当有一个用户什么电影都没有看过的话,我们用 $\Theta^TX$ 计算最后得到的结果全部都是一样的,并不能很好地推荐哪一部电影给他。
#
#
# 
#
#
# 均值归一化要做的就是先计算每一行的平均值,再将每一个数据减去该行的平均值,得出一个新的评分矩阵。然后根据这个矩阵拟合出 $\Theta^TX$ ,最后的衡量结果加上平均值,即: $\Theta^TX+\mu_i$ 。而该 $\mu_i$ 就作为之前什么都没有的一个权值进行推荐。
#
#
# ----------------------------------------------------------------------------------------------------------------
# ## 四. Recommender Systems 测试
#
#
# ### 1. Question 1
#
# Suppose you run a bookstore, and have ratings (1 to 5 stars) of books. Your collaborative filtering algorithm has learned a parameter vector θ(j) for user j, and a feature vector x(i) for each book. You would like to compute the "training error", meaning the average squared error of your system's predictions on all the ratings that you have gotten from your users. Which of these are correct ways of doing so (check all that apply)? For this problem, let m be the total number of ratings you have gotten from your users. (Another way of saying this is that $m=\sum^{n_m}_{i=1}\sum^{n_\mu}_{j=1}r(i,j))$. [Hint: Two of the four options below are correct.]
#
#
# A. $$\frac{1}{m}\sum_{(i,j):r(i,j)=1}((\theta^{(j)})^{T}x^{(i)}-y^{(i,j)})^2$$
#
# B. $$\frac{1}{m}\sum^{n_\mu}_{i=1}\sum_{j:r(i,j)=1}(\sum_{k=1}^{n}(\theta^{(j)})_{k}x_{k}^{(i)}-y^{(i,j)})^2$$
#
# C. $$\frac{1}{m}\sum^{n_\mu}_{j=1}\sum_{i:r(i,j)=1}(\sum_{k=1}^{n}(\theta^{(k)})_{j}x_{i}^{(k)}-y^{(i,j)})^2$$
#
# D. $$\frac{1}{m}\sum_{(i,j):r(i,j)=1}((\theta^{(j)})^{T}x^{(i)}-r(i,j))^2$$
#
# 解答:A、B
#
#
#
# ### 2. Question 2
#
# In which of the following situations will a collaborative filtering system be the most appropriate learning algorithm (compared to linear or logistic regression)?
#
#
# A. You run an online bookstore and collect the ratings of many users. You want to use this to identify what books are "similar" to each other (i.e., if one user likes a certain book, what are other books that she might also like?)
#
# B. You own a clothing store that sells many styles and brands of jeans. You have collected reviews of the different styles and brands from frequent shoppers, and you want to use these reviews to offer those shoppers discounts on the jeans you think they are most likely to purchase
#
# C. You manage an online bookstore and you have the book ratings from many users. You want to learn to predict the expected sales volume (number of books sold) as a function of the average rating of a book.
#
# D. You're an artist and hand-paint portraits for your clients. Each client gets a different portrait (of themselves) and gives you 1-5 star rating feedback, and each client purchases at most 1 portrait. You'd like to predict what rating your next customer will give you.
#
# 解答:A、B
#
# 协同过滤算法的要求是特征量和数据比较多。
#
# A. 您运行在线书店并收集许多用户的评分。你想用这个来确定哪些书是彼此“相似”的(例如,如果一个用户喜欢某本书,她可能还喜欢其他书?)特征量很多,协同过滤。
#
# B. 你拥有一家销售多种风格和品牌牛仔裤的服装店。您已经收集了来自经常购物者的不同款式和品牌的评论,并且您希望使用这些评论为您认为他们最有可能购买的牛仔裤提供这些购物者折扣。特征量很多,协同过滤。
#
# C. 您可以管理在线书店,并拥有来自许多用户的图书评分。你想要学习预测预期销售量(出售书籍的数量)作为书籍平均评分的函数。用线性回归更好。
#
# D. 你是一位艺术家,为你的客户提供手绘肖像画。每个客户都会获得不同的肖像(他们自己),并为您提供1-5星评级反馈,每位客户至多购买1张肖像。您想预测下一位客户给您的评分。用逻辑回归更好。
#
#
#
# ### 3. Question 3
#
# You run a movie empire, and want to build a movie recommendation system based on collaborative filtering. There were three popular review websites (which we'll call A, B and C) which users to go to rate movies, and you have just acquired all three companies that run these websites. You'd like to merge the three companies' datasets together to build a single/unified system. On website A, users rank a movie as having 1 through 5 stars. On website B, users rank on a scale of 1 - 10, and decimal values (e.g., 7.5) are allowed. On website C, the ratings are from 1 to 100. You also have enough information to identify users/movies on one website with users/movies on a different website. Which of the following statements is true?
#
#
# A. It is not possible to combine these websites' data. You must build three separate recommendation systems.
#
# B. You can merge the three datasets into one, but you should first normalize each dataset separately by subtracting the mean and then dividing by (max - min) where the max and min (5-1) or (10-1) or (100-1) for the three websites respectively.
#
# C. You can combine all three training sets into one as long as your perform mean normalization and feature scaling after you merge the data.
#
# D. You can combine all three training sets into one without any modification and expect high performance from a recommendation system.
#
# 解答: B
#
# 做特征缩放。
#
# ### 4. Question 4
#
# Which of the following are true of collaborative filtering systems? Check all that apply.
#
# A. Even if each user has rated only a small fraction of all of your products (so r(i,j)=0 for the vast majority of (i,j) pairs), you can still build a recommender system by using collaborative filtering.
#
# B. For collaborative filtering, it is possible to use one of the advanced optimization algoirthms (L-BFGS/conjugate gradient/etc.) to solve for both the $x^{(i)}$'s and $\theta^{(j)}$'s simultaneously.
#
# C. For collaborative filtering, the optimization algorithm you should use is gradient descent. In particular, you cannot use more advanced optimization algorithms (L-BFGS/conjugate gradient/etc.) for collaborative filtering, since you have to solve for both the $x^{(i)}$'s and $\theta^{(j)}$'s simultaneously.
#
# D. Suppose you are writing a recommender system to predict a user's book preferences. In order to build such a system, you need that user to rate all the other books in your training set.
#
# 解答:A、B
#
#
#
# ### 5. Question 5
#
# Suppose you have two matrices A and B, where A is 5x3 and B is 3x5. Their product is C=AB, a 5x5 matrix. Furthermore, you have a 5x5 matrix R where every entry is 0 or 1. You want to find the sum of all elements C(i,j) for which the corresponding R(i,j) is 1, and ignore all elements C(i,j) where R(i,j)=0. One way to do so is the following code:
#
# 
#
# Which of the following pieces of Octave code will also correctly compute this total? Check all that apply. Assume all options are in code.
#
#
# A. $total = sum(sum((A * B) .* R))$
#
# B. $C = A * B; total = sum(sum(C(R == 1)))$;
#
# C. $C = (A * B) * R; total = sum(C(:))$;
#
# D. $total = sum(sum(A(R == 1) * B(R == 1))$;
#
#
# 解答:A、B
#
# ----------------------------------------------------------------------------------------------------------------
# > GitHub Repo:[Halfrost-Field](https://github.com/halfrost/Halfrost-Field)
# >
# > Follow: [halfrost · GitHub](https://github.com/halfrost)
# >
# > Source: [https://github.com/halfrost/Halfrost-Field/blob/master/contents/Machine\_Learning/Recommender\_Systems.ipynb](https://github.com/halfrost/Halfrost-Field/blob/master/contents/Machine_Learning/Recommender_Systems.ipynb)
| contents/Machine_Learning/Recommender_Systems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Differentiable Vietoris-Rips persistent homology
#
# In this example, we essentially reproduce the *toy experiment* from
#
# **Connectivity-Optimized Representation Learning via Persistent Homology**
# <NAME>, <NAME>, <NAME> and <NAME>
# ICML '19
# [Online](http://proceedings.mlr.press/v97/hofer19a.html)
#
# ## Notation
#
# - $S$ is a mini-batch of points $x \in \mathbb{R}^2$ of size $|S|=b$
# - $\dagger(S)$ is the set of death-times obtained from the VR PH of $S$
# - $\eta$ is the desired lifetime value (in our case $\eta=2$)
# - $\varepsilon_t, t \in \dagger(S)$ are the pairwise distance values of points in $S$
#
# ## Learning task
#
# Given a 2D point cloud (sampled from three Gaussians), find a mapping
# $f_\theta: \mathbb{R}^2 \to \mathbb{R}^2$ (implemented via a simple MLP) such that the
# *connectivity loss*
#
# $$L_\eta(S) = \sum_{t \in \dagger(S)} |\eta -\epsilon_t|$$
#
# is minimized over mini-batches of samples ($S$).
#
# %load_ext autoreload
# %autoreload 2
# +
# PyTorch imports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset, random_split, TensorDataset
# matplotlib imports
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# misc
from collections import defaultdict, Counter
from itertools import combinations
# imports from torchph
from torchph.pershom import vr_persistence_l1
def apply_model(model, dataset, batch_size=100, device='cpu'):
    """Run ``model`` over every sample in ``dataset`` and collect the results.

    The model is put into eval mode and moved to ``device``; gradients are
    disabled during the forward passes.

    Args:
        model: a ``torch.nn.Module`` applied batch-wise to the data.
        dataset: a ``torch.utils.data.Dataset`` yielding ``(x, y)`` pairs.
        batch_size: mini-batch size used for the forward passes.
        device: device string the model and batches are moved to.

    Returns:
        Tuple ``(outputs, labels)`` of plain Python lists.
    """
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=0
    )
    outputs, labels = [], []
    model.eval()
    model.to(device)
    with torch.no_grad():
        for batch_x, batch_y in loader:
            out = model(batch_x.to(device))
            # Models that return several tensors (e.g. the encoder of an
            # autoencoder) are reduced to their first output.
            if isinstance(out, (tuple, list)):
                out = out[0]
            outputs.extend(out.cpu().tolist())
            labels.extend(batch_y.tolist())
    return outputs, labels
# Run everything on the GPU
# NOTE(review): requires a CUDA-capable GPU; set to "cpu" to run without one.
device = "cuda"
# -
# ## Toy data
#
# First, we create a toy dataset with 2D points sampled from three Gaussians with different means and covariances. In particular, we create a class which derives from `torch.utils.data.Dataset` so we can later conveniently use PyTorch's dataloader.
class Toy2DData(torch.utils.data.Dataset):
    """Toy dataset of 2D points drawn from three Gaussians (one per class).

    Each class ``y`` in ``{0, 1, 2}`` contributes ``n_samples_by_class``
    points sampled as ``randn * sigma - mu``.
    """

    def __init__(self, n_samples_by_class):
        super().__init__()
        X = []
        Y = []
        # Per-class offsets and per-axis standard deviations.
        self.mus = [
            [4.0, 4.0],
            [3.0, 3.0],
            [0.0, 0.5]
        ]
        self.sigmas = [
            [1.0, 1.0],
            [3.0, 3.0],
            [0.5, 0.5]
        ]
        for y, (m, s) in enumerate(zip(self.mus, self.sigmas)):
            # NOTE(review): the offset is *subtracted*, so the effective class
            # means are -m; kept as-is to preserve the original point layout.
            X_class = torch.randn((n_samples_by_class, 2)) * torch.tensor(s) - torch.tensor(m)
            X.append(X_class)
            # Bug fix: label each class with its own index y. The original
            # appended a constant 0 for every sample, so all labels were
            # identical and the class information was lost.
            Y += n_samples_by_class * [y]
        self.X = torch.cat(X, dim=0)
        self.Y = Y

    def __len__(self):
        return len(self.Y)

    def __getitem__(self, item):
        return self.X[item], self.Y[item]

    def __iter__(self):
        return zip(self.X, self.Y)
# Let's sample 1,500 points from this dataset (500 per Gaussian) and visualize the configuration in $\mathbb{R}^2$.
# +
# Draw 500 points per Gaussian (1,500 total) and scatter-plot them.
dataset = Toy2DData(500)
plt.figure(figsize=(5,5))
X = dataset.X.numpy()
plt.plot(X[:,0],
         X[:,1], '.',markersize=2);
plt.xlabel('x')
plt.ylabel('y')
plt.title('Toy2DData');
# -
# ## Model & Optimization
#
# We implement our mapping $f_\theta$ as a simple MLP with three linear layers, interleaved with LeakyReLU activations. For optimization we use ADAM. We run over 30 epochs with a learning rate of $0.01$ and over 20 additional epochs with a learning rate of $0.001$.
# +
# MLP implementing the mapping f_theta: R^2 -> R^2.
model = nn.Sequential(
    nn.Linear(2, 10),
    nn.LeakyReLU(),
    nn.Linear(10, 10),
    nn.LeakyReLU(),
    nn.Linear(10, 2)
).to(device)
opt = torch.optim.Adam(
    model.parameters(),
    lr=0.01)
dl = DataLoader(
    dataset,
    batch_size=50,
    shuffle=True,
    drop_last=True)
# Get the transformed points at initialization
transformed_pts = [apply_model(model, dataset, device=device)[0]]
iteration_loss = []
for epoch_i in range(1, 51):
    epoch_loss = 0
    model.train()
    # Learning rate schedule
    # NOTE(review): the markdown above says "30 epochs at 0.01, then 20 at
    # 0.001", but the code switches at epochs 20 and 40 and adds a third
    # 1e-4 stage — confirm which schedule is intended.
    if epoch_i == 20:
        for param_group in opt.param_groups:
            param_group['lr'] = 0.001
    if epoch_i == 40:
        for param_group in opt.param_groups:
            param_group['lr'] = 0.0001
    # Iterate over batches
    for x, _ in dl:
        x = x.to(device)
        # Compute f_\theta(S)
        x_hat = model(x)
        """
        Loss computation (for \eta=2):
        (1) Compute VR persistent homology (0-dim)
        (2) Get lifetime values
        (3) Compute connectivity loss
        Note that all barcode elements are of the form (0,\varepsilon_t)!
        """
        loss = 0
        pers = vr_persistence_l1(x_hat, 0, 0)[0][0] # VR PH computation
        pers = pers[:, 1] # get lifetimes
        # Connectivity loss: sum of |eta - eps_t| with eta = 2.
        loss = (pers - 2.0).abs().sum() #
        # Track loss over iterations and epochs
        iteration_loss.append(loss.item())
        epoch_loss += loss.item()
        # Zero-grad, backprop, update!
        opt.zero_grad()
        loss.backward()
        opt.step()
    print('Epoch: {:2d} | Loss: {:.2f}'.format(epoch_i, epoch_loss/len(dl)), end='\r')
    # Snapshot the transformed point cloud after every epoch for later plots.
    x_hat, _ = apply_model(
        model,
        dataset,
        device=device)
    transformed_pts.append(x_hat)
# -
# Visualize the loss over all iterations ...
# Per-batch connectivity loss over the whole training run.
plt.figure(figsize=(5,3))
plt.plot(iteration_loss)
plt.xlabel('#Batches');
plt.ylabel('Loss');
plt.grid();
# ## Visualization
#
# To study the effect of minimizing the connectivity loss, we freeze the model and check how the min/max/avg. lifetime changes over (1) epochs.
def track_persistence_info(points, batch_size, N):
    """Collect 0-dim VR lifetime statistics over ``N`` passes of random batches.

    For each shuffled batch, the minimum ('alpha'), maximum ('beta') and mean
    ('avgeps') death-time of the 0-dimensional Vietoris-Rips barcode are
    recorded.

    Returns:
        ``defaultdict(list)`` mapping 'alpha'/'beta'/'avgeps' to per-batch values.
    """
    labels = torch.tensor([0] * len(points))
    loader = DataLoader(
        TensorDataset(torch.tensor(points), labels),
        batch_size=batch_size,
        shuffle=True,
        drop_last=True)
    stats = defaultdict(list)
    for _ in range(N):
        for batch, _unused in loader:
            lifetimes = vr_persistence_l1(batch.to(device), 0, 0)[0][0][:, 1]
            stats['alpha'].append(lifetimes.min().item())
            stats['beta'].append(lifetimes.max().item())
            stats['avgeps'].append(lifetimes.mean().item())
    return stats
def visualize(transformed_pts, ax):
    """Scatter-plot the transformed points and annotate lifetime statistics.

    The title shows the mean minimum / average / maximum 0-dim VR lifetime
    over random batches (via ``track_persistence_info``).

    Args:
        transformed_pts: sequence of 2D points (lists from ``apply_model``).
        ax: matplotlib axes to draw on.
    """
    # Bug fix: the original called np.array/np.mean, but numpy is never
    # imported in this notebook, so this cell raised a NameError. Plain
    # Python sequences work for both plotting and the persistence helper.
    xs = [p[0] for p in transformed_pts]
    ys = [p[1] for p in transformed_pts]
    ax.plot(xs, ys, '.', **{'markersize': 2, 'color': 'black', 'alpha': 0.3})
    stats = track_persistence_info(
        transformed_pts,
        50,
        10)

    def _mean(values):
        # Average of a non-empty list of floats.
        return sum(values) / len(values)

    ax.set_title(r'$\widehat{\alpha},\widehat{\varepsilon}, \widehat{\beta}$ = ' + '{:.2f}, {:.2f}, {:.2f}'.format(
        _mean(stats['alpha']),
        _mean(stats['avgeps']),
        _mean(stats['beta'])),
        position=(0.04,0.02),
        fontsize=12,
        horizontalalignment='left',
        bbox=dict(facecolor='white', alpha=0.7));
# From left to right: Initialization (epoch 0), after 5 epochs, after 50 epochs:
# Side-by-side snapshots: initialization, after 5 epochs, after 50 epochs.
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
for i, epoch in enumerate([0, 5, 50]):
    ax = axes[i]
    visualize(transformed_pts[epoch], ax)
# **Note**: Observe how the $[\hat{\alpha}, \hat{\beta}]$ interval gets tighter throughout the epochs and $\hat{\varepsilon}$ gets closer to $\eta=2$. However, arranging batches of size 50 in the desired manner is impossible in $\mathbb{R}^2$ which is why the actual value of $2$ is never reached (for details see paper).
| docs_src/source/tutorials/ToyDiffVR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Exercise: transient channels (do this after you have done the lessons!)
# *This lesson has been written by <NAME> at the University of Edinburgh*
#
# *Last update 30/09/2021*
# This notebook contains an exercise to help you understand transient channels. You can attempt this after going through all the lessons. **THIS IS NOT THE ERODING LANDSCAPES ASSESSMENT**
#
# **Warning:** As of October 2021 the widgets are no longer working properly. So you might want to skip to the part that just has you running the code without widgets
#
# * We will be using the channel toy (see previous lessons) to look at the behaviour of a river in response to changing uplift.
# * We then will look at how this modifies the chi profile.
# * We will then look at a real channel network and let you draw some conclusions about the history of uplift in that location.
# ## Before we start: install and import some stuff
# !pip install channeltoy ipympl
# %matplotlib widget
# %matplotlib inline
import ipywidgets as widgets
from ipywidgets import interactive
import channeltoy as ct
import matplotlib.pyplot as plt
import numpy as np
# ## Part 1: Simple channel profile
# Let's create an initial channel and then change the uplift rate. Simply execute (shift-enter) the following cell and then play around with `K` and the `initial_U` (which is initial uplift in m/yr) to see how steep a channel is.
#
# Note: the script fixes the vertical axis. If it is too big or too small go into the cell and change the line that sets the limits:
#
# `ax.set_ylim([0, 2000])`
# +
plt.rcParams['figure.figsize'] = [10, 5]
def plot_channel_SS(K = 0.00005, initial_U = 0.0002, basin_length = 10000 ):
    """Plot the steady-state channel profile for a given erodibility and uplift.

    Args:
        K: erodibility coefficient.
        initial_U: uplift rate in m/yr.
        basin_length: basin length in m (sets the channel extent).
    """
    channel = ct.channeltoy(spacing=250, U=initial_U, K=K, n=1, m=0.45,
                            maximum_x=basin_length - 999, X_0=basin_length)
    steady_elevation = channel.solve_steady_state_elevation()
    distance = channel.x_data
    chi = channel.chi_data  # read for parity with the other cells (unused here)
    fig, ax = plt.subplots()
    ax.plot(distance, steady_elevation, label="Initial elevation")
    ax.set_xlabel("Distance from outlet (m)")
    ax.set_ylabel("Elevation (m)")
    # Fixed vertical range so profiles are comparable across slider settings.
    ax.set_ylim([0, 2000])
    ax.legend()
# adjust the main plot to make room for the sliders
#plt.subplots_adjust(left=0.25, bottom=0.25)
# Slider tuples are (min, max, step) for each parameter.
interactive_plot = interactive(plot_channel_SS,
                               K=(0.000001, 0.0001, 0.000005),
                               initial_U=(0.0001, 0.001, 0.0001),
                               basin_length=(5000,100000,5000)
                               )
# NOTE(review): later cells use children[-1] for the output widget; here
# children[1] is used — confirm this picks the output and not a slider.
output = interactive_plot.children[1]
output.layout.height = '80px'
interactive_plot
# -
# ## Part 2: A transient channel
# This next bit of code sets up an interactive transient channel.
#
# * It starts with a channel profile in steady-state with the uplift rate `initial_U`.
# * You then increase the uplift rate to `new_U`. The whole landscape will begin to uplift faster.
# * A knickpoint will develop as the channel steepens until the erosion rate matches the new uplift rate. The knickpoint will move upslope as you increase the duration of this simulation (set by the `end_time`).
# * Play around with some of the parameters (`K`, `new_U`, etc) to see how fast the knickpoint moves!
#
# Again, you don't need to adjust anything in the code. Just click on the box and then shift-enter and it will give you an interactive plot.
# Under the hood is a numerical model so you will need to wait a little while each time you change a parameter.
# +
def plot_channel(K = 0.00005, initial_U = 0.0002, new_U = 0.0005 ,end_time = 50000,basin_length = 10000 ):
    """Plot a channel at steady state and after ``end_time`` years of faster uplift.

    A channel in equilibrium with ``initial_U`` is evolved forward under
    ``new_U``; both profiles are drawn so the migrating knickpoint is visible.
    """
    channel = ct.channeltoy(spacing=250, U=initial_U, K=K,
                            n=1, m=0.45,
                            maximum_x=basin_length - 999, X_0=basin_length)
    steady_elevation = channel.solve_steady_state_elevation()
    distance = channel.x_data
    chi = channel.chi_data  # read for parity with the other cells (unused here)
    # Step-change the uplift rate, then evolve the profile through time.
    channel.set_U_values(U=new_U)
    times, elevations = channel.transient_simulation(base_level=0, dt=200,
                                                     start_time=0, end_time=end_time + 1,
                                                     print_interval=end_time)
    plt.plot(distance, steady_elevation, label="Initial elevation")
    plt.plot(distance, elevations[-1], label="Time is: " + str(times[-1]))
    plt.xlabel("Distance from outlet (m)")
    plt.ylabel("Elevation (m)")
    plt.legend()
# Slider tuples are (min, max, step) for each parameter.
interactive_plot = interactive(plot_channel,
                               K=(0.000001, 0.0001, 0.000005),
                               initial_U=(0.0001, 0.001, 0.0001),
                               new_U=(0.0001, 0.001, 0.0001),
                               end_time =(10000, 500000, 10000),
                               basin_length=(5000,100000,5000))
# Reserve space for the rendered figure below the sliders.
output = interactive_plot.children[-1]
output.layout.height = '500px'
interactive_plot
# -
# ## Part 3: A transient channel in chi-elevation space
# We have explained in class and in some of the previous lessons that slope-area data has been used to see where channel steepness changes. But this kind of data can be quite noisy. See Lessons 5 and 6. It is easier to see where the knickpoint is using a chi transformation.
#
# The chi transformation basically squashes the channel at large drainage areas and stretches it at small drainage areas so that, in a steady state landscape, the chi-elevation profile becomes a straight line.
#
# In chi ($\chi$)-elevation space, the steeper the profile, the higher the steepness index.
#
# Use the chi profiles below to see where the knickpoint is. Hopefully you can see why this is a little bit easier than using the profiles.
# +
def plot_channel_chi(K = 0.00005, initial_U = 0.0002, new_U = 0.0005 ,end_time = 50000, basin_length = 10000):
    """Plot steady-state and transient channel profiles in chi-elevation space.

    Same simulation as ``plot_channel`` but plotted against the chi
    coordinate, in which a steady-state profile is a straight line.

    Returns:
        The current matplotlib axes.
    """
    channel = ct.channeltoy(spacing=100, U=initial_U, K=K, n=1, m=0.45,
                            maximum_x=basin_length - 999, X_0=basin_length)
    steady_elevation = channel.solve_steady_state_elevation()
    chi = channel.chi_data
    # Step-change the uplift rate, then evolve the profile through time.
    channel.set_U_values(U=new_U)
    times, elevations = channel.transient_simulation(base_level=0, dt=200,
                                                     start_time=0, end_time=end_time + 1,
                                                     print_interval=end_time)
    plt.plot(chi, steady_elevation, label="Initial elevation")
    plt.plot(chi, elevations[-1], label="Time is: " + str(times[-1]))
    plt.xlabel("Chi ($\chi$) (m)")
    plt.ylabel("Elevation (m)")
    plt.legend()
    return plt.gca()
# Slider tuples are (min, max, step) for each parameter.
interactive_plot = interactive(plot_channel_chi,
                               K=(0.000001, 0.0001, 0.000005),
                               initial_U=(0.0001, 0.001, 0.0001),
                               new_U=(0.0001, 0.001, 0.0001),
                               end_time =(10000, 500000, 10000),
                               basin_length=(5000,100000,5000))
# Reserve space for the rendered figure below the sliders.
output = interactive_plot.children[-1]
output.layout.height = '500px'
interactive_plot
# -
# ## Optional: showing the knickpoint without the interactive plot
# You could also plot the profile at a given time interval (instead of using the interactive plots) to see how fast the knickpoint moves, using the code below. You will need to change the parameters in the code:
#
# * `new_U`
# * `this_K`
# * etc.
#
# The profile will be plotted every `print_every_this_many_years`.
# +
##
## IF YOU WANT TO CHANGE THE PLOT
## Change these parameters
##
basin_length = 10000
initial_U = 0.0001
new_U = 0.0005
this_K = 0.00005
print_every_this_many_years = 10000
last_year_your_print_a_profile = 70000
do_you_want_to_plot_in_chi_space = False # True or False

# Create a channel at steady state with the initial uplift rate.
# Bug fix: the erodibility is now taken from this_K — it was hard-coded to
# 0.00005, so editing this_K above had no effect on the simulation.
chan = ct.channeltoy(spacing=50, U = initial_U, K = this_K, n=1, m= 0.45,maximum_x = basin_length-999, X_0 = basin_length)
initial_elevation = chan.solve_steady_state_elevation()

# Step-change the uplift rate.
chan.set_U_values(U = new_U)

# Run the transient simulation; a profile is recorded every
# print_every_this_many_years up to last_year_your_print_a_profile.
times, elevations = chan.transient_simulation(base_level = 0, dt = 200,
                                              start_time = 0, end_time = last_year_your_print_a_profile+1,
                                              print_interval = print_every_this_many_years)

# Make a plot of the elevations.
# If you set use_chi=True then you get the chi profiles.
chan.plot_transient_channel(times = times,
                            elevations = elevations,
                            initial_elevation = initial_elevation,
                            show_figure=True,print_to_file=False,use_chi = do_you_want_to_plot_in_chi_space)
# -
# ## Practise exercise
# #### **Exercise Part 2: Transience in this landscape**
#
# What happens if the uplift rate were to increase in this landscape? How long would it take the mountain range to adjust?
# Below are some broad questions. You can address a subset of these (see below).
#
# * Change the uplift rate: how long does it take for the knickpoint to move through the landscape (to the top of the river profile)? Does this time change if the uplift rate is even greater?
# * Change the erodibility coefficient K: how long does it take for the knickpoint to move through the landscape (to the top of the river profile)? Does this time change if you change K? What is the significance of this result?
# * Does the knickpoint migration rate change, as the knickpoint moves upstream, or is it moving at the same rate all along?
#
#
# **What you do for the exercise**:
#
# Prepare 2-4 figures to answer some of the questions above. Then write a few paragraphs about your findings. This should look like a small discussion section in a paper. Start with a few sentences explaining what you are simulating (that is, don't assume the reader already knows everything about knickpoints and channels). Then explain what simulations you performed (by simulations we just mean changing the parameters in the plots above), and then use the figures to explain what you found. We are looking for your ability to explain what you have done and your findings, so you really could focus on one of the above questions and still do well on the assignment. Answering all the above questions will probably result in a worse mark since you will not have the space to explain what you have done. Again, the figures should be in the format of a scientific paper: use figure captions instead of titles.
| Channel_incision/Exercise_01_transient_channels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # README
#
# ### Dashboard
#
# Install NCBI [Entrez Programming Utilities](https://www.ncbi.nlm.nih.gov/books/NBK179288/).
#
# ```bash
# sh -c "$(curl -fsSL ftp://ftp.ncbi.nlm.nih.gov/entrez/entrezdirect/install-edirect.sh)"
# ```
#
# Find all article ids matching a given query.
#
# ```bash
# ./esearch -db pubmed -query "gpcr" | ./elink -related | ./efetch -format uid
# ```
#
# Use [codejail](https://github.com/edx/codejail) to restrict the ability of users to do nefarious things from within their code.
| notebooks/00-README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import seaborn as sns
import numpy as np
import pandas as pd
import librosa.display
import glob
import IPython.display as ipd
import matplotlib.pyplot as plt
# %matplotlib inline
import random
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
# Load the train/test metadata (one row per clip: ID and, for train, Class).
train=pd.read_csv('train.csv')
test=pd.read_csv('test.csv')
train.head()
test.head()
# ### Basic Exploratory Analysis
# Class distribution
plt.figure(figsize=(15,7))
sns.countplot(x="Class", data=train)
plt.show()
# Listen to one clip and plot its waveform.
ipd.Audio('Train/0.wav')
data, sampling_rate = librosa.load('Train/0.wav')
plt.figure(figsize=(12, 4))
librosa.display.waveplot(data, sr=sampling_rate)
def extract_mfcc_features(ids, folder, data_dir=''):
    """Load each wav file and return stacked mean-MFCC features.

    Args:
        ids: iterable of clip identifiers (file stems).
        folder: sub-directory holding the wav files ('Train' or 'Test').
        data_dir: base directory of the dataset.

    Returns:
        Array of shape (len(ids), 40): per-clip MFCCs averaged over time.
    """
    # The train and test loops were copy-pasted duplicates; factored here.
    features = []
    for name in ids:
        file_name = os.path.join(os.path.abspath(data_dir), folder, str(name) + '.wav')
        X, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
        # Average the 40 MFCC coefficients over time to get a fixed-size vector.
        mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
        features.append(mfccs)
    return np.stack(features)

train_X = extract_mfcc_features(train.ID, 'Train')
train_X

test_X = extract_mfcc_features(test.ID, 'Test')

# Encode string class labels as integers, then as one-hot vectors for softmax.
lb = LabelEncoder()
train_y = lb.fit_transform(train.Class)
train_y = to_categorical(train_y)
# ## CNN
# Feature Scaling
from sklearn.preprocessing import StandardScaler
# NOTE(review): mnist and RMSprop are unused, and np_utils is imported twice
# — candidates for cleanup.
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.utils import np_utils
from keras.optimizers import RMSprop
from keras.utils import np_utils
# Fit the scaler on the training features only, then apply it to both sets.
sc = StandardScaler()
train_X = sc.fit_transform(train_X)
test_X = sc.transform(test_X)
# reshape to be [samples][pixels][width][height]
# The 40 MFCC coefficients are laid out as an 8x5 single-channel "image".
train_X = train_X.reshape(train_X.shape[0], 8, 5,1).astype('float32')
test_X = test_X.reshape(test_X.shape[0], 8, 5,1).astype('float32')
# define the larger model
def larger_model():
    """Build and compile the classification CNN.

    Architecture: two 5x5 convolutions, 2x2 max-pooling, dropout, a dense
    hidden layer, and a 10-way softmax output. Compiled with Adam and
    categorical cross-entropy.
    """
    model = Sequential([
        Conv2D(32, (5, 5), padding='Same', activation='relu', input_shape=(8, 5, 1)),
        Conv2D(32, (5, 5), padding='Same', activation='relu'),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(256, activation="relu"),
        Dropout(0.5),
        Dense(10, activation="softmax"),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# build the model
model = larger_model()
# Fit the model
# NOTE(review): no validation split or random seed is set, so reported
# accuracy varies run to run — confirm that is acceptable here.
model.fit(train_X, train_y, epochs=20)
# NOTE(review): Sequential.predict_classes was removed in newer Keras/TF;
# recent versions need np.argmax(model.predict(test_X), axis=1) — confirm
# the installed Keras version.
y_pred=model.predict_classes(test_X)
y_pred
# Map integer predictions back to the original string class labels.
y_pred=pd.DataFrame(lb.inverse_transform(y_pred))
y_pred
t=pd.read_csv('test.csv')
# Copies column 0 of y_pred into a new 'Class' column so the unnamed
# column can be dropped below, leaving only 'Class' and 'ID'.
y_pred['Class']=pd.DataFrame(y_pred)
output=pd.concat([y_pred,t['ID']],axis=1)
output.drop(0,axis=1,inplace=True)
output.to_csv('CNN_output.csv',index=False)
| Urban Sound Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: astropy_tutorials
# language: python
# name: astropy_tutorials
# ---
# # Fazer um plot com o redshift e a idade de universo como eixos usando astropy.cosmology
#
# ## Autores
# <NAME>, <NAME>
#
# ## Tradução
# <NAME>
#
# ## Objetivos
# * Plotar relações usando 'matplotlib'
# * Adicionar um segundo eixo em um plot do 'matplotlib'
# * Relacionar distância, redshift e idade para dois diferentes tipos de cosmologia usando 'astropy.cosmology'
#
# ## Palavras-Chave
# unidades, física, cosmologia, matplotlib
#
# ## Sumário
#
# Cada redshift corresponde a uma idade do universo, então quando se plota uma quantidade em função do redshift, o gráfico resultante pode ser usado também para indicar a idade do universo. A relação entre os dois depende do tipo de cosmologia que se está assumindo, e é nesse ponto que o 'astropy.cosmology' pode ser utilizado. Nesse tutorial nós vamos mostrar como usar as ferramentas no 'astropy.cosmology' para criar um plot desse tipo:
# +
# Inicializar o matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# -
from IPython.display import Image
Image(filename="ang_dist.png", width=500)
# Nós começamos com um objeto cosmologia (variável cosmo). Vamos criar uma cosmologia plana (o que significa que a densidade de curvatura $\Omega_k=0$) com um parâmetro de Hubble igual a $70$ km/s/Mpc e uma densidade de matéria de $\Omega_M=0.3$ para o redshift 0. A partir disso, a cosmologia `FlatLambdaCDM` calcula automaticamente que o valor da densidade de energia escura precisa ser $\Omega_\Lambda=0.7$, pois $\Omega_M + \Omega_\Lambda + \Omega_k = 1$.
# +
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# Nesse caso nós só precisamos definir a densidade de matéria
# e o parâmetro de hubble para z=0 (a função FlatLambdaCDM já
# considera que omega_k=0)
# Note que a a unidade padrão para o parâmetro de Hubble é
# km/s/Mpc. Mesmo assim, vamos passar um objeto 'Quantidade'
# com as unidades especificadas
cosmo = FlatLambdaCDM(H0=70*u.km/u.s/u.Mpc, Om0=0.3)
# -
# Note que nós poderíamos ter usado também uma das outras cosmologias inclusas no astropy.cosmology, como a 'WMAP9' ou a 'Planck13'.
#
# Agora, precisamos de uma certa quantidade para plotar em função do redshift. Vamos usar a distância do diâmetro angular, que é a distância física transversal (o tamanho de uma galáxia, por exemplo) correpondente a uma certa separação angular do céu. Para calcular a distância do diâmetro angular para um intervalo de redshifts:
import numpy as np
# Redshift grid from 0 to 6 and the corresponding angular diameter distances.
zvals = np.arange(0, 6, 0.1)
dist = cosmo.angular_diameter_distance(zvals)
# Note que nós passamos um array de redshifts para 'cosmo.angular_diameter_distances', e isso produziu um array de valores de distância, um para cada redshift. Vamos então plotá-los:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
# Para checar a unidades da distância do diâmetro angular, olhamos o seu atributo unit:
dist.unit
# Vamos agora criar um array com algumas idades que irão aparecer no eixo superior do gráfico. Escolhemos uma série de valores de idade, correspondendo aos lugares onde queremos colocar os ticks. Talvez seja necessário que você ajuste os valores abaixo a depender do seu range de redshifts para conseguir ticks espaçados igualmente.
ages = np.array([13, 10, 8, 6, 5, 4, 3, 2, 1.5, 1.2, 1])*u.Gyr
# Para linkar o eixo dos redshifts com o das idades, temos que encontrar o redshift correspondente a cada uma das idades do nosso array, e a função 'z_at_value' faz exatamente isso.
from astropy.cosmology import z_at_value
# Numerically invert cosmo.age: find the redshift at which the universe
# has each of the requested ages.
ageticks = [z_at_value(cosmo.age, age) for age in ages]
# Agora fazemos o segundo eixo, e definimos as posições dos seus ticks (usando '.set_xticks') usando o array ageticks.
# Same plot as before, plus a twin x-axis on top whose ticks sit at the
# redshifts corresponding to the chosen ages.
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
# Agora já temos os ticks no eixo superior nas posições corretas, mas os valores de suas labels são os redshifts, e não as idades. Para corrigir isso, podemos definir suas labels manualmente.
# Relabel the top-axis ticks with the age values: the tick *positions*
# are still redshifts, only the label text shows ages.
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
# Precisamos então garantir que os dois eixos tenham os mesmos limites de redshift. No plot acima eles podem não estar devidamente alinhados, a depender do seu setup (por exemplo, a idade do universo deve ser ~13 Gyr para z=0).
# Force both x-axes to span the same redshift range so the age ticks
# line up correctly with the bottom (redshift) axis.
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0.0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
# Quase lá. Só precisamos agora adicionar os títulos dos eixos e os ticks menores. Vamos ajustar também os limites do eixo y para evitar que suas labels fiquem muito próximas do topo do plot.
# Final single-cosmology figure: axis titles, minor ticks, and a y-limit
# chosen so the tick labels stay clear of the top of the frame.
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist)
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
ax2.set_xlabel('Time since Big Bang (Gyr)')
ax.set_xlabel('Redshift')
ax.set_ylabel('Angular diameter distance (Mpc)')
ax.set_ylim(0, 1890)
ax.minorticks_on()
# Para comparação, vamos adicionar a distância do diâmetro angular de uma cosmologia diferente, derivada dos resultados de 2013 do Planck. E finalmente, salvamos a figura em um arquivo png.
# +
from astropy.cosmology import Planck13
# Angular diameter distance for the Planck 2013 cosmology, to compare
# against the hand-built FlatLambdaCDM model above.
dist2 = Planck13.angular_diameter_distance(zvals)
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(zvals, dist2, label='Planck 2013')
# Raw string: '\ ', '\O' and '\L' are invalid escape sequences in a
# normal string literal (SyntaxWarning on modern Python). The raw
# string produces byte-identical LaTeX, so the rendered label is
# unchanged.
ax.plot(zvals, dist, label=
        r'$h=0.7,\ \Omega_M=0.3,\ \Omega_\Lambda=0.7$')
# frameon expects a bool; 0 worked by truthiness only.
ax.legend(frameon=False, loc='lower right')
ax2 = ax.twiny()
ax2.set_xticks(ageticks)
ax2.set_xticklabels(['{:g}'.format(age) for age in ages.value])
zmin, zmax = 0.0, 5.9
ax.set_xlim(zmin, zmax)
ax2.set_xlim(zmin, zmax)
ax2.set_xlabel('Time since Big Bang (Gyr)')
ax.set_xlabel('Redshift')
ax.set_ylabel('Angular diameter distance (Mpc)')
ax.minorticks_on()
ax.set_ylim(0, 1890)
# bbox_inches='tight' trims the surrounding whitespace in the saved PNG.
fig.savefig('ang_dist.png', dpi=200, bbox_inches='tight')
# -
# `bbox_inches='tight'` remove automaticamente qualquer espaço em branco ao redor das margens do plot.
#
# E terminamos!
# ## Exercício
# Bem, quase terminamos. Note que nós calculamos os tempos no eixo superior usando a cosmologia original, e não a nova cosmologia baseada nos resultados de 2013 do Planck. Por isso, tecnicamente o eixo superior só pode ser utilizado pela cosmologia original, apesar de a diferença entre as duas ser pequena. Como exercício, você pode tentar plotar dois eixos superiores diferentes (com uma pequena separação entre os dois), para demonstrar os tempos correspondentes a cada cosmologia. Dê uma olhada na primeira resposta [dessa pergunta no Stack Overflow](http://stackoverflow.com/questions/7733693/matplotlib-overlay-plots-with-different-scales) que tem algumas dicas de como fazer isso.
| redshift-plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WbdSBVjN_LS1"
# # Save and Store Features
# In this notebook we will compute all prediction and store the relative features in drive using the model computed in the notebook "ResNet50".
#
# *Note*: the features related to the simple feature extraction model are already computed in the notebook "ResNet50", thus they won't again be computed here.
# + id="fOEEsshaASKY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643386358392, "user_tz": -60, "elapsed": 24963, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e1269398-eaa9-4a53-be82-0dad30b6c8a4"
from google.colab import drive
# Mount Google Drive: models are loaded from it and the extracted
# feature arrays are saved back to it.
drive.mount('/content/drive')
# + id="768ZK3lEBFKZ" executionInfo={"status": "ok", "timestamp": 1643386370184, "user_tz": -60, "elapsed": 2936, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
import tensorflow as tf
from tensorflow import keras as ks
from tensorflow.keras import layers
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras import regularizers
import pathlib
import matplotlib.pyplot as plt
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 73, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} executionInfo={"elapsed": 8404, "status": "ok", "timestamp": 1643386378584, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}, "user_tz": -60} id="_B3mi1RqblLY" outputId="9f68bf0f-1e0b-4fed-a08e-5b3017293a46"
# ! pip install -q kaggle
from google.colab import files
# Upload kaggle.json (Kaggle API token); the shell cells below install
# it into ~/.kaggle so `kaggle datasets download` can authenticate.
_ = files.upload()
# ! mkdir -p ~/.kaggle
# ! cp kaggle.json ~/.kaggle/
# ! chmod 600 ~/.kaggle/kaggle.json
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 12553, "status": "ok", "timestamp": 1643386395251, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}, "user_tz": -60} id="VoWnE3oCb1eG" outputId="a1e20e34-667a-439a-c2b5-9255b3c60d7c"
# ! kaggle datasets download -d gpiosenka/100-bird-species
# + colab={"base_uri": "https://localhost:8080/"} id="-tdH48JqcIDE" outputId="666cae5c-ed4d-4e8b-8bc3-2f57d4d3cc4b" executionInfo={"status": "ok", "timestamp": 1643386419497, "user_tz": -60, "elapsed": 21194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# !unzip 100-bird-species.zip
# + [markdown] id="SMGEEWo0_2pg"
# ## Create the different sets
# In this section the training set, the test set and the discrimator sets are computed in order to extract the features from them
# + id="_jrjDHV9-xFI" executionInfo={"status": "ok", "timestamp": 1643386484042, "user_tz": -60, "elapsed": 235, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Dataset locations and input-pipeline settings.
TRAIN_DIR = 'train/'
VALID_DIR = 'valid/'
TEST_DIR = 'test/'
# MIRFLICKR images used as distractors (no labels).
DISTRACTOR_DIR = 'mirflickr'
BATCH_SIZE = 128
# 224x224 input resolution (standard for ResNet50V2).
IMAGE_HEIGHT = 224
IMAGE_WIDTH = 224
RANDOM_SEED = 42
# + [markdown] id="Kns-kYL_dslj"
# Distractor path:
# + id="G1X7pqSOduoD" executionInfo={"status": "ok", "timestamp": 1643386596818, "user_tz": -60, "elapsed": 111439, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# !unzip -q '/content/drive/My Drive/CV_Birds/mirflickr.zip' -d '/content'
# + [markdown] id="sSl3bW9BdvII"
# Create sets:
# + id="zSs64XNtAAaX" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643368549784, "user_tz": -60, "elapsed": 5004, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="3a785598-2e21-499c-c1bf-30e1b447a081"
# shuffle=False keeps batches in directory order, so rows of the saved
# feature arrays can be matched back to files/labels afterwards.
training_images = tf.keras.preprocessing.image_dataset_from_directory(
    TRAIN_DIR, labels='inferred', label_mode='categorical',
    class_names=None, color_mode='rgb', batch_size=BATCH_SIZE, image_size=(IMAGE_HEIGHT,
    IMAGE_WIDTH), shuffle=False, seed=RANDOM_SEED, interpolation='bilinear')
test_images = tf.keras.preprocessing.image_dataset_from_directory(
    TEST_DIR, labels='inferred', label_mode='categorical',
    class_names=None, color_mode='rgb', batch_size=BATCH_SIZE, image_size=(IMAGE_HEIGHT,
    IMAGE_WIDTH), shuffle=False, seed=RANDOM_SEED, interpolation='bilinear')
# + colab={"base_uri": "https://localhost:8080/"} id="-Ib-8nmbcsfZ" executionInfo={"status": "ok", "timestamp": 1643386604875, "user_tz": -60, "elapsed": 6336, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="b53c1bc5-e9e6-4fb5-d4ac-c545227776f2"
# NOTE(review): unlike the train/test sets above, this dataset does NOT
# pass shuffle=False, so image_dataset_from_directory shuffles the files
# (deterministically, given seed=RANDOM_SEED). Confirm the saved
# distractor features never need to be mapped back to file order;
# otherwise add shuffle=False here as well.
distractor_images = tf.keras.preprocessing.image_dataset_from_directory(
    DISTRACTOR_DIR,
    image_size = (IMAGE_HEIGHT, IMAGE_WIDTH),
    batch_size = BATCH_SIZE,
    seed=RANDOM_SEED,
    labels=None,
    label_mode=None)
# + [markdown] id="tiaw9QlvF0Di"
# ## Model 1
# Load the model from drive:
# + id="KIH-VSPCF0Dj" executionInfo={"status": "ok", "timestamp": 1643387103416, "user_tz": -60, "elapsed": 3755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Feature-extraction pipeline for Model 1: load the trained model,
# truncate it to a feature extractor, then predict and save features
# for the training, test and distractor sets.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model1.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} id="S_i7Di5-bJ_D" executionInfo={"status": "ok", "timestamp": 1643303702087, "user_tz": -60, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e8934e4d-245a-4a0d-8a32-baf664a5b2ed"
model.summary()
# + id="buMzVaHqbcDi" executionInfo={"status": "ok", "timestamp": 1643387103761, "user_tz": -60, "elapsed": 348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Keep only the first two layers of the loaded model as the feature
# extractor (presumably preprocessing + backbone, with the classifier
# head dropped -- confirm with a.summary() below).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} id="XgU19qq0fao1" executionInfo={"status": "ok", "timestamp": 1643303702510, "user_tz": -60, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e2ebd367-9ae7-492b-8851-a1686c1469d2"
a.summary()
# + [markdown] id="obhgZHk5F0Dj"
# Predict features for training set and save them:
# + id="599cWVnhF0Dk" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643303965780, "user_tz": -60, "elapsed": 263275, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="b0fc9bd5-7a87-4a31-8551-fc6b5c03c057"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="8wC29uGmF0Dk"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model1_train_features.npy', features_model)
# + [markdown] id="Eyz9_Ww5F0Dk"
# Predict features for test set and save them:
# + id="2vOD2g7KF0Dk" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643304080016, "user_tz": -60, "elapsed": 11865, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="4a05baba-c393-454b-8b8d-260800e6e10b"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="_GkPaYYJF0Dk"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model1_test_features.npy', features_model)
# + [markdown] id="Xnwv2XZrF0Dl"
# Predict features for the distractor and save them
# + id="Sroc6txMF0Dl" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387235092, "user_tz": -60, "elapsed": 128023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="a1bc1d3d-7f8c-4e62-ac4c-0e3770bf8e05"
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="3rAelSeRF0Dl" executionInfo={"status": "ok", "timestamp": 1643387235800, "user_tz": -60, "elapsed": 712, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model1_distractor_features.npy', features_model)
# + [markdown] id="YblEBZqNjqfy"
# ## Model 2
# Load the model from drive:
# + id="72Reuutkjqf1" executionInfo={"status": "ok", "timestamp": 1643387239299, "user_tz": -60, "elapsed": 3500, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model2.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model2.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643386856251, "user_tz": -60, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="c6376608-8c74-4361-f1c3-0914ae87ffa4" id="lLlo70Ovjqf2"
model.summary()
# + id="8cIySEGpjqf3" executionInfo={"status": "ok", "timestamp": 1643387239300, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643386856627, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="f2f8c271-8679-46f5-f9a8-96507ad61a1f" id="7viI_8MSjqf3"
a.summary()
# + [markdown] id="Ww7dtoXojqf3"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643304432097, "user_tz": -60, "elapsed": 247401, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="14aa659b-0661-4a43-b180-21acc1b0e053" id="f0wqJFjsjqf4"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="z6151Ut0jqf4"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model2_train_features.npy', features_model)
# + [markdown] id="5FtDBTpYjqf4"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643304444264, "user_tz": -60, "elapsed": 10304, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="a9543955-3f44-4b16-be1c-b481a8b21c2c" id="KknjPS9ljqf4"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="HcNAHf6wjqf5"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model2_test_features.npy', features_model)
# + [markdown] id="SHR1JRd4jqf5"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="49bdc896-e736-42f2-9168-d941ff3d1ae4" id="gQNwThAMe6_r" executionInfo={"status": "ok", "timestamp": 1643387368216, "user_tz": -60, "elapsed": 127620, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="rmCe7_V4e6_s" executionInfo={"status": "ok", "timestamp": 1643387369211, "user_tz": -60, "elapsed": 999, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model2_distractor_features.npy', features_model)
# + [markdown] id="PGIKNMo5kAeE"
# ## Model 3
# Load the model from drive:
# + id="ZyJxuQj2kAeE" executionInfo={"status": "ok", "timestamp": 1643387376298, "user_tz": -60, "elapsed": 7089, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model3.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model3.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387376298, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e7f64b5e-f06f-4e8a-a008-2e7b8cb8fe31" id="mhTa5XOqkAeF"
model.summary()
# + id="rBiUUqtMkAeF" executionInfo={"status": "ok", "timestamp": 1643387376899, "user_tz": -60, "elapsed": 604, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387376900, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="942f936f-7255-4333-ea7e-e21cacf0721d" id="j2bp2dX8kAeG"
a.summary()
# + [markdown] id="68ARb_HekAeG"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643304766086, "user_tz": -60, "elapsed": 246879, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="41b257df-6e2b-46ee-bead-935126aad3eb" id="sb9l53_JkAeG"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="92eo3ExZkAeG"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model3_train_features.npy', features_model)
# + [markdown] id="dxG9sRgikAeH"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643304776783, "user_tz": -60, "elapsed": 9096, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="b2ecbfb5-7af7-4608-e217-5af77891516c" id="zO9PlwFJkAeH"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="6IT0_mDckAeH"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model3_test_features.npy', features_model)
# + [markdown] id="N0eNEEm_kAeH"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="cc54eca4-6a21-4311-8440-4a2c2e9c05cd" id="LduuGJh7fBw1" executionInfo={"status": "ok", "timestamp": 1643387506032, "user_tz": -60, "elapsed": 129136, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="NbLkfXRTfBw2" executionInfo={"status": "ok", "timestamp": 1643387506873, "user_tz": -60, "elapsed": 845, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model3_distractor_features.npy', features_model)
# + [markdown] id="Nv4nZ-GAkHsF"
# ## Model 4
# Load the model from drive:
# + id="sxqkBLIBkHsG" executionInfo={"status": "ok", "timestamp": 1643387515198, "user_tz": -60, "elapsed": 8327, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model4.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model4.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387515199, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="2551d798-1416-4424-a86d-292398e56b5e" id="M4bwdhj6kHsG"
model.summary()
# + id="R-v6GF9rkHsH" executionInfo={"status": "ok", "timestamp": 1643387515850, "user_tz": -60, "elapsed": 655, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387515850, "user_tz": -60, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="28525c75-6484-4d11-e470-02186e3b4d25" id="SUc2EWfkkHsH"
a.summary()
# + [markdown] id="cUOeiZXLkHsH"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305029605, "user_tz": -60, "elapsed": 247637, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="67b0646e-d44b-4936-c3d9-ad2a27be35b5" id="d5Gxk0sDkHsH"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="Y0c_OxAkkHsI"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model4_train_features.npy', features_model)
# + [markdown] id="H3x_VD3CkHsI"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305039742, "user_tz": -60, "elapsed": 9109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e0db5384-f501-49f3-ca33-b507ff079a2c" id="jit4jPvbkHsI"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="DzEw4QD2kHsI"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model4_test_features.npy', features_model)
# + [markdown] id="XN_H94yjkHsJ"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="e34a088d-a6a3-4c35-c092-38135032b73b" id="bguKj8sefL0S" executionInfo={"status": "ok", "timestamp": 1643387643524, "user_tz": -60, "elapsed": 127677, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="Uim09Sc1fL0U" executionInfo={"status": "ok", "timestamp": 1643387643894, "user_tz": -60, "elapsed": 373, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model4_distractor_features.npy', features_model)
# + [markdown] id="IZD534QIkTyh"
# ## Model 9
# Load the model from drive:
# + id="b9P9F03AkTyi" executionInfo={"status": "ok", "timestamp": 1643387651699, "user_tz": -60, "elapsed": 7806, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model9.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model9.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387651700, "user_tz": -60, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="2f4f62c4-d9b4-40cc-f94d-b6c2267d1b35" id="vXoi2kGLkTyi"
model.summary()
# + id="aya5YHQbkTyj" executionInfo={"status": "ok", "timestamp": 1643387652553, "user_tz": -60, "elapsed": 856, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643387652553, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="3339899a-5930-412f-8002-1381fd5c58a9" id="Uizj-43BkTyj"
a.summary()
# + [markdown] id="kFxGYYuekTyj"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305295818, "user_tz": -60, "elapsed": 248162, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="6786cfb5-9ee8-4e5c-8d02-35702d07aa46" id="MebbgEGokTyj"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="U_WmzfeikTyj"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model9_train_features.npy', features_model)
# + [markdown] id="6M4QOSJekTyk"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305305919, "user_tz": -60, "elapsed": 8291, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="ea0e9139-677a-4087-db7f-516d0e19be72" id="MchSSaTgkTyk"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="oqEVWtVJkTyk"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model9_test_features.npy', features_model)
# + [markdown] id="W31nIewtkTyk"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="be222e9a-89b4-48d1-f54c-4314ef563a92" id="woED7AU6foOh" executionInfo={"status": "ok", "timestamp": 1643387780506, "user_tz": -60, "elapsed": 127957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="Dbp5n2v1foOi" executionInfo={"status": "ok", "timestamp": 1643387781517, "user_tz": -60, "elapsed": 1014, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model9_distractor_features.npy', features_model)
# + [markdown] id="-R6qG88skgis"
# ## Model 10
# Load the model from drive:
# + id="tw7coul0kgit" executionInfo={"status": "ok", "timestamp": 1643387788869, "user_tz": -60, "elapsed": 7355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model10.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model10.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305314021, "user_tz": -60, "elapsed": 21, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="63ce1a59-975c-414e-9cdc-3c98698e8b39" id="egA0Y0hqkgit"
model.summary()
# + id="VomIU6H0kgiu" executionInfo={"status": "ok", "timestamp": 1643387788870, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305314723, "user_tz": -60, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="990cc266-aff4-42ed-e08e-40df7da8b0f8" id="LVkZluvIkgiu"
a.summary()
# + [markdown] id="1RXsNMEZkgiu"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305560425, "user_tz": -60, "elapsed": 245707, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="606ac191-0eda-4d5a-d0d5-fc57b85a8255" id="bm3USwJQkgiu"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="xhv_UXkQkgiv"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model10_train_features.npy', features_model)
# + [markdown] id="Orvcp0dPkgiv"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305570565, "user_tz": -60, "elapsed": 8588, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="bf0a534b-04ba-41a9-9853-50f216fb1d58" id="-pWLyDwukgiv"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="JTJriH8kkgiv"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model10_test_features.npy', features_model)
# + [markdown] id="dJnE5Z0ekgiw"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="f72f0904-4b3a-49e1-8eef-7e065155e7be" id="yfGEBdtXf4Wq" executionInfo={"status": "ok", "timestamp": 1643387918136, "user_tz": -60, "elapsed": 129270, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="Bd5hR8UZf4Wr" executionInfo={"status": "ok", "timestamp": 1643387918868, "user_tz": -60, "elapsed": 736, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model10_distractor_features.npy', features_model)
# + [markdown] id="BHlL9DVvkpZ4"
# ## Model 11
# Load the model from drive:
# + id="3Ss1tqS4kpZ5" executionInfo={"status": "ok", "timestamp": 1643387926154, "user_tz": -60, "elapsed": 7289, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Same feature-extraction pipeline as Model 1, applied to model11.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet50v2/model11.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305579100, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="e1bd284b-1c61-432d-be73-e2b56568ba06" id="4TmUZgQYkpZ6"
model.summary()
# + id="ALlqnWZJkpZ6" executionInfo={"status": "ok", "timestamp": 1643387926521, "user_tz": -60, "elapsed": 370, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Truncate to the first two layers -> feature extractor (head removed).
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305579491, "user_tz": -60, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="9f6fa1fd-50f7-48e7-f3b3-f5e2521502de" id="1HRnuXV0kpZ6"
a.summary()
# + [markdown] id="sHpQhN9dkpZ7"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305825515, "user_tz": -60, "elapsed": 246028, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="13f8b5f5-6260-4a4c-eaee-5e9497a302c2" id="IPvZ5ciCkpZ7"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="f_xI_BWykpZ7"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet50v2/model11_train_features.npy', features_model)
# + [markdown] id="16f6JEpqkpZ7"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643305835501, "user_tz": -60, "elapsed": 8392, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="020b629d-81f9-4c43-d5ea-bd20fb68936a" id="tRaPaq1ikpZ8"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="tougrDHKkpZ8"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet50v2/model11_test_features.npy', features_model)
# + [markdown] id="7FKZdx1FkpZ8"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="96e35884-4614-4f46-a91d-8f78e009a18c" id="YB70rEmGf8cC" executionInfo={"status": "ok", "timestamp": 1643388055185, "user_tz": -60, "elapsed": 128667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="fwhBTLT2f8cD" executionInfo={"status": "ok", "timestamp": 1643388055961, "user_tz": -60, "elapsed": 783, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet50v2/model11_distractor_features.npy', features_model)
# + [markdown] id="GLDCdFZ5UCJQ"
# # Resnet 101
# + [markdown] id="7e5tCU-qUXEc"
# ## Model 1
# Load the model from drive:
# + id="JsrnC978UXEd" executionInfo={"status": "ok", "timestamp": 1643388066903, "user_tz": -60, "elapsed": 10945, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
# Load the trained ResNet101v2 "model 1" classifier from Google Drive.
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet101v2/resNet101_model1.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643368559828, "user_tz": -60, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="939dd36f-bee0-4c5c-b906-8999a39aa066" id="L87SVO8bUXEd"
model.summary()
# + id="6X34v9pPUXEe" executionInfo={"status": "ok", "timestamp": 1643388067649, "user_tz": -60, "elapsed": 750, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643368561303, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="4df3f7de-50b0-4342-e3e3-64c2ef9f35e4" id="B14UolWnUXEe"
a.summary()
# + [markdown] id="fZTXTVFuUXEe"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369041231, "user_tz": -60, "elapsed": 458343, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="64a51aa4-7459-454a-8816-da9edba64655" id="TjQMJf5jUXEf"
# Run the truncated network over the whole training set in batches to
# obtain per-image feature embeddings.
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="UKxAt2ZEUXEf"
# Cache the training-set features on Drive so downstream experiments can
# reload them instead of recomputing.
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet101v2/model1_train_features.npy', features_model)
# + [markdown] id="IuGW33ICUXEf"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369061694, "user_tz": -60, "elapsed": 18021, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="0bd41b24-a19a-4df8-9a5d-2cca601c4a84" id="DXaDXIXhUXEf"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="maNeOVd8UXEf"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet101v2/model1_test_features.npy', features_model)
# + [markdown] id="nweMTfZWUXEg"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="173939c3-9812-424b-abe0-7462aa4ab84f" id="HJAUbCA0gCN3" executionInfo={"status": "ok", "timestamp": 1643388331941, "user_tz": -60, "elapsed": 264296, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="yI0G0aP0gCN4" executionInfo={"status": "ok", "timestamp": 1643388332925, "user_tz": -60, "elapsed": 986, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet101v2/model1_distractor_features.npy', features_model)
# + [markdown] id="JT3Q7bRgUXEg"
# ## Model 2
# Load the model from drive:
# + id="kJFdOm02UXEg" executionInfo={"status": "ok", "timestamp": 1643388344892, "user_tz": -60, "elapsed": 11969, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
MODEL_PATH = '/content/drive/MyDrive/CV_Birds/models/ResNet101v2/resNet101_model2.keras'
model = ks.models.load_model(MODEL_PATH)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369073876, "user_tz": -60, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="2ce6dd36-347c-4a0a-9e20-38d66875d559" id="hqEX_jkcUXEg"
model.summary()
# + id="Iym7XZLMUXEh" executionInfo={"status": "ok", "timestamp": 1643388346398, "user_tz": -60, "elapsed": 1520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
a = ks.models.Sequential(model.layers[:2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369075625, "user_tz": -60, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="1dffad9c-f3a3-45b3-de7c-5b28f64d3ec3" id="1hscnCvrUXEh"
a.summary()
# + [markdown] id="HogIFJyyUXEh"
# Predict features for training set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369520245, "user_tz": -60, "elapsed": 444623, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="f97ce364-4056-4c04-fad8-fc90bf520b42" id="vunLXc4RUXEh"
features_model = a.predict(training_images, batch_size=BATCH_SIZE, verbose=True)
# + id="0_wnr4ilUXEi"
np.save('/content/drive/MyDrive/CV_Birds/features/training/ResNet101v2/model2_train_features.npy', features_model)
# + [markdown] id="EW37sxsnUXEi"
# Predict features for test set and save them:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1643369537349, "user_tz": -60, "elapsed": 16055, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}} outputId="fae35931-8ba0-4f2f-b308-d2212eec12a9" id="90BOzh-3UXEi"
features_model = a.predict(test_images, batch_size=BATCH_SIZE, verbose=True)
# + id="kk8OR04qUXEi"
np.save('/content/drive/MyDrive/CV_Birds/features/test/ResNet101v2/model2_test_features.npy', features_model)
# + [markdown] id="rIe-HDhgUXEi"
# Predict features for the distractor and save them
# + colab={"base_uri": "https://localhost:8080/"} outputId="f2712260-edae-4125-fd94-438d2b776546" id="lVBVTD6_gHUz" executionInfo={"status": "ok", "timestamp": 1643388570004, "user_tz": -60, "elapsed": 223612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
features_model = a.predict(distractor_images, batch_size=BATCH_SIZE, verbose=True)
# + id="tqJjYg-ugHU0" executionInfo={"status": "ok", "timestamp": 1643388570742, "user_tz": -60, "elapsed": 743, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "11023693490829624613"}}
np.save('/content/drive/MyDrive/CV_Birds/features/distractor/ResNet101v2/model2_distractor_features.npy', features_model)
| Notebooks/Training/Features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Quandl API client (anonymous access is rate-limited; an API key lifts the cap).
import quandl
# EIA petroleum series — RWTC is the WTI crude spot price code (verify on Quandl).
mydata = quandl.get('EIA/PET_RWTC_D')
import matplotlib.pyplot as plt
# %matplotlib inline
# quandl.get returns a date-indexed pandas DataFrame, so .plot() draws a time series.
mydata.plot()
mydata
# A Zillow real-estate turnover series.
real_estate = quandl.get('ZILLOW/N2544_TURNAH')
real_estate
# Full Apple OHLCV table from the (no longer updated) WIKI price dataset.
mydata1 = quandl.get('WIKI/AAPL')
mydata1.head()
# Appending ".1" to the dataset code requests only the first data column.
mydata1 = quandl.get('WIKI/AAPL.1')
mydata1.head()
| 06-Data-Sources/Quandl.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Julia 超新手教學 II
#
# **by 杜岳華**
# + [markdown] slideshow={"slide_type": "slide"}
# # Outline
#
# * Collections
# * String and Operators
# * Functions
# * Types
# + [markdown] slideshow={"slide_type": "slide"}
# # Collections
# -
# 同類型的變數不只有一個怎麼辦?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Arrays
# -
# 在程式語言當中最基本的集合或是資料結構
# + [markdown] slideshow={"slide_type": "slide"}
# ### Create an array
# + slideshow={"slide_type": "fragment"}
x = []
# + [markdown] slideshow={"slide_type": "fragment"}
# Homogeneous: 同質性,Array中只能放入屬於同一型別的物件
# + slideshow={"slide_type": "fragment"}
Any[]
# + slideshow={"slide_type": "fragment"}
Int64[]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Type inference on array
# + slideshow={"slide_type": "fragment"}
x = [1, 2, 3]
# + slideshow={"slide_type": "fragment"}
x = [1, 1.2]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Specified array type
# + slideshow={"slide_type": "fragment"}
Int8[1, 2, 3, 4]
# + slideshow={"slide_type": "fragment"}
Array{Int8, 1}(5) # 尚未初始化
# + [markdown] slideshow={"slide_type": "slide"}
# ### Indexing
# -
# Index starts from 1.
#
# `☐ ☐ ☐`
#
# `1 2 3`
# + slideshow={"slide_type": "fragment"}
x
# + slideshow={"slide_type": "fragment"}
x[1]
# + slideshow={"slide_type": "fragment"}
x[2]
# + slideshow={"slide_type": "fragment"}
length(x)
# + slideshow={"slide_type": "slide"}
x = [6.0, 3.2, 7.6, 0.9, 2.3]
# + slideshow={"slide_type": "fragment"}
x[1:2]
# + slideshow={"slide_type": "fragment"}
x[3:end]
# + slideshow={"slide_type": "slide"}
x[1:2:end]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Assign value
# + slideshow={"slide_type": "fragment"}
x[2] = 7.5
# + slideshow={"slide_type": "fragment"}
x
# + [markdown] slideshow={"slide_type": "slide"}
# ### Useful operations
# + slideshow={"slide_type": "fragment"}
push!(x, 9.0)
# + slideshow={"slide_type": "slide"}
y = [10.0, 3.4]
append!(x, y)
# + slideshow={"slide_type": "fragment"}
x
# + slideshow={"slide_type": "slide"}
pop!(x)
# + slideshow={"slide_type": "fragment"}
x
# + slideshow={"slide_type": "slide"}
popfirst!(x)
# + slideshow={"slide_type": "fragment"}
x
# + slideshow={"slide_type": "slide"}
pushfirst!(x, 6.0)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Random array
# -
x = rand(5)
# + slideshow={"slide_type": "fragment"}
sort(x)
# + slideshow={"slide_type": "fragment"}
x
# + slideshow={"slide_type": "slide"}
sort!(x)
# + slideshow={"slide_type": "fragment"}
x
# + [markdown] slideshow={"slide_type": "slide"}
# ### 由大到小
# -
sort(x, rev=true)
# + [markdown] slideshow={"slide_type": "slide"}
# ### 依絕對值大小排序
# + slideshow={"slide_type": "fragment"}
x = randn(10)
# + slideshow={"slide_type": "fragment"}
sort(x, by=abs)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Iteration
# -
for i in x
println(i)
end
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 1
# + [markdown] slideshow={"slide_type": "-"}
# 請造出一個陣列,當中的數值是均勻分佈,從-345到957.6
# -
# 提示: $\LARGE y = \frac{x - min(x)}{max(x) - min(x)}$
# + [markdown] slideshow={"slide_type": "slide"}
# 其中一個答案
# -
(957.6 - (-345)) * rand(10) .+ (-345)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 2
# + [markdown] slideshow={"slide_type": "-"}
# 請造出一個陣列,當中的數值是服從常態分佈
# + [markdown] slideshow={"slide_type": "slide"}
# 其中一個答案
# -
randn(10)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 3
# + [markdown] slideshow={"slide_type": "-"}
# 請造出一個陣列,當中的數值是服從常態分佈,μ=3.5,σ=2.5
# -
# 提示: $\LARGE y = \frac{x - \mu}{\sigma}$
# + [markdown] slideshow={"slide_type": "slide"}
# 其中一個答案
# -
2.5 * randn(10) .+ 3.5
# + [markdown] slideshow={"slide_type": "slide"}
# ## Sets
# -
# 數學上的集合
# + slideshow={"slide_type": "slide"}
x = Set([1, 2, 3, 4])
# + slideshow={"slide_type": "fragment"}
push!(x, 5)
# + slideshow={"slide_type": "fragment"}
pop!(x)
# + slideshow={"slide_type": "fragment"}
x
# + [markdown] slideshow={"slide_type": "slide"}
# ### Exists
# + slideshow={"slide_type": "-"}
3 in x
# -
4 in x
# + [markdown] slideshow={"slide_type": "slide"}
# ### Equivalent
# -
x == Set([3, 2, 1, 5])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Iteration
# -
for i in x
println(i)
end
# #### Quiz 4
# + [markdown] slideshow={"slide_type": "-"}
# 請告訴我以下資料有幾種數值
#
# [8, 4, 1, 2, 9, 4, 5, 4, 5, ...]
# + slideshow={"slide_type": "slide"}
x = rand([1, 2, 4, 5, 8, 9], 50);
# + slideshow={"slide_type": "fragment"}
Set(x)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dictionaries
# -
# key-value 的資料結構
# + slideshow={"slide_type": "slide"}
x = Dict("1" => 1, "2" => 2, "3" => 3)
# + slideshow={"slide_type": "fragment"}
x["1"]
# + slideshow={"slide_type": "fragment"}
x["A"]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Add new pair
# -
x["4"] = 4
x
# + [markdown] slideshow={"slide_type": "slide"}
# ### Overwrite
# + slideshow={"slide_type": "fragment"}
x["1"] = 5
# + slideshow={"slide_type": "-"}
x
# + [markdown] slideshow={"slide_type": "slide"}
# ### keys and values
# + slideshow={"slide_type": "fragment"}
keys(x)
# + slideshow={"slide_type": "fragment"}
values(x)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Iteration
# -
for (k, v) in x
println(k, "->", v)
end
# + [markdown] slideshow={"slide_type": "slide"}
# # Strings
# -
# 字串是很常用到的物件
#
# 但是字串並不是最基本的元素
# + [markdown] slideshow={"slide_type": "slide"}
# ## Characters
# -
# 字元是組成字串的基本單元
'A'
# + slideshow={"slide_type": "fragment"}
'a'
# + [markdown] slideshow={"slide_type": "slide"}
# ### 字元用單引號,字串用雙引號
# -
typeof('A')
# + slideshow={"slide_type": "fragment"}
typeof("A")
# + [markdown] slideshow={"slide_type": "slide"}
# ### 字元其實是用相對應的整數表示的
# + slideshow={"slide_type": "fragment"}
Int('A')
# + slideshow={"slide_type": "fragment"}
Char(65)
# + slideshow={"slide_type": "fragment"}
Int('B')
# + [markdown] slideshow={"slide_type": "slide"}
# ### 字元能適用加法嗎?
# + slideshow={"slide_type": "fragment"}
'A' + 1
# + slideshow={"slide_type": "fragment"}
'C' - 2
# + [markdown] slideshow={"slide_type": "slide"}
# ### 字元可以比較大小嗎?
# + slideshow={"slide_type": "fragment"}
'C' > 'A'
# + slideshow={"slide_type": "fragment"}
'a' > 'A'
# + slideshow={"slide_type": "fragment"}
Int('a')
# + slideshow={"slide_type": "fragment"}
'a' - 'A'
# + [markdown] slideshow={"slide_type": "slide"}
# ## Strings
# -
x = "Hello World!"
"""Hello World!"""
"""Hello
World
!
"""
# + [markdown] slideshow={"slide_type": "slide"}
# ### Indexing
# -
x[1]
x[end-1]
# + slideshow={"slide_type": "fragment"}
x[3:5]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Unicode and UTF-8
# -
s = "\u2200 x \U2203 y"
# + slideshow={"slide_type": "fragment"}
s[1]
# + slideshow={"slide_type": "fragment"}
s[2]
# + [markdown] slideshow={"slide_type": "slide"}
# ### 用來告訴你下一個index
# -
nextind(s, 1)
# + slideshow={"slide_type": "fragment"}
s[4]
# + [markdown] slideshow={"slide_type": "slide"}
# ## Operators
# + slideshow={"slide_type": "slide"}
length("123456")
# + [markdown] slideshow={"slide_type": "slide"}
# ### Interpolation
# + slideshow={"slide_type": "fragment"}
x = "Today"
y = "Sunday"
string(x, " is ", y)
# + slideshow={"slide_type": "fragment"}
"$x is $y"
# + slideshow={"slide_type": "fragment"}
"1 + 2 = $(1 + 2)"
# + [markdown] slideshow={"slide_type": "slide"}
# ### Equivalent
# + slideshow={"slide_type": "-"}
"1 + 2 = 3" == "1 + 2 = $(1 + 2)"
# + [markdown] slideshow={"slide_type": "slide"}
# ### Contains substring
# -
occursin("na", "banana")
# + [markdown] slideshow={"slide_type": "slide"}
# ### Repeat
# -
repeat(x, 10)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Join strings
# -
join(["apples", "bananas", "pineapples"], ", ", " and ")
# + [markdown] slideshow={"slide_type": "slide"}
# ### Split strings
# -
split("1,2,3,4,5,6", ",")
# + [markdown] slideshow={"slide_type": "slide"}
# ### Replace
# -
replace("Hello, world!", "world" => "Julia")
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 5
# + [markdown] slideshow={"slide_type": "-"}
# 如果我們要把以下的文字解析成電腦可以運算的數字,要怎麼做呢?
# -
matrix = """1, 2, 3, 4
5, 6, 7, 8
9, 10, 11, 12"""
# + [markdown] slideshow={"slide_type": "slide"}
# 其中一個答案:
#
# 我們要對文字做處理,可以先針對不同行先切分,所以分隔符是 "\n",這是代表 換行 的符號,他也是一種跳脫字元,在 Julia 中,跳脫字元會以 \ 做起始,他可以用來表示那些不可列印的字元。
# + slideshow={"slide_type": "fragment"}
rows = split(matrix, "\n")
# + [markdown] slideshow={"slide_type": "slide"}
# 接著,可以用兩層的 for 迴圈分別去處理列以及每一個元素,要把每一列也依據分隔符切開,切開後的元素需要經由 parse 函式來轉成整數,然後把整數存進陣列中。
# + slideshow={"slide_type": "fragment"}
# Accumulate every matrix entry into a flat Int64 vector.
A = Int64[]
for row in rows
    elements = split(row, ", ")
    for e in elements
        # parse(Int64, s) converts the token directly to an integer;
        # Meta.parse would build an arbitrary Julia expression, which is
        # both unsafe on untrusted text and not guaranteed to be an Int.
        push!(A, parse(Int64, e))
    end
end
# + slideshow={"slide_type": "slide"}
A
# + [markdown] slideshow={"slide_type": "slide"}
# # Functions
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is function?
# -
# 當有些程式行為需要不斷被重複使用,只需要更改行為的一部份即可
#
# 這些行為就可以被**抽出來(abstract)**,成為 function
#
# 讓這部份程式可以有更**廣泛的(generic)**用處,而不是**狹隘而特定的(specific)**
# + slideshow={"slide_type": "slide"}
function f(x, y)
return x + y
end
# + slideshow={"slide_type": "fragment"}
f(1, 2)
# + [markdown] slideshow={"slide_type": "slide"}
# 當你呼叫函式 `f(1, 2)` 的時候,`x=1` 與 `y=2` 會被傳送給 `f`。
#
# 函式就會進行後續的運算,並把運算結果透過 `return` 進行回傳。
#
# 當函數被呼叫,記憶體會空出一塊空間給函式,是函式的運算空間。
# + slideshow={"slide_type": "fragment"}
f(f(1, 2), 3)
# -
# 當以上函式被呼叫,最內部的函式 `f(1, 2)` 會先被運算,等運算結果回傳之後,才運算外層的函式 `f(3, 3)`。
# + [markdown] slideshow={"slide_type": "slide"}
# 短小輕巧的函式在Julia很常見
# + slideshow={"slide_type": "fragment"}
h(x, y) = x + y
# + slideshow={"slide_type": "fragment"}
h(1, 2)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Specify input and output datatype
# + slideshow={"slide_type": "fragment"}
function g(x::Int64, y::Int64)::Int64
return x + y
end
# + slideshow={"slide_type": "fragment"}
g(1, 2)
# + slideshow={"slide_type": "fragment"}
g(1.2, 2.3)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Argument passing
# + [markdown] slideshow={"slide_type": "slide"}
# ***call-by-value***
# -
# 複製一份變數的值到函式中
#
# e.g. C, primitive values in Java
# + [markdown] slideshow={"slide_type": "-"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ***call-by-reference***
# -
# 在函式中製造一個參考(reference),參考指向變數
#
# e.g. Python, object in Java
# + [markdown] slideshow={"slide_type": "-"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ***pass-by-sharing***
# -
# 傳參數時,並不會複製一份給函式,但是參數本身會作為一個新的變數**綁定(bind)**到原本值的位址
# + [markdown] slideshow={"slide_type": "-"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 如何驗證以上的行為?
# + slideshow={"slide_type": "slide"}
println(objectid(1))
# + slideshow={"slide_type": "fragment"}
x = 1
println(objectid(x))
# + slideshow={"slide_type": "fragment"}
# Demonstrates pass-by-sharing: on entry the parameter is bound to the
# caller's object (same objectid); rebinding `x` inside the function only
# changes the local binding, leaving the caller's variable untouched.
function sharing(x)
    println(objectid(x))
    x = 2
    println(objectid(x))
end
# + slideshow={"slide_type": "fragment"}
sharing(x)
# + slideshow={"slide_type": "fragment"}
x
# + [markdown] slideshow={"slide_type": "slide"}
# ## Operators are functions
# -
1 + 2 + 3 + 4 + 5 + 6
# + slideshow={"slide_type": "fragment"}
+(1, 2, 3, 4, 5, 6)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Anonymous functions
# + slideshow={"slide_type": "fragment"}
a = () -> println("Calling function a.")
# + slideshow={"slide_type": "fragment"}
a()
# + slideshow={"slide_type": "slide"}
b = x -> println(x)
# -
b(5)
# + slideshow={"slide_type": "fragment"}
c = (x, y) -> x + y
# -
c(2, 3)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tuples
# -
x = (1, 2, 3)
# + slideshow={"slide_type": "fragment"}
x[1]
# + slideshow={"slide_type": "fragment"}
x[2:3]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Tuple is immutable
# + slideshow={"slide_type": "fragment"}
objectid(x)
# + slideshow={"slide_type": "fragment"}
objectid(x[2:3])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Unpacking
# -
a, b, c = x
# + slideshow={"slide_type": "fragment"}
a
# -
b
c
# + [markdown] slideshow={"slide_type": "slide"}
# ### Swap
# + slideshow={"slide_type": "fragment"}
b, a = a, b
# + slideshow={"slide_type": "fragment"}
a
# -
b
# + [markdown] slideshow={"slide_type": "slide"}
# ### Tuple is the data structure that pass arguments to function
# -
h(1, 2)
# + [markdown] slideshow={"slide_type": "slide"}
# ## `return` keyword
# -
function sumproduct(x, y, z)
return (x + y) * z
end
# + slideshow={"slide_type": "fragment"}
function sumproduct(x, y, z)
(x + y) * z
end
# + [markdown] slideshow={"slide_type": "slide"}
# ## Multiple return values
# -
# Returns the three arguments rotated left as the tuple (y, z, x),
# illustrating "multiple return values" via an implicitly returned tuple.
function shuffle_(x, y, z)
    (y, z, x)
end
# + [markdown] slideshow={"slide_type": "slide"}
# ## Argument destruction
# -
x = [1, 2, 3]
shuffle_(x...)
# 等價於 `shuffle_(1, 2, 3)`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Vectorizing functions
# -
# 適用 operators 跟 functions
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
# + slideshow={"slide_type": "fragment"}
x .^ 2
# + [markdown] slideshow={"slide_type": "slide"}
# ### User-defined function
# + slideshow={"slide_type": "-"}
f(x) = 3x
# + slideshow={"slide_type": "fragment"}
f.(x)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 6
# -
# 撰寫簡短的程式計算 $f(x, y) = x^2 + y^2 + 5xy + 3$ 的結果,並將以下的數值帶入求值:
data = [(1, 1), (2, 3), (-78, 96), (0, 7), (6, 6)]
# + [markdown] slideshow={"slide_type": "slide"}
# 其中一個答案
# -
f(x, y) = x^2 + y^2 + 5x*y + 3
# + slideshow={"slide_type": "fragment"}
f.(data)
# + slideshow={"slide_type": "slide"}
f(tup::Tuple) = f(tup...)
# + slideshow={"slide_type": "fragment"}
f.(data)
# + [markdown] slideshow={"slide_type": "slide"}
# # Types
# + slideshow={"slide_type": "fragment"}
# Immutable composite type holding 2-D Cartesian coordinates.
struct Point
    x::Float64
    y::Float64
end
# + slideshow={"slide_type": "fragment"}
p = Point(3.0, 4.0)
# + slideshow={"slide_type": "slide"}
p.x
# -
p.y
# + slideshow={"slide_type": "slide"}
import Base.length
# + slideshow={"slide_type": "-"}
length(p::Point) = sqrt(p.x^2 + p.y^2)
# + slideshow={"slide_type": "fragment"}
length(p)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Quiz 7
# -
# 定義時間的型別,當中需要紀錄小時、分鐘跟秒。定義 `format` 函式,可以將時間物件格式化成 "HH:MM:SS" 輸出。
# + [markdown] slideshow={"slide_type": "slide"}
# # Q & A
# -
| notebook/2_organize_it.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlp
# language: python
# name: nlp
# ---
# # Extract Keywords from a website: counting words
from langdetect import detect
from newspaper import Article
import string
import unidecode
from collections import Counter
import cleantext
## remove accents
def remove_accents(s: str) -> str:
    """Lowercase *s* and transliterate accented characters to plain ASCII."""
    return unidecode.unidecode(s.lower())
# url
url = 'https://es.wikipedia.org/wiki/Distancia_euclidiana'
# build article object
article = Article(url)
# download
article.download()
# parse
article.parse()
# get text
text = article.text.replace('\n',' ').replace('\t',' ')
# language detection
lang = detect(text)
# language name conversion
dconverter = {'es':'spanish', 'en':'english'}
language = dconverter[lang]
# text cleaning
text_cleaned = cleantext.clean(text,
all= False, # Execute all cleaning operations
extra_spaces=True , # Remove extra white spaces
stemming=False , # Stem the words
stopwords=True ,# Remove stop words
lowercase=True ,# Convert to lowercase
numbers=True ,# Remove all digits
punct=True ,# Remove all punctuations
stp_lang=language) # Language for stop words
text_cleaned
# ### get most frequent words
# final text cleaning
clean_text = []
# Keep only real words: drop punctuation tokens and single characters, then
# keep tokens whose detected language matches the article's language.
for word in text_cleaned.split(' '):
    if word in string.punctuation or len(word) <= 1:
        continue
    try:
        # langdetect raises LangDetectException on tokens with no usable
        # features (numbers, symbols, very short strings), which would
        # otherwise abort the whole loop; skip such tokens instead.
        if detect(word) == lang:
            clean_text.append(remove_accents(word))
    except Exception:
        continue
# Frequency table of the surviving, accent-stripped words.
count_each_word = Counter(clean_text)
count_each_word.most_common(30)
# ### get keywords with newspaper
# build article object
article = Article(url, language = lang)
# download
article.download()
# parse
article.parse()
# nlp
article.nlp()
# keywords
print('keywords:', article.keywords)
# summary
print(article.summary.replace('\n',''))
| notebooks/nlp/keywords-from_website_counting_words.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#import relevant libraries
import pandas as pd
import json
from pandas.io.json import json_normalize
# +
# Reading the json file as a dictionary
with open('./data/sample-ocds-award-data.json') as data:
ocds_award = json.load(data)
# +
# Printing out the keys of the sample ocds data
ocds_award.keys()
# +
# Flatten the list of release dicts into a DataFrame.
# pd.json_normalize is the supported spelling; the pandas.io.json import
# used at the top of this notebook is deprecated (removed in pandas 2.0).
all_releases = pd.json_normalize(ocds_award['releases'])
# json_normalize already returns a DataFrame; no pd.DataFrame() wrapper needed.
all_releases.head()
# +
# flattening out 'awards' to see all the details it contains
# Flatten each release record and carry the top-level 'awards' metadata along.
# Record columns are prefixed with 'awards/'; errors='ignore' tolerates
# releases missing the metadata key. pd.json_normalize replaces the
# deprecated pandas.io.json.json_normalize used elsewhere in this notebook.
award_releases = pd.json_normalize(ocds_award, 'releases', ['awards'], errors='ignore', record_prefix='awards/')
award_releases.head()
# +
#as we're still not getting supplier details let's further flatten 'awards/awards.'
#fist, let's read the column in a dataframe
award_details = pd.DataFrame(award_releases['awards/awards'])
award_details
# +
#unpacking all details in the new dataframe 'award_details'
def unpack(award_details):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list items are kept in order; nested lists are expanded in place.
    """
    flat = []
    for item in award_details:
        if isinstance(item, list):
            # Recurse into nested lists; extend() avoids the quadratic cost
            # of repeated `list + list` concatenation.
            flat.extend(unpack(item))
        else:
            flat.append(item)
    return flat
# Flatten every column of the DataFrame: .items() yields (column, Series)
# pairs, and each cell may hold an arbitrarily nested list of award dicts.
award_details_unpacked = {k: unpack(v) for k, v in award_details.items()}
# +
#printing out unpacked dataframe
award_details_unpacked
| analysis-sample-json-ocds-award-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from IPython.display import HTML
# Cell visibility - COMPLETE:
tag = HTML('''<style>
div.input {
display:none;
}
</style>''')
display(tag)
# #Cell visibility - TOGGLE:
# tag = HTML('''<script>
# code_show=true;
# function code_toggle() {
# if (code_show){
# $('div.input').hide()
# } else {
# $('div.input').show()
# }
# # code_show = !code_show
# }
# $( document ).ready(code_toggle);
# </script>
# <p style="text-align:right">
# Toggle cell visibility <a href="javascript:code_toggle()">here</a>.</p>''')
# display(tag)
# -
# ## Integrals of polynomials
#
# In this interactive example you can visualize some interesting aspects concerning the integral of a polynomial function. For a given polynomial (which can be set by making use of coefficient sliders), indefinite integral will be dynamically calculated and presented, both in the plot and in the mathematical notation.
#
# Furthermore, by setting lower and upper limit (using dedicated slider widgets), the respective area under curve will be highlighted and calculated. Please note that the lower limit has to be smaller than the upper limit, in order for definite integral to be valid.
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import sympy as sym
from IPython.display import Latex, display, clear_output, Markdown # For displaying Markdown and LaTeX code
from ipywidgets import widgets
from ipywidgets import interactive
import matplotlib.patches as mpatches
from scipy.integrate import quad
from IPython.display import HTML
red_patch = mpatches.Patch(color='red', label='$f(x)$')
blue_patch = mpatches.Patch(color='blue', label='Indefinite integral of $f(x)$')
gray_patch = mpatches.Patch(color='lightgray', label='Area under the curve')
# Plot axis limits.
XLIM = 10
YLIM = 30
# Symbolic variable used by sympy for integration.
x = sym.symbols('x')
# Polynomial coefficients (updated from the sliders below).
a = 0
b = 0
c = 0
d = 0
e = 0
# Constant of integration.
C = 0
# Sliders
fs_a = widgets.FloatSlider(description='$a$', min=-10.0, max=10.0, step=0.5, continuous_update=False)
fs_b = widgets.FloatSlider(description='$b$', min=-10.0, max=10.0, step=0.5, continuous_update=False)
fs_c = widgets.FloatSlider(description='$c$',min=-10.0, max=10.0, step=0.5, continuous_update=False)
fs_d = widgets.FloatSlider(description='$d$',min=-10.0, max=10.0, step=0.5, continuous_update=False)
fs_e = widgets.FloatSlider(description='$e$',min=-10.0, max=10.0, step=0.5, continuous_update=False)
w_C = widgets.FloatSlider(description='$C$:',min=-10.0, max=10.0, step=0.5, continuous_update=False)
lower_limit = widgets.FloatSlider(description='Lower limit:',min=-10.0, max=10.0, step=0.5, continuous_update=False)
upper_limit = widgets.FloatSlider(description='Upper limit:',min=-10.0, max=10.0, step=0.5, continuous_update=False)
# Mathematical notation of a specific (user-defined) polynomial, shown as Markdown
fourth_order = "e + d * x + c * x ** 2 + b * x ** 3 + a * x ** 4"
third_order = "d + c * x + b * x ** 2 + a * x ** 3"
second_order = "c + b * x + a * x ** 2"
first_order = "b + a * x"
zero_order = "a"
tf = sym.sympify(fourth_order)
w_mark = Markdown('$%s$' %sym.latex(tf))
# General mathematical notation of a polynomial (shown in Label widget)
fourth_order_html = "$f(x)=ax^4$ + $bx^3$ + $cx^2$ + $dx$ + $e$"
third_order_html = "$f(x)=ax^3$ + $bx^2$ + $cx$ + $d$"
second_order_html = "$f(x)=ax^2$ + $bx$ + $c$"
first_order_html = "$f(x)=ax$ + $b$"
zero_order_html = "$f(x)=a$"
w_funLabel = widgets.Label(layout=widgets.Layout(width='40%', margin='0px 0px 0px 50px'),)
dd_order = widgets.Dropdown(
options=['4', '3', '2', '1', '0'],
value='4',
description='Select order of the polynomial [0-4]:',
disabled=False,
style = {'description_width': 'initial'},
)
def dropdown_eventhandler(change):
    """React to a change of the polynomial-order dropdown.

    Hides every coefficient slider, zeroes the sliders for coefficients that
    no longer exist at the selected order, re-shows the remaining sliders
    relabelled a, b, c, ... from the leading term, and updates the general
    formula label.
    """
    sliders = [fs_a, fs_b, fs_c, fs_d, fs_e]
    formula_labels = [fourth_order_html, third_order_html, second_order_html,
                      first_order_html, zero_order_html]
    order = int(dd_order.value)
    hidden_count = len(sliders) - 1 - order  # leading sliders not used at this order
    for slider in sliders:
        slider.layout.visibility = 'hidden'
    for slider in sliders[:hidden_count]:
        slider.value = 0
    for position, slider in enumerate(sliders[hidden_count:]):
        slider.layout.visibility = 'visible'
        slider.description = '$%s$' % 'abcde'[position]
    w_funLabel.value = formula_labels[hidden_count]
dd_order.observe(dropdown_eventhandler, names='value')
# Functions
def polynomial_function(X_quad, X_cubed, X_squared, X, const, x):
    """Evaluate const + X*x + X_squared*x**2 + X_cubed*x**3 + X_quad*x**4.

    Works for scalars and numpy arrays alike (pure arithmetic).
    """
    result = const
    for power, coefficient in enumerate((X, X_squared, X_cubed, X_quad), start=1):
        result = result + coefficient * x ** power
    return result
def fun(x):
    """Evaluate the polynomial using the current global coefficients a..e.

    Accepts a number (for scipy.integrate.quad) or a sympy symbol (to build
    the symbolic expression handed to f_integral).
    """
    global a, b, c, d, e
    coefficients = (e, d, c, b, a)  # constant term first
    return sum(coef * x ** power for power, coef in enumerate(coefficients))
def f_integral(fx):
    """Symbolically integrate fx with respect to the global symbol x.

    Returns the empty string when fx is identically zero, signalling the
    caller that there is nothing to plot.
    """
    if fx.is_zero:
        return ""
    return sym.integrate(fx, x)
def convert(base_text, ss):
    """Display base_text followed by the LaTeX rendering of expression ss.

    ss may be an expression string or a sympy expression; an empty string is
    silently ignored (nothing to render).
    """
    if ss == "":
        return
    expression = sym.sympify(ss)
    display(Markdown(base_text + '$%s$' % sym.latex(expression)))
# Plotting
def plot_limits(X_quad, X_cubed, X_squared, X, const, ax, a_limit, b_limit):
    """Shade the area under the polynomial between a_limit and b_limit on ax."""
    xs = np.linspace(a_limit, b_limit)
    ys = polynomial_function(X_quad, X_cubed, X_squared, X, const, xs)
    # Close the polygon along the x-axis so the fill hugs the curve.
    vertices = [(a_limit, 0)] + list(zip(xs, ys)) + [(b_limit, 0)]
    ax.add_patch(Polygon(vertices, facecolor='0.9', edgecolor='0.5'))
def plot_function(X_quad, X_cubed, X_squared, X, const, C, llimit, ulimit):
    """Plot f(x), its indefinite integral shifted by C, and the shaded area
    between llimit and ulimit; then print the symbolic forms and the numeric
    definite integral.

    Side effect: updates the module-level coefficients a..e so that fun()
    and f_integral() see the current slider values.
    """
    global a, b, c, d, e, output, x
    a = X_quad
    b = X_cubed
    c = X_squared
    d = X
    e = const
    fig = plt.figure(figsize=(12,6))
    ax = fig.add_subplot(1, 1, 1)
    # Plot input function
    x_p = np.linspace(-XLIM, XLIM, num=1000)
    y_p = polynomial_function(X_quad, X_cubed, X_squared, X, const, x_p)
    plt.plot(x_p, y_p, 'r-')
    # Plot indefinite integral of the input function
    integ = f_integral(fun(x))
    #integ = integ + str(C)
    # Append the integration constant to the symbolic result as plain text so
    # the rendered expression reads "... - |C|" or "... + C".
    if integ != "":
        if C < 0:
            integ = str(integ) + "-" + str(abs(C))
        else:
            integ = str(integ)+ "+" + str(C)
        f_integrate = sym.lambdify(x, integ) # from str to function
        x_p = np.linspace(-XLIM, XLIM, num=1000)
        y_p = f_integrate(x_p)
        ax.plot(x_p, y_p, 'b-', linewidth=2)
    # Plot integral limits (area under curve)
    if ulimit < llimit:
        display(Markdown('Upper limit and lower limit not consistent'))
        res = "";
    else:
        plot_limits(X_quad, X_cubed, X_squared, X, const, ax, llimit, ulimit)
        # Numeric definite integral of the current polynomial.
        res, err = quad(fun, llimit, ulimit)
    plt.grid(True)
    plt.xlim(-XLIM, XLIM)
    plt.ylim(-YLIM, YLIM)
    plt.axhline(y=0,lw=0.8,color='k')
    plt.axvline(x=0,lw=0.8,color='k')
    plt.xlabel('x')
    plt.ylabel('$f(x)$, indefinite integral of $f(x)$')
    plt.legend(handles=[red_patch, blue_patch, gray_patch])
    plt.show()
    convert("Input function $f(x)$: ", fun(x))
    # Show a symbolic "+ C" in the printed form when C is zero.
    # NOTE(review): when C == 0 the string above already ends in "+0", so
    # this produces "...+0+ C" -- confirm that is intended.
    if integ != "":
        if C == 0:
            integ_str = str(integ) + "+ C"
        else:
            integ_str = str(integ)
        convert("Indefinite integral of $f(x)$: ", integ_str)
    if res != "":
        display(Markdown('Area under the curve: ' + str(res)))
# Initial formula label (the dropdown defaults to a 4th-order polynomial).
w_funLabel.value=fourth_order_html
control_widgets = widgets.HBox()
control_widgets.children=[dd_order, w_funLabel]
display(control_widgets)
# Wire sliders and limit widgets to plot_function; returns the interactive widget.
interactive(plot_function, const=fs_e, X=fs_d, X_squared=fs_c, X_cubed=fs_b, X_quad = fs_a, C = w_C, llimit=lower_limit, ulimit=upper_limit)
# -
| ICCT_en/examples/01/.ipynb_checkpoints/M-05_Integrals_of_polynomials-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torchvision.models as models
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib import rcParams
from itertools import combinations
# https://pytorch.org/hub/pytorch_vision_vgg/
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
# https://becominghuman.ai/extract-a-feature-vector-for-any-image-with-pytorch-9717561d1d4c
#
# Download the pretrained VGG16 (ImageNet weights).
vgg16 = models.vgg16(pretrained=True)
# +
#print(vgg16.summary())
# +
# Load Images
from PIL import Image
# Open Image
# NOTE(review): absolute local paths -- these resolve only on the original
# author's machine.
input_image1 = Image.open('/Users/zixiaochen/Documents/NYU/Spring_2021/DS-GA-1016/CCM_SimilarityRatings/Images/Cardinal_0010_18894.jpg')
input_image2 = Image.open('/Users/zixiaochen/Documents/NYU/Spring_2021/DS-GA-1016/CCM_SimilarityRatings/Images/Cardinal_0014_17389.jpg')
# +
# Resize and preprocess image
from torchvision import transforms
# Standard ImageNet preprocessing: resize shorter side to 256, center-crop to
# 224x224, convert to tensor, normalize with ImageNet channel statistics.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
])
input_tensor = preprocess(input_image1)
input_batch1 = input_tensor.unsqueeze(0) # creating minibatch for model
input_tensor2 = preprocess(input_image2)
input_batch2 = input_tensor2.unsqueeze(0) # creating minibatch for model
# +
#model.extract_features(input_batch1).shape
# -
def get_vector(image_name):
    """Return a 512-d feature vector for one image via a forward hook.

    NOTE(review): this function is adapted from the blog post linked above;
    `Variable`, `normalize`, `to_tensor`, `scaler`, `layer`, `model` and
    `torch` are not defined anywhere in this notebook's visible cells, so
    calling it raises NameError until that setup code is added.
    """
    # 1. Load the image with Pillow library
    img = Image.open(image_name)
    # 2. Create a PyTorch Variable with the transformed image
    t_img = Variable(normalize(to_tensor(scaler(img))).unsqueeze(0))
    # 3. Create a vector of zeros that will hold our feature vector
    # The 'avgpool' layer has an output size of 512
    my_embedding = torch.zeros(512)
    # 4. Define a function that will copy the output of a layer
    def copy_data(m, i, o):
        my_embedding.copy_(o.data)
    # 5. Attach that function to our selected layer
    h = layer.register_forward_hook(copy_data)
    # 6. Run the model on our transformed image
    model(t_img)
    # 7. Detach our copy function from the layer
    h.remove()
    # 8. Return the feature vector
    return my_embedding
# **Another trial**
# +
import tensorflow as tf
# Keras VGG16 with the classifier head; rebuild it without the final
# (softmax) layer so the model outputs the 4096-d fc2 features instead of
# class probabilities.
vgg_model = tf.keras.applications.vgg16.VGG16(weights='imagenet', include_top=True, input_shape = (224, 224, 3))
v_model = tf.keras.Sequential()
for l in vgg_model.layers[:-1]:
    v_model.add(l)
v_model.summary()
# -
import glob
import re
import numpy as np
from keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
# +
import glob
import os
import re
import cv2
import numpy as np
from PIL import Image
from keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
# Map image basename (without extension) -> loaded PIL image.
data = {}
name=[]  # NOTE(review): never populated; name.sort() in a later cell is a no-op
path = "/Users/zixiaochen/Documents/NYU/Spring_2021/DS-GA-1016/CCM_SimilarityRatings/Selected Bird Images/*.jpg"
for file in glob.glob(path):
    temp1 = image.load_img(file)
    temp2=os.path.basename(file).split(".")[0]
    data.update({temp2 : temp1})
# Extract a 4096-d VGG16 fc2 feature vector for every image:
# resize -> add batch dim -> Keras VGG preprocessing -> forward pass.
mapping = {}
for i in data:
    img = data[i].resize((224, 224))
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    feature = v_model.predict(img)
    mapping.update({i : feature})
    mapping[i] = np.reshape(mapping[i],4096)
    print(i)
# -
print(len(mapping.keys()))
# +
name.sort()
# Stack the per-image feature vectors into an (18, 4096) matrix, iterating
# the images in dict insertion order (same order as extraction above).
li = []
for i in data:
    li.append(mapping[i])
F = np.asarray(li)
F = np.reshape(F, (18,4096))
print(F)
#Mat = F.dot(F.transpose())
#OrigSimMat = Mat
#Mat = np.reshape(Mat, (324))
#print(Mat)
# -
F.shape
# Save the features to CSV. NOTE(review): np.savetxt returns None, so `model`
# is None here; it is rebound to the CSV path two lines below before use.
model = np.savetxt("vgg_mat.csv", F, delimiter=",")
# !pwd
# Correlate model similarity with the averaged human similarity ratings.
from model_hum_corr import *
model = 'vgg_mat.csv'
human_mat = 'caffe_net/avg_hum_ratings.csv'
calc_corr(model,human_mat)
| VGG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''base'': conda)'
# name: python3
# ---
# +
import mne
def getMNErawArray(np_data_raw):
    """Wrap a raw numpy recording into an mne.io.RawArray.

    The recording is expected to carry 8 EEG channels plus one stimulus
    channel, sampled at 256 Hz.
    """
    sampling_rate = 256
    channel_names = ['P1','P3','P5','P7','P10','O2','O7','O8','stim']
    channel_types = ['eeg'] * 8 + ['stim']
    info = mne.create_info(channel_names, sfreq=sampling_rate, ch_types=channel_types)
    # index 1-10 only for EEG and stim channels
    return mne.io.RawArray(np_data_raw, info)
# +
import numpy as np
from pathlib import Path

results_dir = Path("Risultati")
output_trials_dir = Path("trials")
files = sorted(list(results_dir.glob("*.npy")))
for filepath in files:
    raw_data = np.load(filepath)
    mne_data = getMNErawArray(raw_data)
    # find events and create epochs
    stim_events = {'9Hz': 1, '10Hz': 2, '11Hz': 3, '13Hz': 4}
    events = mne.find_events(mne_data, stim_channel='stim')
    TRIAL_DURATION = 7.35
    epochs = mne.Epochs(mne_data, events, event_id=stim_events, tmin=-0.005, tmax=TRIAL_DURATION, picks=['eeg'], baseline=None) # each trial is about 7.35 s from onset stimulus
    # Fix naming issues: map the recording number in the stem to a
    # subject/session pair. Checks stay in the original order, so "1" wins
    # over "2" when a stem contains both digits.
    if "1" in filepath.stem:
        new_filename = f"subject_1_session_1_{str(filepath.stem)[3:-1]}"
    elif "2" in filepath.stem:
        new_filename = f"subject_1_session_2_{str(filepath.stem)[3:-1]}"
    elif "3" in filepath.stem:
        new_filename = f"subject_2_session_1_{str(filepath.stem)[3:-1]}"
    elif "4" in filepath.stem:
        new_filename = f"subject_2_session_2_{str(filepath.stem)[3:-1]}"
    else:
        # BUG FIX: previously a filename without any of "1".."4" left
        # new_filename unbound (NameError) or silently reused the previous
        # iteration's name. Skip such files explicitly instead.
        print(f"skipping {filepath.name}: unrecognised recording number")
        continue
    np.save(output_trials_dir / new_filename, epochs.get_data())
| Pre-Processing/transform_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="eeMrMI0-1Dhu"
from IPython.display import display, SVG
import numpy as np
import os
import pydot
import sys
from pydrake.all import (
Adder, AddMultibodyPlantSceneGraph, Demultiplexer, DiagramBuilder,
InverseDynamicsController, MakeMultibodyStateToWsgStateSystem,
MeshcatVisualizerCpp, MultibodyPlant, Parser, PassThrough,
SchunkWsgPositionController, StateInterpolatorWithDiscreteDerivative
)
from manipulation.meshcat_cpp_utils import StartMeshcat
from manipulation.scenarios import AddIiwa, AddWsg, AddRgbdSensors
from manipulation.utils import FindResource
from manipulation import running_as_notebook
# Headless Linux environments (e.g. CI or Colab without a DISPLAY) need a
# virtual framebuffer before any rendering can happen.
if running_as_notebook and sys.platform == "linux" and os.getenv("DISPLAY") is None:
    from pyvirtualdisplay import Display
    virtual_display = Display(visible=0, size=(1400, 900))
    virtual_display.start()
# -
# Start the visualizer.
meshcat = StartMeshcat()
# # Set up a basic ManipulationStation diagram
#
# Completely in pydrake. Feel free to modify it as you see fit. You might also look at the [C++ version](https://github.com/RobotLocomotion/drake/blob/master/examples/manipulation_station/manipulation_station.cc#L193) if you want inspiration for tables / cupboards / bins, etc that you might add. Here is [a link to the scenarios file](https://github.com/RussTedrake/manipulation/blob/master/manipulation/scenarios.py), in case you need to modify `AddIiwa` and friends.
# +
def MakeManipulationStation(time_step=0.002):
    """Build a manipulation-station Diagram: an iiwa arm with a WSG gripper
    and an RGB-D camera, wired to controllers, with command/state ports
    exported at the diagram boundary.

    Args:
        time_step: discrete time step (seconds) used for both the simulated
            plant and the controller's internal plant/interpolator.

    Returns:
        The built pydrake Diagram.
    """
    builder = DiagramBuilder()
    # Add (only) the iiwa, WSG, and cameras to the scene.
    plant, scene_graph = AddMultibodyPlantSceneGraph(
        builder, time_step=time_step)
    iiwa = AddIiwa(plant)
    wsg = AddWsg(plant, iiwa)
    Parser(plant).AddModelFromFile(
        FindResource("models/camera_box.sdf"), "camera0")
    plant.Finalize()
    num_iiwa_positions = plant.num_positions(iiwa)
    # I need a PassThrough system so that I can export the input port.
    iiwa_position = builder.AddSystem(PassThrough(num_iiwa_positions))
    builder.ExportInput(iiwa_position.get_input_port(), "iiwa_position")
    builder.ExportOutput(iiwa_position.get_output_port(), "iiwa_position_command")
    # Export the iiwa "state" outputs: split [q; v] into positions and velocities.
    demux = builder.AddSystem(Demultiplexer(
        2 * num_iiwa_positions, num_iiwa_positions))
    builder.Connect(plant.get_state_output_port(iiwa), demux.get_input_port())
    builder.ExportOutput(demux.get_output_port(0), "iiwa_position_measured")
    builder.ExportOutput(demux.get_output_port(1), "iiwa_velocity_estimated")
    builder.ExportOutput(plant.get_state_output_port(iiwa), "iiwa_state_estimated")
    # Make the plant for the iiwa controller to use (gripper welded on so the
    # controller model matches the real arm's inertia without gripper DOFs).
    controller_plant = MultibodyPlant(time_step=time_step)
    controller_iiwa = AddIiwa(controller_plant)
    AddWsg(controller_plant, controller_iiwa, welded=True)
    controller_plant.Finalize()
    # Add the iiwa controller
    iiwa_controller = builder.AddSystem(
        InverseDynamicsController(
            controller_plant,
            kp=[100]*num_iiwa_positions,
            ki=[1]*num_iiwa_positions,
            kd=[20]*num_iiwa_positions,
            has_reference_acceleration=False))
    iiwa_controller.set_name("iiwa_controller")
    builder.Connect(
        plant.get_state_output_port(iiwa), iiwa_controller.get_input_port_estimated_state())
    # Add in the feed-forward torque
    adder = builder.AddSystem(Adder(2, num_iiwa_positions))
    builder.Connect(iiwa_controller.get_output_port_control(),
                    adder.get_input_port(0))
    # Use a PassThrough to make the port optional (it will provide zero values if not connected).
    torque_passthrough = builder.AddSystem(PassThrough([0]*num_iiwa_positions))
    builder.Connect(torque_passthrough.get_output_port(),
                    adder.get_input_port(1))
    builder.ExportInput(torque_passthrough.get_input_port(),
                        "iiwa_feedforward_torque")
    builder.Connect(adder.get_output_port(),
                    plant.get_actuation_input_port(iiwa))
    # Add discrete derivative to command velocities.
    desired_state_from_position = builder.AddSystem(
        StateInterpolatorWithDiscreteDerivative(
            num_iiwa_positions, time_step, suppress_initial_transient=True))
    desired_state_from_position.set_name("desired_state_from_position")
    builder.Connect(desired_state_from_position.get_output_port(),
                    iiwa_controller.get_input_port_desired_state())
    builder.Connect(iiwa_position.get_output_port(),
                    desired_state_from_position.get_input_port())
    # Export commanded torques.
    #builder.ExportOutput(adder.get_output_port(), "iiwa_torque_commanded")
    #builder.ExportOutput(adder.get_output_port(), "iiwa_torque_measured")
    # Wsg controller.
    wsg_controller = builder.AddSystem(SchunkWsgPositionController())
    wsg_controller.set_name("wsg_controller")
    builder.Connect(
        wsg_controller.get_generalized_force_output_port(),
        plant.get_actuation_input_port(wsg))
    builder.Connect(plant.get_state_output_port(wsg),
                    wsg_controller.get_state_input_port())
    builder.ExportInput(wsg_controller.get_desired_position_input_port(),
                        "wsg_position")
    builder.ExportInput(wsg_controller.get_force_limit_input_port(),
                        "wsg_force_limit")
    wsg_mbp_state_to_wsg_state = builder.AddSystem(
        MakeMultibodyStateToWsgStateSystem())
    builder.Connect(plant.get_state_output_port(wsg),
                    wsg_mbp_state_to_wsg_state.get_input_port())
    builder.ExportOutput(wsg_mbp_state_to_wsg_state.get_output_port(),
                         "wsg_state_measured")
    builder.ExportOutput(wsg_controller.get_grip_force_output_port(),
                         "wsg_force_measured")
    # Cameras.
    AddRgbdSensors(builder, plant, scene_graph)
    # Export "cheat" ports.
    builder.ExportOutput(scene_graph.get_query_output_port(), "geometry_query")
    builder.ExportOutput(plant.get_contact_results_output_port(),
                         "contact_results")
    builder.ExportOutput(plant.get_state_output_port(),
                         "plant_continuous_state")
    diagram = builder.Build()
    return diagram
diagram = MakeManipulationStation()
# Render the block diagram as an SVG for inspection.
display(SVG(pydot.graph_from_dot_data(diagram.GetGraphvizString())[0].create_svg()))
# +
def TestWithMeshcat():
    """Smoke test: build the station, attach a Meshcat visualizer to its
    geometry_query port, and publish one frame with the default context."""
    builder = DiagramBuilder()
    station = builder.AddSystem(MakeManipulationStation())
    MeshcatVisualizerCpp.AddToBuilder(
        builder, station.GetOutputPort("geometry_query"), meshcat)
    diagram = builder.Build()
    context = diagram.CreateDefaultContext()
    diagram.Publish(context)
TestWithMeshcat()
# -
| manipulation_station.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import signal
# Noisy linear trend: t plus unit-variance Gaussian noise.
t = np.linspace(0, 5, 100)
x = t + np.random.normal(size=100)
# Original signal ...
plt.plot(t, x, linewidth=3)
plt.show()
# ... and the same signal with its best-fit linear trend removed.
plt.plot(t, signal.detrend(x), linewidth=3)
plt.show()
# -
| python/scipy_signal_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
from torch.autograd import Variable
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
def conv3x3(in_, out):
    """Return a 3x3 Conv2d with padding 1 (preserves spatial size at stride 1)."""
    return nn.Conv2d(in_channels=in_, out_channels=out, kernel_size=3, padding=1)
class ConvRelu(nn.Module):
    """3x3 convolution (padding 1) followed by an in-place ReLU."""

    def __init__(self, in_, out):
        super().__init__()
        # conv3x3(in_, out) inlined: a padded 3x3 convolution.
        self.conv = nn.Conv2d(in_, out, 3, padding=1)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activation(self.conv(x))
class NoOperation(nn.Module):
    """Identity module: forwards its input unchanged (useful as a placeholder)."""

    def forward(self, x):
        return x
class DecoderBlock(nn.Module):
    """ConvRelu followed by a stride-2 transposed convolution (2x upsample)
    and a final in-place ReLU."""

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        upconv = nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=3,
                                    stride=2, padding=1, output_padding=1)
        self.block = nn.Sequential(
            ConvRelu(in_channels, middle_channels),
            upconv,
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.block(x)
class DecoderBlockV2(nn.Module):
    """Decoder block that upsamples 2x either with a transposed convolution
    (is_deconv=True) or with bilinear upsampling followed by two ConvRelu
    layers.

    Deconvolution parameters were chosen to avoid checkerboard artifacts,
    following https://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True,
                 output_padding=0):
        super(DecoderBlockV2, self).__init__()
        self.in_channels = in_channels
        if is_deconv:
            layers = [
                ConvRelu(in_channels, middle_channels),
                nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4,
                                   stride=2, padding=1,
                                   output_padding=output_padding),
                nn.ReLU(inplace=True),
            ]
        else:
            layers = [
                nn.Upsample(scale_factor=2, mode='bilinear'),
                ConvRelu(in_channels, middle_channels),
                ConvRelu(middle_channels, out_channels),
            ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class Interpolate(nn.Module):
    """Module wrapper around F.interpolate with optional zero padding of the
    bottom/right edges (output_padding), mirroring ConvTranspose2d's knob."""

    def __init__(self, mode='nearest', scale_factor=2,
                 align_corners=False, output_padding=0):
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        self.pad = output_padding

    def forward(self, x):
        # align_corners may only be passed for the linear-family modes.
        kwargs = {'mode': self.mode, 'scale_factor': self.scale_factor}
        if self.mode in ['linear','bilinear','trilinear']:
            kwargs['align_corners'] = self.align_corners
        x = self.interp(x, **kwargs)
        if self.pad > 0:
            x = nn.ZeroPad2d((0, self.pad, 0, self.pad))(x)
        return x
class DecoderBlockV3(nn.Module):
    """Decoder block: transposed-conv upsampling then ConvRelu when
    is_deconv=True, otherwise nearest-neighbour Interpolate followed by two
    ConvRelu layers.

    Deconvolution parameters were chosen to avoid checkerboard artifacts,
    following https://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, middle_channels, out_channels,
                 is_deconv=True, output_padding=0):
        super(DecoderBlockV3, self).__init__()
        self.in_channels = in_channels
        if is_deconv:
            layers = [
                nn.ConvTranspose2d(in_channels, middle_channels, kernel_size=4,
                                   stride=2, padding=1,
                                   output_padding=output_padding),
                ConvRelu(middle_channels, out_channels),
            ]
        else:
            layers = [
                Interpolate(mode='nearest', scale_factor=2,
                            output_padding=output_padding),
                # nn.Upsample(scale_factor=2, mode='bilinear'),
                ConvRelu(in_channels, middle_channels),
                ConvRelu(middle_channels, out_channels),
            ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class SE_Resnext(nn.Module):
    """Classifier built on a pretrained SE-ResNeXt50 backbone.

    A 1x1 conv maps 4-channel input (e.g. RGBY) to the 3 channels the
    backbone expects; the backbone is then run module-by-module up to its
    'avg_pool' layer and a single fully connected layer produces the logits.

    NOTE(review): `pretrainedmodels` is not imported in this notebook's
    visible cells -- confirm the import exists elsewhere in the file.
    """
    def __init__(self, num_classes, num_filters=32,
                 pretrained=True, is_deconv=False):
        super().__init__()
        self.num_classes = num_classes
        # Adapt 4-channel inputs to the 3-channel pretrained backbone.
        self.conv4to3 = nn.Conv2d(4, 3, 1)
        self.encoder = pretrainedmodels.__dict__['se_resnext50_32x4d'](num_classes=1000,
                                                                       pretrained='imagenet')
        # self.pool = nn.MaxPool2d(2, 2)
        # self.convp = nn.Conv2d(1056, 512, 3)
        # Flattened size of the backbone's avg_pool output (2048 channels, 1x1 spatial).
        self.csize = 2048 * 1 * 1
        self.fc1 = nn.Linear(self.csize, num_classes)
        # self.fc2 = nn.Linear(108, 54)
        # self.fc3 = nn.Linear(54, num_classes)
    def forward(self, x):
        # set to True for debugging
        print_sizes = False
        if print_sizes:
            print('')
            print('x',x.shape)
            # print layer dictionary
            # print(self.encoder.features)
        x = self.conv4to3(x)
        if print_sizes: print('4to3',x.shape)
        # Run the encoder one child module at a time, caching each output in
        # mx so intermediate features remain accessible; stop after avg_pool.
        m = self.encoder._modules
        layer_names = list(m.keys())
        mx = {}
        for i,f in enumerate(m):
            x = m[f](x)
            mx[layer_names[i]] = x
            if print_sizes:
                if isinstance(x,tuple):
                    print(i,layer_names[i],x[0].size(),x[1].size())
                else:
                    print(i,layer_names[i],x.size())
            if layer_names[i]=='avg_pool': break
        # x = self.pool(F.relu(mx['cell_15']))
        # # x = self.pool(F.relu(self.convp(x)))
        # x = F.relu(self.convp(x))
        # if print_sizes: print('convp',x.shape)
        x = mx['avg_pool'].view(-1, self.csize)
        if print_sizes: print('view',x.size())
        x = self.fc1(x)
        # x = F.relu(self.fc1(x))
        if print_sizes: print('fc1',x.size())
        # x = F.relu(self.fc2(x))
        # if print_sizes: print('fc2',x.size())
        # x = self.fc3(x)
        # if print_sizes: print('fc3',x.size())
        return x
| wienerschnitzelgemeinschaft/src/Russ/se_resnext0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="images/meme.png">
# </center>
#
# # Машинное обучение
# > Компьютерная программа обучается на основе опыта $E$ по отношению к некоторому классу задач $T$ и меры качества $P$, если качество решения задач из $T$, измеренное на основе $P$, улучшается с приобретением опыта $E$. (<NAME>)
#
# ### Формулировка задачи:
# $X$ $-$ множество объектов
# $Y$ $-$ множество меток классов
# $f: X \rightarrow Y$ $-$ неизвестная зависимость
# **Дано**:
# $x_1, \dots, x_n \subset X$ $-$ обучающая выборка
# $y_i = f(x_i), i=1, \dots n$ $-$ известные метки классов
# **Найти**:
# $a∶ X \rightarrow Y$ $-$ алгоритм, решающую функцию, приближающую $f$ на всём множестве $X$.
# !conda install -c intel scikit-learn -y
# +
import numpy
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import warnings
# Silence library warnings and fix the RNG seed for reproducible sampling.
warnings.simplefilter('ignore')
numpy.random.seed(7)
# %matplotlib inline
# +
iris = load_iris()
X = iris.data
Y = iris.target
print(X.shape)
# Show 10 random samples with their class names (sampled with replacement).
random_sample = numpy.random.choice(X.shape[0], 10)
for i in random_sample:
    print(f"{X[i]}: {iris.target_names[Y[i]]}")
# -
# ## Типы задач
#
# ### Задача классификации
#
# $Y = \{ -1, +1 \}$ $-$ классификация на 2 класса;
# $Y = \{1, \dots , K \}$ $-$ на $K$ непересекающихся классов;
# $Y = \{0, 1 \}^K$ $-$ на $K$ классов, которые могут пересекаться.
#
# Примеры: распознавание текста по рукописному вводу, определение предмета на фотографии.
#
# ### Задача регрессии
#
# $Y = \mathbb{R}$ или $Y = \mathbb{R}^k$.
#
# Примеры: предсказание стоимости акции через полгода, предсказание прибыли магазина в следующем месяце.
#
# ### Задача ранжирования
#
# $Y$ $-$ конечное упорядоченное множество.
#
# Пример: выдача поискового запроса.
#
# ### Задачи уменьшения размерности
#
# Научиться описывать данные не $M$ признаками, а меньшим числом для повышения точности модели или последующей визуализации. В качестве примера помимо необходимости для визуализации можно привести сжатие данных.
#
# ### Задачи кластеризации
#
# Разбиение данных множества объектов на подмножества (кластеры) таким образом, чтобы объекты из одного кластера были более похожи друг на друга, чем на объекты из других кластеров по какому-либо критерию.
#
# <center>
# <img src="images/ml_map.png">
# </center>
# +
from sklearn.svm import SVC
# Fit an SVC and evaluate on the SAME data it was trained on -- this is an
# illustration only, not a fair estimate of generalization.
model = SVC(random_state=7)
model.fit(X, Y)
y_pred = model.predict(X)
for i in random_sample:
    print(f"predicted: {iris.target_names[y_pred[i]]}, actual: {iris.target_names[Y[i]]}")
f"differences in {(Y != y_pred).sum()} samples"
# -
# # Оценка качества
#
# ## Метрика
#
# ### Задача классификации
#
# Определим матрицу ошибок. Допустим, что у нас есть два класса и алгоритм, предсказывающий принадлежность каждого объекта одному из классов, тогда матрица ошибок классификации будет выглядеть следующим образом:
#
# | $ $ | $y=1$ | $y=0$ |
# |-------------|---------------------|---------------------|
# | $\hat{y}=1$ | True Positive (TP) | False Positive (FP) |
# | $\hat{y}=0$ | False Negative (FN) | True Negative (TN) |
#
# Здесь $\hat{y}$ $-$ это ответ алгоритма на объекте, а $y$ $-$ истинная метка класса на этом объекте.
# Таким образом, ошибки классификации бывают двух видов: *False Negative (FN)* и *False Positive (FP)*.
#
# - $\textit{accuracy} = \frac{TP + TN}{TP + FP + FN + TN}$
# - $\textit{recall} = \frac{TP}{TP + FN}$
# - $\textit{precision} = \frac{TP}{TP + FP}$
# - $\textit{f1-score} = \frac{2 \cdot \textit{recall} \cdot \textit{precision}}{\textit{precision} + \textit{recall}}$
#
# ### Задача регрессии
#
# - $MSE = \frac{1}{n} \sum_{i=1}^n (y_i - \hat{y}_i)^2$
# - $RMSE = \sqrt{MSE}$
# - $MAE = \frac{1}{n} \sum_{i=1}^n |y_i - \hat{y}_i|$
#
# ## Отложенная выборка
#
# $X \rightarrow X_{train}, X_{val}, X_{test}$
#
# - $X_{train}$ $-$ используется для обучения модели
# - $X_{val}$ $-$ подбор гиперпараметров ($ \approx{30\%}$ от тренировочной части)
# - $X_{test}$ $-$ оценка качества конечной модели
# +
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
# Hold out 1/3 of the dataset for the test split; 30% of the remaining
# training part then becomes the validation split.
# BUG FIX: numpy.random.choice samples WITH replacement by default, which
# produced duplicate test indices (fewer unique test samples than intended
# and duplicated rows in X_test). Sample without replacement instead.
test_index = numpy.random.choice(X.shape[0], X.shape[0] // 3, replace=False)
train_index = [i for i in range(X.shape[0]) if i not in test_index]
X_test = X[test_index]
Y_test = Y[test_index]
X_train, X_val, Y_train, Y_val = train_test_split(X[train_index], Y[train_index], test_size=0.3, shuffle=True, random_state=7)
print(f"train size: {X_train.shape[0]}")
print(f"val size: {X_val.shape[0]}")
print(f"test size: {X_test.shape[0]}")
# +
best_score = -1
best_c = None
# Grid search over the SVC regularization parameter C, scored by micro-F1
# on the validation split.
for c in [0.01, 0.1, 1, 10]:
    model = SVC(C=c, random_state=7)
    model.fit(X_train, Y_train)
    y_pred = model.predict(X_val)
    cur_score = f1_score(Y_val, y_pred, average='micro')
    if cur_score > best_score:
        best_score = cur_score
        best_c = c
f"best score is {best_score} for C {best_c}"
# -
# BUG FIX: the final model previously hard-coded C=1.0, discarding the value
# just selected on the validation set; train it with best_c instead.
full_model = SVC(C=best_c, random_state=7)
full_model.fit(X[train_index], Y[train_index])
y_pred = full_model.predict(X_test)
f"test score is {f1_score(Y_test, y_pred, average='micro')}"
# # Алгоритмы классификации
#
# ## Линейный классификатор
#
# Построение разделяющей гиперплоскости
#
# $$
# y = \textit{sign}(Wx + b)
# $$
#
# <center>
# <img src="images/linear_classifier.png">
# </center>
#
# ### Стандартизация величин
#
# При использование линейных моделей, иногда полезно стандартизировать их значения, например, оценки пользователей.
#
# $$
# X_{stand} = \frac{X - X_{mean}}{X_{std}}
# $$
#
# Для этого в `sklearn` есть класс $-$ `StandartScaler`
#
#
# ### Логистическая регрессия
#
# Использование функции логита для получения вероятности
#
# <center>
# <img src="images/logit.png">
# </center>
#
# ## Метод опорных векторов (Support vector machine)
#
# Построение "полоски" максимальной ширины, которая разделяет выборку
#
# <center>
# <img src="images/svm.png">
# </center>
#
#
# ## Дерево решений (Decision tree)
#
# В каждой вершине определяется критерий, по которому разбивается подвыборка.
#
# <center>
# <img src="images/decision_tree.png">
# </center>
#
# ## Случайный лес (Random forest)
#
# Множество деревьев решений, каждое из которых обучается на случайной подвыборке.
#
# <center>
# <img src="images/random_forest.png">
# </center>
#
# ## Метод ближайших соседей (K-neighbors)
#
# Решение базируется на основе $k$ ближайших известных примеров.
#
# <center>
# <img src="images/knn.png">
# </center>
# +
from sklearn.datasets import make_classification
# Synthetic binary classification problem: 1000 samples, 50 features of
# which 20 are informative.
X, y = make_classification(n_samples=1000, n_features=50, n_informative=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=7)
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
# Compare several off-the-shelf classifiers with default hyperparameters,
# scored by micro-F1 on the held-out test split.
models = [
    LogisticRegression(random_state=7, n_jobs=6),
    SVC(random_state=7),
    DecisionTreeClassifier(random_state=7),
    RandomForestClassifier(random_state=7),
    KNeighborsClassifier(n_jobs=6)
]
for model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(f"model {model.__class__.__name__} scores {round(f1_score(y_test, y_pred, average='micro'), 2)}")
# +
from sklearn.preprocessing import StandardScaler
# Standardize features (fit on the train split only, to avoid leakage) and
# re-evaluate the SVC on the scaled data.
standart_scaler = StandardScaler()
standart_scaler.fit(X_train)
X_train_scaled = standart_scaler.transform(X_train)
X_test_scaled = standart_scaler.transform(X_test)
model = SVC(random_state=7)
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
f"test score is {f1_score(y_test, y_pred, average='micro')}"
# -
# # Inclass task #1
#
# Реализуйте модель, которая классифицирует цифры по рисунку.
#
# Ваша задача получить f1-score $0.98$ на тестовом датасете.
#
# Можете пользоваться как алгоритмами выше, так и любыми другими реализованными в `sklearn`.
# +
from sklearn.datasets import fetch_openml
# Load data from https://www.openml.org/d/554
# BUG FIX: the function name was misspelled as `fetch_o2penml` (NameError).
# as_frame=False guarantees numpy arrays, which the plotting/indexing cells
# below (X[num].reshape, X[test_shuffle[...]]) rely on; newer scikit-learn
# would otherwise return a DataFrame for this dataset.
X, Y = fetch_openml('mnist_784', return_X_y=True, as_frame=False)
print(f"shape of X is {X.shape}")
# +
# Show 10 random digits with their labels in a 2x5 grid.
# NOTE(review): X[num].reshape assumes X is a numpy array -- confirm the
# fetch_openml call above returns ndarrays rather than a DataFrame.
plt.gray()
fig, axes = plt.subplots(2, 5, figsize=(15, 5))
for i, num in enumerate(numpy.random.choice(X.shape[0], 10)):
    axes[i // 5, i % 5].matshow(X[num].reshape(28, 28))
    axes[i // 5, i % 5].set_title(Y[num])
    axes[i // 5, i % 5].axis('off')
plt.show()
# +
# Random permutation split: first 10000 samples become the test split.
test_shuffle = numpy.random.permutation(X.shape[0])
X_test, X_train = X[test_shuffle[:10000]], X[test_shuffle[10000:]]
Y_test, Y_train = Y[test_shuffle[:10000]], Y[test_shuffle[10000:]]
print(f"train size: {X_train.shape[0]}")
print(f"test size: {X_test.shape[0]}")
# +
from sklearn.svm import SVC
# NOTE(review): fitting an RBF-kernel SVC on 60k MNIST samples is very slow
# on CPU -- expect a long runtime.
model = SVC(C=10)
model.fit(X_train, Y_train)
# +
from sklearn.metrics import f1_score
y_pred = model.predict(X_test)
print(f"test score is {f1_score(Y_test, y_pred, average='micro')}")
# -
# # Алгоритмы регрессии
#
# Деревья решений, случайный лес и метод ближайших соседей легко обобщаются на случай регрессии. Ответ, как правило, это среднее из полученных значений (например, среднее значение ближайших примеров).
#
# ## Линейная регрессия
#
# $y$ линейно зависим от $x$, т.е. имеет место уравнение
# $$
# y = Wx + b = W <x; 1>
# $$
#
# Такой подход имеет аналитическое решение, однако он требует вычисление обратной матрицы $X$, что не всегда возможно.
# Другой подход $-$ минимизация функции ошибки, например $MSE$, с помощью техники градиентного спуска.
#
# ## Регуляризация
#
# Чтобы избегать переобучения (когда модель хорошо работает только на тренировочных данных) используют различные техники *регуляризации*.
# Один из признаков переобучения $-$ модель имеет большие веса, это можно контролировать путём добавления $L1$ или $L2$ нормы весов к функции ошибки.
# То есть, итоговая ошибка, которая будет распространятся на веса модели, считается по формуле:
# $$
# Error(W) = MSE(W, X, y) + \lambda ||W||
# $$
#
# Такие модели, так же реализованы в `sklearn`:
# - Lasso
# - Ridge
# +
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell only
# runs on older versions.
X, y = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=7)
print(X_train.shape[0], X_test.shape[0])
# +
from sklearn.linear_model import Lasso, Ridge, LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
# Compare several regressors with default hyperparameters, scored by MSE
# (lower is better) on the held-out test split.
models = [
    Lasso(random_state=7),
    Ridge(random_state=7),
    LinearRegression(n_jobs=6),
    RandomForestRegressor(random_state=7, n_jobs=6),
    KNeighborsRegressor(n_jobs=6),
    SVR()
]
for model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(f"model {model.__class__.__name__} scores {round(mean_squared_error(y_test, y_pred), 2)}")
# -
# # Inclass task #2
#
# Реализуйте модель, которая предсказывает стоимость медицинской страховки. В данных есть текстовые бинарные признаки (`sex` и `smoker`), не забудьте конвертировать их в `0` и `1`. Признак `region` имеет $4$ разных значения, вы можете конвертировать их в числа $0-4$ или создать $4$ бинарных признака. Для этого вам может помочь `sklearn.preprocessing.LabelEncoder` и `pandas.get_dummies`.
#
# Ваша задача получить RMSE-score меньше $5000$ на тестовом датасете.
#
# Можете пользоваться как алгоритмами выше, так и любыми другими реализованными в `sklearn`.
def rmse(y_true, y_pred):
    """Root-mean-squared error between true and predicted target values."""
    # `numpy` is not visibly imported under that name in this notebook
    # (NameError risk); the square root of a scalar needs no numpy at all.
    return mean_squared_error(y_true, y_pred) ** 0.5
# +
import pandas
from sklearn.preprocessing import LabelEncoder
data = pandas.read_csv('data/insurance.csv')
le = LabelEncoder()
data = data.replace({'smoker': 'no', 'sex': 'male'}, 0)
data = data.replace({'smoker': 'yes', 'sex': 'female'}, 1)
data['region'] = le.fit_transform(data['region'])
data.head()
# +
X = data.drop(['charges'], axis=1)
y = data['charges'].values
rand_st = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=rand_st)
print(f"train size: {X_train.shape[0]}")
print(f"test size: {X_test.shape[0]}")
# -
model = RandomForestRegressor(random_state=rand_st, n_jobs=6)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(f"test score is {rmse(y_test, y_pred)}")
| assignment_4/sklearn_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 自然语言推断:使用注意力
# :label:`sec_natural-language-inference-attention`
#
# 我们在 :numref:`sec_natural-language-inference-and-dataset`中介绍了自然语言推断任务和SNLI数据集。鉴于许多模型都是基于复杂而深度的架构,Parikh等人提出用注意力机制解决自然语言推断问题,并称之为“可分解注意力模型” :cite:`Parikh.Tackstrom.Das.ea.2016`。这使得模型没有循环层或卷积层,在SNLI数据集上以更少的参数实现了当时的最佳结果。在本节中,我们将描述并实现这种基于注意力的自然语言推断方法(使用MLP),如 :numref:`fig_nlp-map-nli-attention`中所述。
#
# 
# :label:`fig_nlp-map-nli-attention`
#
# ## 模型
#
# 与保留前提和假设中词元的顺序相比,我们可以将一个文本序列中的词元与另一个文本序列中的每个词元对齐,然后比较和聚合这些信息,以预测前提和假设之间的逻辑关系。与机器翻译中源句和目标句之间的词元对齐类似,前提和假设之间的词元对齐可以通过注意力机制灵活地完成。
#
# 
# :label:`fig_nli_attention`
#
# :numref:`fig_nli_attention`描述了使用注意力机制的自然语言推断方法。从高层次上讲,它由三个联合训练的步骤组成:对齐、比较和汇总。我们将在下面一步一步地对它们进行说明。
#
# + origin_pos=2 tab=["pytorch"]
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
# + [markdown] origin_pos=3
# ### 注意(Attending)
#
# 第一步是将一个文本序列中的词元与另一个序列中的每个词元对齐。假设前提是“我确实需要睡眠”,假设是“我累了”。由于语义上的相似性,我们不妨将假设中的“我”与前提中的“我”对齐,将假设中的“累”与前提中的“睡眠”对齐。同样,我们可能希望将前提中的“我”与假设中的“我”对齐,将前提中的“需要”和“睡眠”与假设中的“累”对齐。请注意,这种对齐是使用加权平均的“软”对齐,其中理想情况下较大的权重与要对齐的词元相关联。为了便于演示, :numref:`fig_nli_attention`以“硬”对齐的方式显示了这种对齐方式。
#
# 现在,我们更详细地描述使用注意力机制的软对齐。用$\mathbf{A} = (\mathbf{a}_1, \ldots, \mathbf{a}_m)$和$\mathbf{B} = (\mathbf{b}_1, \ldots, \mathbf{b}_n)$表示前提和假设,其词元数量分别为$m$和$n$,其中$\mathbf{a}_i, \mathbf{b}_j \in \mathbb{R}^{d}$($i = 1, \ldots, m, j = 1, \ldots, n$)是$d$维的词向量。对于软对齐,我们将注意力权重$e_{ij} \in \mathbb{R}$计算为:
#
# $$e_{ij} = f(\mathbf{a}_i)^\top f(\mathbf{b}_j),$$
# :eqlabel:`eq_nli_e`
#
# 其中函数$f$是在下面的`mlp`函数中定义的多层感知机。输出维度$f$由`mlp`的`num_hiddens`参数指定。
#
# + origin_pos=5 tab=["pytorch"]
def mlp(num_inputs, num_hiddens, flatten):
    """Two-layer ReLU MLP with dropout, as used by the attend/compare/aggregate steps.

    When ``flatten`` is True, an ``nn.Flatten(start_dim=1)`` is appended after
    each activation (a no-op on 2-D inputs).
    """
    layers = [
        nn.Dropout(0.2),
        nn.Linear(num_inputs, num_hiddens),
        nn.ReLU(),
    ]
    if flatten:
        layers.append(nn.Flatten(start_dim=1))
    layers += [
        nn.Dropout(0.2),
        nn.Linear(num_hiddens, num_hiddens),
        nn.ReLU(),
    ]
    if flatten:
        layers.append(nn.Flatten(start_dim=1))
    return nn.Sequential(*layers)
# + [markdown] origin_pos=6
# 值得注意的是,在 :eqref:`eq_nli_e`中,$f$分别输入$\mathbf{a}_i$和$\mathbf{b}_j$,而不是将它们一对放在一起作为输入。这种*分解*技巧导致$f$只有$m + n$次计算(线性复杂度),而不是$mn$次计算(二次复杂度)。
#
# 对 :eqref:`eq_nli_e`中的注意力权重进行规范化,我们计算假设中所有词元向量的加权平均值,以获得假设的表示,该假设与前提中索引$i$的词元进行软对齐:
#
# $$
# \boldsymbol{\beta}_i = \sum_{j=1}^{n}\frac{\exp(e_{ij})}{ \sum_{k=1}^{n} \exp(e_{ik})} \mathbf{b}_j.
# $$
#
# 同样,我们计算假设中索引为$j$的每个词元与前提词元的软对齐:
#
# $$
# \boldsymbol{\alpha}_j = \sum_{i=1}^{m}\frac{\exp(e_{ij})}{ \sum_{k=1}^{m} \exp(e_{kj})} \mathbf{a}_i.
# $$
#
# 下面,我们定义`Attend`类来计算假设(`beta`)与输入前提`A`的软对齐以及前提(`alpha`)与输入假设`B`的软对齐。
#
# + origin_pos=8 tab=["pytorch"]
class Attend(nn.Module):
    """Attending step: soft-align each sequence's tokens to the other's.

    A shared MLP ``f`` is applied to premise A and hypothesis B separately
    (the "decomposable" trick), and dot products of the transformed tokens
    serve as attention scores.
    """
    def __init__(self, num_inputs, num_hiddens, **kwargs):
        super(Attend, self).__init__(**kwargs)
        # f is evaluated on A and B independently: m + n evaluations
        # instead of m * n pairwise evaluations.
        self.f = mlp(num_inputs, num_hiddens, flatten=False)

    def forward(self, A, B):
        # Shape of A/B: (batch size, no. of tokens in sequence A/B, embed_size)
        # Shape of f_A/f_B: (batch size, no. of tokens in sequence A/B, num_hiddens)
        f_A = self.f(A)
        f_B = self.f(B)
        # Shape of e: (batch size, no. of tokens in sequence A, no. of tokens in sequence B)
        e = torch.bmm(f_A, f_B.permute(0, 2, 1))
        # Shape of beta: (batch size, no. of tokens in sequence A, embed_size),
        # i.e. sequence B is soft-aligned to each token of sequence A (dim 1 of beta)
        beta = torch.bmm(F.softmax(e, dim=-1), B)
        # Shape of alpha: (batch size, no. of tokens in sequence B, embed_size),
        # i.e. sequence A is soft-aligned to each token of sequence B (dim 1 of alpha)
        alpha = torch.bmm(F.softmax(e.permute(0, 2, 1), dim=-1), A)
        return beta, alpha
# + [markdown] origin_pos=9
# ### 比较
#
# 在下一步中,我们将一个序列中的词元与与该词元软对齐的另一个序列进行比较。请注意,在软对齐中,一个序列中的所有词元(尽管可能具有不同的注意力权重)将与另一个序列中的词元进行比较。为便于演示, :numref:`fig_nli_attention`对词元以*硬*的方式对齐。例如,上述的“注意”(attending)步骤确定前提中的“need”和“sleep”都与假设中的“tired”对齐,则将对“疲倦-需要睡眠”进行比较。
#
# 在比较步骤中,我们将来自一个序列的词元的连结(运算符$[\cdot, \cdot]$)和来自另一序列的对齐的词元送入函数$g$(一个多层感知机):
#
# $$\mathbf{v}_{A,i} = g([\mathbf{a}_i, \boldsymbol{\beta}_i]), i = 1, \ldots, m\\ \mathbf{v}_{B,j} = g([\mathbf{b}_j, \boldsymbol{\alpha}_j]), j = 1, \ldots, n.$$
#
# :eqlabel:`eq_nli_v_ab`
#
# 在 :eqref:`eq_nli_v_ab`中,$\mathbf{v}_{A,i}$是指,所有假设中的词元与前提中词元$i$软对齐,再与词元$i$的比较;而$\mathbf{v}_{B,j}$是指,所有前提中的词元与假设中词元$j$软对齐,再与词元$j$的比较。下面的`Compare`类定义了比较步骤。
#
# + origin_pos=11 tab=["pytorch"]
class Compare(nn.Module):
    """Comparison step: compare each token with its soft-aligned counterpart."""

    def __init__(self, num_inputs, num_hiddens, **kwargs):
        super(Compare, self).__init__(**kwargs)
        self.g = mlp(num_inputs, num_hiddens, flatten=False)

    def forward(self, A, B, beta, alpha):
        # Concatenate each token with the other sequence's aligned summary,
        # then run both through the shared comparison MLP g.
        premise_pairs = torch.cat([A, beta], dim=2)
        hypothesis_pairs = torch.cat([B, alpha], dim=2)
        return self.g(premise_pairs), self.g(hypothesis_pairs)
# + [markdown] origin_pos=12
# ### 聚合
#
# 现在我们有两组比较向量$\mathbf{v}_{A,i}$($i = 1, \ldots, m$)和$\mathbf{v}_{B,j}$($j = 1, \ldots, n$)。在最后一步中,我们将聚合这些信息以推断逻辑关系。我们首先求和这两组比较向量:
#
# $$
# \mathbf{v}_A = \sum_{i=1}^{m} \mathbf{v}_{A,i}, \quad \mathbf{v}_B = \sum_{j=1}^{n}\mathbf{v}_{B,j}.
# $$
#
# 接下来,我们将两个求和结果的连结提供给函数$h$(一个多层感知机),以获得逻辑关系的分类结果:
#
# $$
# \hat{\mathbf{y}} = h([\mathbf{v}_A, \mathbf{v}_B]).
# $$
#
# 聚合步骤在以下`Aggregate`类中定义。
#
# + origin_pos=14 tab=["pytorch"]
class Aggregate(nn.Module):
    """Aggregation step: pool both sets of comparison vectors and classify."""

    def __init__(self, num_inputs, num_hiddens, num_outputs, **kwargs):
        super(Aggregate, self).__init__(**kwargs)
        self.h = mlp(num_inputs, num_hiddens, flatten=True)
        self.linear = nn.Linear(num_hiddens, num_outputs)

    def forward(self, V_A, V_B):
        # Pool each set of comparison vectors by summing over the token axis.
        summed_A = V_A.sum(dim=1)
        summed_B = V_B.sum(dim=1)
        # Concatenate the two sums, pass through the MLP, then classify.
        return self.linear(self.h(torch.cat([summed_A, summed_B], dim=1)))
# + [markdown] origin_pos=15
# ### 整合代码
#
# 通过将注意步骤、比较步骤和聚合步骤组合在一起,我们定义了可分解注意力模型来联合训练这三个步骤。
#
# + origin_pos=17 tab=["pytorch"]
class DecomposableAttention(nn.Module):
    """Decomposable attention model (Parikh et al., 2016) for natural language inference.

    Pipeline: embed both sequences -> attend (soft alignment) -> compare -> aggregate.
    """
    def __init__(self, vocab, embed_size, num_hiddens, num_inputs_attend=100,
                 num_inputs_compare=200, num_inputs_agg=400, **kwargs):
        super(DecomposableAttention, self).__init__(**kwargs)
        self.embedding = nn.Embedding(len(vocab), embed_size)
        self.attend = Attend(num_inputs_attend, num_hiddens)
        self.compare = Compare(num_inputs_compare, num_hiddens)
        # There are 3 possible outputs: entailment, contradiction, and neutral
        self.aggregate = Aggregate(num_inputs_agg, num_hiddens, num_outputs=3)

    def forward(self, X):
        # X is a pair of token-index tensors: (premises, hypotheses).
        premises, hypotheses = X
        A = self.embedding(premises)
        B = self.embedding(hypotheses)
        beta, alpha = self.attend(A, B)
        V_A, V_B = self.compare(A, B, beta, alpha)
        Y_hat = self.aggregate(V_A, V_B)
        return Y_hat
# + [markdown] origin_pos=18
# ## 训练和评估模型
#
# 现在,我们将在SNLI数据集上对定义好的可分解注意力模型进行训练和评估。我们从读取数据集开始。
#
# ### 读取数据集
#
# 我们使用 :numref:`sec_natural-language-inference-and-dataset`中定义的函数下载并读取SNLI数据集。批量大小和序列长度分别设置为$256$和$50$。
#
# + origin_pos=19 tab=["pytorch"]
batch_size, num_steps = 256, 50
train_iter, test_iter, vocab = d2l.load_data_snli(batch_size, num_steps)
# + [markdown] origin_pos=20
# ### 创建模型
#
# 我们使用预训练好的100维GloVe嵌入来表示输入词元。我们将向量$\mathbf{a}_i$和$\mathbf{b}_j$在 :eqref:`eq_nli_e`中的维数预定义为100。 :eqref:`eq_nli_e`中的函数$f$和 :eqref:`eq_nli_v_ab`中的函数$g$的输出维度被设置为200。然后我们创建一个模型实例,初始化它的参数,并加载GloVe嵌入来初始化输入词元的向量。
#
# + origin_pos=22 tab=["pytorch"]
embed_size, num_hiddens, devices = 100, 200, d2l.try_all_gpus()
net = DecomposableAttention(vocab, embed_size, num_hiddens)
glove_embedding = d2l.TokenEmbedding('glove.6b.100d')
embeds = glove_embedding[vocab.idx_to_token]
net.embedding.weight.data.copy_(embeds);
# + [markdown] origin_pos=23
# ### 训练和评估模型
#
# 与 :numref:`sec_multi_gpu`中接受单一输入(如文本序列或图像)的`split_batch`函数不同,我们定义了一个`split_batch_multi_inputs`函数以小批量接受多个输入,如前提和假设。
#
# + [markdown] origin_pos=25
# 现在我们可以在SNLI数据集上训练和评估模型。
#
# + origin_pos=27 tab=["pytorch"]
lr, num_epochs = 0.001, 4
trainer = torch.optim.Adam(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss(reduction="none")
d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
devices)
# + [markdown] origin_pos=28
# ### 使用模型
#
# 最后,定义预测函数,输出一对前提和假设之间的逻辑关系。
#
# + origin_pos=30 tab=["pytorch"]
#@save
def predict_snli(net, vocab, premise, hypothesis):
    """Predict the logical relationship between a premise and a hypothesis."""
    net.eval()
    premise = torch.tensor(vocab[premise], device=d2l.try_gpu())
    hypothesis = torch.tensor(vocab[hypothesis], device=d2l.try_gpu())
    # Add a batch dimension of 1 and take the argmax over the 3 classes.
    label = torch.argmax(net([premise.reshape((1, -1)),
                              hypothesis.reshape((1, -1))]), dim=1)
    # Class indices: 0 = entailment, 1 = contradiction, 2 = neutral.
    return 'entailment' if label == 0 else 'contradiction' if label == 1 \
        else 'neutral'
# + [markdown] origin_pos=31
# 我们可以使用训练好的模型来获得对示例句子的自然语言推断结果。
#
# + origin_pos=32 tab=["pytorch"]
predict_snli(net, vocab, ['he', 'is', 'good', '.'], ['he', 'is', 'bad', '.'])
# + [markdown] origin_pos=33
# ## 小结
#
# * 可分解注意模型包括三个步骤来预测前提和假设之间的逻辑关系:注意、比较和聚合。
# * 通过注意力机制,我们可以将一个文本序列中的词元与另一个文本序列中的每个词元对齐,反之亦然。这种对齐是使用加权平均的软对齐,其中理想情况下较大的权重与要对齐的词元相关联。
# * 在计算注意力权重时,分解技巧会带来比二次复杂度更理想的线性复杂度。
# * 我们可以使用预训练好的词向量作为下游自然语言处理任务(如自然语言推断)的输入表示。
#
# ## 练习
#
# 1. 使用其他超参数组合训练模型。你能在测试集上获得更高的准确度吗?
# 1. 自然语言推断的可分解注意模型的主要缺点是什么?
# 1. 假设我们想要获得任何一对句子的语义相似级别(例如,0到1之间的连续值)。我们应该如何收集和标注数据集?你能设计一个有注意力机制的模型吗?
#
# + [markdown] origin_pos=35 tab=["pytorch"]
# [Discussions](https://discuss.d2l.ai/t/5728)
#
| submodules/resource/d2l-zh/pytorch/chapter_natural-language-processing-applications/natural-language-inference-attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# --- Day 6: Chronal Coordinates ---
# The device on your wrist beeps several times, and once again you feel like you're falling.
#
# "Situation critical," the device announces. "Destination indeterminate. Chronal interference detected. Please specify new target coordinates."
#
# The device then produces a list of coordinates (your puzzle input). Are they places it thinks are safe or dangerous? It recommends you check manual page 729. The Elves did not give you a manual.
#
# If they're dangerous, maybe you can minimize the danger by finding the coordinate that gives the largest distance from the other points.
#
# Using only the Manhattan distance, determine the area around each coordinate by counting the number of integer X,Y locations that are closest to that coordinate (and aren't tied in distance to any other coordinate).
#
# Your goal is to find the size of the largest area that isn't infinite. For example, consider the following list of coordinates:
#
# 1, 1
# 1, 6
# 8, 3
# 3, 4
# 5, 5
# 8, 9
# If we name these coordinates A through F, we can draw them on a grid, putting 0,0 at the top left:
#
# ..........
# .A........
# ..........
# ........C.
# ...D......
# .....E....
# .B........
# ..........
# ..........
# ........F.
# This view is partial - the actual grid extends infinitely in all directions. Using the Manhattan distance, each location's closest coordinate can be determined, shown here in lowercase:
#
# aaaaa.cccc
# aAaaa.cccc
# aaaddecccc
# aadddeccCc
# ..dDdeeccc
# bb.deEeecc
# bBb.eeee..
# bbb.eeefff
# bbb.eeffff
# bbb.ffffFf
# Locations shown as . are equally far from two or more coordinates, and so they don't count as being closest to any.
#
# In this example, the areas of coordinates A, B, C, and F are infinite - while not shown here, their areas extend forever outside the visible grid. However, the areas of coordinates D and E are finite: D is closest to 9 locations, and E is closest to 17 (both including the coordinate's location itself). Therefore, in this example, the size of the largest area is 17.
#
# What is the size of the largest area that isn't infinite?
# +
example_input = """1, 1
1, 6
8, 3
3, 4
5, 5
8, 9"""
with open('input/day06.txt', 'r') as f:
actual_input = f.read()
actual_input = actual_input.strip()
print(actual_input[0:10])
# +
def get_coords(input):
    """Parse 'x, y' lines of *input* into a list of (x, y) integer tuples."""
    rows = input.split('\n')
    return [
        (int(parts[0].strip()), int(parts[1].strip()))
        for parts in (row.split(',') for row in rows)
    ]
print(get_coords(example_input))
print(get_coords(actual_input))
# +
import numpy as np
from scipy.spatial.distance import cityblock
def get_dimensions(input):
    """Return the maximum x and y values over all coordinates in *input*."""
    coords = get_coords(input)
    xs, ys = zip(*coords)
    return np.max(xs), np.max(ys)
def get_closest(point, coords):
    """Return ``[coord]`` for the unique nearest coordinate to *point* by
    Manhattan distance, or ``None`` when two or more coordinates tie.

    The original computed each candidate's distance up to three times;
    here every distance is computed exactly once.
    """
    min_coord = [coords[0]]
    min_distance = cityblock(coords[0], point)
    for acoord in coords[1:]:
        distance = cityblock(acoord, point)
        if distance < min_distance:
            min_distance = distance
            min_coord = [acoord]
        elif distance == min_distance:
            min_coord.append(acoord)
    # A tie for the minimum means the point belongs to no single coordinate.
    if len(min_coord) > 1:
        return None
    return min_coord
def get_grid(input):
    """Build a 2-D grid where cell [x][y] holds the unique closest coordinate.

    Each cell is the single-element list returned by get_closest, or None on
    a distance tie.  NOTE(review): the grid spans range(max_x) x range(max_y),
    so the row/column containing the extreme coordinates themselves is not
    covered — confirm this off-by-one is intentional.
    """
    dimx, dimy = get_dimensions(input)
    coords = get_coords(input)
    #create grid
    grid = [[' '] * dimy for i in range(dimx)]
    #fill grid
    for x in range(dimx):
        for y in range(dimy):
            #print(get_closest((x,y), coords))
            grid[x][y] = get_closest((x,y), coords)
    return grid
print(get_grid(example_input))
# +
from collections import Counter
def exclude_edges(grid):
    """Collect the closest-coordinate labels appearing on the grid border.

    A label that reaches any border cell belongs to an infinite region and
    must be excluded from the largest-area search.  The original only
    inspected the top row and left column (``x == 0 or y == 0``); the bottom
    row and right column mark infinite regions just the same, so all four
    borders are collected here.
    """
    edge_list = []
    last_x = len(grid) - 1
    last_y = len(grid[0]) - 1
    for x in range(len(grid)):
        for y in range(len(grid[0])):
            if x == 0 or y == 0 or x == last_x or y == last_y:
                edge_list.append(grid[x][y])
    return edge_list
def calculate_most(input):
    """Return the size of the largest finite closest-coordinate region.

    Cells whose closest coordinate also appears on the grid border are
    treated as belonging to infinite regions and skipped, as are tie
    cells (None).
    """
    grid = get_grid(input)
    edges = exclude_edges(grid)
    counts = []
    for x in range(len(grid)):
        for y in range(len(grid[0])):
            # Skip labels seen on the border (infinite areas) and tie cells.
            if grid[x][y] not in edges and grid[x][y] is not None:
                #count it
                counts.append(str(grid[x][y]))
    #print(counts)
    counter = Counter(counts)
    # most_common()[0] is the (label, count) pair of the largest region.
    return counter.most_common()[0][1]
print(calculate_most(example_input))
print(calculate_most(actual_input))
# + active=""
# --- Part Two ---
# On the other hand, if the coordinates are safe, maybe the best you can do is try to find a region near as many coordinates as possible.
#
# For example, suppose you want the sum of the Manhattan distance to all of the coordinates to be less than 32. For each location, add up the distances to all of the given coordinates; if the total of those distances is less than 32, that location is within the desired region. Using the same coordinates as above, the resulting region looks like this:
#
# ..........
# .A........
# ..........
# ...###..C.
# ..#D###...
# ..###E#...
# .B.###....
# ..........
# ..........
# ........F.
# In particular, consider the highlighted location 4,3 located at the top middle of the region. Its calculation is as follows, where abs() is the absolute value function:
#
# Distance to coordinate A: abs(4-1) + abs(3-1) = 5
# Distance to coordinate B: abs(4-1) + abs(3-6) = 6
# Distance to coordinate C: abs(4-8) + abs(3-3) = 4
# Distance to coordinate D: abs(4-3) + abs(3-4) = 2
# Distance to coordinate E: abs(4-5) + abs(3-5) = 3
# Distance to coordinate F: abs(4-8) + abs(3-9) = 10
# Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30
# Because the total distance to all coordinates (30) is less than 32, the location is within the region.
#
# This region, which also includes coordinates D and E, has a total size of 16.
#
# Your actual region will need to be much larger than this example, though, instead including all locations with a total distance of less than 10000.
#
# What is the size of the region containing all locations which have a total distance to all given coordinates of less than 10000?
# -
def get_closest2(point, coords):
    """Return the total Manhattan distance from *point* to every coordinate.

    (Name kept for backward compatibility; unlike get_closest this does not
    search for a nearest coordinate — it sums all distances.)  The original
    kept a dead ``min_coord`` variable; a plain sum is equivalent.
    """
    return sum(abs(cityblock(acoord, point)) for acoord in coords)
# +
def get_grid2(input):
    """Grid whose cell [x][y] is the total Manhattan distance to all coordinates."""
    dimx, dimy = get_dimensions(input)
    coords = get_coords(input)
    return [
        [get_closest2((x, y), coords) for y in range(dimy)]
        for x in range(dimx)
    ]
def calculate_most2(input, threshold=32):
    """Count grid cells whose total distance to all coordinates is below *threshold*."""
    grid = get_grid2(input)
    return sum(
        1
        for row in grid
        for cell in row
        if cell < threshold
    )
print(calculate_most2(example_input, 32))
print(calculate_most2(actual_input, 10000))
# -
| day06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# Convert to grayscale
# ====================
#
# This example shows how to use squidpy.im.process to convert an image
# layer to grayscale.
#
# You can convert any layer of squidpy.im.ImageContainer to grayscale. We
# use the argument `method="gray"` to convert the image. This calls
# skimage.color.rgb2gray in the background.
#
# +
import squidpy as sq
import matplotlib.pyplot as plt
# -
# First, we load an H&E stained tissue image. Here, we only load a cropped
# dataset to speed things up. In general, squidpy.im.process can also
# process very large images (see
# sphx\_glr\_auto\_examples\_image\_compute\_process\_hires.py).
#
img = sq.datasets.visium_hne_image_crop()
# Then, we convert the image to grayscale and plot the result. With the
# argument `layer` we can select the image layer that should be processed.
# When converting to grayscale, the channel dimensions change from 3 to 1.
# By default, the name of the resulting channel dimension will be
# `'{{original_channel_name}}_gray'`. Use the argument `channel_dim` to
# set a new channel name explicitly. By default, the resulting image is
# saved in the layer `image_gray`. This behavior can be changed with the
# arguments `copy` and `layer_added`.
#
# +
sq.im.process(img, layer="image", method="gray")
fig, axes = plt.subplots(1, 2)
img.show("image", ax=axes[0])
_ = axes[0].set_title("original")
img.show("image_gray", cmap="gray", ax=axes[1])
_ = axes[1].set_title("grayscale")
| docs/source/auto_examples/image/compute_gray.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# metadata:
# interpreter:
# hash: 799275936fb7c37caa15961302e1f6bc5b6f09e92bdf39e5acfe019a9d46a476
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from scipy import stats
from sklearn.cluster import DBSCAN
from collections import Counter
from sklearn.preprocessing import StandardScaler
# No artigo do Machine Learning Mastery (https://machinelearningmastery.com/model-based-outlier-detection-and-removal-in-python/) são apresentados quatro métodos para tratar outliers. Adicionar os modelos Z-score e DBSCAN utilizando a mesma base de dados e o baseline do artigo. Apresentar os resultados comparando-os com os do artigo.
df = pd.read_csv("https://raw.githubusercontent.com/jbrownlee/Datasets/master/housing.csv", sep=',', header=None)
#Conhecendo a base de dados
df.shape
#Conhecendo as variáveis da base de dados
df.head()
#Visualizando os dados estatísticos
df.describe()
# As variáveis 0 e 1 apresentam um desvio padrão maior que a média da variável, indicando que estas variáveis contém valores espalhados em uma ampla gama de valores.
#Separando a base em variáveis de entradas e resposta
df = df.values
X, y = df[:, :-1], df[:, -1]
#Separando a base em treino e teste
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.33, random_state=1)
# +
#Regressão linear sem tratamento de outliers
#Treinando o modelo
model = LinearRegression()
model.fit(X_train, y_train)
#Avaliando o modelo
y_tr = model.predict(X_test)
# -
#Utilizando a métrica da média de erro absoluto
mae_wout = mean_absolute_error(y_test, y_tr)
print('MAE: ', mae_wout)
# O resultado da média de erro foi de 3.5694, utilizando a base sem realizar a detecão e remoção dos valores discrepantes.
# +
#Detectando outliers utilizando o Z-score
z = np.abs(stats.zscore(df))
#Selecionando as colunas com valor absoluto menor que 3
filt_result = (z < 3).all(axis=1)
#Criando o dataset sem os outliers
df_z = df[filt_result]
df_z.shape
# -
# O tamanho do dataset, foi reduzido em 91 linhas. Estas linhas apresentavam valores discrepantes em relação ao restante do dataset.
#Divisão da base em Treino e Teste(Z-score)
Xz, yz = df_z[:, :-1], df_z[:, -1]
Xz_train, Xz_test, yz_train, yz_test = train_test_split(Xz, yz, train_size=0.33, random_state=1)
# +
#Executando a regressão linear sem outliers(Z-score)
model = LinearRegression()
model.fit(Xz_train, yz_train)
#Avaliando o modelo
y_tr_z = model.predict(X_test)
# -
#Utilizando a métrica da média de erro absoluto sem outliers(Z-score)
mae_no_out_z = mean_absolute_error(y_test, y_tr_z)
print('MAE_z: ', mae_no_out_z)
# Houve uma leve melhora na acurácia, ao remover valores discrepantes do dataset. Comparado com a primeira execução que inclui os outliers.
# +
#Normalizando os dados para treinamento com DBSCAN
ss = StandardScaler()
df_d = ss.fit_transform(df)
#Detectando outliers utilizando o DBSCAN
modelo = DBSCAN(eps=3.0, min_samples=30).fit(df_d)
#Quantidade de outliers encontrados
print(Counter(modelo.labels_))
#Visualizando os outliers
filtro = modelo.labels_ != -1
df_d = df_d[filtro]
print(df_d.shape)
# -
# Foram encontrados 47 registros no dataset que foram identificados, como fora dos grupos determinados pelos DBSCAN. Sendo considerados outliers.
# O dataset foi reduzido em 47 linhas, que continham outliers. Foram necessário algumas execuções até encontra os valores ideais para epsilon e o mínimo de amostras.
#Divisão da base em Treino e Teste(DBSCAN)
Xd, yd = df_d[:, :-1], df_d[:, -1]
Xd_train, Xd_test, yd_train, yd_test = train_test_split(Xd, yd, train_size=0.33, random_state=1)
# +
#Executando a regressão linear sem outliers(DBSCAN)
model = LinearRegression()
model.fit(Xd_train, yd_train)
#Avaliando o modelo
y_tr_d = model.predict(Xd_test)
# -
#Utilizando a métrica da média de erro absoluto sem outliers(DBSCAN)
mae_no_out_d = mean_absolute_error(yd_test, y_tr_d)
print('MAE_d: ', mae_no_out_d)
# O resultado apresentado após aplicação do modelo apresenta uma redução drástica na média de erro absoluto.
#Comparando a execução entre as três execuções
print('MAE: ', mae_wout)
print('MAE_Z-score: ', mae_no_out_z)
print('MAE_DBSCAN: ', mae_no_out_d)
# A média de erro absoluto apresentado para a execução da base de dados sem a remoção dos dados apresentou uma leve melhora na acurácia. Este resultado corrobora com a literatura apresentada durantes os estudos, que a remoção de dados tem um baixo impacto no aumento da acurácia dos modelos. O resultado apresentado pelo DBSCAN, mostrou um resultado muito distante do esperado, trazendo um resultado enviesado.
# O artigo apresentado utilizou outros algoritmos para identificação automática de outliers, mas o resultado de erro absoluto após remoção destes dados. Apresenta uma leve melhora na acurácia, assim como apresentado neste experimento.
| missao_8/smd_outlier_detction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_amazonei_tensorflow_p36
# language: python
# name: conda_amazonei_tensorflow_p36
# ---
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tensorflow as tf
print(tf.__version__)
import boto3
from sagemaker import get_execution_role
tf.compat.v1.enable_eager_execution()
import utils
import data
import extractor
TRAIN = tf.estimator.ModeKeys.TRAIN
EVAL = tf.estimator.ModeKeys.EVAL
# PREDICT = tf.estimator.ModeKeys.PREDICT
SOURCE_DATASETDIR = 0
SOURCE_LOOPDIR = 1
WAIT_SECONDS = 60
# add test
# -
# NOTE(review): removed stray expression `a` — the bare name is undefined in
# this notebook and raised NameError when the cell ran.
# +
# s3 configuration
config = {
# 'AWS_ACCESS_KEY_ID':'AKIAR66VYUC6IKHLEWOV', # Credentials only needed if connecting to a private endpoint
# 'AWS_SECRET_ACCESS_KEY':'<KEY>',
'AWS_REGION':'us-east-2', # Region for the S3 bucket, this is not always needed. Default is us-east-1.
'S3_ENDPOINT':'s3.us-east-2.amazonaws.com', # The S3 API Endpoint to connect to. This is specified in a HOST:PORT format.
'S3_USE_HTTPS':'1', # Whether or not to use HTTPS. Disable with 0.
'S3_VERIFY_SSL':'1',
}
os.environ.update(config)
# +
role = get_execution_role()
bucket='sagemaker-cs281'
data_key = 'deephol-data/deepmath/deephol/proofs/human'
ddir = 's3://{}/{}'.format(bucket, data_key)
evalddir = None
# +
class DataInfo(object):
    """Bundle of dataset hyperparameters for building a Params object."""

    def __init__(self, dataset_dir, eval_dataset_dir):
        self.dataset_dir = dataset_dir
        self.eval_dataset_dir = eval_dataset_dir
        self.ratio_neg_examples = 7
        self.ratio_max_hard_negative_examples = 5
        self.batch_size = 4

    def generate(self):
        """Return all hyperparameters as a plain dict."""
        # Every attribute set in __init__ is a hyperparameter, so the
        # instance dict is exactly the mapping we want (copied).
        return dict(vars(self))
d = DataInfo(ddir,evalddir)
hparams = d.generate()
params = utils.Params(**hparams)
# -
params
# +
train_data = data.get_holparam_dataset(TRAIN, params)
eval_data = data.get_holparam_dataset(EVAL, params)
# need to implement tristan_parser
train_parsed = train_data.map(functools.partial(data.pairwise_thm_parser, params=params))
print(train_parsed)
# test for checking what train_parsed contains
# for raw_record in train_parsed.take(10):
# print(repr(raw_record))
# -
input_fn = data.get_input_fn(dataset_fn=data.get_train_dataset, mode=TRAIN, params=params,
shuffle_queue=10000,
repeat=False)
features, labels = input_fn()
| deepmath/deephol/train/Other_notuseful/test3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import prody as pry
import os
from glob import glob
import re
from modeller import *
from modeller.automodel import *
# -
amino3to1dict = {'ALA': 'A','CYS': 'C','ASP': 'D','GLU': 'E','PHE': 'F','GLY': 'G',
'HIS': 'H','ILE': 'I','LYS': 'K','LEU': 'L','MET': 'M','ASN': 'N',
'PRO': 'P','GLN': 'Q','ARG': 'R','SER': 'S','THR': 'T','VAL': 'V',
'TRP': 'W','TYR': 'Y'}
def make_folder(folder_result="results"):
    """Create *folder_result* (including parents) if missing and return its path.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of an exists-then-create
    pair, which had a race condition (the directory could appear between the
    check and the creation).
    """
    os.makedirs(folder_result, exist_ok=True)
    return folder_result
def pdb_paths(pdbsdir="modelled_mutations/"):
    """Return absolute paths of every .pdb file directly inside *pdbsdir*."""
    return [os.path.abspath(match) for match in glob(pdbsdir + "*.pdb")]
def split_mutations(mut_str):
    """Split a mutation string like 'A123B' into [wt_aa, position, mut_aa].

    The position group may carry an insertion-code letter (e.g. '100a').
    Matching is case-insensitive and uses the first match found.
    """
    pattern = r"([A-Z])([0-9]+[A-Z]*)([A-Z])"
    match = re.search(pattern, mut_str, flags=re.I)
    return list(match.groups())
def aa2replace(m_splits):
    """Return the three-letter code for the wild-type amino acid.

    *m_splits* is the output of split_mutations(); its first element is the
    one-letter wild-type code, looked up in amino3to1dict.  Returns None if
    the letter is not found.
    """
    wt_letter = m_splits[0]
    return next(
        (three for three, one in amino3to1dict.items() if one == wt_letter),
        None,
    )
# ## Basado en https://salilab.org/modeller/manual/node250.html
def do_mutate(pdbname,pdbwt,restype,pdbsdir):
    """Build a substitution mutant of *pdbname* with MODELLER and write it to disk.

    Parameters
    ----------
    pdbname : PDB code / file stem (without extension) read from *pdbsdir*.
    pdbwt   : alignment code for the mutant model; also the output file stem.
    restype : three-letter residue type to mutate to.
    pdbsdir : directory searched for the input PDB file.

    NOTE(review): the residue position and chain to mutate come from the
    module-level globals ``m_splits`` and ``chain_mutated`` set by the calling
    loop, not from the parameters — consider passing them explicitly.
    """
    # This will read a PDB file, change its sequence a little, build new
    # coordinates for any of the additional atoms using only the internal
    # geometry, and write the mutant PDB file. It can be seen as primitive
    # but rapid comparative modeling for substitution mutants. For more
    # sophisticated modeling, see http://salilab.org/modeller/wiki/Mutate%20model
    #
    # For insertion and deletion mutants, follow the standard comparative
    # modeling procedure.
    env = environ()
    env.io.atom_files_directory = [pdbsdir]
    # Read the topology library with non-hydrogen atoms only:
    env.libs.topology.read(file='$(LIB)/top_heav.lib')
    # To produce a mutant with all hydrogens, uncomment this line:
    #env.libs.topology.read(file='$(LIB)/top_allh.lib')
    # Read the CHARMM parameter library:
    env.libs.parameters.read(file='$(LIB)/par.lib')
    # Read the original PDB file and copy its sequence to the alignment array:
    code = pdbname
    aln = alignment(env)
    mdl = model(env, file=code)
    aln.append_model(mdl, atom_files=code, align_codes=code)
    #get original chain names
    template_chains = [c.name for c in mdl.chains]
    # Select the residues to be mutated: in this case all ASP residues:
    #sel = selection(mdl).only_residue_types('ASP')
    # The second example is commented out; it selects residues '1' and '10'.
    # Selection key is 'RESNUM:CHAIN' built from the global m_splits/chain_mutated.
    sel = selection(mdl.residues['%s:%s'% (m_splits[1].upper(),chain_mutated)])
    # Mutate the selected residues into HIS residues (neutral HIS):
    sel.mutate(residue_type=restype)
    # Add the mutated sequence to the alignment arrays (it is now the second
    # sequence in the alignment):
    aln.append_model(mdl, align_codes=pdbwt)
    # Generate molecular topology for the mutant:
    mdl.clear_topology()
    mdl.generate_topology(aln[pdbwt])
    # Transfer all the coordinates you can from the template native structure
    # to the mutant (this works even if the order of atoms in the native PDB
    # file is not standard):
    mdl.transfer_xyz(aln)
    # Build the remaining unknown coordinates for the mutant:
    mdl.build(initialize_xyz=False, build_method='INTERNAL_COORDINATES')
    # Transfer the residue and chain ids and write out the new MODEL:
    for ct,cm in zip(template_chains,mdl.chains):
        cm.name = ct
    # Write the mutant to a file:
    mdl.write(file=pdbwt+'.pdb')
# # Run proccesing
# +
ab_bind_original = pd.read_excel("PRO-25-393-s002.xlsx")
ab_bind_mCSM = pd.read_table("ab_bind_dataset.csv")
#Obtener datos de los modelos HM que no estan en el dataframe de mCSM
ab_bind_HMdata = ab_bind_original.loc[ab_bind_original["#PDB"].str.startswith("HM")]
#Agregar la columna chain con la cadena mutada, y reescribir la columna Mutation
ab_bind_HMdata = ab_bind_HMdata.assign(Chain= ab_bind_HMdata['Mutation'].str.split(':').str[0],Mutation= ab_bind_HMdata['Mutation'].str.split(':').str[1])
# -
#ab_bind_mCSM = pd.read_table("ab_bind_dataset.csv")
pdbfiles = pdb_paths(pdbsdir="modelled_mutations/")
# +
# Ordenar la lista pdbfiles en orden secuencial para que coincida con el orden del dataframe
def extract_num(pdb):
    """Sort key: the sequence number embedded in the file name (third dot-separated field)."""
    filename = pdb.split("/")[-1]
    return int(filename.split(".")[2])
pdbfiles.sort(key=extract_num)
# -
ab_bind_mCSM_HM = ab_bind_mCSM.append(ab_bind_HMdata,sort=True)
ab_bind_mCSM_HM.to_csv("ab_bind_mCSM_HM.csv")
# # modelando estructuras WT
# +
# Reload the merged dataset and re-collect the modelled-mutation PDB paths.
ab_bind_mCSM_HM = pd.read_csv("../data/ab_bind_mCSM_HM.csv",index_col=0)
pdbfiles = pdb_paths(pdbsdir="../data/modelled_mutations/")
# Sort the pdbfiles list sequentially so it matches the dataframe's row order.
def extract_num(pdb):
    """Return the numeric index stored as the third dot-separated field of the file name."""
    head = pdb.rsplit("/", 1)[-1]
    return int(head.split(".")[2])
pdbfiles.sort(key=extract_num)
# +
# Build a WT (wild-type) model for every mutant structure, working inside a
# dedicated output folder and restoring the original working directory afterwards.
pdbs_dir= os.path.abspath("../data/modelled_mutations/")
old_dir = os.getcwd()
try:
    contador = 0  # processed-structure counter
    os.chdir(make_folder("wt_modells"))
    # Each pdb path is paired with its corresponding dataframe row (same order
    # guaranteed by the extract_num sort above).
    for pdb,tuples in zip(pdbfiles,ab_bind_mCSM_HM.itertuples()):
        chain_mutated = tuples.Chain
        m_splits = split_mutations(tuples.Mutation)
        # Define name, WT name and WT .ali file
        name = os.path.basename(pdb)[:-4]  # strip the ".pdb" extension
        name_wt = name+".WT"
        # aa2replace presumably maps each mutation back to the wild-type residue -- TODO confirm
        reswt = aa2replace(m_splits)
        do_mutate(name,name_wt,reswt,pdbs_dir)
        contador +=1
finally:
    # Always return to the original directory, even if modelling fails midway.
    os.chdir(old_dir)
| notebooks/Wt_modells-final.ipynb |
# ---
# jupyter:
# jupytext:
# formats: python_scripts//py:percent,notebooks//ipynb
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Solution for Exercise 03
#
# The goal of this exercise is to evaluate the impact of feature preprocessing on a pipeline that uses a decision-tree-based classifier instead of logistic regression.
#
# - The first question is to empirically evaluate whether scaling numerical feature is helpful or not;
#
# - The second question is to evaluate whether it is empirically better (both from a computational and a statistical perspective) to use integer coded or one-hot encoded categories.
#
#
# Hint: `HistGradientBoostingClassifier` does not yet support sparse input data. You might want to use
# `OneHotEncoder(handle_unknown="ignore", sparse=False)` to force the use of a dense representation as a workaround.
# %%
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
# Fetch the adult-census dataset from OpenML.
df = pd.read_csv("https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# Or use the local copy:
# df = pd.read_csv('../datasets/adult-census.csv')
# %%
target_name = "class"
target = df[target_name].to_numpy()
# Drop the target and "fnlwgt" (presumably the census sampling weight,
# not a predictive feature -- TODO confirm).
data = df.drop(columns=[target_name, "fnlwgt"])
# %%
# Partition the columns: integer/float dtypes are numerical, everything else
# is treated as categorical.
numerical_columns = [col for col in data.columns
                     if data[col].dtype.kind in "if"]
categorical_columns = [col for col in data.columns
                       if data[col].dtype.kind not in "if"]
# Record the unique values of every categorical column for the encoders.
categories = [data[col].unique() for col in categorical_columns]
# %% [markdown]
# ## Reference pipeline (no numerical scaling and integer-coded categories)
#
# First let's time the pipeline we used in the main notebook to serve as a reference:
# %%
# %%time
# Reference pipeline: integer-encode the categorical columns and pass the
# numerical columns through unchanged (remainder="passthrough").
preprocessor = ColumnTransformer([
    ('categorical', OrdinalEncoder(categories=categories), categorical_columns),
], remainder="passthrough")
model = make_pipeline(
    preprocessor,
    HistGradientBoostingClassifier()
)
# Default cross-validation over the full dataset.
scores = cross_val_score(model, data, target)
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
# %% [markdown]
# ## Scaling numerical features
# %%
# %%time
from sklearn.preprocessing import StandardScaler
# Same model, but the numerical features are standardized first; no remainder
# is needed because every column is now handled by one of the transformers.
preprocessor = ColumnTransformer([
    ('numerical', StandardScaler(), numerical_columns),
    ('categorical', OrdinalEncoder(categories=categories), categorical_columns),
])
model = make_pipeline(
    preprocessor,
    HistGradientBoostingClassifier()
)
scores = cross_val_score(model, data, target)
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
# %% [markdown]
# ### Analysis
#
# We can observe that both the accuracy and the training time are approximately the same as the reference pipeline (any time difference you might observe is not significant).
#
# Scaling numerical features is indeed useless for most decision tree models in general and for `HistGradientBoostingClassifier` in particular.
# %% [markdown]
# ## One-hot encoding of categorical variables
#
# For linear models, we have observed that integer coding of categorical
# variables can be very detrimental. However for
# `HistGradientBoostingClassifier` models, it does not seem to be the
# case as the cross-validation of the reference pipeline with
# `OrdinalEncoder` is good.
#
# Let's see if we can get an even better accuracy with `OneHotEncoding`:
# %%
# %%time
from sklearn.preprocessing import OneHotEncoder
# One-hot encode the categorical columns; sparse=False forces a dense array
# because HistGradientBoostingClassifier does not accept sparse input.
preprocessor = ColumnTransformer([
    ('categorical',
     OneHotEncoder(handle_unknown="ignore", sparse=False),
     categorical_columns),
], remainder="passthrough")
model = make_pipeline(
    preprocessor,
    HistGradientBoostingClassifier()
)
scores = cross_val_score(model, data, target)
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
# %% [markdown]
# ### Analysis
#
# From an accuracy point of view, the result is almost exactly the same.
# The reason is that `HistGradientBoostingClassifier` is expressive
# and robust enough to deal with misleading ordering of integer coded
# categories (which was not the case for linear models).
#
# However from a computation point of view, the training time is
# significantly longer: this is caused by the fact that `OneHotEncoder`
# generates approximately 10 times more features than `OrdinalEncoder`.
#
# Note that the current implementation of `HistGradientBoostingClassifier`
# is still incomplete, and once sparse representations are handled
# correctly, training time might improve with such kinds of encodings.
#
# The main take away message is that arbitrary integer coding of
# categories is perfectly fine for `HistGradientBoostingClassifier`
# and yields fast training times.
| notebooks/03_basic_preprocessing_categorical_variables_exercise_02_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle as pk
import pandas as pd
import numpy as np
import datetime
import matplotlib.dates as dates
# Load the 15-second bottom-pressure series for instrument MJ03F.
with open('/home/jovyan/data/botpt/2019bottom_pressure15s_F.pkl', 'rb') as E:
    botpt_data = pk.load(E)
df_botptF = pd.DataFrame(botpt_data)
df_botptF['bottom_pressure'] = df_botptF['bottom_pressure'].astype(float)
# Convert pressure to depth. NOTE(review): the factor here is 0.670 while the
# commented-out alternative below used 0.0670 -- confirm which conversion is correct.
df_botptF['depth']=df_botptF['bottom_pressure'].astype(float) * 0.670
#MJ03F_cal_depths = [MJ03F_pressure * 0.0670 for MJ03F_pressure in MJ03F_pressure]
#list comprehension
epoch= [i.timestamp() for i in df_botptF.index.to_pydatetime()]
df_botptF['epoch'] = epoch
df_botptF= df_botptF.sort_index()
df_botptF.index.name= 'Date'
# Drop the helper columns; only 'depth' is kept for the analysis.
del df_botptF['epoch']
del df_botptF['bottom_pressure']
df_botptF.tail()
# Load the matching series for instrument MJ03E and apply the same processing.
with open('/home/jovyan/data/botpt/2019bottom_pressure15s_E.pkl', 'rb') as E:
    botpt_data = pk.load(E)
df_botptE = pd.DataFrame(botpt_data)
df_botptE['bottom_pressure'] = df_botptE['bottom_pressure'].astype(float)
# Same pressure-to-depth conversion as for MJ03F above.
df_botptE['depth']=df_botptE['bottom_pressure'].astype(float) * 0.670
#MJ03F_cal_depths = [MJ03F_pressure * 0.0670 for MJ03F_pressure in MJ03F_pressure]
#list comprehension
epoch= [i.timestamp() for i in df_botptE.index.to_pydatetime()]
df_botptE['epoch'] = epoch
df_botptE= df_botptE.sort_index()
df_botptE.index.name= 'Date'
# Drop the helper columns; only 'depth' is kept for the analysis.
del df_botptE['epoch']
del df_botptE['bottom_pressure']
df_botptE.head()
# #### Merge BOTPT E and BOTPT F
# Outer-join the two instruments on timestamp; the merge indicator ('_merge')
# marks whether each row came from one frame or both.
test = pd.merge(df_botptF, df_botptE,how='outer', indicator=True, left_index=True, right_index=True, suffixes=('_F', '_E'))
# Keep only timestamps present in both instruments. The .copy() makes the
# result an independent frame so the later column deletions/assignments do
# not operate on a slice view (avoids pandas' SettingWithCopyWarning).
df_botptMerge = test[test['_merge'] == 'both'].copy()
del df_botptMerge['_merge']
del df_botptF
del df_botptE
# NOTE(review): the source pickles are named 2019..., but this slice selects
# January 2017 -- confirm the intended date range.
df_botptMerge = df_botptMerge.loc['2017-1-1 00:00:00':'2017-01-30 00:00:00']
df_botptMerge
# #### Calculate Depth difference
# Signed depth difference between the two instruments (E minus F).
depthDiff = df_botptMerge['depth_E'].values - df_botptMerge['depth_F'].values
depthDiff
df_botptMerge['diff'] = depthDiff
# df_botptMerge['diff'] = abs(depthDiff)
df_botptMerge['diff'].head(5)
# Replace the signed difference with its absolute value.
depthDiff = df_botptMerge['diff'].abs()
depthDiff.head()
df_botptMerge['diff'] = depthDiff
# #### Create time and height vectors for plotting
# time = list(df_botptMerge.index.values)
#height = x.tolist()
# Height vector: absolute depth difference per sample.
height = df_botptMerge['diff'].tolist()
time = list(pd.to_datetime(df_botptMerge.index.values))
# Convert every timestamp to a matplotlib date number for plotting.
time_int = [dates.date2num(np.datetime64(stamp).astype(datetime.datetime))
            for stamp in time]
# #### Use Groupby to create one day mean measurements
df_botptMerge['date']=pd.DatetimeIndex(df_botptMerge.index).date
df_botptMerge
df_botptMean=df_botptMerge.groupby('date').mean()
df_botptMean.tail(100)
df_test = df_botptMean.head(1000)
df_test.head(10)
df_test['newdiff'] = df_test['diff'].diff()
df_test.head(10)
df_test['newdiff'].plot()
# +
# Track the running maximum of the daily depth difference and label each day:
#   state  2 : new record maximum
#   state  1 : rising (positive day-over-day change) but below the record
#   state -1 : falling or flat
# The running maximum is stored per-row in the 'new' column.
# (Renamed the accumulator from `max` to `max_diff` so the builtin max() is
# not shadowed.)
max_diff = 0
for index, row in df_test.iterrows():
    if row['diff'] > max_diff:
        max_diff = row['diff']
        df_test.at[index, 'state'] = 2
    else:
        if row['newdiff'] > 0:
            df_test.at[index, 'state'] = 1
        else:
            df_test.at[index, 'state'] = -1
    df_test.at[index, 'new'] = max_diff
df_test.head(20)
# -
df_test.state.plot(marker='.',linestyle='')
| notebooks/loop_depthdiff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Newton-Euler equations for rigid bodies
#
# > <NAME>, <NAME>
#
# > Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/))
# > Federal University of ABC, Brazil
# + [markdown] slideshow={"slide_type": "skip"}
# ## Mechanics
#
# In Mechanics we are interested in the study of motion (including deformation) and forces (and the relation between them) of anything in nature.
#
# As a good rule of thumb, we model the phenomenon of interest as simple as possible, with just enough complexity to understand the phenomenon.
#
# For example, we could model a person jumping as a particle (the center of gravity, with no size) moving in one direction (the vertical) if all we want is to estimate the jump height and relate that to the external forces to the human body. So, mechanics of a particle might be all we need.
#
# However, if the person jumps and performs a somersault, to understand this last part of the motion we have to model the human body as one or more objects which displace and rotate in two or three dimensions. In this case, we would need what is called mechanics of rigid bodies.
#
# If, besides the gross motions of the segments of the body, we are interested in understanding the deformation in the the human body segments and tissues, now we would have to describe the mechanical behavior of the body (e.g., how it deforms) under the action of forces. In this case we would have to include some constitutive laws describing the mechanical properties of the body.
#
# In the chapter mechanics of rigid bodies, the body deformation is neglected, i.e., the distance between every pair of points in the body is considered constant. Consequently, the position and orientation of a rigid body can be completely described by a corresponding coordinate system attached to it.
#
# Let's review some Newton's laws of motion for a particle and then extend these equations to motion of rigid bodies.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Recapitulation
#
# ### Newton's laws of motion
#
# The Newton's laws of motion describe the relationship between the forces acting on a body and the resultant linear motion due to those forces:
#
# - **First law**: An object will remain at rest or in uniform motion in a straight line unless an external force acts on the body.
# - **Second law**: The acceleration of an object is directly proportional to the net force acting on the object and inversely proportional to the mass of the object: $\mathbf{\vec{F}} = m\mathbf{\vec{a}}.$
# - **Third law**: Whenever an object exerts a force $\mathbf{\vec{F}}_1$ (action) on a second object, this second object simultaneously exerts a force $\mathbf{\vec{F}}_2$ on the first object with the same magnitude but opposite direction (reaction): $\mathbf{\vec{F}}_2 = −\mathbf{\vec{F}}_1.$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Linear momentum
#
# The linear momentum, or quantity of motion, is defined as the product between mass and velocity:
#
# $$ \mathbf{\vec{L}} = m\mathbf{\vec{v}} $$
#
# ### Angular momentum
#
# In analogy to the linear momentum, the angular momentum is the quantity of movement of a particle rotating around an axis passing through any point O at a distance $\mathbf{\vec{r}}$ to the particle:
#
# $$ \mathbf{\vec{H_O}} = \mathbf{\vec{r_{O}}} \times \mathbf{\vec{L}} $$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Torque (moment of force)
#
# In analogy to the second Newton's law for the linear case, torque or moment of force (or simply moment) is the time derivative of angular momentum:
#
# $$ \mathbf{\vec{M_O}} = \frac{d\mathbf{\vec{H_O}}}{dt} = \frac{d}{dt}(\mathbf{\mathbf{\vec{r}} \times \mathbf{\vec{L}}}) = \frac{d\mathbf{\vec{r_O}}}{dt} \times \mathbf{\vec{L}} + \mathbf{\vec{r_O}} \times \frac{d\mathbf{\vec{L}}}{dt} = \frac{d\mathbf{\vec{r_O}}}{dt} \times (m\mathbf{\mathbf{\vec{v}}}) + \mathbf{\vec{r_O}} \times \frac{d(m\mathbf{\vec{v}})}{dt} = \mathbf{\vec{v}} \times (m\mathbf{\mathbf{\vec{v}}}) + \mathbf{\vec{r_O}} \times \frac{d(m\mathbf{\vec{v}})}{dt} = 0 + \mathbf{\vec{r_O}} \times \mathbf{\vec{F}} $$
#
# $$ \mathbf{\vec{M_O}} = \mathbf{\vec{r_O}} \times \mathbf{\vec{F}} $$
#
# $$ \mathbf{\vec{M_O}} = (r_{O_x}\:\mathbf{\hat{i}}+r_{O_y}\:\mathbf{\hat{j}}+r_{O_z}\:\mathbf{\hat{k}}) \times (F_x\:\mathbf{\hat{i}}+F_y\:\mathbf{\hat{j}}+F_z\:\mathbf{\hat{k}}) $$
#
# Where the symbol $\times$ stands for the [cross product](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ScalarVector.ipynb) mathematical function.
# The the moment of force can be calculated as the determinant of the following matrix:
#
# $$ \mathbf{\vec{M_O}} = \begin{bmatrix}
# \mathbf{\hat{i}} & \mathbf{\hat{j}} & \mathbf{\hat{k}} \\
# r_{O_x} & r_{O_y} & r_{O_z} \\
# F_x & F_y & F_z
# \end{bmatrix} $$
#
# $$ \mathbf{\vec{M_O}} = (r_{O_y}F_z-r_{O_z}F_y)\mathbf{\hat{i}}+(r_{O_z}F_x-r_{O_x}F_z)\mathbf{\hat{j}}+(r_{O_x}F_y-r_{O_y}F_x)\mathbf{\hat{k}} $$
# + [markdown] slideshow={"slide_type": "slide"}
# The magnitude of moment of force can also be calculated by the geometric equivalent formula:
#
# $$ ||\mathbf{\vec{M_O}}|| = ||\mathbf{r_O} \times \mathbf{\vec{F}}|| = ||\mathbf{\vec{r_O}}||\:||\mathbf{\vec{F}}||\sin(\theta) $$
#
# Where $\theta$ is the angle between the vectors $\mathbf{\vec{r_O}}$ and $\mathbf{\vec{F}}$.
#
# The animation below illustrates the relationship between force, torque, and momentum vectors:
#
# <figure><img src="../images/TorqueAnim.gif" alt="Torque animation" width="300"/><figcaption><center><i>Figure. Relationship between force ($\mathbf{\vec{F}}$), torque ($\mathbf{\vec{M}}$), linear momentum ($\mathbf{\vec{L}}$) and angular momentum ($\mathbf{\vec{H}}$). Adapted from [Wikipedia](http://en.wikipedia.org/wiki/File:Torque_animation.gif).</i></center></figcaption></figure>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Moment of inertia
#
# Let's use the example above, where $\mathbf{\vec{r_O}}$ and $\mathbf{\vec{F}}$ are orthogonal and derive an expression for the magnitude of these quantities as the equivalent of Newton's second law for angular motion:
#
# $$ M_O = r_OF = r_Oma $$
#
# Replacing the linear acceleration $a$ by the angular acceleration $\alpha$:
#
# $$ M_O = r_Omr_O\alpha = mr_O^2 \alpha $$
#
# In analogy to Newton's second law, where the constant of proportionality between $a$ and $F$ is called inertia (mass), the constant of proportionality between $M_O$ and $\alpha$ is called rotational inertia or moment of inertia, $I_O=mr_O^2$ for a particle with mass $m$ rotating at a distance $r$ from the center of rotation O.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Principle of transmissibility and Principle of moments
#
# On the effects of forces, there are two important principles:
#
# ### Principle of transmissibility
#
# > *For rigid bodies with no deformation, an external force can be applied at any point on its line of action without changing the resultant effect of the force.*
#
# ### Varignon's Theorem (Principle of Moments)
#
# > *The moment of a force about a point is equal to the sum of moments of the components of the force about the same point.*
# Note that the components of the force don't need to be orthogonal.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Equivalent systems
#
#
# A set of forces and moments is considered equivalent if its resultant force and sum of the moments computed relative to a given point are the same. Normally, we want to reduce all the forces and moments being applied to a body into a single force and a single moment.
#
# We have done this with particles for the resultant force. The resultant force is simply the sum of all the forces being applied to the body.
#
# \begin{equation}
# \vec{\bf{F}} = \sum\limits_{i=1}^n \vec{\bf{F_i}}
# \end{equation}
#
#
# where $\vec{\bf{F_i}}$ is each force applied to the body.
# + [markdown] slideshow={"slide_type": "slide"}
# Similarly, the total moment applied to the body relative to a point O is:
#
# \begin{equation}
# \vec{\bf{M_O}} = \sum\limits_{i}\vec{\bf{r_{i/O}}} \times \vec{\bf{F_i}}
# \end{equation}
#
# where $\vec{\bf{r_{i/O}}} $ is the vector from the point O to the point where the force $\vec{\bf{F_i}}$ is being applied.
#
# But where the resultant force should be applied in the body? If the resultant force were applied to any point different from the point O, it would produce an additional moment to the body relative to point O. So, the resultant force must be applied to the point O.
#
# So, any set of forces can be reduced to a moment relative to a chosen point O and a resultant force applied to the point O.
#
# To compute the resultant force and moment relative to another point O', the new moment is:
#
# \begin{equation}
# \vec{\bf{M_{O'}}} = \vec{\bf{M_O}} + \vec{\bf{r_{O'/O}}} \times \vec{\bf{F}}
# \end{equation}
#
# And the resultant force is the same.
#
# It is worth to note that if the resultant force $\vec{\bf{F}}$ is zero, than the moment is the same relative to any point.
#
# <figure><img src="./../images/equivalentSystem.png" width=850/></figure>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mechanics (dynamics) of rigid bodies
#
# A [rigid body](https://en.wikipedia.org/wiki/Rigid_body) is a model (an idealization) for a body in which deformation is neglected, i.e., the distance between every pair of points in the body is considered constant. This definition also also implies that the total mass of a rigid body is constant.
#
# Consequently, the motion of a rigid body can be completely described by its pose (position and orientation) in space. In a three-dimensional space, at least three coordinates and three angles are necessary to describe the pose of the rigid body, totalizing six degrees of freedom for a rigid body. This also implies that we will need six equations of motion for these components to describe the dynamics of a rigid body.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Euler's laws of motion (for a rigid body)
#
# Euler's laws of motion extend Newton's laws of motion for particles for the motion of a rigid body.
#
# **First law**: The linear momentum of a body is equal to the product of the mass of the body and the velocity of its center of mass:
#
# $$ \mathbf{\vec{L}} = m\mathbf{\vec{v}}_{cm} $$
#
# And calculating the time derivative of this equation:
#
# $$ \mathbf{\vec{F}} = m\mathbf{\vec{a}}_{cm} $$
#
# **Second law**: The rate of change of angular momentum about a point that is fixed in an inertial reference frame is equal to the resultant external moment of force about that point:
#
# $$ \mathbf{\vec{M_O}} = \frac{d\mathbf{\vec{H_O}}}{dt} $$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Derivation of the Euler's laws of motion
#
# **First law**:
#
# The sum of the linear momentum of all the particles of a rigid body (considering the body as a discrete sum of elements, but this also holds for the continuous case):
#
# $$ \mathbf{\vec{L}} = \sum m_i\mathbf{\vec{v}}_i $$
#
# Looking at the definition of center of mass:
#
# $$ \mathbf{\vec{r}}_{cm} = \frac{1}{m_{B}}\sum m_{i}\mathbf{\vec{r}}_i \quad \text{where} \quad m_{B} = \sum m_{i} $$
#
# By differentiation, the velocity of the center of mass is:
#
# $$ \mathbf{\vec{v}}_{cm} = \frac{1}{m_{B}}\sum m_{i}\mathbf{\vec{v}}_i $$
#
# And finally:
#
# $$ \mathbf{\vec{L}} = \sum m_{i}\mathbf{\vec{v}}_i = m_{B} \mathbf{\vec{v}}_{cm} $$
# + [markdown] slideshow={"slide_type": "slide"}
# We can get the second equation of the first law calculating the time derivative of the equation above.
# Another way to derive this second equation is considering the effects of all forces acting on each particle of the rigid body and apply Newton's second law to them:
#
# $$ \sum \mathbf{\vec{F}}_i = \sum m_i\mathbf{\vec{a}}_i $$
#
# With respect to the origin of these forces, they can be divided in two types: external and internal forces to the rigid body. Internal forces are interaction forces between particles inside the body and because of Newton's third law (action and reaction) they cancel each other. So, the equation above becomes:
#
# $$ \sum \mathbf{\vec{F}}_{external} = \sum m_i\mathbf{\vec{a}}_i $$
#
# But the acceleration of the center of mass is:
#
# $$ \mathbf{\vec{a}}_{cm} = \frac{1}{m_B}\sum m_{i}\mathbf{\vec{a}}_i $$
#
# And finally:
#
# $$ \mathbf{\vec{F}} = \sum \mathbf{\vec{F}}_{external} = m_B\mathbf{\vec{a}}_{cm} $$
#
# This means that for a rigid body the internal forces between the particles of the body do not contribute to changing the total momentum nor changing the acceleration of the center of mass.
# + [markdown] slideshow={"slide_type": "slide"}
# **Second law**:
#
# For a complete derivation of the second Euler's law of motion, see for example Taylor (2005) or [http://emweb.unl.edu/NEGAHBAN/EM373/note19/note19.htm](http://emweb.unl.edu/NEGAHBAN/EM373/note19/note19.htm).
#
# Let's derive the second Euler's law of motion for a simpler case of a rigid body rotating in a plane.
#
# First, a general consideration about the total angular momentum of a rotating rigid body:
# The total angular momentum of a rigid body rotating around a point $O$ can be expressed as the angular momentum of the body center of mass around the point $O$ plus the sum of the angular momentum of each particle around the body center of mass (for a proof see page 368 of Taylor, 2005):
#
# $$ \mathbf{\vec{H_O}} = \mathbf{\vec{r}}_{cm/O} \times m\mathbf{\vec{v}}_{cm/O} + \sum \mathbf{\vec{r}}_{i/cm} \times m_i\mathbf{\vec{v}}_{i/cm} $$
#
# For a two-dimensional case, where the rigid body rotates around its own center of mass and also rotates around another parallel axis (fixed), the second term of the right side of the equation above can be simplified to $\sum (m_i\mathbf{r}^2_{i/cm}) \mathbf{\vec{\omega}}$ and calculating the time derivative of the whole equation, the second Euler's law of motion simplifies to:
#
# $$ \mathbf{\vec{M_O}} = \mathbf{\vec{r}}_{cm/O} \times m\mathbf{\vec{a}}_{cm} + I_{cm} \mathbf{\vec{\alpha}} $$
#
# where $\mathbf{\vec{r}}_{cm}$ is the position vector of the center of mass with respect to the point $O$ about which moments are summed, $\mathbf{\vec{\alpha}}$ is the angular acceleration of the body about its center of mass, and $I_{cm}$ is the moment of inertia of the body about its center of mass.
#
# If $d$ is the (shortest) distance between the point $O$ and the line of the acceleration vector, then the equation above becomes:
#
# $$ \mathbf{M} = ma_{cm}d + I_{cm} \mathbf{\alpha} $$
#
# Note that if the acceleration of the center of mass is zero or the sum of moments of force is calculated around the center of mass (then $\mathbf{r}_{cm}=0$), this case of rotation in a plane simplifies to the well-known simple equation:
#
# $$ \mathbf{\vec{M_{cm}}} = I_{cm} \mathbf{\vec{\alpha}} $$
# + [markdown] slideshow={"slide_type": "slide"}
# *Three-dimensional case*
#
# In the three-dimensional space, if we describe the rotation of a rigid body using a rotating reference frame with axes parallel to the principal axes of inertia (referred by the subscripts 1,2,3) of the body, the Euler's second law becomes:
#
# $$ M_1 = I_1\dot{\omega_1} + (I_3-I_2)\omega_2\omega_3 $$
#
# $$ M_2 = I_2\dot{\omega_2} + (I_1-I_3)\omega_3\omega_1 $$
#
# $$ M_3 = I_3\dot{\omega_3} + (I_2-I_1)\omega_1\omega_2 $$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problems
#
# 1. (Recap.) Solve problems 11.2.1, 11.2.2, 11.2.9, 11.2.11 and 11.2.21 of Ruina and Rudra (2013).
#
# 2. Calculate the magnitude of the moment about the base point *O* of the 600-N force in five different ways for the structure shown below (hint: use the equation for torque in different ways, and also the principles of moments and of transmissibility).
#
# <figure><img src="http://ebm.ufabc.edu.br/wp-content/uploads/2013/02/torque2.jpg" alt="Torque" width="250"/></figure>
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## References
#
# - <NAME>, <NAME> (2013) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press.
# - <NAME> (2005) [Classical Mechanics](https://books.google.com.br/books?id=P1kCtNr-pJsC). University Science Books.
| notebooks/newton_euler_equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 载入必要的库
# +
import mxnet as mx
from mxnet import autograd
from mxnet import gluon
from mxnet import image
from mxnet import init
from mxnet import nd
from mxnet.gluon import nn
from mxnet.gluon.data import vision
from mxnet.gluon.model_zoo import vision as models
import numpy as np
import pandas as pd
from tqdm import tqdm
import cv2
import h5py
import os
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
ctx = [mx.gpu(i) for i in range(1)]  # adjust the range here for a single / multiple GPUs
df = pd.read_csv('D:/dataset/dogbreed/sample_submission.csv')
# Class (breed) names, in the submission file's column order.
synset = list(df.columns[1:])
# -
# # 载入数据集
# +
from glob import glob
n = len(glob('D:/dataset/Stanford_dogbreed/images/Images/*/*.jpg'))
# Preallocate two resized copies of every image (224 px and 299 px inputs
# for the two backbones used below).
X_224 = nd.zeros((n, 3, 224, 224))
X_299 = nd.zeros((n, 3, 299, 299))
y = nd.zeros((n,))
# Standard ImageNet per-channel mean/std used for normalization.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
for i, file_name in tqdm(enumerate(glob('D:/dataset/Stanford_dogbreed/images/Images/*/*.jpg')), total=n):
    img = cv2.imread(file_name)
    # BGR->RGB ([:,:,::-1]), scale to [0,1], normalize, then HWC->CHW.
    img_224 = ((cv2.resize(img, (224, 224))[:,:,::-1] / 255.0 - mean) / std).transpose((2, 0, 1))
    img_299 = ((cv2.resize(img, (299, 299))[:,:,::-1] / 255.0 - mean) / std).transpose((2, 0, 1))
    X_224[i] = nd.array(img_224)
    X_299[i] = nd.array(img_299)
    # Label from the folder name; the split on '\\' assumes Windows-style
    # paths -- TODO confirm before running on other platforms.
    y[i] = synset.index(file_name.split('\\')[1][10:].lower())
nd.waitall()
# -
# # 定义得到预训练模型特征的函数
def get_features(model_name, data_iter):
    """Run a pretrained backbone over data_iter and return the flattened features.

    The classification head is not used; only `net.features` is applied, and
    all results are gathered on the CPU and concatenated along the batch axis.
    """
    net = models.get_model(model_name, pretrained=True, ctx=ctx)
    chunks = []
    for batch in tqdm(data_iter):
        # Split each batch across the available devices for parallel
        # prediction (with a single context in ctx this is a no-op split).
        for shard in gluon.utils.split_and_load(batch, ctx, even_split=False):
            feat = gluon.nn.Flatten()(net.features(shard))
            chunks.append(feat.as_in_context(mx.cpu()))
    nd.waitall()
    return nd.concat(*chunks, dim=0)
# # 计算几个预训练模型输出的特征并拼接起来
# +
batch_size = 4
data_iter_224 = gluon.data.DataLoader(gluon.data.ArrayDataset(X_224), batch_size=batch_size)
data_iter_299 = gluon.data.DataLoader(gluon.data.ArrayDataset(X_299), batch_size=batch_size)
# +
# Extract features from each pretrained backbone at its native input size.
model_names = ['inceptionv3', 'resnet152_v1']
features = []
for model_name in model_names:
    if model_name == 'inceptionv3':
        # Inception-v3 expects 299x299 inputs; the other models use 224x224.
        features.append(get_features(model_name, data_iter_299))
    else:
        features.append(get_features(model_name, data_iter_224))
# -
# Concatenate the per-model features along the feature axis.
features = nd.concat(*features, dim=1)
# NOTE(review): this loader keeps batch_size = 4, but the training cell below
# sets batch_size = 128 and calls trainer.step(128) -- confirm the mismatch
# is intended.
data_iter_train = gluon.data.DataLoader(gluon.data.ArrayDataset(features, y), batch_size, shuffle=True)
# # 定义一些函数
def build_model():
    """Create and initialize the classification head trained on the extracted features."""
    net = nn.Sequential()
    with net.name_scope():
        # BatchNorm on the raw features, a 1024-unit hidden layer with
        # BatchNorm/ReLU/Dropout, then a 120-way output layer.
        net.add(nn.BatchNorm(),
                nn.Dense(1024),
                nn.BatchNorm(),
                nn.Activation('relu'),
                nn.Dropout(0.5),
                nn.Dense(120))
    net.initialize(ctx=ctx)
    return net
# +
ctx = mx.gpu()  # use a single GPU during training to keep the computation simple
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
def accuracy(output, labels):
    """Mean fraction of samples whose argmax prediction equals the label."""
    predictions = nd.argmax(output, axis=1)
    return nd.mean(predictions == labels).asscalar()
def evaluate(net, data_iter):
    """Return the mean loss and mean accuracy of `net` over `data_iter`.

    Averages are taken over batches, not samples, so they are exact only
    when every batch has the same size.
    """
    # (Dropped the unused accumulator `n` from the original implementation.)
    loss, acc = 0., 0.
    steps = len(data_iter)
    for data, label in data_iter:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        output = net(data)
        acc += accuracy(output, label)
        loss += nd.mean(softmax_cross_entropy(output, label)).asscalar()
    return loss/steps, acc/steps
# -
# # 训练模型
# +
net = build_model()
epochs = 100
batch_size = 128
# Halve the learning rate every 1500 updates.
lr_sch = mx.lr_scheduler.FactorScheduler(step=1500, factor=0.5)
trainer = gluon.Trainer(net.collect_params(), 'adam',
                        {'learning_rate': 1e-3, 'lr_scheduler': lr_sch})
for epoch in range(epochs):
    train_loss = 0.
    train_acc = 0.
    steps = len(data_iter_train)
    for data, label in data_iter_train:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # NOTE(review): step(batch_size) normalizes gradients by 128, while
        # data_iter_train was built with batch size 4 -- confirm intended.
        trainer.step(batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)
    print("Epoch %d. loss: %.4f, acc: %.2f%%" % (epoch+1, train_loss/steps, train_acc/steps*100))
# -
# # 计算在训练集上的 loss 和准确率
# Training-set loss and accuracy of the trained head.
evaluate(net, data_iter_train)
# # Load the previously exported test-set features
features_test = [nd.load('features_test_%s.nd' % model_name)[0] for model_name in model_names]
features_test = nd.concat(*features_test, dim=1)
# # Predict and write the results to a csv file
output = nd.softmax(net(features_test.as_in_context(ctx))).asnumpy()
# +
# Fill each class column of the submission template with the predicted probabilities.
df_pred = pd.read_csv('D:/dataset/dogbreed/sample_submission.csv')
for i, c in enumerate(df_pred.columns[1:]):
    df_pred[c] = output[:,i]
df_pred.to_csv('pred_stan.csv', index=None)
# -
# # Compare with the previous submission to confirm the rows are not misaligned
# zip() returns a lazy iterator in Python 3 and cannot be sliced directly;
# materialize it with list() before taking the first 10 pairs.
list(zip(np.argmax(pd.read_csv('pred.csv').values[:,1:], axis=-1),
         np.argmax(df_pred.values[:,1:], axis=-1)))[:10]
# # 压缩为 zip 文件
# !rm pred.zip
# !zip pred.zip pred.csv
| Computer-vision/@ypwhs kaggle-DogBreed-gluon/stanford.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="iHMqWczsik6_" colab_type="text"
# - Prove the following facts: Suppose $f$ is a function satisfying
# - $f(0) = f_{min},$ and $\lim_{x\to \infty}f(x) = f_{max}$
# - $f$ is continuous
# - $f$ is strictly increasing
#
# then, for any $p\in (f_{min}, f_{max})$,
# - there exists unique $\hat \sigma$, such that $f(\hat \sigma) = p$ and
# $$\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p|.$$
# + [markdown] id="F9tYcXcNcbil" colab_type="text"
# - Now we denote by $f(\sigma)$ the BSM put price with the following parameters:
# - vol_ratio = $\sigma$; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
#
# Answer the following questions:
# - What is $f_{min}$ and $f_{max}$?
# - Is $f$ strictly increasing on $(0,\infty)$? Justify your answer.
# - If the market put price is $10$, then what's the implied volatility?
# + [markdown] id="Yb5WeJlQp971" colab_type="text"
# - Find its implied volatility with the following parameters:
# - BSM call price is 10.; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
#
#
# + id="beGz9O5zqRXK" colab_type="code" colab={}
| src/20iv_hw01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Bokeh's drawing tools are the basis for a wide range of functionality in EarthSim, using the convenient interface provided by [HoloViews](http://holoviews.org). They make it simple to build systems for annotating existing data, highlighting regions of interest, and drawing and editing shapes that can be used as input to simulators or other programs. This user guide will give a basic introduction to the drawing tools, including how to access the resulting data from within Python code.
#
# For more detail about the underlying Bokeh tools, see the [Bokeh user guide](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#userguide-tools-edit). Note that most of the discussion here is not specific to EarthSim, and applies to any usage of the drawing tools in practice, apart from a few I/O routines imported from `earthsim` when used below.
#
# <style>.container { width:100% !important; }</style>
# +
import os
import numpy as np
import holoviews as hv
import geoviews as gv
import cartopy.crs as ccrs
from holoviews.streams import PointDraw, PolyEdit, BoxEdit, PolyDraw, FreehandDraw
# Wikipedia map tiles are reused as the base layer in every example below.
tiles = gv.tile_sources.Wikipedia
# Select the Bokeh plotting backend; all drawing tools here are Bokeh-based.
hv.extension('bokeh')
# -
# ## Drawing Points
#
# All drawing tools are added by instantiating a corresponding [HoloViews stream](http://holoviews.org/user_guide/Responding_to_Events.html), which also syncs the data. Here we will use the ``PointDraw`` stream, which allows adding points, dragging points, and deleting points. The ``num_objects`` parameter, if set, will limit the number of points that can be drawn, ensuring that when the limit is reached the oldest point is dropped.
#
# **Add point**: Tap anywhere on the plot; each tap adds one point.
#
# **Move point**: Tap and drag an existing point, which will be dropped once you let go of the mouse button.
#
# **Delete point**: Tap a point to select it, then press the Backspace key (sometimes labeled "Delete") while the mouse is within the plot area.
#
# Note that to use the `PointDraw` tool or any of the other drawing tools, you first need to select the icon for it in the toolbar:<img src="https://bokeh.pydata.org/en/latest/_images/PointDraw.png">
# %%opts Points [width=900 height=500 tools=['hover']] (size=10 color='red')
# Seed the plot with 10 random points; the PointDraw stream keeps `points`
# in sync with any points added/moved/deleted in the Bokeh plot, capping
# the drawable points at 10 (oldest is dropped beyond that).
points = gv.Points(np.random.rand(10, 2)*10)
point_stream = PointDraw(source=points, num_objects=10)
tiles * points
# Note that here and in the other examples below, we have provided initial values for the `source`, just so that there will be objects in the map when this notebook is rendered as a web page or otherwise shared. In practice, the `source` here and in every case below can be an empty list `[]` if you don't want any initial values.
#
# Once points are available on the map, we can wrap them in a GeoViews Points object, project them back into longitude and latitude, and then convert the resulting object to a dataframe for use in any Python code:
# Snapshot the currently drawn points as a dataframe.
# Re-run this cell after editing the plot: it reflects the stream state at
# execution time only.
if point_stream.data:
    display(point_stream.element.dframe())
# Of course, the dataframe output above will only contain the points that were present at the time that cell was executed, so the cell will need to be re-run if you add points to the main plot.
#
# ## Drawing bounding boxes
#
# The ``BoxEdit`` stream adds a tool that allows drawing, dragging, and deleting rectangular bounding boxes, once you have selected it in the toolbar: <img src="https://bokeh.pydata.org/en/latest/_images/BoxEdit.png">
#
# The ``num_objects`` parameter, if set, will limit the number of boxes that can be drawn, causing the oldest box to be dropped when the limit is exceeded.
#
# **Add box**: Hold shift, then click and drag anywhere on the plot.
#
# **Move box**: Click and drag an existing box; the box will be dropped once you let go of the mouse button.
#
# **Delete box**: Tap a box to select it, then press the Backspace (or Delete) key while the mouse is within the plot area.
# +
# %%opts Polygons [width=900 height=500]
# %%opts Polygons (fill_alpha=0 line_color='black' selection_fill_color='red')
# Seed rectangle over the Vicksburg area; BoxEdit allows drawing/editing up
# to 3 boxes (the oldest box is dropped when the limit is exceeded).
sample_box = hv.Bounds((-90.99, 32.25, -90.85, 32.37))
box_poly = gv.Polygons([sample_box])
box_stream = BoxEdit(source=box_poly, num_objects=3)
tiles * box_poly
# -
# Note that `BoxEdit` accepts a `Polygon` element, as there is not yet a vectorized Box type that would let it generate boxes directly, and so we will need to convert the returned polygons into boxes manually:
# +
def bbox(poly):
    """Convert the polygon returned by the BoxEdit stream into a bounding box tuple."""
    corners = poly.array()
    x_coords = corners[:, 0]
    y_coords = corners[:, 1]
    # Diagonally opposite corners (index 0 and 2) define the box bounds.
    return (x_coords[0], y_coords[0], x_coords[2], y_coords[2])
# Convert each drawn polygon to a bounding-box tuple.
# Prints nothing unless boxes were drawn before this cell executed.
if box_stream.element:
    polygons = box_stream.element.split()
    bboxes = [bbox(p) for p in polygons]
    print(bboxes)
# -
# (Of course, boxes will only be printed above if they were drawn on the map before the cell above is executed.)
# ## Polygon Editing
#
# The ``PolyEdit`` stream adds a Bokeh tool to the source plot that allows drawing, dragging, and deleting vertices on polygons and making the drawn data available to Python:<img src="https://bokeh.pydata.org/en/latest/_images/PolyEdit.png">
#
# The tool supports the following actions:
#
# **Show vertices**: Double tap an existing patch or multi-line
#
# **Add vertex**: Double tap an existing vertex to select it, then the tool will draw the next point; to add it tap in a new location. To finish editing and add a point, double tap; otherwise press the ESC key to cancel.
#
# **Move vertex**: Drag an existing vertex and let go of the mouse button to release it.
#
# **Delete vertex**: After selecting one or more vertices press Backspace (or Delete) while the mouse cursor is within the plot area.
# +
# %%opts Polygons [width=900 height=500 tools=['box_select']] (alpha=0.5)
# Load the watershed boundary shapefile and attach a vertex-editing stream
# so its vertices can be moved/added/deleted interactively.
shapefile = '../data/vicksburg_watershed/watershed_boundary.shp'
mask_poly = gv.Shape.from_shapefile(shapefile)
vertex_stream = PolyEdit(source=mask_poly)
tiles * mask_poly
# -
# Once the shape has been edited, it can be pulled out into its own file for later usage, and displayed separately:
# %%opts Shape [width=600 height=400] (alpha=0.5)
from earthsim.io import save_shapefile
# Persist the edited geometry (if any edits were made) to a new shapefile,
# then reload and display it standalone.
if vertex_stream.data:
    edited_shape_fname = '../data/vicksburg_watershed_edited/watershed_boundary.shp'
    dir_name = os.path.dirname(edited_shape_fname)
    if not os.path.isdir(dir_name): os.makedirs(dir_name)
    save_shapefile(vertex_stream.data, edited_shape_fname, shapefile)
    mask_shape = gv.Shape.from_shapefile(edited_shape_fname)
    mask_shape = mask_shape.opts() # Clear options to avoid adding edit tool
    mask_shape
# ## Freehand Drawing
#
# The ``FreehandDraw`` tool allows drawing polygons or paths (polylines), depending on whether it is given a Path or Polygon source, using simple click and drag actions:<img src="https://bokeh.pydata.org/en/latest/_images/FreehandDraw.png">
#
# The ``num_objects`` parameter, if set, will limit the number of lines/polygons that can be drawn, causing the oldest object to be dropped when the limit is exceeded.
#
# **Add patch/multi-line**: Click and drag to draw a line or polygon and release mouse to finish drawing
#
# **Delete patch/multi-line**: Tap a patch/multi-line to select it, then press Backspace/Delete while the mouse is within the plot area.
#
# %%opts Path (line_width=5 color='black') [width=900 height=500]
# Seed path with two endpoints; FreehandDraw syncs up to 3 freehand lines
# drawn on the plot (oldest dropped beyond the limit).
path = gv.Path([[(0, 52), (-74, 43)]])
freehand_stream = FreehandDraw(source=path, num_objects=3)
tiles * path
freehand_stream.element.data
# ## Drawing Polygons
#
# The ``PolyDraw`` tool allows drawing new polygons or paths (polylines) on a plot, depending on whether it is given a Path or Polygon source:<img src="https://bokeh.pydata.org/en/latest/_images/PolyDraw.png">
#
# The ``num_objects`` parameter, if set, will limit the number of lines/polygons that can be drawn, causing the oldest object to be dropped when the limit is exceeded. Additionally it is possible to display and snap to existing vertices by enabling the ``show_vertices`` parameter.
#
# **Add patch/multi-line**: Double tap to add the first vertex, then use tap to add each subsequent vertex. To finalize the draw action, double tap to insert the final vertex or press the ESC key to stop drawing.
#
# **Move patch/multi-line**: Tap and drag an existing patch/polyline; the point will be dropped once you let go of the mouse button.
#
# **Delete patch/multi-line**: Tap a patch/multi-line to select it, then press Backspace/Delete while the mouse is within the plot area.
# +
# %%opts Polygons [width=900 height=500] (fill_alpha=0.1 line_color='black')
# %%opts Path (line_width=5 color='black')
# Seed geometries: one polygon and one polyline, each with its own PolyDraw
# stream. Two copies of the tool appear in the toolbar, one per element type.
sample_poly=dict(
    Longitude = [-90.86, -90.94, -91.0 , -90.92, -91.0 , -90.94],
    Latitude = [ 32.33, 32.37, 32.34, 32.32, 32.27, 32.25])
sample_path=dict(
    Longitude = [-90.99, -90.90, -90.90, -90.98],
    Latitude = [ 32.35, 32.34, 32.32, 32.25])
new_polys = gv.Polygons([sample_poly])
new_paths = gv.Path([sample_path])
# show_vertices also enables snapping to existing vertices while drawing.
poly_stream = PolyDraw(source=new_polys, show_vertices=True)
path_stream = PolyDraw(source=new_paths, show_vertices=True)
tiles * new_polys * new_paths
# -
path_stream.element.data
# Notice that the toolbar has two `PolyDraw` tools here; if you select the first one you'll be able to add `Polygons` (drawn with thin lines), and if you select the other one you can add `Path` objects (poly-lines, drawn with a thick line). You'll need to have the appropriate copy of the tool selected if you want to move or delete an object associated with that stream.
#
# Once you have drawn some objects, you can extract the new paths or polygons from the stream (which will be blank unless you have drawn something above when the following cells are executed):
poly_stream.element.geom()
path_stream.element.geom()
# Here `.geom()` returns a [Shapely geometry](https://toblerity.org/shapely/shapely.geometry.html) with all of the shapes you drew of that type. If you would rather work with each shape separately, you can get them as a list with `poly_stream.element.split()` or `path_stream.element.split()`.
# ## Drawing and editing a polygon
#
# By adding tools for both polygon drawing and vertex editing on the same HoloViews object, we can both draw and edit polygons in the same plot:
# %%opts Polygons [width=900 height=500] (fill_alpha=0.2 line_color='black')
# Attach both a draw stream and a vertex-edit stream to the same polygons so
# new shapes can be drawn and their vertices edited in a single plot.
new_polys = gv.Polygons([sample_poly])
poly_stream = PolyDraw(source=new_polys)
vertex_stream = PolyEdit(source=new_polys)
tiles * new_polys
poly_stream.data
poly_stream.element
# The above examples should make it clear how to draw shapes and use the data from within Python. The next set of examples show how to associate data interactively with each point or object added, via [Annotators](Annotators.ipynb).
| examples/user_guide/Drawing_Tools.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started with DoWhy: A simple example
# This is a quick introduction to the DoWhy causal inference library.
# We will load in a sample dataset and estimate the causal effect of a (pre-specified) treatment variable on a (pre-specified) outcome variable.
#
# First, let us load all required packages.
# +
import numpy as np
import pandas as pd
from dowhy import CausalModel
import dowhy.datasets
# Avoid printing dataconversion warnings from sklearn and numpy
import warnings
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'loggers': {
        # Root logger at WARN silences DoWhy's verbose INFO output.
        '': {
            'level': 'WARN',
        },
    }
}
logging.config.dictConfig(DEFAULT_LOGGING)
# -
# Now, let us load a dataset. For simplicity, we simulate a dataset with linear relationships between common causes and treatment, and common causes and outcome.
#
# Beta is the true causal effect.
# Simulate a linear dataset; beta=10 is the true causal effect of the binary
# treatment on the outcome, so estimates below should be close to 10.
data = dowhy.datasets.linear_dataset(beta=10,
        num_common_causes=5,
        num_instruments = 2,
        num_effect_modifiers=1,
        num_samples=20000,
        treatment_is_binary=True,
        num_discrete_common_causes=1)
df = data["df"]
print(df.head())
# The generator also returns the ground-truth graph in DOT and GML formats.
print(data["dot_graph"])
print("\n")
print(data["gml_graph"])
# Note that we are using a pandas dataframe to load the data. At present, DoWhy only supports pandas dataframe as input.
# ## Interface 1 (recommended): Input causal graph
# We now input a causal graph in the GML graph format (recommended). You can also use the DOT format.
#
# To create the causal graph for your dataset, you can use a tool like [DAGitty](http://dagitty.net/dags.html#) that provides a GUI to construct the graph. You can export the graph string that it generates. The graph string is very close to the DOT format: just rename `dag` to `digraph`, remove newlines and add a semicolon after every line, to convert it to the DOT format and input to DoWhy.
# With graph
# Interface 1: supply the full causal graph (GML) along with the data.
model=CausalModel(
        data = df,
        treatment=data["treatment_name"],
        outcome=data["outcome_name"],
        graph=data["gml_graph"]
        )
model.view_model()
from IPython.display import Image, display
# view_model() renders the graph to causal_model.png in the working directory.
display(Image(filename="causal_model.png"))
# The above causal graph shows the assumptions encoded in the causal model. We can now use this graph to first identify
# the causal effect (go from a causal estimand to a probability expression), and then estimate the causal effect.
# ### DoWhy philosophy: Keep identification and estimation separate
#
# Identification can be achieved without access to the data, accessing only the graph. This results in an expression to be computed. This expression can then be evaluated using the available data in the estimation step.
# It is important to understand that these are orthogonal steps.
#
# #### Identification
# Identification uses only the graph (no data) to derive an estimand.
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
# Note the parameter flag *proceed\_when\_unidentifiable*. It needs to be set to *True* to convey the assumption that we are ignoring any unobserved confounding. The default behavior is to prompt the user to double-check that the unobserved confounders can be ignored.
# #### Estimation
# Estimation evaluates the identified estimand on the data.
causal_estimate = model.estimate_effect(identified_estimand,
        method_name="backdoor.propensity_score_stratification")
print(causal_estimate)
print("Causal Estimate is " + str(causal_estimate.value))
# You can input additional parameters to the estimate_effect method. For instance, to estimate the effect on any subset of the units, you can specify the "target_units" parameter which can be a string ("ate", "att", or "atc"), lambda function that filters rows of the data frame, or a new dataframe on which to compute the effect. You can also specify "effect modifiers" to estimate heterogeneous effects across these variables. See `help(CausalModel.estimate_effect)`.
# Causal effect on the control group (ATC)
# Renamed from `causal_estimate_att`: target_units is "atc" (average
# treatment effect on the control group), so the "_att" suffix was a
# copy-paste leftover from an ATT example and was misleading.
causal_estimate_atc = model.estimate_effect(identified_estimand,
        method_name="backdoor.propensity_score_stratification",
        target_units = "atc")
print(causal_estimate_atc)
print("Causal Estimate is " + str(causal_estimate_atc.value))
# ## Interface 2: Specify common causes and instruments
# Without graph
# Interface 2: instead of a full graph, name the common causes and effect
# modifiers directly; DoWhy constructs an equivalent graph internally.
model= CausalModel(
        data=df,
        treatment=data["treatment_name"],
        outcome=data["outcome_name"],
        common_causes=data["common_causes_names"],
        effect_modifiers=data["effect_modifier_names"])
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
# We get the same causal graph. Now identification and estimation is done as before.
#
# #### Identification
# Same identification/estimation steps as Interface 1, on the implied graph.
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
# #### Estimation
estimate = model.estimate_effect(identified_estimand,
        method_name="backdoor.propensity_score_stratification")
print(estimate)
print("Causal Estimate is " + str(estimate.value))
# ## Refuting the estimate
#
# Let us now look at ways of refuting the estimate obtained. Refutation methods provide tests that every correct estimator should pass. So if an estimator fails the refutation test (p-value is <0.05), then it means that there is some problem with the estimator.
#
# Note that we cannot verify that the estimate is correct, but we can reject it if it violates certain expected behavior (this is analogous to scientific theories that can be falsified but not proven true). The below refutation tests are based on either
# 1) **Invariant transformations**: changes in the data that should not change the estimate. Any estimator whose result varies significantly between the original data and the modified data fails the test;
#
# a) Random Common Cause
#
# b) Data Subset
#
#
# 2) **Nullifying transformations**: after the data change, the causal true estimate is zero. Any estimator whose result varies significantly from zero on the new data fails the test.
#
# a) Placebo Treatment
# ### Adding a random common cause variable
# Invariant transformation: adding an independent common cause should leave
# the estimate unchanged.
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
# ### Replacing treatment with a random (placebo) variable
# Nullifying transformation: with a permuted (placebo) treatment the new
# estimate should be close to zero.
res_placebo=model.refute_estimate(identified_estimand, estimate,
        method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
# ### Removing a random subset of the data
# Invariant transformation: the estimate on 90% of the data should agree
# with the full-data estimate.
res_subset=model.refute_estimate(identified_estimand, estimate,
        method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
# As you can see, the propensity score stratification estimator is reasonably robust to refutations.
# For reproducibility, you can add a parameter "random_seed" to any refutation method, as shown below.
res_subset=model.refute_estimate(identified_estimand, estimate,
        method_name="data_subset_refuter", subset_fraction=0.9, random_seed = 1)
print(res_subset)
# ### Adding an unobserved common cause variable
#
# This refutation does not return a p-value. Instead, it provides a _sensitivity_ test on how quickly the estimate changes if the identifying assumptions (used in `identify_effect`) are not valid. Specifically, it checks sensitivity to violation of the backdoor assumption: that all common causes are observed.
#
# To do so, it creates a new dataset with an additional common cause between treatment and outcome. To capture the effect of the common cause, the method takes as input the strength of common cause's effect on treatment and outcome. Based on these inputs on the common cause's effects, it changes the treatment and outcome values and then reruns the estimator. The hope is that the new estimate does not change drastically with a small effect of the unobserved common cause, indicating a robustness to any unobserved confounding.
#
# Another equivalent way of interpreting this procedure is to assume that there was already unobserved confounding present in the input data. The change in treatment and outcome values _removes_ the effect of whatever unobserved common cause was present in the original data. Then rerunning the estimator on this modified data provides the correct identified estimate and we hope that the difference between the new estimate and the original estimate is not too high, for some bounded value of the unobserved common cause's effect.
#
# **Importance of domain knowledge**: This test requires _domain knowledge_ to set plausible input values of the effect of unobserved confounding. We first show the result for a single value of confounder's effect on treatment and outcome.
# Sensitivity analysis: simulate an unobserved confounder with the given
# effect strengths on treatment (binary flip) and outcome (linear shift),
# then re-estimate. A small change in the estimate indicates robustness.
res_unobserved=model.refute_estimate(identified_estimand, estimate, method_name="add_unobserved_common_cause",
        confounders_effect_on_treatment="binary_flip", confounders_effect_on_outcome="linear",
        effect_strength_on_treatment=0.01, effect_strength_on_outcome=0.02)
print(res_unobserved)
# It is often more useful to inspect the trend as the effect of unobserved confounding is increased. For that, we can provide an array of hypothesized confounders' effects.
# Passing an array for one strength produces a plot of estimate vs strength.
res_unobserved_range=model.refute_estimate(identified_estimand, estimate, method_name="add_unobserved_common_cause",
        confounders_effect_on_treatment="binary_flip", confounders_effect_on_outcome="linear",
        effect_strength_on_treatment=np.array([0.001, 0.005, 0.01, 0.02]), effect_strength_on_outcome=0.01)
print(res_unobserved_range)
# The above plot shows how the estimate decreases as the hypothesized confounding on treatment increases. By domain knowledge, we may know that 0.5 is the maximum plausible confounding effect, and since we see that the effect changes by only 20%, we can safely conclude that the causal effect of treatment `v0` is positive.
#
# We can also vary the confounding effect on both treatment and outcome. We obtain a heatmap.
# Arrays for both strengths produce a heatmap over the strength grid.
res_unobserved_range=model.refute_estimate(identified_estimand, estimate, method_name="add_unobserved_common_cause",
        confounders_effect_on_treatment="binary_flip", confounders_effect_on_outcome="linear",
        effect_strength_on_treatment=[0.001, 0.005, 0.01, 0.02],
        effect_strength_on_outcome=[0.001, 0.005, 0.01,0.02])
print(res_unobserved_range)
# **Conclusion**: At least as long as the confounding parameters are limited to 0.02 in the real world, the causal effect can be concluded to be positive.
| docs/source/example_notebooks/dowhy_simple_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="yRynVVFqczch"
# Inspired by [paper](https://www.researchgate.net/publication/237135894_A_unifying_framework_for_complexity_measures_of_finite_systems)
#
# In this notebook I will implement TSE and Excess Entropy calculation and test it on following datasets:
#
#
#
# 1. Wikipedia
# 2. Simple English Wikipedia
#
# + [markdown] id="fNPwqgcCuoBC"
# # Preparation
# + colab={"base_uri": "https://localhost:8080/"} id="UMAfwK2Tuqk3" outputId="1e8dda29-af9c-46e7-a16d-afdc66ea0f75"
# !python3 -m pip install sentencepiece > /dev/null && echo 'OK'
# + colab={"base_uri": "https://localhost:8080/"} id="gPYhLHbjyW9j" outputId="19f245dd-9fe5-4cf9-9bad-4dc8786d0e37"
# !python3 -m pip install tensorflow_text > /dev/null && echo 'OK'
# + colab={"base_uri": "https://localhost:8080/"} id="ETFmTXXgyW3v" outputId="49ec24e0-4e8f-4fc0-dd44-4bf34530e90e"
# !python3 -m pip install tensorflow_datasets > /dev/null && echo 'OK'
# + colab={"base_uri": "https://localhost:8080/"} id="64qgPiHyyCgF" outputId="8e4daaba-0b52-433a-d81b-e5240d2db211"
# !python3 -m pip install tf_sentencepiece > /dev/null && echo 'OK'
# + [markdown] id="K_JtxlGAGrhP"
# ### Imports
# + id="QvRHcxDKukxK"
import sentencepiece as spm
import tensorflow_datasets as tfds
from tqdm.notebook import tqdm
import numpy as np
from typing import List, Tuple
import nltk
import matplotlib.pyplot as plt
# + [markdown] id="wNiRzbPddkZx"
# # Datasets
# + [markdown] id="sdwSY9Tpmnwb"
# ## Wikipedia
# + [markdown] id="NnlJ-q5quOeO"
# [link](https://www.tensorflow.org/datasets/catalog/wiki40b#wiki40ben_default_config) to dataset
# + id="fttAVOrWiFAi"
ds = tfds.load('wiki40b/en', split='train', shuffle_files=True)
# + id="Y0T-7CICjH-U"
# Split each article into paragraph-sized chunks, keeping chunks > 20 chars.
# NOTE(review): the size check runs before extending `texts`, so the final
# list can exceed MAX_TEXTS_SIZE by up to one article's worth of chunks.
MAX_TEXTS_SIZE = 1000
texts = []
for x in ds:
    if len(texts) > MAX_TEXTS_SIZE:
        break
    s = x['text'].numpy().decode('utf-8')
    # Strip the wiki40b structural markers, then split on newlines.
    text = s.replace('_NEWLINE_', ' ').replace('_START_ARTICLE_', '').replace('_START_PARAGRAPH_', '').replace('_START_SECTION_',' ').split('\n')
    texts += list(filter(lambda x: len(x) > 20, text))
# + id="eHhMXOohxcbX"
# Dump one text chunk per line as the SentencePiece training corpus.
with open('train_text.txt', 'w') as fout:
    for text in texts:
        fout.write(text)
        fout.write('\n')
# + id="cyhbH3dH2H-D"
# Train a 500-token SentencePiece model on the Wikipedia chunks ("m" prefix).
spm.SentencePieceTrainer.train('--input=train_text.txt --model_prefix=m --vocab_size=500')
# + colab={"base_uri": "https://localhost:8080/"} id="hxkkLHVy21f2" outputId="3a913b47-3b58-4ad6-b328-a802e64780a1"
sp = spm.SentencePieceProcessor()
sp.load('m.model')
# + colab={"base_uri": "https://localhost:8080/"} id="MzrOhFVB3VSY" outputId="b5af03b4-fa53-4cce-9de9-5ba15ec6eb07"
# Sanity-check the learned vocabulary and a sample tokenization.
print(list(sp.id_to_piece(i) for i in range(sp.vocab_size())))
print(sp.vocab_size())
# + colab={"base_uri": "https://localhost:8080/"} id="lkdORK975AG4" outputId="27f54c64-520d-45be-ff1f-cfe4f13a3fe1"
print(sp.encode_as_ids('Hello, my friend'))
print(sp.encode_as_pieces('Hello, my friend'))
# + id="_PnD_rNLGVDv"
def collect_statistics(
    texts: List[str],
    sp: spm.SentencePieceProcessor
) -> Tuple[nltk.FreqDist, nltk.FreqDist, nltk.FreqDist, nltk.FreqDist]:
    """Count positional token statistics over the tokenized corpus.

    texts: the list of str texts
    sp: pretrained sentencepieces tokenizer
    Returns
    - a nltk.FreqDist with counts for (i, (x_{i-1}, x_i))
    - a nltk.FreqDist with counts for (i, x_i)
    - a nltk.FreqDist with counts for (i, x_i), where i is the last position of the sequence
    - a nltk.FreqDist with counts for i - the number of texts with i-th position

    Changes vs original: removed unused locals (`vocab_size`, `bins`),
    fixed the return annotation (FreqDists, not ndarrays), and skip texts
    that tokenize to an empty sequence (would crash on `[-1]`).
    """
    F_pair = nltk.FreqDist()
    F_single = nltk.FreqDist()
    F_last = nltk.FreqDist()
    F_pos = nltk.FreqDist()
    for text in tqdm(texts):
        tokenized_sequence = sp.encode_as_ids(text)
        if not tokenized_sequence:
            # Defensive: an empty tokenization has no positions to count.
            continue
        # Bigram (x_{i-1}, x_i) is indexed by the position i of its second token.
        bgs = nltk.bigrams(tokenized_sequence)
        F_pair += nltk.FreqDist(zip(range(1, len(tokenized_sequence)), bgs))
        F_single += nltk.FreqDist(zip(range(len(tokenized_sequence)), tokenized_sequence))
        F_last += nltk.FreqDist([(len(tokenized_sequence) - 1, tokenized_sequence[-1])])
        F_pos += nltk.FreqDist(range(len(tokenized_sequence)))
    return F_pair, F_single, F_last, F_pos
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["cbfeaa593bbb40c98f05933addf57e49", "5f7d827b0f854a6297abd0d410e59ff8", "4af4e952439d4f718ccd6e2e5c07a659", "0075ad52a8944c8dbb32376024471672", "bc825fd2119e433ca67ece3f77ac07b5", "9d05a7bd82334aa9bbaa54850395069e", "7c9ce8f3540e47cda3a204e0a1d5a2ff", "104bd25b80634ec9b8bea732dcf89f73"]} id="g_5OqF5EHu5m" outputId="daa0d519-b2b9-4f04-86d5-f28d601d811f"
# Positional frequency tables for the (regular) Wikipedia corpus.
F_pair, F_single, F_last, F_pos = collect_statistics(texts, sp)
# + [markdown] id="Qg5gBHJiaEr_"
# ## Simple English wikipedia
#
# some [link](https://github.com/LGDoor/Dump-of-Simple-English-Wiki)
# + colab={"base_uri": "https://localhost:8080/"} id="FS0ylolIJIGs" outputId="47b3766e-6e1d-4b01-8140-410bda897887"
# !git clone https://github.com/LGDoor/Dump-of-Simple-English-Wiki.git
# + id="uscpWhGJJLPk"
# !tar -xf Dump-of-Simple-English-Wiki/corpus.tgz
# + colab={"base_uri": "https://localhost:8080/"} id="HLijsCNjJtnC" outputId="66243cc3-82ae-4d36-8da1-6f7d6d6c349d"
# !ls -lh
# + id="8zmlVZp1JSFf"
# Collect > 20-char lines from the Simple English Wikipedia dump.
# NOTE(review): like the regular-wiki loop above, the size check precedes
# the extend, so the list can slightly exceed SIMPLE_MAX_TEXTS_SIZE.
SIMPLE_MAX_TEXTS_SIZE = 1000
simple_texts = []
with open('corpus.txt', 'r') as fin:
    for s in fin:
        if len(simple_texts) > SIMPLE_MAX_TEXTS_SIZE:
            break
        text = s.split('\n')
        simple_texts += list(filter(lambda x: len(x) > 20, text))
# + id="GtHX8mftNcUo"
# Dump the Simple English chunks and train a second 500-token tokenizer
# ("s" prefix), parallel to the regular-Wikipedia one above.
with open('simple_train_text.txt', 'w') as fout:
    for text in simple_texts:
        fout.write(text)
        fout.write('\n')
# + id="5HvqI8BNKH6A"
spm.SentencePieceTrainer.train('--input=simple_train_text.txt --model_prefix=s --vocab_size=500')
# + colab={"base_uri": "https://localhost:8080/"} id="dP-N1tI9KXKJ" outputId="cda20240-48e9-4530-8f77-aede9427cc73"
simple_sp = spm.SentencePieceProcessor()
simple_sp.load('s.model')
# + colab={"base_uri": "https://localhost:8080/"} id="Tol35pR8KcjQ" outputId="0c7d2f40-243f-43f5-f113-53ec35d6dc87"
# Inspect the Simple-English tokenizer. Fixed a copy-paste bug: the original
# cell printed `sp` (the regular-Wikipedia tokenizer) instead of `simple_sp`,
# so the Simple-English vocabulary was never actually shown.
print(list(simple_sp.id_to_piece(i) for i in range(simple_sp.vocab_size())))
print(simple_sp.vocab_size())
# + colab={"base_uri": "https://localhost:8080/"} id="Phy8hdU9KeXL" outputId="6c826e4d-ee41-4165-a4e1-57a0144d5820"
print(simple_sp.encode_as_ids('Hello, my friend'))
print(simple_sp.encode_as_pieces('Hello, my friend'))
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["6742d7bb938843a48ca86b4a14b7ecbf", "bddd0655a4dc4a06901bc6f9fb878bb7", "81d4f5f6ef334ff881ea543f3511824c", "f1ca3ffdcf3c4335b0c4e261c9771703", "4831dbe6f9cf47519aae22f56c61e924", "a904c220174847daa61f8fed6c533160", "0f3a6ca1bae045189d0bfe51951dabc9", "0ca7e65099f146c2954213940452f702"]} id="UZj9xY_nLaqh" outputId="08da70cd-0805-4b5a-ae3b-ecc5a7b66cdd"
# Positional frequency tables for the Simple English corpus.
simple_F_pair, simple_F_single, simple_F_last, simple_F_pos = collect_statistics(simple_texts, simple_sp)
# + [markdown] id="-mEdPDuTahqn"
# # Calculating metrics
# + [markdown] id="2JXHXbenajsb"
# ## Excess Entropy
#
# Friendly reminder:
#
# Suppose that we have a sequence of tokens $x_0, x_1, \ldots, x_{n-1}$.
# We want to calculate $E(x_0, x_1, \ldots, x_{n-1})$
#
# In original papers this metrics can be calculated only of some multidimensional random value, but not of sequence of numbers.
#
# So, $E(X_0, X_1, \ldots, X_{n-1}) = (n-1)\left(\frac{1}{n-1}\sum\limits_{i=0}^{n-1}H(X_0, X_1, \ldots, X_{i-1}, X_{i+1}, \ldots, X_{n-1}) - H(X_0, \ldots, X_{n-1})\right)$
#
# The problem is how to calculate $H(\ldots)$ if we have only a sequence of numbers.
#
# First of all, let's simplify the problem: we want to create such r.v. $\xi_i$ from our numbers in such a way that $H(\xi_0, \ldots, \xi_{n-1}) = H(\xi_0) + H(\xi_1|\xi_0) + \ldots + H(\xi_{n-1}|\xi_{n-2})$. How can we find such r.v.?
#
# Notice that $\xi_i$ should depends on the position ($i$) in the sequence and on number at this position ($x_i$).
#
# Let's construct an r.v. from $x_i$. So, $x_i$ will generate the r.v. $\xi^i_{x_i}$. What is the distribution of such an r.v.?
#
# * $p(\xi^0_{x_0}) = \frac{\#\text{sequences with $x_0$ at the very beginning}}{\#\text{of first positions}}$
#
# * $p(\xi^i_{x_i}, \xi^{i-1}_{x_{i-1}}) = \frac{\#\text{sequences with $x_{i-1}$ at position $i-1$ and $x_i$ at position $i$}}{\#\text{sequences with $i$-th position}}$
#
# Then we can define Excess Entropy of sequence of numbers
#
# $E(x_0, \ldots, x_{n-1}) = E(\xi^0_{x_0}, \xi^1_{x_1}, \ldots, \xi^{n-1}_{x_{n-1}})$
#
# How to calculate it efficiently?
#
# Let's denote $\mu_i = \xi^i_{x_i}$
#
# * $\hat{H} = H(\mu_0, \ldots, \mu_{n-1}) = H(\mu_0) + H(\mu_1|\mu_0) + \ldots + H(\mu_{n-1} | \mu_{n-2})$
# * $H(\mu_i) = -p(\mu_i)\log p(\mu_i) - (1-p(\mu_i))\log (1 - p(\mu_i))$
# * $H(\mu_i,\mu_{i-1}) = $ entropy of pair of binary r.v.
# * $H(\mu_i|\mu_{i-1}) = H(\mu_i, \mu_{i-1}) - H(\mu_{i-1})$
# * $H(\mu_0, \ldots, \mu_{i-1}, \mu_{i+1}, \ldots, \mu_{n-1}) = \hat{H} - H(\mu_i|\mu_{i-1}) - H(\mu_{i+1}|\mu_i) + H(\mu_{i+1}|\mu_{i-1})$
# $=\hat{H} - H(\mu_i|\mu_{i-1}) - H(\mu_{i+1}|\mu_i) + H(\mu_{i+1})$
#
# So, we can easily calculate Excess Entropy in $O(n)$ time
#
# **Note**: if we define an r.v. which depends only on values, then we cannot easily calculate the Excess Entropy, because there is a dependency between all the r.v.
#
# **Note**: $E(\mu_0, \ldots, \mu_{n-1})=
# \left[\sum\limits_{i=0}^{n-2}H(\mu_0, \ldots, \mu_i)\right] +
# \left[\sum\limits_{i=1}^{n-1}H(\mu_i, \ldots, \mu_{n-1})\right] -
# (n - 1) H(\mu_0, \ldots, \mu_{n-1})$, which is sum of Entropies for each prefix and for each suffix minus entropy of full text multiplied by $(n-1)$
#
# **Note**: Let's rewrite the formula.
# $E(\mu_0, \ldots, \mu_{n-1}) = \hat{H} + \sum\limits_{i=0}^{n-1}\left[H(\mu_0,\ldots,\mu_{i-1},\mu_{i+1},\ldots,\mu_{n-1})-\hat{H}\right]=
# \hat{H} + \sum\limits_{i=0}^{n-1}\left[-H(\mu_i|\mu_{i-1})-H(\mu_{i+1}|\mu_i)+H(\mu_{i+1})\right]=
# \sum\limits_{i=0}^{n-1}\left[-H(\mu_i|\mu_{i-1})-H(\mu_{i+1}|\mu_i)+H(\mu_{i+1})+H(\mu_i|\mu_{i-1})\right]=
# \sum\limits_{i=0}^{n-1}\left[H(\mu_{i+1})-H(\mu_{i+1}|\mu_i)\right]=
# \sum\limits_{i=0}^{n-2}I(\mu_i\colon\mu_{i+1})$
#
# **Note**: How can we calculate $H(\mu_i)$?
# $H(\mu_i) = Entropy([p, 1 - p])$, where $p = \frac{\#(x_i,i)}{\#(i)}$
#
# **Note**: How can we calculate $H(\mu_i, \mu_{i-1})$?
# $H(\mu_i) = Entropy([p_{0,0}, p_{0,1}, p_{1, 0}, p_{1,1}])$, where
# * $p_{1,1} = \frac{\#(i,x_i,x_{i-1})}{\#(i)}$
# * $p_{1,0} = \frac{\#(i,x_i,\overline{x_{i-1}})}{\#(i)}$
# * $p_{0,1} = \frac{\#(i,\overline{x_i},x_{i-1})}{\#(i)}$
# * $p_{0,0} = \frac{\#(i,\overline{x_i},\overline{x_{i-1}})}{\#(i)}$
# + id="7HFDbvgSai3s"
def calculate_excess_entropy(
    texts: List[str],
    sp: spm.SentencePieceProcessor,
    F_pair: nltk.FreqDist,
    F_single: nltk.FreqDist,
    F_last: nltk.FreqDist,
    F_pos: nltk.FreqDist
) -> np.ndarray:
    """Compute the excess entropy E(x_0..x_{n-1}) for each text.

    Uses the derivation from the markdown above:
    E = sum_{i=0}^{n-2} I(mu_i : mu_{i+1}) where mu_i is the binary
    indicator r.v. "token x_i occurs at position i" over the corpus.

    texts: the list of str texts
    sp: pretrained sentencepieces tokenizer
    F_pair: nltk.FreqDist with counts for (i, (x_{i-1}, x_i))
    F_single: nltk.FreqDist with counts for (i, x_i)
    F_last: nltk.FreqDist with counts for (i, x_i), where i is the last position of the sequence
    F_pos: nltk.FreqDist with counts for i - the number of texts with i-th position
    Returns
    - a (# of texts,) numpy array with excess entropy calculated for each text

    Changes vs original: removed the dead commented-out code paths and
    renamed the loop variable `id` (shadowed the builtin) to `idx`; the
    computed values are unchanged.
    """
    EPS = 1e-9
    def calculate_entropy(p):
        # Shannon entropy of a probability vector; clip to avoid log(0).
        assert 0 <= p.min() <= p.max() <= 1
        assert abs(p.sum() - 1) < EPS
        return np.sum(-p * np.log(np.clip(p, EPS, 1 - EPS)))
    def H_single(i, xi, verbose: bool = False):
        # Entropy H(mu_i) of the binary indicator "token xi at position i".
        p = F_single[(i, xi)] / F_pos[i]
        if verbose:
            print(p)
        return calculate_entropy(np.array([1 - p, p]))
    def H_pair(i, prev, cur, verbose: bool = False):
        # Conditional entropy H(mu_i | mu_{i-1}), computed from the 2x2 joint
        # distribution of the indicators (prev at i-1) and (cur at i).
        T = F_pos[i]
        c11 = F_pair[(i, (prev, cur))]
        # Occurrences of `prev` at i-1 that are followed by some token,
        # i.e. excluding sequences that end at position i-1.
        c1_ = F_single[(i - 1, prev)] - F_last[(i - 1, prev)]
        c_1 = F_single[(i, cur)]
        c10 = c1_ - c11
        c01 = c_1 - c11
        c00 = T - c11 - c01 - c10
        p = np.array([c00, c01, c10, c11]) / T
        # Joint entropy minus the marginal entropy of the previous indicator.
        return calculate_entropy(p) - calculate_entropy(p.reshape(2, 2).sum(axis=1))
    ee = np.zeros(len(texts), dtype=float)
    for idx, text in tqdm(enumerate(texts)):
        x = sp.encode_as_ids(text)
        n = len(x)
        H_hat = 0
        delta = 0
        for i in range(n):
            if i == 0:
                # NOTE(review): only H(mu_0) is accumulated into H_hat, so the
                # result is H(mu_0) + sum_i I(mu_i : mu_{i+1}) rather than the
                # pure mutual-information sum from the derivation — confirm
                # the extra H(mu_0) term is intended.
                H_hat += H_single(0, x[i])
            if i + 1 < n:
                # I(mu_i : mu_{i+1}) = H(mu_{i+1}) - H(mu_{i+1} | mu_i)
                delta += -H_pair(i + 1, x[i], x[i + 1]) + H_single(i + 1, x[i + 1])
        ee[idx] = delta + H_hat
    return ee
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["5ea15e3439a14cda9e9aa528bff2cbf0", "497797d5fd56495eaff32e193d8951b7", "<KEY>", "<KEY>", "<KEY>", "0779f7ac30a74ff799b20f904ede9962", "5c1e5aef6a8f4d0cbe9c68d82e9f4ef4", "83c83762655c4cdf989f7830dfc25c1e"]} id="11JCBUFWgcpi" outputId="8ea5a989-75e7-46b2-9ec3-4abfde6553b7"
# Excess entropy per En-Wiki text, using the count tables built earlier.
ee = calculate_excess_entropy(
    texts,
    sp,
    F_pair,
    F_single,
    F_last,
    F_pos
)
# + colab={"base_uri": "https://localhost:8080/"} id="JlCoXtb3jZLd" outputId="11483adc-4514-4259-e8f4-671609c94652"
# Summary statistics of the per-text excess entropies (En Wiki).
print(ee.min(), ee.max(), ee.mean(), ee.std())
# + id="Q9ObqiGeRpId"
# Token count of each En-Wiki text (x-axis for the scatter plot below).
lens = np.array([len(sp.encode_as_ids(text)) for text in texts])
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["de0e68c6bfd04c1996963de512e97695", "77ffe3f5996747b4a50770b4ea8a54a4", "83cbc08841d54bad9e1b565493aca5ae", "344ceaf7be82417589daec81cfa5f1df", "d451cacf26bf4dfe9c15300dd239884c", "09b6bd3ce58143aa9659184ff5598848", "6c33a0489d0448a887550ed09817a18b", "72a213b9e4614ba59d15382bced15eb8"]} id="fNPWRBNELoTO" outputId="f5151978-3b86-4da8-91be-21bcf53831a3"
# Same computation for the Simple-English-Wiki corpus with its own tokenizer
# and count tables.
simple_ee = calculate_excess_entropy(
    simple_texts,
    simple_sp,
    simple_F_pair,
    simple_F_single,
    simple_F_last,
    simple_F_pos
)
# + colab={"base_uri": "https://localhost:8080/"} id="IGj1M7VyL4Nx" outputId="c2bfe096-43a9-4968-ac20-6c4d2846b7cd"
# Summary statistics for the Simple-English corpus.
print(simple_ee.min(), simple_ee.max(), simple_ee.mean(), simple_ee.std())
# + id="hjRa-SYCL-fC"
# Bug fix: tokenize Simple-English texts with their own tokenizer. The original
# reused `sp` (the En-Wiki tokenizer) — a copy-paste slip from the `lens` cell —
# so the lengths did not match the tokens actually used to compute simple_ee.
simple_lens = np.array([len(simple_sp.encode_as_ids(text)) for text in simple_texts])
# + colab={"base_uri": "https://localhost:8080/", "height": 625} id="XAkec2kbMCSt" outputId="8036ca4c-8724-472f-b5e6-850fccca1bd9"
# Scatter excess entropy against tokenized text length for both corpora.
plt.figure(figsize=(10, 10))
plt.xlabel('len')
plt.ylabel('Excess Entropy')
plt.scatter(lens, ee, color='blue', label='En Wiki')
plt.scatter(simple_lens, simple_ee, color='red', label='Simple En Wiki')
plt.legend()
# NOTE(review): the lone ';' below appears intended to suppress the legend
# object's repr in notebook output; it is not valid standalone Python syntax.
;
# + [markdown] id="KfR0tCwQTyzG"
# ## TSE Complexity
#
# We have sequence of numbers (tokens): $x_0, \ldots, x_{n-1}$
#
# Let's denote
# * $V_i = \{0, 1, \ldots, i\}$
# * $V = V_{n-1}$
# * $\mu_A = \mu_{i_0}, \mu_{i_1}, \ldots, \mu_{i_{k-1}}$, where $A = \{i_0, i_1, \ldots, i_{k-1}\}$ and $i_{j} < i_{j+1}$
#
#
# TSE Complexity is $C(\mu_V) = \sum\limits_{k=1}^{n-1}\frac{k}{n}C^{(k)}(\mu_V)$, where
#
# $C^{(k)}(\mu_V) = \left[\frac{n}{k}\cdot\frac{1}{\binom{n}{k}}\sum\limits_{A\subset V, |A| = k}H(\mu_A)\right] - H(\mu_V)$
#
# We already know, how to calculate $H(\mu_V)$.
#
# How can we calculate left term of given formula. Let's simplify given formula:
#
# $\frac{1}{\binom{n}{k}}\sum\limits_{A\subset V, |A| = k}H(\mu_A)=
# \frac{1}{\binom{n}{k}}\sum\limits_{A\subset V, |A| = k}\left[H(\mu_{i_0}) + H(\mu_{i_1}|\mu_{i_0}) + \ldots + H(\mu_{i_{k-1}}|\mu_{i_{k-2}})\right] =
# \sum\limits_{i=1}^{n-1}H(\mu_i|\mu_{i-1})\alpha_i +
# \sum\limits_{i=1}^{n-1}H(\mu_i)\beta_i +
# H(\mu_0)\gamma$
#
# * $\alpha_i = \frac{\binom{n-2}{k-2}}{\binom{n}{k}} = \frac{k(k-1)}{n(n-1)}$
#
# * $\beta_i = \frac{\binom{n-2}{k-1}}{\binom{n}{k}} = \frac{k(n-k)}{n(n-1)}$
#
# * $\gamma = \frac{\binom{n-1}{k-1}}{\binom{n}{k}} = \frac{k}{n}$
# + id="BAyH5NuPT0Gs"
| notebooks/TSE_ExcessEntropy_calculation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.conv_learner import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
from fastai.imports import *
# Root directory of the Materialist dataset on the training machine.
PATH = '/home/paperspace/data/materialist/'
sz=224  # input image side length for the first training stage
arch=resnext50  # backbone architecture (resnet34 alternative kept below)
#arch=resnet34
bs=16  # batch size
label_csv = f'{PATH}labels_2.csv'
label_df = pd.read_csv(label_csv)
# Number of labelled rows (subtract 1 for the CSV header line).
n = len(list(open(label_csv))) -1
# Hold out ~21% of the rows as a validation split.
val_idxs = get_cv_idxs(n, val_pct=.21)
# Side-on augmentations with mild zoom, normalized for the chosen architecture.
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_csv(PATH, f'{PATH}merged/train', f'{PATH}labels_2.csv', test_name='test',
                                   val_idxs=val_idxs,
                                   tfms=tfms,
                                   bs=bs)
# Pretrained ResNeXt-50 learner with 0.5 dropout on the custom head.
learn = ConvLearner.pretrained(arch, data, precompute=False, ps=.5)
learn.save('materialist_50_0')
# Learning-rate range test to pick an initial LR.
lrf=learn.lr_find()
learn.sched.plot()
lr = 1e-3
learn.fit(lr, 2, cycle_len=1)
learn.save('materialist_50_1_precompture')  # NOTE(review): "precompture" looks like a typo for "precompute"
#learn.freeze()
learn.precompute = False
learn.fit(lr, 3, cycle_len=1)
learn.precompute = False
# NOTE(review): 'materialist_50_2' is never saved in this notebook — presumably
# produced by an earlier session; confirm the checkpoint exists before running.
learn.load('materialist_50_2')
learn.unfreeze()
# Differential learning rates: earlier layer groups get smaller updates.
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 2, cycle_len=1, cycle_mult=2)
learn.load('materialist_50_3_unfrozen')  # NOTE(review): also not saved here — confirm it exists
def get_data(sz):
    """Rebuild the ImageClassifierData at image side length `sz`.

    Used for progressive resizing: the same CSV labels, validation split and
    augmentation scheme, but transforms sized for the requested resolution.
    """
    sized_tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
    return ImageClassifierData.from_csv(
        PATH, f'{PATH}merged/train', f'{PATH}labels_2.csv',
        test_name='test', val_idxs=val_idxs, tfms=sized_tfms, bs=bs)
# Progressive resizing: continue training at 299px inputs.
learn.set_data(get_data(299))
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 2, cycle_len=1, cycle_mult=2)
learn.load('materialist_50_3_resized')  # NOTE(review): checkpoint not saved in this notebook — confirm
learn.freeze()
learn.fit(1e-2, 3, cycle_len=1)
learn.save('materialist_299')
learn.fit(1e-2, 3, cycle_len=1, cycle_mult=2)
learn.load('materialist_299_2')  # NOTE(review): not saved here — confirm
# Test-time augmentation over the test set; average across augmentations.
log_preds,y = learn.TTA(n_aug=4, is_test=True)
preds = np.mean(log_preds, 0)
probs = np.mean(np.exp(log_preds), axis=0)
indices = probs.argmax(axis=1)  # predicted class index per test image
# NOTE(review): for the test set `y` is presumably dummy labels, so this
# accuracy value is not meaningful here — verify.
accuracy_np(probs, y)
preds = probs.argmax(axis=1)  # overwrites the averaged log-preds above with class indices
indexed_preds = [data.classes[pred] for pred in preds]
def trim_jpeg(filename):
    """Return the substring between the first '/' and the first '.'.

    e.g. 'test/1234.jpg' -> '1234' (the bare file stem used as an id).
    """
    start = filename.index('/') + 1
    stop = filename.index('.')
    return filename[start:stop]
def get_filename_indices():
    """Test-set file names as integer ids (e.g. 'test/123.jpg' -> 123)."""
    return [int(trim_jpeg(fname)) for fname in data.test_ds.fnames]
def get_filenames():
    """Test-set file names with the directory prefix and extension stripped."""
    return [trim_jpeg(fname) for fname in data.test_ds.fnames]
def get_missing(indices):
    """Return the ids in [1, 12800) absent from `indices`, in increasing order.

    Perf fix: membership is now tested against a set (O(1) per id) instead of
    scanning the input list for each of the ~12.8k candidate ids.

    NOTE(review): the upper bound 12800 excludes id 12800 itself — confirm the
    expected id range of the competition test set.
    """
    present = set(indices)
    return [x for x in range(1, 12800) if x not in present]
missing_indices = get_missing(get_filename_indices())
filenames = get_filenames()
missing_indices
# Write the submission: one (id, predicted-class) row per test image, then pad
# every id with no image/prediction with class 1 so all expected ids appear.
with open(f'{PATH}large_submission.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(('id', 'predicted'))
    writer.writerows(zip(filenames, indexed_preds))
    for m in missing_indices:
        writer.writerow((m, 1))
# NOTE(review): this duplicates the get_missing defined earlier in the notebook.
def get_missing(indices):
    """Return the ids in [1, 12800) absent from `indices`, in increasing order.

    Perf fix: membership is now tested against a set (O(1) per id) instead of
    scanning the input list for each of the ~12.8k candidate ids.
    """
    present = set(indices)
    return [x for x in range(1, 12800) if x not in present]
def make_submission(preds, indices):
    """Write {PATH}submission.csv with (id, predicted) rows, padding every id
    in [1, 12800) that has no prediction with class 1.

    Bug fix: the second parameter was misspelled `incidces`, so the body
    silently read the *global* `indices` instead of the argument. The
    parameter is now named `indices` and is actually used. (The only call
    site passes positionally, so callers are unaffected.)

    Args:
        preds: per-image predicted class labels, aligned with `indices`
        indices: per-image integer ids
    """
    missing = get_missing(indices)
    with open(f'{PATH}submission.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(('id', 'predicted'))
        writer.writerows(zip(indices, preds))
        for m in missing:
            writer.writerow((m, 1))
# `indices` are the argmax class ids computed from the TTA probabilities above.
make_submission(indexed_preds, indices)
| courses/dl1/Materialist-50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic Model of Natural Selection
# This Python notebook is the interactive version of the ["Basic Model of Natural Selection" Walk in the Forest post](https://walkintheforest.com/Content/Posts/Basic+Model+of+Natural+Selection).
# ## Table of Contents
# 1. [Module Imports](#module-imports)
# 2. [Defining the Fitness Functions and Variable](#def-fitness-variable)
# - [Plotting the Fitness Function](#plot-fit-functions)
# 3. [Building the Model](#build-model)
# - [Visualization Function](#visualization-func)
# - [Overall Model Function](#overall-model-func)
# - [No Selection Conditions](#no-select-conditions)
# - [Basic Selection](#basic-selection)
# - [Basic Selection (Medium-Complexity Environment)](#basic-selection-medium)
# - [Basic Selection (High-Complexity Environment)](#basic-selection-high)
# - [Improved Selection](#improved-selection)
# - [Plotting Averages of Multiple Runs](#improved-selection-averages)
# <a id='module-imports'></a>
# ## Module Imports
# +
import numpy as np
import math
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Import local python style
# Make the repository root importable so the local helper modules resolve.
import os, sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
import relative_pathing
# Imported for side effects — presumably registers the "walkintheforest-dark"
# Plotly template used below; confirm in plotly_styles.
import plotly_styles.walkintheforest_styles
# -
# <a id='def-fitness-variable'></a>
# ## Defining the Fitness Functions and Variable
# We first start by defining our fitness functions and range for our variable, `var1`.
# +
### Define grid of values for var1 ###
# 1000 evenly spaced points spanning [0, 4*pi] (two full sine periods).
var1 = np.linspace(0,4*math.pi, 1000)

### Define our three fitness functions ###
# Simple Horizontal Line
# Constant fitness of 2 everywhere: no selection gradient at all.
simple_func = [2 for x in var1]
# Simple Periodic Function
def med_func(x):
    """Medium-complexity landscape: a single sine wave shifted into [0, 4]."""
    return 2 * np.sin(x) + 2
# Complex Periodic Function
def high_func(x):
    """High-complexity landscape: two interfering periodic terms.

    Produces several peaks of different heights (local and global maxima).
    """
    return np.sin(x) + 2 * np.sin(1.2 * x + 1) ** 2 + 1
# -
# <a id='plot-fit-functions'></a>
# ### Plotting the Fitness Functions
# Before we start building our model, let's quickly plot all of our fitness functions.
# +
## Creating the overall figure
# Three side-by-side panels, one per fitness landscape, sharing the y-axis.
landscape_plots = make_subplots(rows=1, cols=3,
                               horizontal_spacing=0.08,
                               vertical_spacing=0.08,
                               subplot_titles=("<b>Low</b>", "<b>Medium</b>", "<b>High</b>"),
                               shared_yaxes=True)

## Adding each environment to the figure
landscape_plots.add_trace(go.Scatter(x=var1, y=simple_func, name="Low"), row=1, col=1)
landscape_plots.add_trace(go.Scatter(x=var1, y=med_func(var1), name="Medium"), row=1, col=2)
landscape_plots.add_trace(go.Scatter(x=var1, y=high_func(var1), name="High"), row=1,col=3)

## Clean up Axes
landscape_plots.update_yaxes(ticks="outside", row=1, col=1, range = [0,4])
landscape_plots.update_xaxes(ticks="outside")

## Clean up figure
landscape_plots.update_layout(title="Fitness Landscapes of Different Complexity",
                             template="walkintheforest-dark",
                             title_x = 0.5,
                             autosize=True,
                             showlegend=False)
# -
# <a id='build-model'></a>
# ## Building the Model
# While we will be building the model step-by-step and exploring different examples and levels of complexity, we can describe the model and its visualization as a set of six steps.
#
# 1. Initialize the starting variant
# 2. Use our generation algorithm to create the next generation
# 3. Evaluate the next generation using our fitness function
# 4. Determine the next generation
# 5. Repeat Steps 1-4 as many times as designated.
# 6. Visualize the model using a graphing library (Plotly)
#
# We can further subdivide the model into two parts: data generation (Steps 1-5) and visualization (Step 6).
# <a id='visualization-func'></a>
# ### Visualization Function
# While unconventional, it will be easiest to introduce the general visualization function before building our model. We will be exploring multiple examples and levels of complexity during data generation, and visualizing each example will be key to understanding the process. To visualize our model, we will use the [Plotly](https://plotly.com/python/) library. This function will take in data from a single model run and overlay it on a static graph of the fitness function one generation at a time.
#
# **Note: Understanding the animating code is not necessary to understanding the model. It just provides a unique window into the process**
def make_plot(num_gens, var1, fit_func, gen_var1, gen_fitness, title):
    """ Create animations of single runs of the natural selection model

    Top subplot: the fitness landscape with a marker for the newest generation
    and a trail of all previous generations. Bottom subplot: fitness as a
    function of generation number. One animation frame per generation.

    Args:
        num_gens (int): Number of generations to run
        var1 (list): Grid of values for var1 for landscape plotting
        fit_func (function): Fitness function
        gen_var1 (list): List of var1 values for each generation
        gen_fitness (list): List of fitness values for each generation
        title (string): Title for the model to use for graphing
    Returns:
        fig (Plotly Figure): Final animated figure
    """
    ## Make initial subplot figure
    fig = make_subplots(rows=2, cols=1,
                       subplot_titles=("<b>Fitness Landscape</b>", "<b>Fitness over Generations</b>"))

    ## Calculate landscape
    fit_landscape = fit_func(var1)
    max_fit = max(fit_landscape)
    min_fit = min(fit_landscape)

    # Add holding traces for animation frames
    # Trace 0 (the landscape) is static; traces 1-3 are placeholders whose data
    # is rewritten by each animation frame below.
    ## Fitness Landscape
    fig.add_trace(go.Scatter(x=var1,
                             y=fit_landscape),
                  row=1, col=1)
    ## Newest Generation
    fig.add_trace(go.Scatter(x=[gen_var1[0]],
                             y=[gen_fitness[0]],
                             mode="markers",
                             marker=dict(size=15)),
                  row=1,col=1)
    ## Previous Generation
    # Starts empty; frames fill it with the trail of generations 0..k.
    fig.add_trace(go.Scatter(mode="markers", line_color="#ff7f0e"),
                  row=1,col=1)
    ## Fitness for each Generation (second subplot)
    fig.add_trace(go.Scatter(x=[0],
                             y=[gen_fitness[0]],
                             mode="markers+lines"),
                  row=2, col=1)

    # Update subplot axies
    fig.update_xaxes(title= "var1", row=1,col=1)
    fig.update_yaxes(range=[min_fit-.2, max_fit+.2],title="Fitness", row=1,col=1)
    fig.update_xaxes(range=[0,num_gens],title="Generation", row=2,col=1)
    fig.update_yaxes(range=[min_fit-.2, max_fit+.2], title="Fitness", row=2,col=1)

    # Create animation frames from data
    # Frame k: newest marker at generation k, trail of generations 0..k, and
    # the fitness-vs-generation curve up to k; `traces` maps the three data
    # entries onto placeholder traces 1-3.
    frames = [dict(
        name = k,
        data = [go.Scatter(x=[gen_var1[k]], y=[gen_fitness[k]]),
                go.Scatter(x=gen_var1[:(k+1)], y=gen_fitness[:(k+1)]),
                go.Scatter(x=list(range(k+1)),y=gen_fitness[:(k+1)])
                ],
        traces = [1,2,3]
        ) for k in range(num_gens)]

    # Create Play and Pause Buttons
    updatemenus = [
        {
        "buttons" : [
            {
            "label" : "Play",
            "method" : "animate",
            "args" : [None, {"fromcurrent": True}]
            },
            {
            "label" : "Pause",
            "method": "animate",
            "args": [[None], {"frame": {"duration": 0, "redraw": False},
                              "mode": "immediate",
                              "transition": {"duration": 0}}]
            }
        ],
        "type" : "buttons",
        "showactive": False,
        "direction" : "right",
        "xanchor" : "left",
        "yanchor" : "top",
        "x" : 0,
        "y" : 0,
        "pad" : {"r":10, "t":30}
        }
    ]

    # Final figure creation and updates
    fig.update(frames=frames)
    fig.update_yaxes(ticks="outside")
    fig.update_xaxes(ticks="outside")
    fig.update_layout(updatemenus=updatemenus, showlegend=False,
                      title=title, autosize=True,
                      template="walkintheforest-dark",
                      title_x = 0.5)
    return(fig)
# <a id='overall-model-func'></a>
# ### Overall Model Function
# Because we will be running the same general model with different starting data, environment functions, and selection functions, let's define a general-purpose model-running function that focuses only on *data generation*.
def nat_sel_model(num_gens, std_dev, var1_start, state, fit_func, sel_func):
    """Run one simulation: seed generation 0, then let the selection scheme
    fill in the remaining generations.

    Args:
        num_gens (int): number of generations to simulate
        std_dev (float): std-dev of the Gaussian mutation step
        var1_start (float): var1 value of the founding generation
        state (RandomState): numpy RNG for reproducible runs
        fit_func (Function): maps var1 -> fitness
        sel_func (Function): selection algorithm; fills the arrays in place

    Returns:
        tuple: (var1 values per generation, fitness values per generation)
    """
    generations, fitnesses = init_gens(num_gens, var1_start, fit_func)
    # The selection function mutates both arrays in place from index 1 on.
    sel_func(num_gens, generations, fitnesses, fit_func, std_dev, state)
    return generations, fitnesses
# <a id='no-select-conditions'></a>
# ### No Selection Conditions
# Before we start creating and selecting new variants, we first need a way of storing the data for visualization and creating the initial generation. This represents Step 1 from our model outline.
def init_gens(num_gens, var1_start, fit_func):
    """Allocate per-generation storage and populate generation 0.

    Args:
        num_gens (int): number of generations to simulate
        var1_start (float): var1 value of the founding generation
        fit_func (Function): maps var1 -> fitness

    Returns:
        tuple: (var1 array, fitness array), both length num_gens, with only
        index 0 filled in (the rest remain zero until selection runs).
    """
    variants = np.zeros(num_gens)
    fitnesses = np.zeros(num_gens)
    variants[0] = var1_start
    fitnesses[0] = fit_func(variants[0])
    return variants, fitnesses
# Now that we have a function that can prepare our data storage and the first generation, let's create an additional function to help generate the next potential variant. This represents Step 2 from our model outline.
def repro_alg(prev_var1, fit_func, std_dev, state):
    """Propose the next variant: a Gaussian mutation of the parent's var1,
    together with its fitness.

    Args:
        prev_var1 (float): parent generation's var1
        fit_func (function): maps var1 -> fitness
        std_dev (float): std-dev of the Gaussian mutation step
        state (RandomState): numpy RNG for reproducible runs

    Returns:
        tuple: (proposed var1, fitness of the proposed var1)
    """
    candidate = state.normal(prev_var1, std_dev)
    return candidate, fit_func(candidate)
# Finally, we define the method we will use to determine the next generation. For this initial example, we won't utilize any selection conditions—we will accept *any new variant*, regardless of whether it has a higher or lower fitness than the previous variant. This represents Step 3 from our model outline.
def no_selection(num_gens, var1_list, fit_list, fit_func, std_dev, state):
    """Fill generations 1..num_gens-1 accepting every proposed variant.

    With no acceptance criterion the trajectory is pure random drift around
    the starting value. Both arrays are mutated in place from index 1.

    Args:
        num_gens (int): number of generations to simulate
        var1_list (list): per-generation var1 values; index 0 pre-filled
        fit_list (list): per-generation fitness values; index 0 pre-filled
        fit_func (Function): maps var1 -> fitness
        std_dev (float): std-dev of the Gaussian mutation step
        state (RandomState): numpy RNG for reproducible runs
    """
    for gen in range(1, num_gens):
        var1_list[gen], fit_list[gen] = repro_alg(var1_list[gen - 1], fit_func, std_dev, state)
# Now that we have defined functions for all of the steps in the model, we can initiate a single run.
# +
### Define the model conditions and RNG Seed
ex1_num_gens = 50
ex1_std_dev = 0.3  # mutation step size (std-dev of the Gaussian proposal)
ex1_start = 4
ex1_state = np.random.RandomState(123)  # fixed seed for reproducibility
# Generate the data to run the model
ex1_var1, ex1_fit = nat_sel_model(ex1_num_gens, ex1_std_dev,
                               ex1_start, ex1_state, med_func, no_selection)
# Generate plot
# With no selection pressure the trajectory is a random walk around the start.
no_sel_med_env_plot = make_plot(ex1_num_gens, var1, med_func, ex1_var1, ex1_fit,
                     title="No Selection (Medium Complexity)")
no_sel_med_env_plot.show()
# -
# <a id='basic-selection'></a>
# ### Basic Selection
# Normally, we would expect to see a trend towards and stabilizing at a maximum. However, in our first implementation, we only see a random distribution of points near the starting value. To implement this change, we are going to add a new condition during data generation process. Instead of keeping any variant regardless of its fitness score, we will **keep a variant only if it improves on the current generation**. If it doesn't improve, then we'll keep the current generation and move on to another round.
def basic_selection(num_gens, var1_list, fit_list, fit_func, std_dev, state):
    """Greedy hill-climbing selection: a proposed variant is kept only if its
    fitness is at least the current generation's; otherwise the current
    generation is carried forward unchanged.

    (The original docstring said "Accepts every variant" — that describes
    no_selection, not this scheme.) Both arrays are mutated in place from
    index 1.

    Args:
        num_gens (int): number of generations to simulate
        var1_list (list): per-generation var1 values; index 0 pre-filled
        fit_list (list): per-generation fitness values; index 0 pre-filled
        fit_func (Function): maps var1 -> fitness
        std_dev (float): std-dev of the Gaussian mutation step
        state (RandomState): numpy RNG for reproducible runs
    """
    for gen in range(1, num_gens):
        cand_var1, cand_fit = repro_alg(var1_list[gen - 1], fit_func, std_dev, state)
        if cand_fit < fit_list[gen - 1]:
            # Reject: copy the previous generation forward.
            var1_list[gen] = var1_list[gen - 1]
            fit_list[gen] = fit_list[gen - 1]
        else:
            var1_list[gen] = cand_var1
            fit_list[gen] = cand_fit
# <a id='basic-selection-medium'></a>
# #### Basic Selection (Medium-Complexity Environment)
# To start, let's implement this new process using the same medium-complexity environment used in the previous implementation.
# +
### Define the model conditions and RNG Seed
ex2_num_gens = 30
ex2_std_dev = 0.3
ex2_start = 5
ex2_state = np.random.RandomState(123)  # fixed seed for reproducibility
# Generate the data to run the model
ex2_var1, ex2_fit = nat_sel_model(ex2_num_gens, ex2_std_dev,
                               ex2_start, ex2_state, med_func, basic_selection)
# Generate plot
# Greedy selection: fitness is monotonically non-decreasing over generations.
basic_sel_med_env_plot = make_plot(ex2_num_gens, var1, med_func, ex2_var1, ex2_fit,
                     title="Basic Selection (Medium Complexity)")
basic_sel_med_env_plot.show()
# -
# <a id='basic-selection-high'></a>
# #### Basic Selection (High-Complexity Environment)
# Now, let's apply this method to the higher complexity environment, the rightmost curve from Figure 1.
# +
### Define the model conditions and RNG Seed
ex3_num_gens = 30
ex3_std_dev = 0.5
ex3_start = 4.8
ex3_state = np.random.RandomState(123)  # fixed seed for reproducibility

# Generate the data to run the model
ex3_var1, ex3_fit = nat_sel_model(ex3_num_gens, ex3_std_dev,
                               ex3_start, ex3_state, high_func, basic_selection)

# Generate Plot
# Fix: this run uses high_func, but the title was copy-pasted as
# "Basic Selection (Medium Complexity)" from the previous cell.
basic_sel_high_env_plot = make_plot(ex3_num_gens, var1, high_func, ex3_var1, ex3_fit,
                     title="Basic Selection (High Complexity)")
basic_sel_high_env_plot.show()
# -
# Now that we have a more complex landscape, you may see a potential problem in the current implementation: the trend gets stuck at *any peak* even if there are higher peaks around it. We call these smaller peaks "local maxima," since they represent a high point in a small region, but are not necessarily the highest peak in the entire landscape. Our model will trend upwards to the nearest peak, but cannot jump across a valley because that would require a temporary drop in fitness.
# <a id='improved-selection'></a>
# ### Improved Selection
# Since our basic selection conditions didn't produce the behavior we are aiming for, we will add an additional step (a simplified Metropolis-Hastings algorithm) to accept variants with lower fitness with some probability defined by the magnitude of the loss in fitness.
def improved_selection(num_gens, var1_list, fit_list, fit_func, std_dev, state):
    """Metropolis-style selection: improvements are always kept, and a worse
    variant is kept with probability exp(-|delta_f| / 0.5), which lets a run
    cross fitness valleys instead of stalling on a local maximum.

    (The original docstring said "Generates data with no selection" — a
    copy-paste slip from no_selection.) Both arrays are mutated in place
    from index 1.

    Args:
        num_gens (int): number of generations to simulate
        var1_list (list): per-generation var1 values; index 0 pre-filled
        fit_list (list): per-generation fitness values; index 0 pre-filled
        fit_func (Function): maps var1 -> fitness
        std_dev (float): std-dev of the Gaussian mutation step
        state (RandomState): numpy RNG for reproducible runs
    """
    prob_scale = 0.5  # rescales delta_f; larger => downhill moves accepted more often
    for gen in range(1, num_gens):
        cand_var1, cand_fit = repro_alg(var1_list[gen - 1], fit_func, std_dev, state)
        delta_f = cand_fit - fit_list[gen - 1]
        accept = delta_f >= 0
        if not accept:
            # Downhill move: accept with probability shrinking in the loss.
            # (The uniform draw happens only on downhill moves, exactly as in
            # the original, so RNG streams stay identical.)
            threshold = np.exp(-abs(delta_f)/prob_scale)
            accept = state.uniform(0,1) < threshold
        if accept:
            var1_list[gen] = cand_var1
            fit_list[gen] = cand_fit
        else:
            var1_list[gen] = var1_list[gen - 1]
            fit_list[gen] = fit_list[gen - 1]
# +
### Initialize data storage
ex4_num_gens = 75
ex4_std_dev = 0.5
ex4_start = 4.9
ex4_state = np.random.RandomState(4) # Sets random seed for reproducibility
ex4_var1, ex4_fit = nat_sel_model(ex4_num_gens, ex4_std_dev,
                               ex4_start, ex4_state, high_func, improved_selection)
# Metropolis-style acceptance lets the run cross valleys toward higher peaks.
improv_sel_high_env_plot = make_plot(ex4_num_gens, var1, high_func,
                                     ex4_var1, ex4_fit,
                                     title="Improved Selection (High Complexity)")
improv_sel_high_env_plot.show()
# -
# <a id='improved-selection-averages'></a>
# #### Plotting Averages of Multiple Runs
# In all of the previous examples, we have visualized a single run of our model for a finite number of generations. However, it's important to understand the average behavior of model across **multiple runs**. For any single run, the exact behavior may not converge at the highest peak, at least within the specified number of generations.
# +
# Prepare the primary figure
average_fig = go.Figure()

# Setup general starting conditions
ex5_num_gens = 75
ex5_std_dev = 0.5
ex5_start = 4.9
ex5_state = np.random.RandomState(4)  # one shared RNG across all runs
num_runs = 100

# Running sum of fitness per generation; divided by num_runs after the loop.
# (Fix: the original also allocated an `avg_var1` accumulator that was never
# used — removed.)
avg_fit = np.zeros(ex5_num_gens)

# Run the model multiple times
for i in range(num_runs):
    # Generate Model Data
    ex5_var1, ex5_fit = nat_sel_model(ex5_num_gens, ex5_std_dev,
                                  ex5_start, ex5_state, high_func, improved_selection)
    # Store to calculate averages for each generation
    avg_fit = np.add(avg_fit, ex5_fit)
    # Create a faint trace for this individual run
    average_fig.add_trace(go.Scatter(x=list(range(ex5_num_gens)),
                                     y=ex5_fit, mode="lines",
                                     line={"color": 'rgba(200, 200, 200, 0.08)'}))

# Calculate Average
avg_fit = avg_fit/num_runs

# Plot average over individual runs
average_fig.add_trace(go.Scatter(x=list(range(ex5_num_gens)),y=avg_fit,
                                 mode="lines", name="Average", line_width=4,
                                 line_color="#d62728"))

# Final layout changes
average_fig.update_xaxes(title="Generation")
average_fig.update_yaxes(title="Fitness", range=[0,4])
average_fig.update_layout(template="walkintheforest-dark",
                          title="100-Run Average with Improved Selection Model",
                          showlegend=False,
                          title_x = 0.5)
# -
# The averaged plot illustrates that the model *does*, on average, trend towards improved variants over time. In addition, there are three clear convergent points (~2.5, ~3.1, and ~3.9) that correspond to three of the peaks in the landscape.
| code/notebooks/basic-model-natural-selection/basic-model-natural-selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ZeiDFNocItQE"
# # install wikidataintegrator
# If the library is not installed yet, run this step
# + id="auWqfrrBIlMC"
# %%capture
# !pip install wikidataintegrator
# + [markdown] id="mmPuk0NQJAmF"
# # Load the libraries
#
# + id="5hZjSO8IJF04"
from wikidataintegrator import wdi_core, wdi_login
from getpass import getpass
# + [markdown] id="utD92d4XJU61"
# # Login to Wikidata
# + id="y9qOYOhnJOfs" colab={"base_uri": "https://localhost:8080/"} outputId="9cd54276-c9ff-403e-ea57-e8968d382ddb"
# Prompt interactively so credentials are never stored in the notebook.
WBUSER = getpass(prompt="username:")
WBPASS = getpass(prompt='Enter your password: ')
# Open an authenticated Wikidata session for subsequent writes.
login = wdi_login.WDLogin(WBUSER, WBPASS)
| notebooks/LoginWikidata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Write Component from YAML netlist
#
#
# Note that you define the connections as `instance_source.port -> instance_destination.port` so the order is important and therefore you can only change the position of the `instance_destination`
# +
import pp
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
arm_bot:
component: mzi_arm
placements:
arm_bot:
mirror: True
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
c = pp.component_from_yaml(netlist)
pp.show(c)
pp.plotgds(c)
# -
# ## Adjust component settings
#
# We can reduce the length of each of the arms
# +
import pp
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
settings:
L0: 0
DL: 0
arm_bot:
component: mzi_arm
settings:
L0: 0
DL: 10
placements:
arm_bot:
mirror: True
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
c = pp.component_from_yaml(netlist)
pp.show(c)
pp.plotgds(c)
# -
# ## Swap components
#
# We can also use 2x2 couplers instead of 1x2 MMIs
# +
import pp
netlist = """
instances:
CP1:
component: mmi2x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi2x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
settings:
L0: 0
DL: 0
arm_bot:
component: mzi_arm
settings:
L0: 0
DL: 10
placements:
arm_bot:
mirror: True
ports:
W0: CP1,W0
E0: CP2,W0
W1: CP1,W1
E1: CP2,W1
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
c = pp.component_from_yaml(netlist)
pp.show(c)
pp.plotgds(c)
# -
# ## Exposing more ports
#
# We can also expose more ports, such as the electrical ports, so we can route electrical signals to the circuits.
# +
import pp
netlist = """
instances:
CP1:
component: mmi2x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi2x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
settings:
L0: 0
DL: 0
arm_bot:
component: mzi_arm
settings:
L0: 0
DL: 10
placements:
arm_bot:
mirror: True
ports:
W0: CP1,W0
E0: CP2,W0
W1: CP1,W1
E1: CP2,W1
E_TOP_0: arm_top,E_0
E_TOP_1: arm_top,E_1
E_TOP_2: arm_top,E_2
E_TOP_3: arm_top,E_3
E_BOT_0: arm_bot,E_0
E_BOT_1: arm_bot,E_1
E_BOT_2: arm_bot,E_2
E_BOT_3: arm_bot,E_3
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
c = pp.component_from_yaml(netlist)
pp.show(c)
pp.plotgds(c)
# -
# Inspect the re-exported optical + electrical ports of the component.
c.ports
# ## Custom factories
#
# You can leverage netlist defined components to define more complex circuits
# +
import pp
@pp.cell
def mzi_custom(delta_length=0):
    """MZI factory built from a YAML netlist: two mmi2x2 couplers joined by
    two arms whose total path-length difference is `delta_length`
    (each arm contributes DL = delta_length/2, one mirrored)."""
    netlist = f"""
instances:
    CP1:
        component: mmi2x2
        settings:
            width_mmi: 4.5
            length_mmi: 10
    CP2:
        component: mmi2x2
        settings:
            width_mmi: 4.5
            length_mmi: 5
    arm_top:
        component: mzi_arm
        settings:
            L0: 0
            DL: 0
            with_elec_connections: False
    arm_bot:
        component: mzi_arm
        settings:
            L0: 0
            DL: {delta_length/2}
            with_elec_connections: False
placements:
    arm_bot:
        mirror: True
ports:
    W0: CP1,W0
    E0: CP2,W0
    W1: CP1,W1
    E1: CP2,W1
connections:
    arm_bot,W0: CP1,E0
    arm_top,W0: CP1,E1
    CP2,E0: arm_bot,E0
    CP2,E1: arm_top,E0
"""
    return pp.component_from_yaml(netlist)
# `cache` is handled by the @pp.cell decorator, not the function signature.
c = mzi_custom(delta_length=10, cache=False)
pp.show(c)
pp.plotgds(c)
# -
# Confirm the four re-exported optical ports (W0/W1/E0/E1).
c.ports
# +
import pp
@pp.cell
def mzi_custom(delta_length):
    """Equivalent MZI built directly with the pp.c.mzi factory (no YAML).

    NOTE(review): this redefines the YAML-based `mzi_custom` above.
    """
    return pp.c.mzi(DL=delta_length/2, coupler=pp.c.mmi2x2)

# Register the factory so YAML netlists can refer to "mzi_custom" by name.
pp.c.component_factory.update(dict(mzi_custom=mzi_custom))
c = pp.c.component_factory['mzi_custom'](delta_length=0, cache=False)
pp.plotgds(c)
pp.show(c)
print(c.ports.keys())
# +
import pp
@pp.cell
def mzi_filter(delta_lengths=(20, 40, 60), component_factory=pp.c.component_factory):
    """Cascade of three MZI stages with the given path-length differences.

    Stages 1 and 3 use the registered "mzi_custom" factory; the middle stage
    is spelled out as two explicit arms (arm_top1/arm_bot1, one mirrored).
    """
    sample = f"""
instances:
    mzi1:
        component: mzi_custom
        settings:
            delta_length: {delta_lengths[0]}
    arm_top1:
        component: mzi_arm
        settings:
            L0: 0
            DL: 0
            with_elec_connections: False
    arm_bot1:
        component: mzi_arm
        settings:
            L0: 0
            DL: {delta_lengths[1]/2}
            with_elec_connections: False
    mzi3:
        component: mzi_custom
        settings:
            delta_length: {delta_lengths[2]}
placements:
    arm_bot1:
        mirror: True
ports:
    W0: mzi1,W0
    E0: mzi3,E0
    W1: mzi1,W1
    E1: mzi3,E1
connections:
    arm_bot1,W0: mzi1,E0
    arm_top1,W0: mzi1,E1
    mzi3,W0: arm_bot1,E0
    mzi3,W1: arm_top1,E0
"""
    c = pp.component_from_yaml(sample, component_factory=component_factory)
    return c
# Build the three-stage filter with default delta_lengths and preview it.
c = mzi_filter(cache=False)
pp.show(c)
pp.plotgds(c)
# -
# Inspect the connectivity graph / netlist of a stock MZI component.
c = pp.c.mzi()
c.plot_netlist()
n = c.get_netlist()
print(c.get_netlist_yaml())
| notebooks/11_YAML_netlist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JacobFV/AGI/blob/master/PGI0_0_0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="NM21gjfCTlAG" cellView="form" outputId="3458a67d-223c-409b-e339-572de805bbf1"
#@title imports
# %tensorflow_version 2.x
import math
import tqdm
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# !pip install -q tsalib
import tsalib
import networkx
# !pip install -q jplotlib
import jplotlib as jpl
# !pip install -q livelossplot
from livelossplot import PlotLossesKeras
import tensorflow as tf
keras = tf.keras
tfkl = keras.layers
import tensorflow_probability as tfp
tfd = tfp.distributions
tfpl = tfp.layers
tfb = tfp.bijectors
# + [markdown] id="rf-c6U93WoX_"
# ## Simple Data
# + colab={"base_uri": "https://localhost:8080/"} id="9uP1gmiRWoKy" outputId="24868259-9b7a-4d13-bc91-2d40c942dcbd"
(mnist_x_train, mnist_y_train), (mnist_x_test, mnist_y_test) = keras.datasets.mnist.load_data()
mnist_x_train, mnist_x_test = mnist_x_train/255., mnist_x_test/255.
mnist_size = mnist_x_train.shape[1:]
mnist_classes = 10
mnist_size, mnist_classes
# + colab={"base_uri": "https://localhost:8080/", "height": 574} id="eSXJ3ckJXKOz" outputId="c9b77777-9e62-4b14-a200-7ee4fd831731"
gp = jpl.GridPlot()
for i in range(100):
gp.imshow(mnist_x_train[i])
gp.show()
# + [markdown] id="IdQhKsWmT4KP"
# ## linear predictor 0 X -> Y
#
# Experiment not performed.
#
# Reason: I now realize how much menial work keras does for you
# + id="8ncRHyoKWWVa"
# Linear predictor 0: flatten the input image and map it to class scores.
lp0 = keras.Sequential([
    tfkl.Flatten(),
    # NOTE(review): 'relu' makes the outputs non-negative even though they are
    # consumed as logits by a from_logits loss — confirm this is intended.
    tfkl.Dense(mnist_classes, 'relu')
])
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="wdUWOebdciVx" outputId="811e547b-11a5-48a1-8b9e-4ff0868428f7"
plt.imshow(mnist_y_train[:100, None])
# + colab={"base_uri": "https://localhost:8080/"} id="aEBIXicPcyUN" outputId="c87db973-36ba-48b0-878f-692f5b3bcc21"
mnist_y_train
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="KYgOVl0WcFxn" outputId="e847fba9-6a4b-4ec5-fd52-c472b48133f0"
sns.distplot(mnist_y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="k0VmaZRRY8JQ" outputId="7d0349df-8669-436d-f0a8-865d71ba51cf"
def train_lp(x, y, epochs, lp):
    """Full-batch SGD training loop for the linear predictor `lp`.

    x, y: training inputs and integer class labels (used as one big batch).
    epochs: number of gradient steps to take.
    lp: the Keras model optimized in place.
    """
    optimizer = tf.optimizers.SGD(learning_rate=0.01)
    for epoch in tf.range(epochs):
        with tf.GradientTape() as tape:
            # Sparse categorical cross-entropy on logits (from_logits=True)
            loss = keras.losses.SparseCategoricalCrossentropy(True)(y, lp(x))
        gradients = tape.gradient(loss, lp.trainable_variables)
        optimizer.apply_gradients(zip(gradients, lp.trainable_variables))
        tf.print(f'Epoch {epoch}: loss {loss}')


train_lp(mnist_x_train, mnist_y_train, 100, lp0)
# + [markdown] id="awEyslVLc1zt"
# 2.2 ≈ -log(0.1) (init)
# 1.47 ≈ -log(0.4) (convergence)
# + [markdown] id="fZyjlb_9dBAu"
# ## linear predictor 1 X -> Y
# + colab={"base_uri": "https://localhost:8080/", "height": 655} id="_WDjQbWMdHHv" outputId="27e4bd4f-4435-447a-dda2-0897d60e1853"
# Linear predictor 1: same architecture as lp0, trained via keras.Model.fit.
lp1 = keras.Sequential([
    tfkl.Flatten(),
    # NOTE(review): 'relu' clips the outputs to >= 0 before the from_logits
    # loss below; a linear (default) activation is the usual choice — confirm.
    tfkl.Dense(mnist_classes, 'relu')
])
# Fix: the loss class is spelled `SparseCategoricalCrossentropy` (lowercase
# "entropy"); `SparseCategoricalCrossEntropy` raises AttributeError.
# The positional `True` is from_logits=True, matching train_lp above.
lp1.compile('sgd', loss=keras.losses.SparseCategoricalCrossentropy(True))
lp1.fit(mnist_x_train, mnist_y_train,
        epochs=100,
        verbose=1,
        callbacks=[PlotLossesKeras()],
        validation_data=(mnist_x_test, mnist_y_test))
| .ipynb_checkpoints/PGI-0-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Build a Basic ML Model for Text Classification
#
# - In this notebook, you'll learn how to implement a text classification task using machine learning.
# - You'll learn to create basic NLP-based features from the text and you'll then test the model on the test data set to evaluate its performance.
#
# To make things interesting, the task is to build a machine learning model to **classify** whether a particular tweet is **hate speech** or **not**. I'll explain more as you proceed further, so let's start without much ado!
#
# ### Table of Contents
#
# 1. About the Dataset
# 2. Text Cleaning
# 3. Feature Engineering
# 4. Train an ML model for Text Classification
# 5. Evaluate the ML model
# 6. Conclusion
# ### 1. About the Dataset
#
# The dataset that you are going to use is of **Detecting Hate Speech** in people's tweets. You can download it from [here.](http://trainings.analyticsvidhya.com/asset-v1:AnalyticsVidhya+NLP101+2018_T1+type@asset+block@final_dataset_basicmlmodel.csv)
# Let's load the dataset using pandas and have a quick look at some sample tweets.
#
# +
#Load the dataset
import pandas as pd
dataset = pd.read_csv('final_dataset_basicmlmodel.csv')
dataset.head()
# -
# **Things to note**
# - **label** is the column that contains the target variable or the value that has to be predicted. 1 means it's a hate speech and 0 means it is not.
# - **tweet** is the column that contains the text of the tweet. This is the main data on which NLP techniques will be applied.
#
# Let's have a close look at some of the tweets.
for index, tweet in enumerate(dataset["tweet"][10:15]):
print(index+1,".",tweet)
# **Note :- Noise present in Tweets**
#
# - If you look closely, you'll see that there are many hashtags present in the tweets of the form `#` symbol followed by text. We particularly don't need the `#` symbol so we will clean it out.
# - Also, there are strange symbols like `â` and `ð` in tweet 4. These are actually `unicode` characters present in our dataset that we need to get rid of because they don't add anything meaningful.
# - There are also numerals and percentages .
#
# ### 2. Text Cleaning
#
# Let's clean up the noise in our dataset.
# +
import re
#Clean text from noise
def clean_text(text):
    """Lowercase a tweet, keeping only letters, apostrophes and spaces."""
    # Replace every character that is not a letter or apostrophe with a space
    letters_only = re.sub(r'[^a-zA-Z\']', ' ', text)
    # Drop any remaining non-ASCII characters (defensive: the step above
    # already turned them into spaces)
    ascii_only = re.sub(r'[^\x00-\x7F]+', '', letters_only)
    # Lowercase for consistency
    return ascii_only.lower()
# -
dataset['clean_text'] = dataset.tweet.apply(lambda x: clean_text(x))
# ### 3. Feature Engineering
#
# - Feature engineering is the science (and art) of extracting more information from existing data. You are not adding any new data here, but you are actually making the data you already have more useful.
# - The machine learning model does not understand text directly, **so we create numerical features that represent the underlying text**.
# - In this module, you'll deal with very basic NLP based features and as you progress further in the course you'll come across more complex and efficient ways of doing the same.
# +
#Exhaustive list of stopwords in the english language. We want to focus less on these so at some point will have to filter
STOP_WORDS = ['a', 'about', 'above', 'after', 'again', 'against', 'all', 'also', 'am', 'an', 'and',
'any', 'are', "aren't", 'as', 'at', 'be', 'because', 'been', 'before', 'being', 'below',
'between', 'both', 'but', 'by', 'can', "can't", 'cannot', 'com', 'could', "couldn't", 'did',
"didn't", 'do', 'does', "doesn't", 'doing', "don't", 'down', 'during', 'each', 'else', 'ever',
'few', 'for', 'from', 'further', 'get', 'had', "hadn't", 'has', "hasn't", 'have', "haven't", 'having',
'he', "he'd", "he'll", "he's", 'her', 'here', "here's", 'hers', 'herself', 'him', 'himself', 'his', 'how',
"how's", 'however', 'http', 'i', "i'd", "i'll", "i'm", "i've", 'if', 'in', 'into', 'is', "isn't", 'it',
"it's", 'its', 'itself', 'just', 'k', "let's", 'like', 'me', 'more', 'most', "mustn't", 'my', 'myself',
'no', 'nor', 'not', 'of', 'off', 'on', 'once', 'only', 'or', 'other', 'otherwise', 'ought', 'our', 'ours',
'ourselves', 'out', 'over', 'own', 'r', 'same', 'shall', "shan't", 'she', "she'd", "she'll", "she's",
'should', "shouldn't", 'since', 'so', 'some', 'such', 'than', 'that', "that's", 'the', 'their', 'theirs',
'them', 'themselves', 'then', 'there', "there's", 'these', 'they', "they'd", "they'll", "they're",
"they've", 'this', 'those', 'through', 'to', 'too', 'under', 'until', 'up', 'very', 'was', "wasn't",
'we', "we'd", "we'll", "we're", "we've", 'were', "weren't", 'what', "what's", 'when', "when's", 'where',
"where's", 'which', 'while', 'who', "who's", 'whom', 'why', "why's", 'with', "won't", 'would', "wouldn't",
'www', 'you', "you'd", "you'll", "you're", "you've", 'your', 'yours', 'yourself', 'yourselves']
#Generate word frequency
def gen_freq(text):
    """Return a pandas Series mapping word -> count, with stop words removed.

    NOTE(review): `text` is expected to be a pandas `.str` accessor
    (e.g. `dataset.clean_text.str`), so `text.split()` yields one *list*
    of tokens per row — confirm against the call sites below; a plain
    string argument would be split into characters by the extend().
    """
    #Will store the list of words
    word_list = []
    #Loop over all the tweets and extract words into word_list
    # (each `tw_words` is the token list of one tweet)
    for tw_words in text.split():
        word_list.extend(tw_words)
    #Create word frequencies using word_list
    word_freq = pd.Series(word_list).value_counts()
    #Drop the stopwords during the frequency calculation
    word_freq = word_freq.drop(STOP_WORDS, errors='ignore')
    return word_freq
#Check whether a negation term is present in the text
def any_neg(words):
    """Return 1 if any token in `words` is a negation term, else 0.

    A token counts as a negation if it is one of 'n', 'no', 'non', 'not'
    or contains a contracted negation (a word character followed by "n't",
    e.g. "don't", "isn't").
    """
    for word in words:
        if word in ['n', 'no', 'non', 'not'] or re.search(r"\wn't", word):
            return 1
    # Bug fix: the original `else: return 0` inside the loop bailed out after
    # inspecting only the FIRST token; now every token is checked first.
    return 0
#Check whether one of the 100 rare words is present in the text
def any_rare(words, rare_100):
    """Return 1 if any token in `words` appears in `rare_100`, else 0.

    `rare_100` is a container supporting `in` (here: a pandas Series whose
    index holds the 100 rarest words, so membership tests the index).
    """
    for word in words:
        if word in rare_100:
            return 1
    # Bug fix: the original `else: return 0` inside the loop bailed out after
    # inspecting only the FIRST token; now every token is checked first.
    return 0
#Check whether prompt words are present
def is_question(words):
    """Return 1 if any token in `words` is a question word, else 0."""
    for word in words:
        if word in ['when', 'what', 'how', 'why', 'who']:
            return 1
    # Bug fix: the original `else: return 0` inside the loop bailed out after
    # inspecting only the FIRST token; now every token is checked first.
    return 0
# -
# Corpus-wide word-frequency table (stop words removed)
word_freq = gen_freq(dataset.clean_text.str)
#100 most rare words in the dataset
# (value_counts sorts by descending count, so the tail holds the rarest;
#  `word in rare_100` below tests the Series index, i.e. the words)
rare_100 = word_freq[-100:]
#Number of words in a tweet
dataset['word_count'] = dataset.clean_text.str.split().apply(lambda x: len(x))
#Negation present or not
dataset['any_neg'] = dataset.clean_text.str.split().apply(lambda x: any_neg(x))
#Prompt present or not
dataset['is_question'] = dataset.clean_text.str.split().apply(lambda x: is_question(x))
#Any of the most 100 rare words present or not
dataset['any_rare'] = dataset.clean_text.str.split().apply(lambda x: any_rare(x, rare_100))
#Character count of the tweet
dataset['char_count'] = dataset.clean_text.apply(lambda x: len(x))
#Top 10 common words are
gen_freq(dataset.clean_text.str)[:10]
dataset.head()
# ### Splitting the dataset into Train-Test split
#
# - The dataset is split into train and test sets so that we can evaluate our model's performance on unseen data.
# - The model will only be trained on the `train` set and will make predictions on the `test` set whose data points the model has never seen. This will make sure that we have a proper way to test the model.
#
# This is a pretty regular practice in Machine Learning, don't worry if you are confused. It's just a way of testing your model's performance on unseen data.
# +
from sklearn.model_selection import train_test_split
X = dataset[['word_count', 'any_neg', 'any_rare', 'char_count', 'is_question']]
y = dataset.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=27)
# -
# ### 4. Train an ML model for Text Classification
#
# Now that the dataset is ready, it is time to train a Machine Learning model on the same. You will be using a **Naive Bayes** classifier from `sklearn` which is a prominent python library used for machine learning.
# +
from sklearn.naive_bayes import GaussianNB
#Initialize GaussianNB classifier
model = GaussianNB()
#Fit the model on the train dataset
model = model.fit(X_train, y_train)
#Make predictions on the test dataset
pred = model.predict(X_test)
# -
# ### 5. Evaluate the ML model
#
# It is time to evaluate the model on previously unseen data: the **X_test** and **y_test** sets that you previously created. Let's check the accuracy of the model.
# +
from sklearn.metrics import accuracy_score
print("Accuracy:", accuracy_score(y_test, pred)*100, "%")
# -
# ### 6. Conclusion
#
# **Note:** that since we have used very basic NLP features, the classification accuracy and f1 scores aren't that impressive. The goal of this exercise was to make you familiar with the model building process and I hope that you have a better idea on how to build a text classification model.
| NLP/Sentiment Analysis/Basic ML Model for Text Classification 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] chapterId="H1NeLCPx07M" id="chapter_name"
# # ★Chapter11 matplotlibの使い方
# + [markdown] id="table"
# - **11.1 1種類のデータを可視化する**
# - **11.1.1 グラフにデータをプロットする**
# - **11.1.2 グラフの表示範囲を設定する**
# - **11.1.3 グラフの要素に名前を設定する**
# - **11.1.4 グラフにグリッドを表示する**
# - **11.1.5 グラフの軸に目盛りを設定する**
# <br><br>
# - **11.2 複数のデータを可視化する1**
# - **11.2.1 1つのグラフに2種類のデータをプロットする**
# - **11.2.2 系列ラベルを設定する**
# <br><br>
# - **11.3 複数のデータを可視化する2**
# - **11.3.1 図の大きさを設定する**
# - **11.3.2 サブプロットを作成する**
# - **11.3.3 サブプロットのまわりの余白を調整する**
# - **11.3.4 サブプロット内のグラフの表示範囲を設定する**
# - **11.3.5 サブプロット内のグラフの要素に名前を設定する**
# - **11.3.6 サブプロット内のグラフにグリッドを表示する**
# - **11.3.7 サブプロット内のグラフの軸に目盛りを設定する**
# <br><br>
# - **添削問題**
# + [markdown] id="section_name" sectionId="S1HxUAPlC7M"
# ## ●11.1 1種類のデータを可視化する
# + [markdown] courseId=4040 exerciseId="r1IgLRDe0mf" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.1.1 グラフにデータをプロットする
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportしてください
import
import numpy as np
# %matplotlib inline
# np.pi は円周率を表します
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# データx,yをグラフにプロットし、表示してください
plt.show()
# -
# **リスト 11.1:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportしてください
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# np.pi は円周率を表します
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# データx,yをグラフにプロットし、表示してください
plt.plot(x,y)
plt.show()
# -
# **リスト 11.2:解答例**
# + [markdown] courseId=4040 exerciseId="r1wgUADeAQG" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.1.2 グラフの表示範囲を設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# np.piは円周率を表します
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# y軸の表示範囲を[0,1]に指定してください
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.3:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# np.piは円周率を表します
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# y軸の表示範囲を[0,1]に指定してください
plt.ylim([0, 1])
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.4:解答例**
# + [markdown] courseId=4040 exerciseId="HJdxU0DgAXf" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.1.3 グラフの要素に名前を設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定してください
# グラフのx軸とy軸に名前を設定してください
# y軸の表示範囲を[0,1]に指定します
plt.ylim([0, 1])
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.5:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定してください
plt.title("y=sin(x)( 0< y< 1)")
# グラフのx軸とy軸に名前を設定してください
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# y軸の表示範囲を[0,1]に指定します
plt.ylim([0, 1])
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.6:解答例**
# + [markdown] courseId=4040 exerciseId="ryYlL0vxCXz" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.1.4 グラフにグリッドを表示する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定します
plt.title("y=sin(x)")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示してください
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.7:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定します
plt.title("y=sin(x)")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示してください
plt.grid(True)
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.8:解答例**
# + [markdown] courseId=4040 exerciseId="ByceU0wgAQG" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.1.5 グラフの軸に目盛りを設定する
# -
# #### □問題
# +
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定します
plt.title("y=sin(x)")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# positionsとlabelsを設定します
positions = [0, np.pi/2, np.pi, np.pi*3/2, np.pi*2]
labels = ["0°", "90°", "180°", "270°", "360°"]
# グラフのx軸に目盛りを設定してください
# データx,yをグラフにプロットし、表示します
plt.plot(x,y)
plt.show()
# -
# **リスト 11.9:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# グラフのタイトルを設定します
plt.title("y=sin(x)")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# positionsとlabelsを設定します
positions = [0, np.pi/2, np.pi, np.pi*3/2, np.pi*2]
labels = ["0°", "90°", "180°", "270°", "360°"]
# グラフのx軸に目盛りを設定してください
plt.xticks(positions, labels)
# データx,yをグラフにプロットし、表示します
plt.plot(x,y)
plt.show()
# -
# **リスト 11.10:解答例**
# + [markdown] id="section_name" sectionId="HyjlIRvgAmf"
# ## ●11.2 複数のデータを可視化する①
# + [markdown] courseId=4040 exerciseId="ry3xU0veA7f" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### 〇11.2.1 1つのグラフに2種類のデータをプロットする
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# グラフのタイトルを設定します
plt.title("graphs of trigonometric functions")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# グラフのx軸にラベルを設定します
plt.xticks(positions, labels)
# データx, y1をグラフにプロットし、黒で表示してください
# データx, y2をグラフにプロットし、青で表示してください
plt.show()
# -
# **リスト 11.11:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# グラフのタイトルを設定します
plt.title("graphs of trigonometric functions")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# グラフのx軸にラベルを設定します
plt.xticks(positions, labels)
# データx, y1をグラフにプロットし、黒で表示してください
plt.plot(x, y1, color="k")
# データx, y2をグラフにプロットし、青で表示してください
plt.plot(x, y2, color="b")
plt.show()
# -
# **リスト 11.12:解答例**
# + [markdown] courseId=4040 exerciseId="S16gICvlRmz" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### 〇11.2.2 系列ラベルを設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# グラフのタイトルを設定します
plt.title("graphs of trigonometric functions")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# グラフのx軸にラベルを設定します
plt.xticks(positions, labels)
# データx, y1をグラフにプロットし、"y=sin(x)"とラベルを付けて黒で表示してください
# データx, y2をグラフにプロットし、"y=cos(x)"とラベルを付けて青で表示してください
# 系列ラベルを設定してください
plt.show()
# -
# **リスト 11.13:問題** 問題の部分で回答コード削除
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# グラフのタイトルを設定します
plt.title("graphs of trigonometric functions")
# グラフのx軸とy軸に名前を設定します
plt.xlabel("x-axis")
plt.ylabel("y-axis")
# グラフにグリッドを表示します
plt.grid(True)
# グラフのx軸にラベルを設定します
plt.xticks(positions, labels)
# データx, y1をグラフにプロットし、"y=sin(x)"とラベルを付けて黒で表示してください
plt.plot(x, y1, color="k", label="y=sin(x)")
# データx, y2をグラフにプロットし、"y=cos(x)"とラベルを付けて青で表示してください
plt.plot(x, y2, color="b", label="y=cos(x)")
# 系列ラベルを設定してください
plt.legend(["y=sin(x)", "y=cos(x)"])
plt.show()
# -
# **リスト 11.14:解答例**
# + [markdown] id="section_name" sectionId="SyCe8Rvx07M"
# ## ●11.3 複数のデータを可視化する②
# + [markdown] courseId=4040 exerciseId="H11-8AwgCQM" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.1 図の大きさを設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# 図の大きさを設定してください
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.15:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# 図の大きさを設定してください
plt.figure(figsize=(4, 4))
# データx,yをグラフにプロットし、表示します
plt.plot(x, y)
plt.show()
# -
# **リスト 11.16:解答例**
# + [markdown] courseId=4040 exerciseId="B1xW8CPx0mz" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.2 サブプロットを作成する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトを作ってください
ax =
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
# グラフがどこに追加されるか確認するため空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.17:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトを作ってください
ax = fig.add_subplot(2, 3, 5)
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
# グラフがどこに追加されるか確認するため空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.18:解答例**
# + [markdown] courseId=4040 exerciseId="BJZW8ADxCQM" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.3 サブプロットのまわりの余白を調整する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けてください
# データx,yをグラフにプロットし、表示します
ax.plot(x, y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.19:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けてください
plt.subplots_adjust(wspace=1, hspace=1)
# データx,yをグラフにプロットし、表示します
ax.plot(x, y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.20:解答例**
# + [markdown] courseId=4040 exerciseId="ryGWURwxCXf" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.4 サブプロット内のグラフの表示範囲を設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けます
plt.subplots_adjust(wspace=1, hspace=1)
# サブプロットaxのグラフのy軸の表示範囲を[0,1]に設定してください
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
# 空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.21:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けます
plt.subplots_adjust(wspace=1, hspace=1)
# サブプロットaxのグラフのy軸の表示範囲を[0,1]に設定してください
ax.set_ylim([0, 1])
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
# 空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.22:解答例**
# + [markdown] courseId=4040 exerciseId="Hkm-I0vxRmf" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.5 サブプロット内のグラフの要素に名前を設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1.0の割合で空けます
plt.subplots_adjust(wspace=1.0, hspace=1.0)
# サブプロットaxのグラフのタイトルを設定してください
# サブプロットaxのグラフのx軸、y軸に名前を設定してください
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.23:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
labels = ["90°", "180°", "270°", "360°"]
positions = [np.pi/2, np.pi, np.pi*3/2, np.pi*2]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1.0の割合で空けます
plt.subplots_adjust(wspace=1.0, hspace=1.0)
# サブプロットaxのグラフのタイトルを設定してください
ax.set_title("y=sin(x)")
# サブプロットaxのグラフのx軸、y軸に名前を設定してください
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.24:解答例**
# + [markdown] courseId=4040 exerciseId="r14WURPxRQG" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.6 サブプロット内のグラフにグリッドを表示する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1.0の割合で空けます
plt.subplots_adjust(wspace=1.0, hspace=1.0)
# サブプロットaxのグラフにグリッドを設定してください
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("y=sin(x)")
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.25:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1.0の割合で空けます
plt.subplots_adjust(wspace=1.0, hspace=1.0)
# サブプロットaxのグラフにグリッドを設定してください
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("y=sin(x)")
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.26:解答例**
# + [markdown] courseId=4040 exerciseId="SyS-ICPeAXG" id="code_session_name" important=false isDL=false timeoutSecs=10
# ### ○11.3.7 サブプロット内のグラフの軸に目盛りを設定する
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
positions = [0, np.pi/2, np.pi, np.pi*3/2, np.pi*2]
labels = ["0°", "90°", "180°", "270°", "360°"]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けます
plt.subplots_adjust(wspace=1, hspace=1)
# サブプロットaxのグラフにグリッドを表示します
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("y=sin(x)")
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# サブプロットaxのグラフのx軸に目盛りを設定してください
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.27:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x = np.linspace(0, 2*np.pi)
y = np.sin(x)
positions = [0, np.pi/2, np.pi, np.pi*3/2, np.pi*2]
labels = ["0°", "90°", "180°", "270°", "360°"]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 2×3のレイアウトの上から2行目、左から2列目にサブプロットオブジェクトaxを作成します
ax = fig.add_subplot(2, 3, 5)
# 図内のサブプロット間を、縦横ともに1の割合で空けます
plt.subplots_adjust(wspace=1, hspace=1)
# サブプロットaxのグラフにグリッドを表示します
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("y=sin(x)")
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# サブプロットaxのグラフのx軸に目盛りを設定してください
ax.set_xticks(positions)
ax.set_xticklabels(labels)
# データx,yをグラフにプロットし、表示します
ax.plot(x,y)
#空白部分をサブプロットで埋めます
axi = []
for i in range(6):
if i==4:
continue
fig.add_subplot(2, 3, i+1)
plt.show()
# -
# **リスト 11.28:解答例**
# + [markdown] id="chapter_exam"
# ## ●添削問題
# -
# #### □問題
# + id="index"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x_upper = np.linspace(0, 5)
x_lower = np.linspace(0, 2 * np.pi)
x_tan = np.linspace(-np.pi / 2, np.pi / 2)
positions_upper = [i for i in range(5)]
positions_lower = [0, np.pi / 2, np.pi, np.pi * 3 / 2, np.pi * 2]
positions_tan = [-np.pi / 2, 0, np.pi / 2]
labels_upper = [i for i in range(5)]
labels_lower = ["0°", "90°", "180°", "270°", "360°"]
labels_tan = ["-90°", "0°", "90°"]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 3×2のレイアウトをもつ複数の関数のグラフをプロットしてください
plt.show()
# -
# **リスト 11.29:問題**
# #### □解答例
# + id="answer"
# matplotlib.pyplotをpltとしてimportします
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
x_upper = np.linspace(0, 5)
x_lower = np.linspace(0, 2 * np.pi)
x_tan = np.linspace(-np.pi / 2, np.pi / 2)
positions_upper = [i for i in range(5)]
positions_lower = [0, np.pi / 2, np.pi, np.pi * 3 / 2, np.pi * 2]
positions_tan = [-np.pi / 2, 0, np.pi / 2]
labels_upper = [i for i in range(5)]
labels_lower = ["0°", "90°", "180°", "270°", "360°"]
labels_tan = ["-90°", "0°", "90°"]
# Figureオブジェクトを作成します
fig = plt.figure(figsize=(9, 6))
# 3×2のレイアウトをもつ複数の関数のグラフをプロットしてください
# サブプロット同士が重ならないように設定します
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# 上段のサブプロットを作成します
for i in range(3):
y_upper = x_upper ** (i + 1)
ax = fig.add_subplot(2, 3, i + 1)
# サブプロットaxのグラフにグリッドを表示します
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("$y=x^%i$" % (i + 1))
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# サブプロットaxのグラフのx軸にラベルを設定します
ax.set_xticks(positions_upper)
ax.set_xticklabels(labels_upper)
# データx,yをグラフにプロットし、表示します
ax.plot(x_upper, y_upper)
# 下段のサブプロットを作成します
# あらかじめリストに使う関数とタイトルを入れておくことでfor文による処理を可能にします
y_lower_list = [np.sin(x_lower), np.cos(x_lower)]
title_list = ["$y=sin(x)$", "$y=cos(x)$"]
for i in range(2):
y_lower = y_lower_list[i]
ax = fig.add_subplot(2, 3, i + 4)
# サブプロットaxのグラフにグリッドを表示します
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title(title_list[i])
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# サブプロットaxのグラフのx軸にラベルを設定します
ax.set_xticks(positions_lower)
ax.set_xticklabels(labels_lower)
# データx,yをグラフにプロットし、表示します
ax.plot(x_lower, y_lower)
# y=tan(x)のグラフのプロットします
ax = fig.add_subplot(2, 3, 6)
# サブプロットaxのグラフにグリッドを表示します
ax.grid(True)
# サブプロットaxのグラフのタイトルを設定します
ax.set_title("$y=tan(x)$")
# サブプロットaxのグラフのx軸、y軸に名前を設定します
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
# サブプロットaxのグラフのx軸にラベルを設定します
ax.set_xticks(positions_tan)
ax.set_xticklabels(labels_tan)
# サブプロットaxのグラフのyの範囲を設定します
ax.set_ylim(-1, 1)
# データx,yをグラフにプロットし、表示します
ax.plot(x_tan, np.tan(x_tan))
plt.show()
# -
# **リスト 11.30:解答例**
| notebooks/ShinsouGakushu_sample/Chapter11_Sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''test-env'': conda)'
# language: python
# name: python37764bittestenvcondaffaa0dabc271479880312265588daec4
# ---
# +
# import data
import pandas as pd
data = pd.read_excel('input_data.xlsx')
data
# +
# read headers
print(data.columns)
# -
data.area.describe()
data.info()
# +
# find all unique city
all_city = data['city'].unique()
print("City array: {0}".format(all_city))
# find all unique country
all_country = data['country'].unique()
print("Country array: {0}".format(all_country))
# +
# Normalize the free-form country spellings to 3-letter codes.
country_code_map = {
    'US': 'USA',
    'USA': 'USA',
    ' United States of America': 'USA',  # leading space matches the raw data
    'America': 'USA',
    'Poland': 'POL',
    'PL': 'POL',
    'Polska': 'POL',
}
data['country'] = data['country'].map(country_code_map)
data
# +
# Backfill missing area/population values per city, but only when every
# non-null record for that city agrees on a single value; otherwise report
# the conflict instead of guessing.
for city in all_city:
    # get the unique non-null area and population values recorded for this city
    area = data[(data['city']==city) & (~data['area'].isna())]['area'].unique()
    population = data[(data['city']==city) & (~data['population'].isna())]['population'].unique()
    if len(area) == 1:
        # exactly one consistent value -> safe to fill the NaNs with it
        data.loc[(data['city']==city) & (data['area'].isna()), 'area'] = area
    else:
        print('Area data mismatch on the context of {0}'.format(city))
    if len(population) == 1:
        data.loc[(data['city']==city) & (data['population'].isna()), 'population'] = population
    else:
        print('Population data mismatch on the context of {0}'.format(city))
data
# +
# get country
country_list = pd.DataFrame(data['country'].unique(), columns=['country'])
country_list.index.name = 'id'
country_list
# +
# get city and conect with country
city_list = data[['city','country']].drop_duplicates().reset_index().drop(columns = ['index']);
city_list.index.name = 'id'
city_list = city_list.rename(columns = {'country':'country_id'})
city_list
# +
city_list['country_id'] = city_list['country_id'].map(lambda x: country_list[country_list['country'] == x].index.values.astype(int)[0])
city_list
# +
# get area and population
city_pop_area = data[['city','area', 'population', 'president']].drop_duplicates().reset_index().drop(columns = ['index']);
city_pop_area.index.name = 'id'
city_pop_area = city_pop_area.rename(columns = {'city':'city_id'})
city_pop_area['city_id'] = city_pop_area['city_id'].map(lambda x: city_list[city_list['city'] == x].index.values.astype(int)[0])
city_pop_area
# +
# get city and monument
city_monuments = data[['city', 'monument']].drop_duplicates().dropna().reset_index().drop(columns = ['index']);
city_monuments.index.name = 'id'
city_monuments = city_monuments.rename(columns = {'city':'city_id'})
city_monuments['city_id'] = city_monuments['city_id'].map(lambda x: city_list[city_list['city'] == x].index.values.astype(int)[0])
city_monuments
# +
#Table definition and insert data
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
# db_string = "postgres://postgres:postgres@127.0.0.1:5432/testAGH"
db_string = "postgresql://postgres:xxx@localhost/Advanced_Databases"
engine = create_engine(db_string)
Base = declarative_base()
# Import column structure and constraints
from sqlalchemy import Column, Integer, String, Float, ForeignKey, Sequence, CheckConstraint, UniqueConstraint
# Table definitions.
# NOTE: the table names "countryies"/"cityies" are (mis)spelled on purpose:
# the to_sql() calls below insert into exactly those names, so every
# __tablename__ and ForeignKey here must match them or create_all()/inserts fail.
class Country(Base):
    __tablename__ = 'countryies'
    __table_args__ = (
        # country codes are 3-letter (e.g. 'USA', 'POL');
        # SQL has no len() -- the standard function is length()
        CheckConstraint('length(country) = 3'),
        UniqueConstraint('country'),
        )
    id = Column(Integer, Sequence('seq_country_id'), primary_key = True)
    country = Column(String(50), nullable = False)

class City(Base):
    # was 'cities', which contradicted the ForeignKey('cityies.id') references
    # below and the to_sql('cityies', ...) insert
    __tablename__ = 'cityies'
    __table_args__ = (
        CheckConstraint('length(city) > 0'),
        )
    id = Column(Integer, Sequence('seq_city_id'), primary_key=True)
    # was ForeignKey('countries.id') -- no such table; Country maps 'countryies'
    country_id = Column(Integer, ForeignKey('countryies.id'))
    city = Column(String, nullable = False)

class City_data(Base):
    __tablename__ = 'city_data'
    __table_args__ = (
        CheckConstraint('area > 0'),
        CheckConstraint('population >= 0')
        )
    id = Column(Integer, Sequence('seq_city_data_id'), primary_key=True )
    city_id = Column(Integer, ForeignKey('cityies.id'))
    area = Column(Float, nullable = False, default=0)
    population = Column(Integer, nullable = False, default=0)
    president = Column(String(60), nullable = True, default='')

class Monument(Base):
    __tablename__ = 'monuments'
    __table_args__ = (
        CheckConstraint('length(monument) > 0'),
        )
    id = Column(Integer, Sequence('seq_monument_id'), primary_key=True )
    city_id = Column(Integer, ForeignKey('cityies.id'))
    monument = Column(String(100), nullable = True)

Base.metadata.create_all(engine)
# -
country_list.to_sql('countryies',engine, if_exists='append')
city_list.to_sql('cityies',engine, if_exists='append')
city_pop_area.to_sql('city_data',engine, if_exists='append')
city_monuments.to_sql('monuments',engine, if_exists='append')
| Advanced databases/Lab 4-5 (Analysis of input data and constraints of columns)/lab4_exampel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Deploying the CNN - Sample Images
#
# In this tutorial we will deploy the CNN trained in the previous demo. We will test the CNN using sample MR data.
#
# The goal of this tutorial is:
# - Illustrate how to deploy a trained CNN for image segmentation.
# +
# %matplotlib inline
import matplotlib.pylab as plt
import numpy as np
import nibabel as nib
import os
import glob
import sys
import time
import siamxt
MY_UTILS_PATH = "../Modules/"
if not MY_UTILS_PATH in sys.path:
sys.path.append(MY_UTILS_PATH)
import ipt_utils
import cnn_utils
import metrics_utils
# -
# ## Loading the data and the trained CNN
# +
orig_path = "../Data/CC0003_philips_15_63_F_staple.nii.gz"
stats = np.load("../Data/wm_unet_cc347.npy")
mean = stats[0]
std = stats[1]
model_path = "../Data/wm_unet_cc_347_best.hdf5"
model = cnn_utils.get_unet_mod()
model.load_weights(model_path)
# -
# ## Running the CNN prediction stage
# Load the MR volume and normalize it with the training-set statistics.
img = nib.load(orig_path)
affine = img.affine
img = img.get_data()
# reorder axes -- presumably to put the view the network was trained on
# (coronal, per the notes below) on axis 0; TODO confirm orientation
img = img.transpose(1,0,2)
img_min = img.min()
img_max = img.max()
# min-max scale to [0, 1], then standardize with the saved mean/std
img_norm = 1.0*(img - img_min)/(img_max-img_min)
img_norm -= mean
img_norm /= std
x,y,z = img_norm.shape
# build a 3-channel input: each "RGB" sample stacks three consecutive slices
# (previous / current / next), so two boundary slices are lost (x-2 samples)
img_rgb = np.zeros((x-2,y,z,3))
img_rgb[:,:,:,0] = img_norm[0:-2,:,:]
img_rgb[:,:,:,1] = img_norm[1:-1,:,:]
img_rgb[:,:,:,2] = img_norm[2:,:,:]
# pad to the spatial size the U-Net expects; nw/nz are the added margins
img_rgb,nw,nz = cnn_utils.pad_images(img_rgb)
predict = model.predict(img_rgb)
# crop the padding back off and keep the single output channel
predict = predict[:,:-nw,:-nz,0]
# re-insert the two boundary slices (left as zeros) and binarize at 0.5
predict2 = np.zeros((x,y,z))
predict2[1:-1,:,:] = predict
predict2 = (predict2 >0.5).astype(np.uint8)
H,W,Z = img_norm.shape
# Overlay the binary mask on the mid-slice of each anatomical plane.
# Use floor division so the slice indices stay integers under both
# Python 2 and 3 (H/2 is a float in Python 3 and raises on indexing).
plt.figure()
plt.subplot(131)
plt.imshow(img_norm[H//2,:,:], cmap = 'gray')
plt.imshow(predict2[H//2,:,:], cmap = 'cool',alpha = 0.2)
plt.axis("off")
plt.subplot(132)
plt.imshow(img_norm[:,W//2,:], cmap = 'gray')
plt.imshow(predict2[:,W//2,:], cmap = 'cool',alpha = 0.2)
plt.axis("off")
plt.subplot(133)
plt.imshow(img_norm[:,:,Z//2], cmap = 'gray')
plt.imshow(predict2[:,:,Z//2], cmap = 'cool',alpha = 0.2)
plt.axis("off")
plt.show()
# ## Activities List
#
# - The network used in this demo was trained in the coronal view. Feed the sample images on different views (sagittal, coronal) to see if this affect the segmentation results.
| JNotebooks/cnn-cc-347-predict-CC12-wm-sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import os
import librosa
import glob
import sys
import yaml
from keras.callbacks import CSVLogger
from keras.optimizers import Adam
sys.path.insert(0,'..')
from mavd.model import build_custom_cnn
from mavd.data_generator_URBAN_SED import DataGenerator
from mavd.callbacks import *
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# +
# files parameters
Nfiles = None
resume = False
load_subset = Nfiles
label_list = ['air_conditioner', 'car_horn', 'children_playing',
              'dog_bark', 'drilling', 'engine_idling', 'gun_shot',
              'jackhammer', 'siren', 'street_music']
# Experiment folder holding the trained weights and params module
expfolder = '../exps/S-CNN_baseline/'
# BUG FIX: `from exps.S-CNN_baseline.params import *` is a SyntaxError --
# a module path component may not contain "-". Load the module through
# importlib instead and emulate the star-import into this namespace.
import importlib
_params_mod = importlib.import_module('exps.S-CNN_baseline.params')
_public_names = getattr(_params_mod, '__all__',
                        [k for k in vars(_params_mod) if not k.startswith('_')])
globals().update({k: getattr(_params_mod, k) for k in _public_names})
#param_path = os.path.join(expfolder,'params.py')
#params = yaml.load(open(param_path))
audio_folder = '/data_ssd/users/pzinemanas/maestria/URBAN-SED/audio22050'
#feature_folder = '../../MedleyDB/22050'
label_folder='/data_ssd/users/pzinemanas/maestria/URBAN-SED/annotations'
alpha = 10**8  # reference level for the log
# +
#params = {'files_batch':20, 'path':audio_folder, 'sequence_time': sequence_time, 'sequence_hop_time':sequence_hop_time,'label_list':label_list,'alpha': alpha,'normalize_energy':normalize_energy,
# 'audio_hop':audio_hop, 'audio_win':audio_win,'n_fft':n_fft,'sr':sr,'mel_bands':mel_bands,'normalize':normalize_data, 'frames':frames,'get_annotations':get_annotations}
#params['path'] = audio_folder
#params['label_list'] = label_list
#sequence_frames = int(np.ceil(params['sequence_time']*params['sr']/params['audio_hop']))
sequence_frames = int(np.ceil(sequence_time*sr/audio_hop))
# Datasets
partition = {}# IDs
labels = {}# Labels
test_files = sorted(glob.glob(os.path.join(audio_folder,'test', '*.wav')))
val_files = sorted(glob.glob(os.path.join(audio_folder,'validate', '*.wav')))
if load_subset is not None:
test_files = test_files[:load_subset]
val_files = val_files[:load_subset]
test_labels = {}
test_mel = {}
val_labels = {}
val_mel = {}
print('Founding scaler')
for n,id in enumerate(test_files):
labels[id] = os.path.join(label_folder, 'test',os.path.basename(id).replace('.wav','.txt'))
#train_mel[id] = os.path.join(mel_folder, 'train',os.path.basename(id).replace('.wav','.npy.gz'))
for id in val_files:
labels[id] = os.path.join(label_folder, 'validate',os.path.basename(id).replace('.wav','.txt'))
params['train'] = False
# Generators
print('Making generators')
test_generator = DataGenerator(test_files, labels, **params)
#scaler = training_generator.get_scaler()
#print('scaler',scaler)
#params['scaler'] = scaler
#params['train'] = False
params['sequence_hop_time'] = sequence_time
validation_generator = DataGenerator(val_files, labels, **params)
print('Getting data')
_,_,x_val,y_val = validation_generator.return_all()
_,_,x_test,y_test = test_generator.return_all()
print(x_val.shape, y_val.shape)
sequence_frames = x_val.shape[1]
# Build model
print('\nBuilding model...')
sequence_samples = int(sequence_time*sr)
model = build_custom_cnn(n_freq_cnn=mel_bands, n_frames_cnn=sequence_frames,large_cnn=large_cnn)
model.summary()
weights_best_file = os.path.join(expfolder, 'weights_best.hdf5')
model.load_weights(weights_best_file)
# Fit model
print('\nTesting model...')
y_test_predicted = model.predict(x_test)
y_val_predicted = model.predict(x_val)
#np.save('predict_proba.npy',y_val_predicted)
#np.save('test_proba.npy',y_val)
np.save(os.path.join(expfolder, 'y_test_predict.npy'),y_test_predicted)
np.save(os.path.join(expfolder, 'y_test.npy'),y_test)
print(y_test.shape)
print(F1(y_test,y_test_predicted))
print(ER(y_test,y_test_predicted))
print(F1(y_val,y_val_predicted))
print(ER(y_val,y_val_predicted))
# -
| notebooks/.ipynb_checkpoints/03_test_S-CNN-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Preprocess and validate eADAGE network data
#
# eADAGE G-G network/edge list was generated by Alex using the script at [make_GiG_network.R](../generic_expression_patterns_modules/make_GiG_network.R).
#
# Here, we preprocess it to:
# * reweight negative edges
# * add generic gene info from [here](../pseudomonas_analysis/annot_df.tsv)
#
# Output: A new edge list file that can be quickly loaded by `graph-tool` for subsequent analyses.
# +
import os
import numpy as np
import pandas as pd
import graph_tool.all as gt
import matplotlib.pyplot as plt
import seaborn as sns
# +
# relevant file paths
data_dir = "./data"
unprocessed_edge_list = os.path.join(data_dir, "edgeList.csv")
# map of Pa gene names to generic/not generic status, generated by Alex
generic_gene_map = os.path.join("..", "pseudomonas_analysis", "annot_df.tsv")
# save edge list with preprocessed weight information
processed_edge_list = os.path.join(data_dir, "edge_list_processed_unsigned.csv")
# place to save preprocessed graph/attributes, in graph-tool binary format
processed_graph = os.path.join(data_dir, "eadage_generic_graph_unsigned.gt")
# -
# #### Load edge list and handle negative edge weights
#
# Most algorithms for community detection and betweenness don't work with negative weights/correlations. Taking inspiration from WGCNA, we can solve this problem in one of two ways:
# * Unsigned: `weight = abs(corr(g1, g2))`
# * Signed: `weight = abs((1 + corr(g1, g2) / 2)`
#
# We should probably try both eventually, but for now we'll just use the unsigned version (taking absolute value of negative edges). I tend to agree with the rationale in this BioStars post for taking absolute values (unsigned) rather than rescaling (signed approach): https://www.biostars.org/p/144078/#144088
# +
# Build the unsigned edge list once and cache it on disk: negative
# correlations are replaced by their absolute value (the "unsigned"
# WGCNA-style weighting discussed above) so downstream community /
# betweenness algorithms receive non-negative weights.
if not os.path.isfile(processed_edge_list):
    edgelist_df = pd.read_csv(unprocessed_edge_list, index_col=0)
    # take absolute value of edge weights (unsigned network)
    edgelist_df["weight"] = edgelist_df.weight.abs()
    # persist only the columns graph-tool needs for a fast load
    edgelist_df.to_csv(
        processed_edge_list, columns=["from", "to", "weight"], index=False
    )
# reload from the processed file so both branches end with identical columns
edgelist_df = pd.read_csv(processed_edge_list)
edgelist_df.head()
# -
# In this "generic gene map", 1 denotes a generic gene and 0 is all other genes. A gene is considered generic if it had a high percentile from SOPHIE and the manually curated set based on the correlation plot seen [here](../pseudomonas_analysis/2_identify_generic_genes_pathways.ipynb).
annot_df = pd.read_csv(generic_gene_map, sep="\t", index_col=0)
annot_df.head()
G = gt.load_graph_from_csv(
processed_edge_list,
skip_first=True,
directed=False,
hashed=True,
eprop_names=["weight"],
eprop_types=["float"],
)
# +
# add vertex property for generic genes
vprop_generic = G.new_vertex_property("bool")
for ix, v in enumerate(G.vertices()):
v_name = G.vp["name"][v]
v_label = annot_df.loc[v_name, "label"]
vprop_generic[v] = v_label
G.vertex_properties["is_generic"] = vprop_generic
# -
# make sure vertex/edge properties exist
print(G)
print(list(G.vp.keys()))
print(list(G.ep.keys()))
# make sure names/weights from file were loaded properly
for s, t, w in G.iter_edges([G.ep["weight"]]):
print(G.vp["name"][s], G.vp["name"][t], w)
if s > 0:
break
# save graph with attributes to file
G.save(processed_graph, fmt="gt")
# plot generic and non-generic genes, just for fun
# https://stackoverflow.com/a/60462353
red_blue_map = {0: (1, 0, 0, 1), 1: (0, 0, 1, 1)}
plot_color = G.new_vertex_property("vector<double>")
for v in G.vertices():
plot_color[v] = red_blue_map[G.vp["is_generic"][v]]
gt.graph_draw(G, vertex_fill_color=plot_color)
| network_analysis/1_preprocess_network_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8mcDdeK4X04d"
# #Preproccesing
# + id="roip2O1dG7rs" colab={"base_uri": "https://localhost:8080/"} outputId="02f39ba8-51fe-42d8-9833-11e83b45d479"
# !pip install yfinance
# + id="tbehq5LpuL1C" colab={"base_uri": "https://localhost:8080/"} outputId="aac9f1ef-0861-46f3-b447-bf89db5f77d0"
# !pip3 install --user --upgrade git+https://github.com/twintproject/twint.git@origin/master#egg=twint
# + id="ytyWFVYhhN8w"
import pandas as pd
from pandas_datareader import data as web
from yfinance import Ticker
import datetime
import yfinance as yf
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
import numpy as np
from keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="n6YB9ZpiKLI5" outputId="d469a460-a782-45c3-f313-2b2216ed1eb0"
import twint
import nest_asyncio
nest_asyncio.apply()
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
from textblob import TextBlob
# + id="FJ-OCc0HFZ5I"
start_date = (datetime.datetime(2020,12,21) - datetime.timedelta(days=1825)).strftime("%Y-%m-%d")
end_date = datetime.datetime(2020,12,21).strftime("%Y-%m-%d")
# + id="pXP60aTnJ6uH"
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
def sneaky_cleanup(title):
    """Drop stopwords and non-alphanumeric tokens, lemmatize the rest."""
    kept = [
        lemmatizer.lemmatize(word)
        for word in title.split()
        if word not in stop_words and word.isalnum()
    ]
    return " ".join(kept)
# + [markdown] id="lRCRni0qmYxq"
# # Amazon
# + id="aZc3iRWv53ZP"
stock = "Amazon"
# + id="6U4rGWHOJ9wi"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("CNBC")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="DVEZ-uiQrvXi"
#Run
twint.run.Search(c)
# + id="mvxyOHdxoRCF"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="gOY8SIM0tf7-" outputId="0765847a-7b0c-4078-a0ac-2ff73206936b"
df_cnbc_amzn = df[['id','date','tweet','hashtags','username','search']]
df_cnbc_amzn.head()
# + colab={"base_uri": "https://localhost:8080/"} id="TDf1SjWbKAes" outputId="85ea446c-9a11-4b0c-afa7-b4722d196913"
#Lemmatizing the tweets.
df_cnbc_amzn.tweet = df_cnbc_amzn.tweet.apply(lambda x: sneaky_cleanup(x))
# + id="YjmbVwMw5RnD"
def polarity(text):
    """Return the TextBlob sentiment polarity of *text*, in [-1.0, 1.0]."""
    blob = TextBlob(text)
    return blob.sentiment.polarity
# + colab={"base_uri": "https://localhost:8080/"} id="aDGVfdLk5Y-d" outputId="d08fbe72-7490-4837-cba3-c452a97613b5"
df_cnbc_amzn["polarity"] = df_cnbc_amzn["tweet"].apply(polarity)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="sMaYkjK8KBGG" outputId="17ede103-da8a-4eac-c194-b935c9b0c489"
df_cnbc_amzn.head()
# + id="NIOeDg0u2bmC"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("business")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="-GM-_8uj2bmC"
#Run
twint.run.Search(c)
# + id="oQ_M6w1V2bmD"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="DiG8XWrQ2bmD" outputId="86284ecb-287e-45d8-cfdb-8139869f9366"
df_bloom = df[['id','date','tweet','hashtags','username','search']]
df_bloom
# + colab={"base_uri": "https://localhost:8080/"} id="rh0YHcFPNS1X" outputId="974940f4-519f-4898-de73-eed4f53a491b"
df_bloom.tweet = df_bloom.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="A5IVHxTFNIao" outputId="19ec5a5f-5758-45b3-b2ba-e67cd0051f14"
df_bloom["polarity"] = df_bloom["tweet"].apply(polarity)
# + id="gFSE695O2bw0"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("WSJ")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="rGiLQcX72bw0"
#Run
twint.run.Search(c)
# + id="mF_yxz6G2bw0"
df = twint.storage.panda.Tweets_df
# + id="bvnqdeEF2bw1"
df_wsj = df[['id','date','tweet','hashtags','username','search']]
# + colab={"base_uri": "https://localhost:8080/"} id="36i9_Fo0NV1k" outputId="03bbbdea-fd3a-47b0-a408-f1a5f8061637"
df_wsj.tweet = df_wsj.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="7WhM67Y6EJ9W" outputId="c911f986-527f-48a9-f08a-54a7277a2218"
df_wsj["polarity"] = df_wsj["tweet"].apply(polarity)
# + id="FII-Hc0PQ7QI"
frames=[df_cnbc_amzn,df_bloom,df_wsj]
# + id="BXVMrCfcRXPD"
merged = pd.concat(frames)
# + id="-C_bvh_hVxev"
merged["date"] = pd.to_datetime(merged.date)
merged["date"] = merged["date"] + datetime.timedelta(hours=8)
# + id="IfN7MwKHSShv"
positive = merged[merged["polarity"]>0]
neutral = merged[merged["polarity"]==0]
negative = merged[merged["polarity"]<0]
# + id="7ZBOuxg5WOsP"
positive = positive.set_index('date').resample('D')['polarity'].count()
neutral = neutral.set_index('date').resample('D')['polarity'].count()
negative = negative.set_index('date').resample('D')['polarity'].count()
# + id="F0uykDgtd-m6"
positive = pd.DataFrame(positive)
neutral = pd.DataFrame(neutral)
negative = pd.DataFrame(negative)
# + id="uctueXu-eRJ9"
positive["positive"] = positive.polarity
neutral["neutral"]=neutral.polarity
negative["negative"]=negative.polarity
# + id="T7GbE3clZXFl"
df2 = pd.merge(positive,neutral,left_index=True,right_index=True)
# + id="3Lan5t6ue9Sf"
sentiment = pd.merge(df2,negative,right_index=True,left_index=True)
# + id="mDGs-I-dFoBp"
df = yf.download("AMZN", start=start_date, end=end_date, progress=False, interval='1d')
# + id="QoUhAgi-KNwE"
df.columns = [w.lower() for w in df.columns]
# + id="V4eTsWIukpd7"
df = pd.merge(df,sentiment,left_index=True,right_index=True)
# + id="PPeL2iJfstAf"
df['close-1'] = df['close'].shift(+1, fill_value=df['close'].iloc[0])
# + id="aEggogdNkcTA"
df['change'] = df["close"] - df['close'].shift(+1, fill_value=df['close'].iloc[0])
#df['change_pred'] = (df["close"] - df['close'].shift(-1, fill_value=df['change'].iloc[1]))*-1
# + id="t0VGPXo2FIEe"
df = df.drop(["open","polarity_x","polarity_y","polarity","adj close","close","high","low"],axis=1)
# + id="dY8Wll7gE7zF"
#df["change"] = (df["change"]>0).astype(int).astype("float32")
# + [markdown] id="nh-PnEWvulu6"
# Since we want to predict the closing stock price for the following day, we just shift the closing price one day to get our y value.
# + [markdown] id="Z0sRZLx-viEr"
# Since we are working with sequential data, we don't use train_test_split. Instead we pick the first 90% of observations as the training set, and the remaining 10% as the testing set.
# + id="tGpJjtq2XCSx"
test_size = int(len(df) * 0.1)
train = df.iloc[:-test_size,:].copy()
test = df.iloc[-test_size:,:].copy()
# + [markdown] id="pDHMZ2ygv8zl"
# We split the dataset into x values and y values. We also specify .values, since the date is not relevant for the training and doesn't work with some of the later preprocessing.
# + id="TxR30dN2XQgv"
X_train = train.iloc[:,:-1].values
y_train = train.iloc[:,-1].values
X_test = test.iloc[:,:-1].values
y_test = test.iloc[:,-1].values
# + id="w-F9n-FKaNhV"
# + [markdown] id="zTPIlToqxT-X"
# We scale all our values to be between -1 and 1. This should help the accuracy of the model.
# + id="uMY79ZABaJo8"
x_scaler = MinMaxScaler(feature_range=(-1, 1))
y_scaler = MinMaxScaler(feature_range=(-1, 1))
# + id="4kgPnMaAaOsj"
X_train = x_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1,1))
X_test = x_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1,1))
# + id="qg08o5jXCh1q"
X_train = np.reshape(X_train, (X_train.shape[0], 1, 5))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 5))
# + id="9N1Qmkvbafi0"
# + [markdown] id="nEVkSS3ZxrRp"
# Now we start making our RNN model.
#
# n_input = how many days we look in the past to predict the next sample. We chose 10 mostly by trial and error.
#
# We set epochs to 200. It's our experience that the more you train the model, the more it will try to predict the daily volatility. If we only trained it for e.g. 10 epochs, the model would make a soft curve which didn't look like a real stock development. By trial and error we found 200 to be the best training amount.
# + id="utykwCALadZB"
n_input = 10
n_features= X_train.shape[2] # how many predictors/Xs/features we have to predict y
b_size = 10 # Number of timeseries samples in each batch
epochs = 200
# + id="fxYbjN9WbY3o"
# + id="Ao5kR8nPaq1l"
# + [markdown] id="4m7-UV5c9gIx"
# Since we are working with sequential stock data we chose an LSTM model, which is a RNN model.
# Activation function is set to relu, and optimizer is adam, since these are the standard for this kind of task.
# We chose 2 layers with 50 units each, and once again done by trial and error.
#
# Since we want to predict the actual stock price we use mse(mean squared error) as our loss fuction.
# + colab={"base_uri": "https://localhost:8080/"} id="Bv9kBQ-OatKf" outputId="0d4b5f93-17a1-45a1-ed10-225f6c6d6ea1"
# Two-layer stacked LSTM regressor: 50 units per layer, dropout in between,
# and a single linear output unit predicting the (scaled) next-day change.
# NOTE(review): input_shape is declared as (n_input, n_features) = (10, 5),
# but X_train/X_test were reshaped above to (samples, 1, 5), i.e. a single
# timestep per sample -- confirm the declared shape matches the data
# actually fed to fit()/predict().
model = Sequential()
model.add(LSTM(50, activation='relu',return_sequences=True, input_shape=(n_input, n_features)))
model.add(Dropout(0.2))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='MSE',metrics=['MSE'])
model.summary()
# + id="sA7G_XY0a11w"
model.fit(X_train,y_train,epochs=epochs,verbose=1)
# + id="fq67YpWQbCRL"
import matplotlib.pyplot as plt
# + [markdown] id="mfxayDqTMJEU"
# This looks like overfitting, but it works the best in our case.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="iXILfC3mbAoL" outputId="a9096320-c8bf-43b7-a1bd-aaf8227b1f1f"
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)),loss_per_epoch);
# + [markdown] id="fpS5IOM_Ftp1"
# Then we do some data processing to get it back to the orginal format, so we can compare the real data to the predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="cYdKlL4ub6zR" outputId="6f80a3d4-8a67-4ddb-aacf-cf177e9b4bb6"
y_pred_scaled = model.predict(X_test)
y_pred = y_scaler.inverse_transform(y_pred_scaled)
y_test = y_scaler.inverse_transform(y_test)
results = pd.DataFrame({'y_true':y_test.flatten(),'y_pred':y_pred.flatten()})
print(results)
# + id="xKDx72tuTkSy"
results["y_pred"] = (results["y_pred"]>0).astype(int).astype("float32")
results["y_true"] = (results["y_true"]>0).astype(int).astype("float32")
# + id="S_ZpA0fGMUeQ"
backtesting = pd.merge(test.reset_index(),results,right_index=True,left_index=True)
# + id="OS1ojZTcOyFA"
buy = backtesting[backtesting["y_pred"]==1]
sell = backtesting[backtesting["y_pred"]==0]
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="YR-8AXC0H-4E" outputId="b52b45db-3632-48e9-84f6-4e6504a34048"
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=False).iloc[0:10,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="t7_qbLXkIDA7" outputId="985a1382-a976-4437-df3a-a3049dd456a7"
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=True).iloc[0:10,:],)
# + id="44YCjP7YTspu"
gain = buy.change.sum()
loss = sell.change.sum()
# + id="fTVa8z1sPtTW"
profit = gain - loss
# + colab={"base_uri": "https://localhost:8080/"} id="o--cKlvTQzbR" outputId="60ddaded-6ffc-4d4b-d1e3-f0252f864835"
profit
# + colab={"base_uri": "https://localhost:8080/", "height": 136} id="qZWlWBHsIJtl" outputId="ac00d468-c950-463c-87ec-b5f06e15f19b"
pd.crosstab(results.y_true,results.y_pred)
# + id="J2qC9qrsNB0M"
# + id="76kYZtY9NCPP"
test=df[df.positive/df.negative>5]
# + colab={"base_uri": "https://localhost:8080/"} id="sFCzo-UUNCPQ" outputId="4d9c7b5f-e2ba-4791-92b5-fd37f0aa737f"
test.change.mean()
# + id="98i4s8CeNCPS"
test2=df[df.negative/df.positive>5]
# + colab={"base_uri": "https://localhost:8080/"} id="0nKGk684NCPS" outputId="72c9f818-99c9-4889-b9cb-5c22d959db3f"
test2.change.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="g_6xcNL2RA2K" outputId="a82b8ddf-3e6a-4272-e6fe-55fe47105e82"
test2
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="qB93DjpHu9zJ" outputId="d5433162-ce36-44c5-db29-506ae4aa3346"
plt.bar(list(df.iloc[:,1:4].columns.values),df.iloc[:,1:4].sum())
# + id="V3ffh1kxvDwg"
amzn = df
# + [markdown] id="0wHwTc4C-hZ8"
# # Apple
# + id="PZ4p61LN-hZ_"
stock = "Apple"
# + id="eRjTxpE--hZ_"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("CNBC")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="13STi4vN-haA"
#Run
twint.run.Search(c)
# + id="eP7IIfa0-haB"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="7KraioL9-haC" outputId="5cde3324-5a53-4b07-8931-2e035ce7f32d"
# Keep only the columns needed for the sentiment analysis.
df_cnbc = df[['id','date','tweet','hashtags','username','search']]
df_cnbc.head()
# + colab={"base_uri": "https://localhost:8080/"} id="c1f6ofTQ-haC" outputId="7d34edb4-0a99-4254-84a8-3abafe61e6ce"
#Lemmatizing the tweets.
df_cnbc.tweet = df_cnbc.tweet.apply(lambda x: sneaky_cleanup(x))
# + id="28zdKw9V-haC"
def polarity(text):
    """Return the TextBlob sentiment polarity of *text*, in [-1, 1]."""
    return TextBlob(text).sentiment.polarity
# + colab={"base_uri": "https://localhost:8080/"} id="ftEZk4Hf-haD" outputId="d1245601-763e-4d9e-fb3b-a128ba9ce8b3"
df_cnbc["polarity"] = df_cnbc["tweet"].apply(polarity)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="JQimQecw-haD" outputId="ae733ea2-d5cf-4931-a1a1-929f1d04ea5c"
df_cnbc.head()
# + id="8BYbEghn-haD"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search, this time against Bloomberg's @business account.
c = twint.Config()
c.Username = ("business")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="cx4mKnoG-haE"
#Run
twint.run.Search(c)
# + id="s4c9t5j3-haF"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="GYn4H2e_-haF" outputId="d7771496-8a70-4b12-f165-00e1393ec6c8"
df_bloom = df[['id','date','tweet','hashtags','username','search']]
df_bloom
# + colab={"base_uri": "https://localhost:8080/"} id="rettbOxx-haF" outputId="2bf7a642-a895-4cb6-dc7f-000c7a1f2ddc"
df_bloom.tweet = df_bloom.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="x7lHqsb4-haG" outputId="6f471c0d-003a-4ba6-af4c-fd6aaf86c8cd"
df_bloom["polarity"] = df_bloom["tweet"].apply(polarity)
# + id="ykABPfWF-haG"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search against the Wall Street Journal's @WSJ account.
c = twint.Config()
c.Username = ("WSJ")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="sJzjcUTL-haG"
#Run
twint.run.Search(c)
# + id="rKsL-vD0-haH"
df = twint.storage.panda.Tweets_df
# + id="ESo9YbNC-haH"
df_wsj = df[['id','date','tweet','hashtags','username','search']]
# + colab={"base_uri": "https://localhost:8080/"} id="buc2mpEr-haH" outputId="75f9a7e5-d2ac-47cc-bb52-ccf565885cf6"
df_wsj.tweet = df_wsj.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="c227TUMr-haH" outputId="1479861b-5e3e-4cc2-d7c9-fbddad79b3ac"
df_wsj["polarity"] = df_wsj["tweet"].apply(polarity)
# + id="POfTDOIw-haI"
# Combine the three news outlets into one tweet DataFrame.
frames=[df_cnbc,df_bloom,df_wsj]
# + id="gZ6u30xG-haI"
merged = pd.concat(frames)
# + id="5gdzH9R6-haI"
merged["date"] = pd.to_datetime(merged.date)
# NOTE(review): +8h presumably shifts late-evening tweets onto the next trading day — confirm.
merged["date"] = merged["date"] + datetime.timedelta(hours=8)
# + id="v7KiJWsP-haI"
# Split tweets by the sign of their sentiment polarity.
positive = merged[merged["polarity"]>0]
neutral = merged[merged["polarity"]==0]
negative = merged[merged["polarity"]<0]
# + id="7p3eu4FJ-haI"
# Daily tweet counts per sentiment class.
positive = positive.set_index('date').resample('D')['polarity'].count()
neutral = neutral.set_index('date').resample('D')['polarity'].count()
negative = negative.set_index('date').resample('D')['polarity'].count()
# + id="kTL2RUcX-haJ"
positive = pd.DataFrame(positive)
neutral = pd.DataFrame(neutral)
negative = pd.DataFrame(negative)
# + id="aGxHSLzb-haJ"
positive["positive"] = positive.polarity
neutral["neutral"]=neutral.polarity
negative["negative"]=negative.polarity
# + id="o8ES2Phw-haJ"
df2 = pd.merge(positive,neutral,left_index=True,right_index=True)
# + id="QtzMkvt0-haJ"
sentiment = pd.merge(df2,negative,right_index=True,left_index=True)
# + id="BW17Ptfo-haJ"
# Daily Apple prices joined with the daily sentiment counts.
df = yf.download("AAPL", start=start_date, end=end_date, progress=False, interval='1d')
# + id="5s4EjdYE-haJ"
df.columns = [w.lower() for w in df.columns]
# + id="9sMdOsXq-haJ"
df = pd.merge(df,sentiment,left_index=True,right_index=True)
# + id="G_Q_Mwsn-haK"
# Previous day's close as a feature.
df['close-1'] = df['close'].shift(+1, fill_value=df['close'].iloc[0])
# + id="Wcr5uTbs-haK"
# Day-over-day change in close (the value the model predicts).
df['change'] = df["close"] - df['close'].shift(+1, fill_value=df['close'].iloc[0])
#df['change_pred'] = (df["close"] - df['close'].shift(-1, fill_value=df['change'].iloc[1]))*-1
# + id="WISJIcI6-haK"
df = df.drop(["open","polarity_x","polarity_y","polarity","adj close","close","high","low"],axis=1)
# + id="1Q1myNqv-haK"
#df["change"] = (df["change"]>0).astype(int).astype("float32")
# + [markdown] id="JF9lT__p-haK"
# Since we want to predict the closing stock price for the following day, we just shift the closing price one day to get our y value.
# + [markdown] id="s3KB4nf7-haK"
# Since we are working with sequential data, we don't use train_test_split. Instead we pick the first 90% of observations as the training set, and the remaining 10% as the testing set.
# + id="BXNGYXAD-haL"
test_size = int(len(df) * 0.1)
train = df.iloc[:-test_size,:].copy()
test = df.iloc[-test_size:,:].copy()
# + [markdown] id="oY0uLttX-haL"
# We split the dataset into x values and y values. We also specify .values, since the date is not relevant for the training and doesn't work with some of the later preprocessing.
# + id="-B3HJYvB-haL"
X_train = train.iloc[:,:-1].values
y_train = train.iloc[:,-1].values
X_test = test.iloc[:,:-1].values
y_test = test.iloc[:,-1].values
# + id="6ashFzar-haL"
from sklearn.preprocessing import MinMaxScaler
# + [markdown] id="MTeUOyHj-haL"
# We scale all our values to be between -1 and 1. This should help the accuracy of the model.
# + id="Dh_NMhYj-haL"
x_scaler = MinMaxScaler(feature_range=(-1, 1))
y_scaler = MinMaxScaler(feature_range=(-1, 1))
# + id="LjZ6TevC-haL"
X_train = x_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1,1))
X_test = x_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1,1))
# + id="pmwc3Vzi-haM"
# NOTE(review): samples are reshaped to a single timestep of 5 features, while the
# model below declares input_shape=(n_input, n_features)=(10, 5) — confirm intended.
X_train = np.reshape(X_train, (X_train.shape[0], 1, 5))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 5))
# + id="TVR1LBjD-haM"
from keras.preprocessing.sequence import TimeseriesGenerator
# + [markdown] id="Sa2m1dBh-haM"
# Now we start making our RNN model.
#
# n_input = how many days we look in the past to predict the next sample. We chose 10 mostly by trial and error.
#
# We set epochs to 200. It's our experience that the more you train the model, the more it will try to predict the daily volatility. If we only trained it for e.g. 10 epochs, the model would make a soft curve which didn't look like a real stock development. By trial and error we found 200 to be the best training amount.
# + id="Ro2ZbTgT-haM"
n_input = 10
n_features= X_train.shape[2] # how many predictors/Xs/features we have to predict y
b_size = 10 # Number of timeseries samples in each batch
epochs = 200
# + [markdown] id="Jex49Oii-haN"
# Since we are working with sequential stock data we chose an LSTM model, which is a type of RNN.
# The activation function is set to relu, and the optimizer is adam, since these are the standard for this kind of task.
# We chose 2 layers with 50 units each, once again by trial and error.
#
# Since we want to predict the actual stock price we use MSE (mean squared error) as our loss function.
# + colab={"base_uri": "https://localhost:8080/"} id="hkjiud3g-haN" outputId="bd125661-43a6-455b-a9f0-e9c5c0200061"
model = Sequential()
model.add(LSTM(50, activation='relu',return_sequences=True, input_shape=(n_input, n_features)))
model.add(Dropout(0.2))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='MSE',metrics=['MSE'])
model.summary()
# + id="B8YkSdxt-haN"
model.fit(X_train,y_train,epochs=epochs,verbose=1)
# + [markdown] id="dfL7xeP_-haO"
# This looks like overfitting, but it works the best in our case.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="XMQVRawG-haO" outputId="b03ff17f-9f25-49b2-d79a-3b775cebe704"
# Training-loss curve over epochs.
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)),loss_per_epoch);
# + [markdown] id="efm20ySx-haO"
# Then we do some data processing to get it back to the original format, so we can compare the real data to the predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="Mw3P9k9r-haO" outputId="ff638dd1-39c3-482d-f23b-d7b7651d9b3c"
# Undo the MinMax scaling so predictions are in price-change units again.
y_pred_scaled = model.predict(X_test)
y_pred = y_scaler.inverse_transform(y_pred_scaled)
y_test = y_scaler.inverse_transform(y_test)
results = pd.DataFrame({'y_true':y_test.flatten(),'y_pred':y_pred.flatten()})
print(results)
# + id="kU60RolT-haO"
# Binarize: 1.0 = positive predicted/actual change (buy signal), 0.0 otherwise.
results["y_pred"] = (results["y_pred"]>0).astype(int).astype("float32")
results["y_true"] = (results["y_true"]>0).astype(int).astype("float32")
# + id="5LTYZP_J-haP"
backtesting = pd.merge(test.reset_index(),results,right_index=True,left_index=True)
# + id="KlUuv1T0-haP"
buy = backtesting[backtesting["y_pred"]==1]
sell = backtesting[backtesting["y_pred"]==0]
# + id="4rKHJ1Wj-haP"
gain = buy.change.sum()
loss = sell.change.sum()
# + id="oRhdaNJs-haP"
# NOTE(review): subtracting the "sell" days' change treats them as short positions — confirm intended.
profit = gain - loss
# + colab={"base_uri": "https://localhost:8080/"} id="mNt7DU5F-haP" outputId="d673835f-77aa-4099-cea7-b138aa42fd04"
profit
# + colab={"base_uri": "https://localhost:8080/", "height": 136} id="0nO1sC4Z-haP" outputId="dc2fc34e-5b2e-447d-bca6-df2402c16e4e"
# Confusion matrix of predicted vs. actual direction.
pd.crosstab(results.y_true,results.y_pred)
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="XiU2TwEjFkD7" outputId="0ab5bc0b-de4f-4fd8-de69-28b70e5111ef"
# Ten largest daily gains / losses in the backtest window.
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=False).iloc[0:10,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="ccwNkn5-FkD-" outputId="e48fc428-ade7-453d-e334-0ead7016e028"
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=True).iloc[0:10,:])
# + id="g9ltQjcQJnkE"
# Average change on days with a strongly positive (>2:1) tweet-count ratio...
test=df[df.positive/df.negative>2]
# + colab={"base_uri": "https://localhost:8080/"} id="EU8vMFdCIvaF" outputId="cb9b6271-b66e-4959-dcb2-7599254741b1"
test.change.mean()
# + id="z5NXf89PKky9"
# ...and with a strongly negative ratio.
test2=df[df.negative/df.positive>2]
# + colab={"base_uri": "https://localhost:8080/"} id="nOOVFsPNI4G2" outputId="8b0c99af-c179-4834-e974-6817ce42a34e"
test2.change.mean()
# + id="iRf1VKgYv3Ca"
# Keep the finished Apple feature frame for the cross-stock comparison plots.
aapl = df
# + [markdown] id="MwF8_GGo-3JF"
# # Pfizer
# + id="jqIchd4OFjr_"
# + id="mdRqNaXT-3JJ"
# The cells below repeat the tweet-sentiment + LSTM pipeline, this time for Pfizer (PFE).
stock = "Pfizer"
# + id="fckhwpVE-3JJ"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("CNBC")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="ILyffOV1-3JK"
#Run
twint.run.Search(c)
# + id="d8C-pOYT-3JM"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="Ftj6M1ii-3JM" outputId="4ad9944a-bda9-4dc6-ebbd-8538be05a204"
# Keep only the columns needed for the sentiment analysis.
df_cnbc = df[['id','date','tweet','hashtags','username','search']]
df_cnbc.head()
# + colab={"base_uri": "https://localhost:8080/"} id="zIPt848z-3JN" outputId="8e09e996-3b47-40cf-a36e-6dbbbca917a8"
#Lemmatizing the tweets.
df_cnbc.tweet = df_cnbc.tweet.apply(lambda x: sneaky_cleanup(x))
# + id="zZA0QL2l-3JN"
def polarity(text):
    """Return the TextBlob sentiment polarity of *text*, in [-1, 1]."""
    return TextBlob(text).sentiment.polarity
# + colab={"base_uri": "https://localhost:8080/"} id="Yvtg-vmZ-3JN" outputId="3e4adea0-de77-4467-9fff-72a2bbfe6d4b"
df_cnbc["polarity"] = df_cnbc["tweet"].apply(polarity)
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="u2daH-ji-3JO" outputId="146bd232-01e1-4412-f96b-05e21ab2a8a4"
df_cnbc.head()
# + id="xsRU-OTA-3JO"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search against Bloomberg's @business account.
c = twint.Config()
c.Username = ("business")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="YjxA5BYK-3JO"
#Run
twint.run.Search(c)
# + id="BhXncKM_-3JS"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="DP_fkAgD-3JT" outputId="343b60ce-1e67-4b08-f9be-b405b9765d1a"
df_bloom = df[['id','date','tweet','hashtags','username','search']]
df_bloom
# + colab={"base_uri": "https://localhost:8080/"} id="qZS-ZJK1-3JT" outputId="b2b4b797-5c5c-4344-8bf6-9c3a61fa3e3d"
df_bloom.tweet = df_bloom.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="q3EU-daD-3JU" outputId="685a7ed1-228d-465a-aedd-d714d2258a22"
df_bloom["polarity"] = df_bloom["tweet"].apply(polarity)
# + id="QuossVtk-3JV"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search against the Wall Street Journal's @WSJ account.
c = twint.Config()
c.Username = ("WSJ")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="5992FdaL-3JW"
#Run
twint.run.Search(c)
# + id="T4t6uW-V-3Ja"
df = twint.storage.panda.Tweets_df
# + id="0ElLkrei-3Ja"
df_wsj = df[['id','date','tweet','hashtags','username','search']]
# + colab={"base_uri": "https://localhost:8080/"} id="Ljff9n17-3Jb" outputId="54f60854-7921-49df-e43e-40bc184b66df"
df_wsj.tweet = df_wsj.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="IgJA8Apc-3Jc" outputId="4924e61b-5087-4deb-be4b-a1b1994eaa6c"
df_wsj["polarity"] = df_wsj["tweet"].apply(polarity)
# + id="FBuPRZ42-3Jc"
# Combine the three news outlets into one tweet DataFrame.
frames=[df_cnbc,df_bloom,df_wsj]
# + id="bAiyjRCt-3Jd"
merged = pd.concat(frames)
# + id="xA16ztgw-3Je"
merged["date"] = pd.to_datetime(merged.date)
# NOTE(review): +8h presumably shifts late-evening tweets onto the next trading day — confirm.
merged["date"] = merged["date"] + datetime.timedelta(hours=8)
# + id="mRFKMP6h-3Je"
# Split tweets by the sign of their sentiment polarity.
positive = merged[merged["polarity"]>0]
neutral = merged[merged["polarity"]==0]
negative = merged[merged["polarity"]<0]
# + id="bHNv0gAz-3Je"
# Daily tweet counts per sentiment class.
positive = positive.set_index('date').resample('D')['polarity'].count()
neutral = neutral.set_index('date').resample('D')['polarity'].count()
negative = negative.set_index('date').resample('D')['polarity'].count()
# + id="U_AXvkLe-3Jf"
positive = pd.DataFrame(positive)
neutral = pd.DataFrame(neutral)
negative = pd.DataFrame(negative)
# + id="I-c-Eiqp-3Jf"
positive["positive"] = positive.polarity
neutral["neutral"]=neutral.polarity
negative["negative"]=negative.polarity
# + id="QMPutE0d-3Jg"
df2 = pd.merge(positive,neutral,left_index=True,right_index=True)
# + id="N6SxSREH-3Jg"
sentiment = pd.merge(df2,negative,right_index=True,left_index=True)
# + id="v6HptzBT-3Jh"
# Daily Pfizer prices joined with the daily sentiment counts.
df = yf.download("PFE", start=start_date, end=end_date, progress=False, interval='1d')
# + id="QWBe-20O-3Jh"
df.columns = [w.lower() for w in df.columns]
# + id="ubfoXYcz-3Jh"
df = pd.merge(df,sentiment,left_index=True,right_index=True)
# + id="fufFiBdO-3Ji"
# Previous day's close as a feature.
df['close-1'] = df['close'].shift(+1, fill_value=df['close'].iloc[0])
# + id="YwUwUbY0-3Jj"
# Day-over-day change in close (the value the model predicts).
df['change'] = df["close"] - df['close'].shift(+1, fill_value=df['close'].iloc[0])
#df['change_pred'] = (df["close"] - df['close'].shift(-1, fill_value=df['change'].iloc[1]))*-1
# + id="9FoCUkgT-3Jj"
df = df.drop(["open","polarity_x","polarity_y","polarity","adj close","close","high","low"],axis=1)
# + id="z0Ut8fkF-3Jj"
#df["change"] = (df["change"]>0).astype(int).astype("float32")
# + [markdown] id="Xx9O1EYH-3Jk"
# Since we want to predict the closing stock price for the following day, we just shift the closing price one day to get our y value.
# + [markdown] id="Ue50epyO-3Jl"
# Since we are working with sequential data, we don't use train_test_split. Instead we pick the first 90% of observations as the training set, and the remaining 10% as the testing set.
# + id="ip7lw8Df-3Jl"
test_size = int(len(df) * 0.1)
train = df.iloc[:-test_size,:].copy()
test = df.iloc[-test_size:,:].copy()
# + [markdown] id="uow_TN7u-3Jm"
# We split the dataset into x values and y values. We also specify .values, since the date is not relevant for the training and doesn't work with some of the later preprocessing.
# + id="XGHKGulf-3Jm"
X_train = train.iloc[:,:-1].values
y_train = train.iloc[:,-1].values
X_test = test.iloc[:,:-1].values
y_test = test.iloc[:,-1].values
# + [markdown] id="b3B4oxKD-3Jo"
# We scale all our values to be between -1 and 1. This should help the accuracy of the model.
# + id="P4X7v3RJ-3Jo"
x_scaler = MinMaxScaler(feature_range=(-1, 1))
y_scaler = MinMaxScaler(feature_range=(-1, 1))
# + id="8K7HsTVw-3Jo"
X_train = x_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1,1))
X_test = x_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1,1))
# + id="qMEf8lkM-3Jp"
# NOTE(review): one timestep of 5 features vs. the model's declared input_shape=(10, 5) — confirm intended.
X_train = np.reshape(X_train, (X_train.shape[0], 1, 5))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 5))
# + [markdown] id="h6ZjD3J5-3Jq"
# Now we start making our RNN model.
#
# n_input = how many days we look in the past to predict the next sample. We chose 10 mostly by trial and error.
#
# We set epochs to 200. It's our experience that the more you train the model, the more it will try to predict the daily volatility. If we only trained it for e.g. 10 epochs, the model would make a soft curve which didn't look like a real stock development. By trial and error we found 200 to be the best training amount.
# + id="NnZpC7iS-3Jq"
n_input = 10
n_features= X_train.shape[2] # how many predictors/Xs/features we have to predict y
b_size = 10 # Number of timeseries samples in each batch
epochs = 200
# + [markdown] id="XtjJhbRs-3Js"
# Since we are working with sequential stock data we chose an LSTM model, which is a type of RNN.
# The activation function is set to relu, and the optimizer is adam, since these are the standard for this kind of task.
# We chose 2 layers with 50 units each, once again by trial and error.
#
# Since we want to predict the actual stock price we use MSE (mean squared error) as our loss function.
# + colab={"base_uri": "https://localhost:8080/"} id="oRrb4bX2-3Jt" outputId="2be23e69-e73f-49ba-cf68-b221a224afa7"
model = Sequential()
model.add(LSTM(50, activation='relu',return_sequences=True, input_shape=(n_input, n_features)))
model.add(Dropout(0.2))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='MSE',metrics=['MSE'])
model.summary()
# + id="6jIbwbBB-3Jw"
model.fit(X_train,y_train,epochs=epochs,verbose=1)
# + [markdown] id="ncATM6GI-3Jy"
# This looks like overfitting, but it works the best in our case.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="2S9LPTGO-3Jy" outputId="1bd684ed-5374-4c43-d6fb-ca2b57cd5166"
# Training-loss curve over epochs.
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)),loss_per_epoch);
# + [markdown] id="bYyTmcvd-3Jy"
# Then we do some data processing to get it back to the original format, so we can compare the real data to the predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="Rz9eAzkJ-3Jz" outputId="061327a7-711f-40e8-e336-7f6ab48faef2"
# Undo the MinMax scaling so predictions are in price-change units again.
y_pred_scaled = model.predict(X_test)
y_pred = y_scaler.inverse_transform(y_pred_scaled)
y_test = y_scaler.inverse_transform(y_test)
results = pd.DataFrame({'y_true':y_test.flatten(),'y_pred':y_pred.flatten()})
print(results)
# + id="SA7lw68R-3Jz"
# Binarize: 1.0 = positive predicted/actual change (buy signal), 0.0 otherwise.
results["y_pred"] = (results["y_pred"]>0).astype(int).astype("float32")
results["y_true"] = (results["y_true"]>0).astype(int).astype("float32")
# + id="wKAv0YYq-3J7"
backtesting = pd.merge(test.reset_index(),results,right_index=True,left_index=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="syzP7zTDIRzO" outputId="954ccc4e-9129-44f3-f599-fb11c0d988aa"
backtesting
# + id="r_g2H1zG-3J7"
buy = backtesting[backtesting["y_pred"]==1]
sell = backtesting[backtesting["y_pred"]==0]
# + id="xcUAmOYk-3J8"
gain = buy.change.sum()
loss = sell.change.sum()
# + id="IVieRanf-3J8"
# NOTE(review): subtracting the "sell" days' change treats them as short positions — confirm intended.
profit = gain - loss
# + colab={"base_uri": "https://localhost:8080/"} id="qwvNwCBA-3J9" outputId="0ffd3e28-4d6f-4b2d-82be-c0d71f7aa5ca"
profit
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="yMwwuFtZMeDS" outputId="f35e90aa-c621-4a53-9ba4-b82f110eaa30"
# Ten largest daily gains / losses in the backtest window.
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=False).iloc[0:10,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="EiCR4SltMeDT" outputId="b1583a10-2a30-4b20-c272-0b62eda76b59"
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=True).iloc[0:10,:])
# + id="9iE4Et3v-3KA"
# Confusion matrix of predicted vs. actual direction.
pd.crosstab(results.y_true,results.y_pred)
# + id="94CMBG_mNGFC"
# + id="JxpX3urwNGUB"
# Average change on days with a strongly positive (>5:1) tweet-count ratio...
test=df[df.positive/df.negative>5]
# + colab={"base_uri": "https://localhost:8080/"} id="lOG3IrGqNGUC" outputId="a184335e-db8f-4317-baff-c7dfdf610ab1"
test.change.mean()
# + id="BakGpu_eNGUD"
# ...and with a strongly negative ratio.
test2=df[df.negative/df.positive>5]
# + colab={"base_uri": "https://localhost:8080/"} id="pQ6sbyPqNGUE" outputId="72f4057c-feae-4a15-b7b8-0d7262a29d8e"
test2.change.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Xxy5CsR3jFs7" outputId="c570a1b8-d107-41f9-9539-54892cce30f3"
# Days sorted by most positive / most negative coverage.
df.sort_values(by="positive",ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="3PMET-ibke09" outputId="32037817-7dd9-457e-8ee2-8c1efed82935"
df.sort_values(by="negative",ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="Moyxu2w6nFfH" outputId="85b996a9-3f9a-466e-e7b8-dc03edb2b4c3"
# Total tweet counts per sentiment class (columns 1:4 are positive/neutral/negative).
plt.bar(list(df.iloc[:,1:4].columns.values),df.iloc[:,1:4].sum())
# + colab={"base_uri": "https://localhost:8080/"} id="c_kROxW-zDe6" outputId="9a677d47-7f56-4fe1-cc6d-d32c4f946639"
list(df.iloc[:,1:4].columns.values)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="R4FecdaksVre" outputId="894bf8a9-4b08-43da-c911-123f30dfb3d8"
plt.plot(df.iloc[:,1:4].sum())
# + colab={"base_uri": "https://localhost:8080/"} id="_mp3PeUHnhon" outputId="ddfbd1c3-c598-40b6-9565-9d71f9e61db9"
df.iloc[:,1:4].sum()
# + id="e1_DqWkrnhhv"
# Keep the finished Pfizer feature frame for the cross-stock comparison plots.
pfe = df
# + [markdown] id="8KSWNOHZ_QRW"
# # Disney
# + id="fSR6qrgB_QRY"
# The cells below repeat the tweet-sentiment + LSTM pipeline, this time for Disney (DIS).
stock = "Disney"
# + id="1-3ANR8i_QRY"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
c = twint.Config()
c.Username = ("CNBC")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="HNgsu03Y_QRZ"
#Run
twint.run.Search(c)
# + id="e7alGAxd_QRa"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="vwTaY9ig_QRa" outputId="602a7b9a-ff12-4ddf-ff3e-83fff25ada54"
# Keep only the columns needed for the sentiment analysis.
df_cnbc = df[['id','date','tweet','hashtags','username','search']]
df_cnbc.head()
# + colab={"base_uri": "https://localhost:8080/"} id="2ms44p8c_QRb" outputId="9cffbcb1-7ef0-4fbe-b7e6-4d2e58568a0e"
#Lemmatizing the tweets.
df_cnbc.tweet = df_cnbc.tweet.apply(lambda x: sneaky_cleanup(x))
# + id="BtpoZiKd_QRb"
def polarity(text):
    """Return the TextBlob sentiment polarity of *text*, in [-1, 1]."""
    return TextBlob(text).sentiment.polarity
# + colab={"base_uri": "https://localhost:8080/"} id="q0tlap9y_QRb" outputId="029b3491-168e-4ebb-9bd9-d866c1f1a1de"
df_cnbc["polarity"] = df_cnbc["tweet"].apply(polarity)
# + id="dmwnsWqo_QRb" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="c99a8b87-648e-4a78-ec4f-cbf212770cbe"
df_cnbc.head()
# + id="CJky0VnO_QRc"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search against Bloomberg's @business account.
c = twint.Config()
c.Username = ("business")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="ja4q2O8l_QRc"
#Run
twint.run.Search(c)
# + id="948BO5-__QRe"
df = twint.storage.panda.Tweets_df
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="xv3y35mL_QRe" outputId="a93faee2-7516-454b-a20a-cabce3a5005c"
df_bloom = df[['id','date','tweet','hashtags','username','search']]
df_bloom
# + colab={"base_uri": "https://localhost:8080/"} id="bCKHql1e_QRe" outputId="f81973db-60f3-4893-a675-30b52d25a0bd"
df_bloom.tweet = df_bloom.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="gmVftPba_QRf" outputId="ba3683a2-d789-43c4-d5bd-16c9862cc5bd"
df_bloom["polarity"] = df_bloom["tweet"].apply(polarity)
# + id="NC7pnoUV_QRf"
# https://github.com/twintproject/twint/wiki/Configuration
#Configuration
# Same search against the Wall Street Journal's @WSJ account.
c = twint.Config()
c.Username = ("WSJ")
c.Search = stock
c.Since = "2015-1-1"
c.Count = True
c.Filter_retweets = True
c.Pandas = True
# + id="bTf7CUJQ_QRf"
#Run
twint.run.Search(c)
# + id="uVklMS0__QRj"
df = twint.storage.panda.Tweets_df
# + id="D6oWBERn_QRk"
df_wsj = df[['id','date','tweet','hashtags','username','search']]
# + colab={"base_uri": "https://localhost:8080/"} id="f7YSzvPc_QRk" outputId="f0eb41c2-f353-4766-ab84-db03100dc016"
df_wsj.tweet = df_wsj.tweet.apply(lambda x: sneaky_cleanup(x))
# + colab={"base_uri": "https://localhost:8080/"} id="b0io0_h8_QRk" outputId="462db515-6f4e-4ca2-ed2e-0ba7936c9c62"
df_wsj["polarity"] = df_wsj["tweet"].apply(polarity)
# + id="rqWWBDLF_QRm"
# Combine the three news outlets into one tweet DataFrame.
frames=[df_cnbc,df_bloom,df_wsj]
# + id="4NqnVRpq_QRm"
merged = pd.concat(frames)
# + id="nXFWr2Xv_QRm"
merged["date"] = pd.to_datetime(merged.date)
# NOTE(review): +8h presumably shifts late-evening tweets onto the next trading day — confirm.
merged["date"] = merged["date"] + datetime.timedelta(hours=8)
# + id="KmvK3qkH_QRm"
# Split tweets by the sign of their sentiment polarity.
positive = merged[merged["polarity"]>0]
neutral = merged[merged["polarity"]==0]
negative = merged[merged["polarity"]<0]
# + id="WLmiBxQb_QRn"
# Daily tweet counts per sentiment class.
positive = positive.set_index('date').resample('D')['polarity'].count()
neutral = neutral.set_index('date').resample('D')['polarity'].count()
negative = negative.set_index('date').resample('D')['polarity'].count()
# + id="yGQoR_9d_QRo"
positive = pd.DataFrame(positive)
neutral = pd.DataFrame(neutral)
negative = pd.DataFrame(negative)
# + id="8EIBA1kv_QRo"
positive["positive"] = positive.polarity
neutral["neutral"]=neutral.polarity
negative["negative"]=negative.polarity
# + id="WON5Cp6o_QRp"
df2 = pd.merge(positive,neutral,left_index=True,right_index=True)
# + id="8ad1neS3_QRp"
sentiment = pd.merge(df2,negative,right_index=True,left_index=True)
# + id="zrprcnNb_QRp"
# Daily Disney prices joined with the daily sentiment counts.
df = yf.download("DIS", start=start_date, end=end_date, progress=False, interval='1d')
# + id="Wk2IiKyS_QRr"
df.columns = [w.lower() for w in df.columns]
# + id="XLxGys8N_QR1"
df = pd.merge(df,sentiment,left_index=True,right_index=True)
# + id="WtXOQ8AZ_QR2"
# Previous day's close as a feature.
df['close-1'] = df['close'].shift(+1, fill_value=df['close'].iloc[0])
# + id="6fNKK_jq_QR2"
# Day-over-day change in close (the value the model predicts).
df['change'] = df["close"] - df['close'].shift(+1, fill_value=df['close'].iloc[0])
#df["day_change"]=(df["open"] - df['close'])*-1
#df['change_pred'] = (df["close"] - df['close'].shift(-1, fill_value=df['change'].iloc[1]))*-1
# + id="WsnYRy0T_QR2"
df = df.drop(["open","polarity_x","polarity_y","polarity","adj close","close","high","low"],axis=1)
#df= df.drop(["adj close","close","high","low"],axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="3zwnG5AiSNsA" outputId="82f30a8a-284a-486e-9b79-fcc469c1152d"
df
# + id="uanxF5ad_QR4"
#df["change"] = (df["change"]>0).astype(int).astype("float32")
# + [markdown] id="aa965-1E_QR4"
# Since we want to predict the closing stock price for the following day, we just shift the closing price one day to get our y value.
# + [markdown] id="_t6d4I5T_QR4"
# Since we are working with sequential data, we don't use train_test_split. Instead we pick the first 90% of observations as the training set, and the remaining 10% as the testing set.
# + id="8q_aEPw0_QR5"
test_size = int(len(df) * 0.1)
train = df.iloc[:-test_size,:].copy()
test = df.iloc[-test_size:,:].copy()
# + [markdown] id="shOvDTPD_QR5"
# We split the dataset into x values and y values. We also specify .values, since the date is not relevant for the training and doesn't work with some of the later preprocessing.
# + id="tKBXDa8l_QR6"
X_train = train.iloc[:,:-1].values
y_train = train.iloc[:,-1].values
X_test = test.iloc[:,:-1].values
y_test = test.iloc[:,-1].values
# + id="unr1c7qR_QR6"
from sklearn.preprocessing import MinMaxScaler
# + [markdown] id="BT15ydSn_QR7"
# We scale all our values to be between -1 and 1. This should help the accuracy of the model.
# + id="69aLO3jm_QR7"
x_scaler = MinMaxScaler(feature_range=(-1, 1))
y_scaler = MinMaxScaler(feature_range=(-1, 1))
# + id="KJ-oxBWO_QR7"
X_train = x_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1,1))
X_test = x_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1,1))
# + id="e9V-wHz6_QR8"
# NOTE(review): one timestep of 5 features vs. the model's declared input_shape=(10, 5) — confirm intended.
X_train = np.reshape(X_train, (X_train.shape[0], 1, 5))
X_test = np.reshape(X_test, (X_test.shape[0], 1, 5))
# + id="_Ok7BjzX_QR9"
from keras.preprocessing.sequence import TimeseriesGenerator
# + [markdown] id="xlkg87cM_QR9"
# Now we start making our RNN model.
#
# n_input = how many days we look in the past to predict the next sample. We chose 10 mostly by trial and error.
#
# We set epochs to 200. It's our experience that the more you train the model, the more it will try to predict the daily volatility. If we only trained it for e.g. 10 epochs, the model would make a soft curve which didn't look like a real stock development. By trial and error we found 200 to be the best training amount.
# + id="LBKZ1D5K_QR9"
n_input = 10
n_features= X_train.shape[2] # how many predictors/Xs/features we have to predict y
b_size = 10 # Number of timeseries samples in each batch
epochs = 200
# + id="Ytiip4Z9_QR_"
import numpy as np
# + id="gtQv2Tea_QSA"
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# + [markdown] id="i7wkayYx_QSA"
# Since we are working with sequential stock data we chose an LSTM model, which is a type of RNN.
# The activation function is set to relu, and the optimizer is adam, since these are the standard for this kind of task.
# We chose 2 layers with 50 units each, once again by trial and error.
#
# Since we want to predict the actual stock price we use MSE (mean squared error) as our loss function.
# + colab={"base_uri": "https://localhost:8080/"} id="T_SqwuHN_QSA" outputId="2d10c2be-fd46-4bdb-edd1-796666303f15"
model = Sequential()
model.add(LSTM(50, activation='relu',return_sequences=True, input_shape=(n_input, n_features)))
model.add(Dropout(0.2))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='MSE',metrics=['MSE'])
model.summary()
# + id="ora_UrMY_QSB"
model.fit(X_train,y_train,epochs=epochs,verbose=1)
# + id="vwxkc8si_QSC"
import matplotlib.pyplot as plt
# + [markdown] id="u5QQVecS_QSC"
# This looks like overfitting, but it works the best in our case.
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="pVSh77pV_QSD" outputId="d7228caf-9481-4a31-ccdc-fdba00107255"
# Training-loss curve over epochs.
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)),loss_per_epoch);
# + [markdown] id="r1ZUfXtL_QSD"
# Then we do some data processing to get it back to the original format, so we can compare the real data to the predictions.
# + colab={"base_uri": "https://localhost:8080/"} id="JetMaJ5m_QSD" outputId="728ff059-ab4a-4f94-c7f7-8e33beca0864"
# Undo the MinMax scaling so predictions are in price-change units again.
y_pred_scaled = model.predict(X_test)
y_pred = y_scaler.inverse_transform(y_pred_scaled)
y_test = y_scaler.inverse_transform(y_test)
results = pd.DataFrame({'y_true':y_test.flatten(),'y_pred':y_pred.flatten()})
print(results)
# + id="OJcplhVC_QSE"
# Binarize: 1.0 = positive predicted/actual change (buy signal), 0.0 otherwise.
results["y_pred"] = (results["y_pred"]>0).astype(int).astype("float32")
results["y_true"] = (results["y_true"]>0).astype(int).astype("float32")
# + id="vqya4TRM_QSF"
backtesting = pd.merge(test.reset_index(),results,right_index=True,left_index=True)
# + id="LmnfSEv7_QSF"
buy = backtesting[backtesting["y_pred"]==1]
sell = backtesting[backtesting["y_pred"]==0]
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="L_EpPXGGK8cp" outputId="fcdc7f00-9a63-4707-bcda-bbc4fc5013ff"
backtesting
# + id="TbzK9bQZ_QSF"
gain = buy.change.sum()
loss = sell.change.sum()
# + id="uN8q-Q_R_QSF"
# NOTE(review): subtracting the "sell" days' change treats them as short positions — confirm intended.
profit = gain - loss
# + colab={"base_uri": "https://localhost:8080/"} id="r31QEHlR_QSG" outputId="c6cd13a0-908a-4e84-c4f6-5de40e404be0"
profit
# + id="7sgeBZ52P1jW"
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="e6Gq7OwbP1sX" outputId="16ddabbd-7683-4344-956c-b73c1b66491b"
# Ten largest daily gains / losses in the backtest window.
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=False).iloc[0:10,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="DiCOfR7BP1sZ" outputId="be99af60-36c9-45b0-8dc1-3025d11cd2a6"
pd.DataFrame(backtesting.sort_values(by=["change"],ascending=True).iloc[0:10,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="ww2H1tUS_QSH" outputId="a35c0414-2dd2-46a8-b983-877e0be5ec05"
# Confusion matrix of predicted vs. actual direction.
pd.crosstab(results.y_true,results.y_pred)
# + id="wGPS8X8CdwvM"
# + id="sG_uo8m4NHuU"
# Average change on days with a strongly positive (>5:1) tweet-count ratio...
test=df[df.positive/df.negative>5]
# + colab={"base_uri": "https://localhost:8080/"} id="Ny1ktY4LNHuV" outputId="cf8f8f26-ad9d-47cf-d79e-b133d81cab40"
test.change.mean()
# + id="4bx8Rq50NHuV"
# ...and with a strongly negative (>2:1) ratio.
test2=df[df.negative/df.positive>2]
# + colab={"base_uri": "https://localhost:8080/"} id="jviYCjm2NHuW" outputId="79cb4116-e432-42d1-bff8-ee3df34b8955"
test2.change.mean()
# + id="uCohF2GFwn8C"
# Keep the finished Disney feature frame for the cross-stock comparison plots.
dis = df
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="lRbLLeZryzZ1" outputId="6ebeac33-e81c-40b3-8b24-f9335e2d285e"
# Disney sentiment-class totals (columns 1:4 are positive/neutral/negative).
plt.bar(list(df.iloc[:,1:4].columns.values),df.iloc[:,1:4].sum())
# + colab={"base_uri": "https://localhost:8080/", "height": 554} id="5gR321EG6HSn" outputId="7b916fa3-b8ca-4e22-8461-dfe4f5061087"
# Grouped bar chart of total sentiment counts for Amazon, Apple and Pfizer.
# BUG FIX: the original passed `list(df.iloc[:,1:4].columns.values) + 0` (and
# + 0.25 / + 0.5) as the bar positions; adding a number to a Python list raises
# TypeError. Use numeric positions (the pattern the next chart cells use) and
# put the column names on the ticks instead.
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
X = np.arange(3)  # one group per sentiment column: positive / neutral / negative
ax.bar(X + 0, amzn.iloc[:, 1:4].sum(), color='b', width=0.25, label="Amazon")
ax.bar(X + 0.25, aapl.iloc[:, 1:4].sum(), color='r', width=0.25, label="Apple")
ax.bar(X + 0.5, pfe.iloc[:, 1:4].sum(), color='g', width=0.25, label="Pfizer")
plt.xticks(X + 0.25, list(df.iloc[:, 1:4].columns.values))
plt.legend()
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="Z6MJpEUv75eJ" outputId="d1eee4b5-396f-4e3e-8211-9da062480120"
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_axes([2,2,2,2])
X = np.arange(3)
ax.bar(X +0,aapl.iloc[:,1:4].sum(), color = 'r', width = 0.20,label="Apple")
ax.bar(X + 0.20,amzn.iloc[:,1:4].sum(), color = 'b', width = 0.20,label="Amazon")
ax.bar(X +0.40,dis.iloc[:,1:4].sum(), color = 'y', width = 0.20,label="Disney")
ax.bar(X +0.60,pfe.iloc[:,1:4].sum(), color = 'g', width = 0.20,label="Pfizer")
plt.xlabel('Sentiment', fontweight='bold',fontsize=18)
plt.ylabel('News', fontweight='bold',fontsize=18)
plt.xticks([r + 0.3 for r in range(len(amzn.iloc[:,1:4].sum()))], ticker,fontsize=13)
plt.legend()
# + colab={"base_uri": "https://localhost:8080/", "height": 666} id="hx5qlqWoBQyV" outputId="1cd6a8a5-7af8-4c5f-c409-799a1f143465"
import numpy as np
import matplotlib.pyplot as plt
# Same grouped bar chart as above, but each company's sentiment counts are
# normalised to percentages of that company's total news volume.
fig = plt.figure()
ax = fig.add_axes([2,2,2,2])
X = np.arange(3)
ax.bar(X +0,aapl.iloc[:,1:4].sum()/sum(aapl.iloc[:,1:4].sum())*100, color = 'r', width = 0.20,label="Apple")
ax.bar(X + 0.20,amzn.iloc[:,1:4].sum()/sum(amzn.iloc[:,1:4].sum())*100, color = 'b', width = 0.20,label="Amazon")
ax.bar(X +0.40,dis.iloc[:,1:4].sum()/sum(dis.iloc[:,1:4].sum())*100, color = 'y', width = 0.20,label="Disney")
ax.bar(X +0.60,pfe.iloc[:,1:4].sum()/sum(pfe.iloc[:,1:4].sum())*100, color = 'g', width = 0.20,label="Pfizer")
# NOTE(review): axis labels look swapped -- x shows tickers, y shows percent.
plt.xlabel('Sentiment in %', fontweight='bold',fontsize=18)
plt.ylabel('News', fontweight='bold',fontsize=18)
plt.xticks([r + 0.3 for r in range(len(amzn.iloc[:,1:4].sum()))], ticker,fontsize=13)
plt.legend()
# + colab={"base_uri": "https://localhost:8080/"} id="LzVqrlpjBUox" outputId="0e3da7b6-3964-41af-e6c0-210d3ad312f1"
# NOTE(review): this is total/each-column (the reciprocal of a share);
# presumably the intent was the percentage form computed two lines below.
sum(aapl.iloc[:,1:4].sum())/aapl.iloc[:,1:4].sum()
# + colab={"base_uri": "https://localhost:8080/"} id="Oy5N5Af2B9TW" outputId="ebad557c-76af-429b-9544-796503c38c5d"
# Share of each sentiment column in Apple's total, in percent.
aapl.iloc[:,1:4].sum()/sum(aapl.iloc[:,1:4].sum())*100
| _notebooks/2021-01-08-stock_predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
"""
Part 2
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# %matplotlib inline
img = mpimg.imread('MonaLisa.png')
plt.imshow(img)
# +
import random

# Draw up to 5000 distinct random pixel coordinates from the image.
# (5000 attempts; duplicates are skipped, so the result may hold fewer.)
samples = []
used_pts = set()
for _ in range(5000):
    pt = (random.randint(0, img.shape[0] - 1),
          random.randint(0, img.shape[1] - 1))
    if pt not in used_pts:
        used_pts.add(pt)
        samples.append(pt)
samples = np.array(samples)
# +
def preprocess(data):
    """Collect per-channel colour values of the sampled pixels.

    Returns three parallel lists in (red, green, blue) order.
    """
    r_data = []
    g_data = []
    b_data = []
    for pt in data:
        # Pixels are RGBA floats; the alpha channel is discarded.
        r,g,b,_ = img[pt[0]][pt[1]]
        r_data.append(r)
        g_data.append(g)
        b_data.append(b)
    return r_data, g_data, b_data
# NOTE(review): the unpack order swaps green and blue (the function returns
# r, g, b but is unpacked as r_d, b_d, g_d).  A mirror-image swap in the
# tree-assignment/call order further down cancels this out, so the rendered
# image is correct -- fix both swaps together or leave both alone.
r_d, b_d, g_d = preprocess(samples)
# +
# sklearn's random forest regressor: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from sklearn.ensemble import RandomForestRegressor
def get_random_forest(data, labels, max_depth=2, n_estimators=10):
    """Fit a RandomForestRegressor on (data, labels) and return it."""
    forest = RandomForestRegressor(max_depth=max_depth,
                                   n_estimators=n_estimators)
    forest.fit(data, labels)
    return forest
def get_rgb_forests(points, red_data, blue_data, green_data, max_depth, n_estimators):
    """Train one regressor per colour channel on the same sample points.

    Returns the trees in (red, green, blue) order.
    """
    r_tree, g_tree, b_tree = (
        get_random_forest(points, channel, max_depth, n_estimators)
        for channel in (red_data, green_data, blue_data)
    )
    return r_tree, g_tree, b_tree
def get_color_predictions(red_tree, blue_tree, green_tree):
    """Predict an RGB value for every pixel of `img` from the channel trees.

    Returns an array shaped like `img` with predicted (r, g, b, 1.0) pixels.
    """
    ret = np.zeros(img.shape)
    # Every pixel coordinate, in row-major order.
    coords = [(x, y) for x in range(img.shape[0]) for y in range(img.shape[1])]
    r_preds = red_tree.predict(coords)
    b_preds = blue_tree.predict(coords)
    g_preds = green_tree.predict(coords)
    width = img.shape[1]
    for i in range(len(r_preds)):
        # BUG FIX: the original used `x = i / width`, which is only integer
        # division under Python 2; divmod is explicit and works on both.
        x, y = divmod(i, width)
        ret[x][y] = [r_preds[i], g_preds[i], b_preds[i], 1.0]
    # (The redundant `ret = np.array(ret)` copy was removed -- ret is
    # already an ndarray.)
    return ret
def print_image(color_array):
    """Clear the current matplotlib figure and display *color_array*."""
    plt.clf()
    plt.imshow(color_array)
    plt.show()
# Fit depth-5, 10-tree forests and repaint the image.  (The b/g arguments
# are swapped twice between here and the preprocess() unpacking, which
# cancels out, so colours render correctly.)
r_t, b_t, g_t = get_rgb_forests(samples, r_d, b_d, g_d, 5, 10)
new_img = get_color_predictions(r_t, b_t, g_t)
print_image(new_img)
# -
# Effect of tree depth with a single tree per channel.
for depth in (1, 2, 3, 5, 10, 15):
    r_t, b_t, g_t = get_rgb_forests(samples, r_d, b_d, g_d, depth, 1)
    new_img = get_color_predictions(r_t, b_t, g_t)
    print('Depth {0}'.format(depth))
    print_image(new_img)
# Effect of ensemble size at a fixed depth of 7.
for trees in (1, 3, 5, 10, 100):
    r_t, b_t, g_t = get_rgb_forests(samples, r_d, b_d, g_d, 7, n_estimators=trees)
    new_img = get_color_predictions(r_t, b_t, g_t)
    print('Trees {0}'.format(trees))
    print_image(new_img)
# +
"""
Part 2.E.III
"""
from sklearn.neighbors import NearestNeighbors
nbrs = NearestNeighbors(n_neighbors=1).fit(samples)
xs = []
knn_img = np.zeros(img.shape)
for x in xrange(knn_img.shape[0]):
for y in xrange(knn_img.shape[1]):
xs.append((x,y))
_,indices = nbrs.kneighbors(xs)
for i in xrange(len(xs)):
x,y = xs[i]
img_x, img_y = samples[indices[i][0]]
knn_img[x][y] = img[img_x][img_y]
print_image(knn_img)
# +
"""
Part 2.E.IV
"""
def get_random_forest_experiment(data, labels, max_depth=2, n_estimators=10, min_samples_leaf=1, min_weight_fraction_leaf=0., max_leaf_nodes=None):
    """Fit a RandomForestRegressor exposing the leaf-control knobs used in 2.E.IV."""
    forest = RandomForestRegressor(
        n_estimators=n_estimators,
        max_depth=max_depth,
        min_samples_leaf=min_samples_leaf,
        min_weight_fraction_leaf=min_weight_fraction_leaf,
        max_leaf_nodes=max_leaf_nodes,
    )
    forest.fit(data, labels)
    return forest
def get_rgb_forests_experiment(points, red_data, blue_data, green_data, max_depth, n_estimators, min_samples_leaf=1, min_weight_fraction_leaf=0., max_leaf_nodes=None):
    """Train one experimental regressor per colour channel; returns (r, g, b) trees."""
    r_tree = get_random_forest_experiment(points, red_data, max_depth, n_estimators, min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_leaf_nodes=max_leaf_nodes)
    g_tree = get_random_forest_experiment(points, green_data, max_depth, n_estimators, min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_leaf_nodes=max_leaf_nodes)
    b_tree = get_random_forest_experiment(points, blue_data, max_depth, n_estimators, min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_leaf_nodes=max_leaf_nodes)
    return r_tree, g_tree, b_tree
# Sweep min_samples_leaf at fixed depth 15 / 1 tree; very large values
# force near-constant predictions.
for min_samples in (1, 100, 1000, 1000000):
    print('Experiment 1: depth 15 trees 1 min_samples_leaf {0}'.format(min_samples))
    r_t1, b_t1, g_t1 = get_rgb_forests_experiment(samples, r_d, b_d, g_d, 15, 1, min_samples_leaf=min_samples)
    new_img1 = get_color_predictions(r_t1, b_t1, g_t1)
    print_image(new_img1)
# -
"""
Part 2.E.IV Continued
"""
for min_weight in (0., 0.1, 0.5):
print('Experiment 2: depth 15 trees 1 min_weight {0}'.format(min_weight))
r_t1, b_t1, g_t1 = get_rgb_forests_experiment(samples, r_d, b_d, g_d, 15, 1, min_weight_fraction_leaf=min_weight)
new_img1 = get_color_predictions(r_t1, b_t1, g_t1)
print_image(new_img1)
"""
Part 2.E.IV Continued
"""
for max_leaf_nodes in (2, 10, 100, 1000, 10000000):
print('Experiment 2: depth 15 trees 1 max leaf nodes {0}'.format(max_leaf_nodes))
r_t1, b_t1, g_t1 = get_rgb_forests_experiment(samples, r_d, b_d, g_d, 15, 1, max_leaf_nodes=max_leaf_nodes)
new_img1 = get_color_predictions(r_t1, b_t1, g_t1)
print_image(new_img1)
# +
"""
Part 2.F.I
"""
from sklearn.tree import _tree
regr = RandomForestRegressor(n_estimators=1, max_depth=2)
regr.fit(samples,r_d)
tree_ = regr.estimators_[0].tree_
# The tree visualization code below is borrowed from KDnuggets.com
# CITATION: https://www.kdnuggets.com/2017/05/simplifying-decision-tree-interpretation-decision-rules-python.html
feature_names = ['Y', 'X']
feature_name = [
feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
for i in tree_.feature
]
def recurse(node, depth):
    """Print the fitted tree's split rules as nested if/else pseudo-code."""
    indent = "  " * depth
    if tree_.feature[node] != _tree.TREE_UNDEFINED:
        name = feature_name[node]
        threshold = tree_.threshold[node]
        # Python 2 print statements: this cell requires the python2 kernel.
        print "{}if {} <= {}:".format(indent, name, threshold)
        recurse(tree_.children_left[node], depth + 1)
        print "{}else: # if {} > {}".format(indent, name, threshold)
        recurse(tree_.children_right[node], depth + 1)
    else:
        # Leaf node: print its predicted value.
        print "{}return {}".format(indent, tree_.value[node])
recurse(0, 1)
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="5rmpybwysXGV"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="m8y3rGtQsYP2"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="hrXv0rU9sIma"
# # TensorFlow basics
# + [markdown] id="7S0BwJ_8sLu7"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/basics"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/basics.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/basics.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/basics.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="iJyZUDbzBTIG"
# This guide provides a quick overview of _TensorFlow basics_. Each section of this doc is an overview of a larger topic—you can find links to full guides at the end of each section.
#
# TensorFlow is an end-to-end platform for machine learning. It supports the following:
#
# * Multidimensional-array based numeric computation (similar to <a href="https://numpy.org/" class="external">NumPy</a>.)
# * GPU and distributed processing
# * Automatic differentiation
# * Model construction, training, and export
# * And more
# + [markdown] id="gvLegMMvBZYg"
# ## Tensors
#
# TensorFlow operates on multidimensional arrays or _tensors_ represented as `tf.Tensor` objects. Here is a two-dimensional tensor:
# + id="6ZqX5RnbBS1f"
import tensorflow as tf

# A 2x3 constant tensor; shape and dtype are its key attributes.
x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

print(x)
print(x.shape)
print(x.dtype)
# + [markdown] id="k-AOMqevQGN4"
# The most important attributes of a `tf.Tensor` are its `shape` and `dtype`:
#
# * `Tensor.shape`: tells you the size of the tensor along each of its axes.
# * `Tensor.dtype`: tells you the type of all the elements in the tensor.
# + [markdown] id="bUkKeNWZCIJO"
# TensorFlow implements standard mathematical operations on tensors, as well as many operations specialized for machine learning.
#
# For example:
# + id="BM7xXNDsBfN5"
x + x  # element-wise addition
# + id="ZLGqscTxB61v"
5 * x  # scalar broadcast multiply
# + id="2ImJHd8VfnWq"
x @ tf.transpose(x)  # matrix product, (2,3)@(3,2) -> (2,2)
# + id="U9JZD6TYCZWu"
tf.concat([x, x, x], axis=0)  # stack copies along rows
# + id="seGBLeD9P_PI"
tf.nn.softmax(x, axis=-1)  # row-wise softmax
# + id="YZNZRv1ECjf8"
tf.reduce_sum(x)  # sum of all elements
# + [markdown] id="8-mi5031DVxz"
# Running large calculations on CPU can be slow. When properly configured, TensorFlow can use accelerator hardware like GPUs to execute operations very quickly.
# + id="m97Gv5H6Dz0G"
# Report whether TensorFlow discovered at least one GPU device.
if not tf.config.list_physical_devices('GPU'):
  print("TensorFlow **IS NOT** using the GPU")
else:
  print("TensorFlow **IS** using the GPU")
# + [markdown] id="ln2FkLOqMX92"
# Refer to the [Tensor guide](tensor.ipynb) for details.
# + [markdown] id="oVbomvMyEIVF"
# ## Variables
#
# Normal `tf.Tensor` objects are immutable. To store model weights (or other mutable state) in TensorFlow use a `tf.Variable`.
# + id="SO8_bP4UEzxS"
# Mutable state: unlike tf.Tensor, a tf.Variable can be reassigned in place.
var = tf.Variable([0.0, 0.0, 0.0])
# + id="aDLYFvu5FAFa"
var.assign([1, 2, 3])      # overwrite the stored values
# + id="9EpiOmxXFDSS"
var.assign_add([1, 1, 1])  # in-place increment
# + [markdown] id="tlvTpi1CMedC"
# Refer to the [Variables guide](variable.ipynb) for details.
# + [markdown] id="rG1Dhv2QFkV3"
# ## Automatic differentiation
#
# <a href="https://en.wikipedia.org/wiki/Gradient_descent" class="external">_Gradient descent_</a> and related algorithms are a cornerstone of modern machine learning.
#
# To enable this, TensorFlow implements automatic differentiation (autodiff), which uses calculus to compute gradients. Typically you'll use this to calculate the gradient of a model's _error_ or _loss_ with respect to its weights.
# + id="cYKOi-z4GY9Y"
x = tf.Variable(1.0)  # differentiation point for the autodiff example below
def f(x):
    """Quadratic test function x**2 + 2*x - 5 (minimum -6 at x = -1)."""
    return x**2 + 2*x - 5
# + id="IQz99cxMGoF_"
f(x)  # at x = 1.0 this evaluates to -2
# + [markdown] id="ozLLop0cHeYl"
# At `x = 1.0`, `y = f(x) = (1**2 + 2*1 - 5) = -2`.
#
# The derivative of `y` is `y' = f'(x) = (2*x + 2) = 4`. TensorFlow can calculate this automatically:
# + id="N02NfWpHGvw8"
# Record operations on the tape, then differentiate y w.r.t. x.
with tf.GradientTape() as tape:
  y = f(x)

g_x = tape.gradient(y, x)  # g(x) = dy/dx

g_x
# + [markdown] id="s-DVYJfcIRPd"
# This simplified example only takes the derivative with respect to a single scalar (`x`), but TensorFlow can compute the gradient with respect to any number of non-scalar tensors simultaneously.
# + [markdown] id="ECK3I9bUMk_r"
# Refer to the [Autodiff guide](autodiff.ipynb) for details.
# + [markdown] id="VglUM4M3KhNz"
# ## Graphs and tf.function
#
# While you can use TensorFlow interactively like any Python library, TensorFlow also provides tools for:
#
# * **Performance optimization**: to speed up training and inference.
# * **Export**: so you can save your model when it's done training.
#
# These require that you use `tf.function` to separate your pure-TensorFlow code from Python.
# + id="VitACyZWKJD_"
@tf.function
def my_func(x):
  """Sum of x's elements; traced into a graph on first call per signature."""
  print('Tracing.\n')
  return tf.reduce_sum(x)
# + [markdown] id="fBYDh-huNUBZ"
# The first time you run the `tf.function`, although it executes in Python, it captures a complete, optimized graph representing the TensorFlow computations done within the function.
# + id="vkOFSEkoM1bd"
x = tf.constant([1, 2, 3])
my_func(x)  # first call: traces ('Tracing.' printed) and builds a graph
# + [markdown] id="a3aWzt-rNsBa"
# On subsequent calls TensorFlow only executes the optimized graph, skipping any non-TensorFlow steps. Below, note that `my_func` doesn't print _tracing_ since `print` is a Python function, not a TensorFlow function.
# + id="23dMHWwwNIoa"
x = tf.constant([10, 9, 8])
my_func(x)  # same signature: cached graph runs, no re-trace, no print
# + [markdown] id="nSeTti6zki0n"
# A graph may not be reusable for inputs with a different _signature_ (`shape` and `dtype`), so a new graph is generated instead:
# + id="OWffqyhqlVPf"
x = tf.constant([10.0, 9.1, 8.2], dtype=tf.float32)
my_func(x)  # float32 input is a new signature, so a new graph is traced
# + [markdown] id="UWknAA_zNTOa"
# These captured graphs provide two benefits:
#
# * In many cases they provide a significant speedup in execution (though not this trivial example).
# * You can export these graphs, using `tf.saved_model`, to run on other systems like a [server](https://www.tensorflow.org/tfx/serving/docker) or a [mobile device](https://www.tensorflow.org/lite/guide), no Python installation required.
# + [markdown] id="hLUJ6f2eMsA8"
# Refer to [Intro to graphs](intro_to_graphs.ipynb) for more details.
# + [markdown] id="t_36xPDPPBqp"
# ## Modules, layers, and models
# + [markdown] id="oDaT7kCpUgnJ"
# `tf.Module` is a class for managing your `tf.Variable` objects, and the `tf.function` objects that operate on them. The `tf.Module` class is necessary to support two significant features:
#
# 1. You can save and restore the values of your variables using `tf.train.Checkpoint`. This is useful during training as it is quick to save and restore a model's state.
# 2. You can import and export the `tf.Variable` values _and_ the `tf.function` graphs using `tf.saved_model`. This allows you to run your model independently of the Python program that created it.
#
# Here is a complete example exporting a simple `tf.Module` object:
# + id="1MqEcZOqPBDV"
class MyModule(tf.Module):
  """Minimal tf.Module: one variable plus one traced method, exportable."""
  def __init__(self, value):
    self.weight = tf.Variable(value)

  @tf.function
  def multiply(self, x):
    """Element-wise multiply x by the stored weight."""
    return x * self.weight

# + id="la2G82HfVfU0"
mod = MyModule(3)
mod.multiply(tf.constant([1, 2, 3]))
# + [markdown] id="GaSJX7zQXCm4"
# Save the `Module`:
# + id="1MlfbEMjVzG4"
save_path = './saved'
tf.saved_model.save(mod, save_path)  # export variables + traced graphs
# + [markdown] id="LgfoftD4XGJW"
# The resulting SavedModel is independent of the code that created it. You can load a SavedModel from Python, other language bindings, or [TensorFlow Serving](https://www.tensorflow.org/tfx/serving/docker). You can also convert it to run with [TensorFlow Lite](https://www.tensorflow.org/lite/guide) or [TensorFlow JS](https://www.tensorflow.org/js/guide).
# + id="pWuLOIKBWZYG"
# Reload the SavedModel (independent of the defining Python code) and call it.
reloaded = tf.saved_model.load(save_path)
reloaded.multiply(tf.constant([1, 2, 3]))
# + [markdown] id="nxU6P1RGwHyC"
# The `tf.keras.layers.Layer` and `tf.keras.Model` classes build on `tf.Module` providing additional functionality and convenience methods for building, training, and saving models. Some of these are demonstrated in the next section.
# + [markdown] id="tQzt3yaWMzLf"
# Refer to [Intro to modules](intro_to_modules.ipynb) for details.
# + [markdown] id="Rk1IEG5aav7X"
# ## Training loops
#
# Now put this all together to build a basic model and train it from scratch.
#
# First, create some example data. This generates a cloud of points that loosely follows a quadratic curve:
# + id="VcuFr7KPRPzn"
import matplotlib
from matplotlib import pyplot as plt

matplotlib.rcParams['figure.figsize'] = [9, 6]
# + id="sXN9E_xf-GiP"
# 201 evenly-spaced points on [-2, 2], cast to float32 for the model.
x = tf.linspace(-2, 2, 201)
x = tf.cast(x, tf.float32)

def f(x):
  """Ground-truth quadratic used to generate the training targets."""
  y = x**2 + 2*x - 5
  return y

# Noisy observations of f(x): unit-variance Gaussian noise per point.
y = f(x) + tf.random.normal(shape=[201])

plt.plot(x.numpy(), y.numpy(), '.', label='Data')
plt.plot(x, f(x), label='Ground truth')
plt.legend();
# + [markdown] id="De5LldboSWcW"
# Create a model:
# + id="Pypd0GB4SRhf"
class Model(tf.keras.Model):
  """Small MLP: 1 input -> `units` ReLU hidden -> 1 linear output."""
  def __init__(self, units):
    super().__init__()
    # NOTE(review): passing tf.random.normal directly as an initializer
    # works because Keras calls it as initializer(shape, dtype=...), but the
    # conventional choice would be tf.keras.initializers.RandomNormal.
    self.dense1 = tf.keras.layers.Dense(units=units,
                                        activation=tf.nn.relu,
                                        kernel_initializer=tf.random.normal,
                                        bias_initializer=tf.random.normal)
    self.dense2 = tf.keras.layers.Dense(1)

  def call(self, x, training=True):
    # For Keras layers/models, implement `call` instead of `__call__`.
    # Add a feature axis: (batch,) -> (batch, 1), then squeeze it back.
    x = x[:, tf.newaxis]
    x = self.dense1(x)
    x = self.dense2(x)
    return tf.squeeze(x, axis=1)
# + id="GkwToC5BWV1c"
model = Model(64)
# + id="ReWhH40wTY5F"
# Visualise the randomly-initialised model against data and ground truth.
plt.plot(x.numpy(), y.numpy(), '.', label='data')
plt.plot(x, f(x), label='Ground truth')
plt.plot(x, model(x), label='Untrained predictions')
plt.title('Before training')
plt.legend();
# + [markdown] id="ZebWva4vTBlC"
# Write a basic training loop:
# + id="nOaES5gyTDtG"
# Manual training loop: full-batch SGD on mean squared error.
variables = model.variables
optimizer = tf.optimizers.SGD(learning_rate=0.01)

for step in range(1000):
  with tf.GradientTape() as tape:
    prediction = model(x)
    error = (y-prediction)**2
    mean_error = tf.reduce_mean(error)
  # Differentiate the loss w.r.t. every model variable and apply the update.
  gradient = tape.gradient(mean_error, variables)
  optimizer.apply_gradients(zip(gradient, variables))

  if step % 100 == 0:
    print(f'Mean squared error: {mean_error.numpy():0.3f}')
# + id="Qcvzyg3eYLh8"
# Visualise the fit after the manual training loop.
plt.plot(x.numpy(),y.numpy(), '.', label="data")
plt.plot(x, f(x), label='Ground truth')
plt.plot(x, model(x), label='Trained predictions')
plt.title('After training')
plt.legend();
# + [markdown] id="hbtmFJIXb6qm"
# That's working, but remember that implementations of common training utilities are available in the `tf.keras` module. So consider using those before writing your own. To start with, the `Model.compile` and `Model.fit` methods implement a training loop for you:
# + id="5rt8HP2TZhEM"
new_model = Model(64)
# + id="73kCo1BtP3rQ"
# Same optimisation, but delegated to Keras compile/fit.
new_model.compile(
    loss=tf.keras.losses.MSE,
    optimizer=tf.optimizers.SGD(learning_rate=0.01))

history = new_model.fit(x, y,
                        epochs=100,
                        batch_size=32,
                        verbose=0)

# NOTE(review): this saves `model` (trained by the manual loop above), not
# the freshly fitted `new_model` -- confirm which one was intended.
model.save('./my_model')
# + id="Mo7zRV7XZjv7"
# Loss curve recorded by Model.fit.
plt.plot(history.history['loss'])
plt.xlabel('Epoch')
plt.ylim([0, max(plt.ylim())])
plt.ylabel('Loss [Mean Squared Error]')
plt.title('Keras training progress');
# + [markdown] id="ng-BY_eGS0bn"
# Refer to [Basic training loops](basic_training_loops.ipynb) and the [Keras guide](https://www.tensorflow.org/guide/keras) for more details.
| Tensorflow Tutorial/basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="gFESaUmfsU2e" colab_type="text"
# # Comparativo de solver basado en multiplicadores de Lagrange y método de Newton
#
# Para comparar el desempeño de ambos solvers, se decidió variar el valor del rendimiento $r$ en un rango de [WIP: espeficificar rango]. Al respecto, los resultados obtenidos se resumen en la siguiente tabla:
#
# [WIP: añadir tabla que compare la norma de la diferencia y el valor absoluto entre ambos]
#
# De lo anterior, se desprenden los siguientes hallazgos:
#
# [WIP: hallazgo 1]
# [WIP: hallazgo 2]
# [WIP: hallazgo 3]
#
#
# + [markdown] id="1eGeVGReslSw" colab_type="text"
# ## Librerías
# + id="gPFf_6gCrTt6" colab_type="code" colab={}
import numpy as np
import cupy as cp
import solver.extraer_datos_yahoo as extrae
import solver.funciones_auxiliares as aux
import solver.line_search as line
import solver.modelo_markowitz as mkv
import solver.utils as utils
import solver.optimizacion_numerica as opt
# + id="nOVH0NHyr3R-" colab_type="code" colab={}
# Yahoo Finance tickers (50 large caps across several exchanges) whose price
# history feeds the Markowitz portfolio comparison.
stocks = ['COP','AMT','LIN','LMT','AMZN','WMT','JNJ','VTI','MSFT','GOOG','XOM','CCI','BHP.AX','UNP',
         'BABA','NSRGY','RHHBY','VOO','AAPL','FB','CVX','PLD','RIO.L','HON','HD','PG','UNH','BRK-A','V','0700.HK',
         'RDSA.AS','0688.HK','AI.PA','RTX','MC.PA','KO','PFE','JPM','005930.KS','VZ','RELIANCE.NS','DLR','2010.SR',
         'UPS','7203.T','PEP','MRK','1398.HK','MA','T']
# + id="qeR9KIaLt9Hl" colab_type="code" outputId="1c4a9ac5-14c5-4496-addc-bfeaab6ab54b" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Download price history for every ticker (project helper).
datos = extrae.extraer_datos_yahoo(stocks)
# + id="IcPJNRvmuLV2" colab_type="code" outputId="6df8e4b9-baca-4816-aae6-cbf4ba59071d" colab={"base_uri": "https://localhost:8080/", "height": 258}
datos.head()
# + id="-Tust9YTuobb" colab_type="code" colab={}
# Expected returns per asset (helper name translates to "compute return").
mu = aux.calcular_rendimiento(datos)
# + id="s7T8PDeAuzIZ" colab_type="code" colab={}
# Covariance matrix of returns (helper name translates to "compute variance").
S = aux.calcular_varianza(datos)
# + id="1ayPXeajxSHx" colab_type="code" outputId="787a91e6-c66b-4cfe-a184-afc040a61e20" colab={"base_uri": "https://localhost:8080/", "height": 35}
max(mu)
# + id="ZcaZtpzXxUPt" colab_type="code" colab={}
# Target returns r to sweep: 0.4, 0.5, ..., 1.0 (seven values).
rango =np.arange(0.4,1.1,0.1)
# + id="h3bde4vdxdWC" colab_type="code" outputId="a0c8cc1c-b511-4774-bbd4-ba6084da1889" colab={"base_uri": "https://localhost:8080/", "height": 35}
rango
# + id="b9wkFjOVxeRk" colab_type="code" colab={}
# Solver 1: closed-form Lagrange-multiplier Markowitz weights per target r.
res_sol1 = [mkv.markowitz(r,mu,S) for r in rango]
# + id="BEChECSjzoAe" colab_type="code" colab={}
# Objective: portfolio variance w'Sw; A stacks the two equality constraints
# (target return and weights summing to one).
fo = lambda w: w@S@w
#w_ast = mkv.markowitz(r,mu,S)
n = mu.shape[0]
A = cp.concatenate((mu,cp.ones(n))).reshape(2,n)
#b = cp.array([r,1])
M = cp.ones((2,mu.shape[0]))
tol=1e-8
tol_backtracking=1e-14
#p_ast=fo(w_ast)
# + id="Sej1ekuhyvQ6" colab_type="code" outputId="37c575fd-b92c-4058-923f-82c5d205b90f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Solver 2: Newton's method from a feasible initial point, one solve per
# target return r; [0] keeps only the weight vector from the result tuple.
res_sol2 = [opt.Newtons_method_feasible_init_point(fo,
                                                   A,
                                                   utils.feasible_markowitz(r,mu),
                                                   tol,
                                                   tol_backtracking,
                                                   mkv.markowitz(r,mu,S),
                                                   fo(mkv.markowitz(r,mu,S)),
                                                   maxiter=50)[0] for r in rango]
# + id="JupqCpEv3j0A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8673f9d5-bfd2-478e-991d-15e0a47b19ff"
res_sol1
# + id="skS5ik_Fy7qJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="720075a6-3b26-49ff-c9f2-a3e718286867"
res_sol2
# + id="a1QGZk333g6d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 146} outputId="b12f9854-a3b9-4f57-d675-e6dfe456674e"
# L2 norm of the difference between the two solvers' weight vectors, one
# line per target return.
# FIX: iterate over the actual number of results instead of a hard-coded 7,
# so this cell stays correct if `rango` changes.
for i in range(len(res_sol1)):
    print(np.linalg.norm(res_sol1[i]-res_sol2[i]))
# + id="CT1F5ZI_5zMG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="fd7a3ac6-1217-477c-803d-9ca919a5f87c"
# Achieved portfolio return (w . mu) from each solver, per target r.
# BUG FIX: the second print repeated res_sol1, so the two solvers were
# never actually compared -- it now reports res_sol2.  Also iterate over
# the real result length instead of a hard-coded 7.
for i in range(len(res_sol1)):
    print("------------{}-------------".format(i))
    print(sum(res_sol1[i]*mu))
    print(sum(res_sol2[i]*mu))
# + id="ZGJJyzsz6Klv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="17a087ce-a26a-4fb9-a880-9849b036a583"
# Sum of portfolio weights from each solver (should be 1 for both).
# BUG FIX: the second print repeated res_sol1; it now reports res_sol2 so
# the budget constraint can be checked for both solvers.
for i in range(len(res_sol1)):
    print("------------{}-------------".format(i))
    print(sum(res_sol1[i]))
    print(sum(res_sol2[i]))
# + id="XOPNxlFW6POW" colab_type="code" colab={}
| notebooks/Programacion/Comparacion_Lagrange_Newton.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Utilities
#
# ## Theory
# ### 1.- Convecity
# +
# F1 Plot Generator
# %matplotlib inline
import math
import numpy as np
import matplotlib.pyplot as plt
def f1(x):
    """Expanded form of (x - e)**2 - 2: convex, minimum -2 at x = e."""
    e = math.e
    return x**2 - 2 * e * x + e**2 - 2
def f2(x):
    """F2 implementation: (e - x)**6 - 6, convex with minimum -6 at x = e."""
    diff = np.e - x
    return diff ** 6 - 6
def plot_f(X, Y, opt, f=None):
    """Plot Y against X and mark the optimum with a red dot.

    BUG FIX: the marker's height was always computed with f1(opt), so when
    plotting f2 the red dot landed off the curve.  The plotted function can
    now be passed via `f`; it defaults to f1, keeping existing calls working.
    """
    if f is None:
        f = f1
    plt.figure()
    plt.plot(X, Y)
    plt.ylabel("f1(x)")
    plt.xlabel("x")
    plt.plot(opt, f(opt), 'ro')
    plt.show()
# Plot f1 over a wide window and mark its analytic minimum x = e.
X = np.linspace(-10, 15, num=100)
Y = f1(X)
plot_f(X, Y, 2.718281828459045)
# Plot f2; NOTE(review): plot_f evaluates the marker with f1(opt), so this
# red dot does not lie on the f2 curve -- verify the intended marker value.
X2 = np.linspace(-.4, 5.8, num=100)
Y2 = f2(X2)
plot_f(X2, Y2, 2.60939515596596)
# -
| psets/02/Utilities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import random
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): __future__ imports must be a module's first statements; this
# only works because IPython compiles each cell statement individually.
from __future__ import absolute_import
from __future__ import print_function
# NOTE(review): numpy is imported three times and random twice in this cell
# (and Dense/Input twice below) -- harmless, but worth deduplicating.
import numpy as np
import numpy
import PIL
from PIL import Image
np.random.seed(1337)  # for reproducibility
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
from keras.optimizers import RMSprop
from keras import backend as K
from keras.layers import Concatenate, Dense, LSTM, Input, concatenate
# +
import scipy.io

# Load the training split: a ragged object array with one entry per class,
# each entry holding that class's 10-feature sample matrix.
mat = scipy.io.loadmat('/home/aniruddha/Documents/data_10feature.mat')
arr = mat['TR1_10feature']
arr = np.array(arr)
arr = arr.reshape(-1)
print(arr.shape)
# Flatten all class matrices into one sample array.
# NOTE(review): this loop hard-codes 14 classes while the label loop below
# uses arr.shape[0]; if they ever differ, X_train and y_train fall out of
# sync -- confirm arr.shape[0] == 14.
X_train = []
for i in range(0,14):
    for j in range(0,arr[i].shape[0]):
        X_train.append(arr[i][j])
X_train = np.array(X_train)
print(X_train.shape)
# Class index i repeated once per sample of class i.
y_train = []
for i in range(0,arr.shape[0]):
    for j in range(0,arr[i].shape[0]):
        y_train.append(i)
y_train = np.array(y_train)
print(y_train.shape)
print(y_train[1])
# +
# First test split (TS1): same ragged-per-class layout as the training set.
arr1 = mat['TS1_10feature']
arr1 = np.array(arr1)
arr1 = arr1.reshape(-1)
print(arr1.shape)
# NOTE(review): same 14 vs arr1.shape[0] mismatch risk as the training cell.
X_test = []
for i in range(0,14):
    for j in range(0,arr1[i].shape[0]):
        X_test.append(arr1[i][j])
X_test = np.array(X_test)
print(X_test.shape)
y_test = []
for i in range(0,arr1.shape[0]):
    for j in range(0,arr1[i].shape[0]):
        y_test.append(i)
y_test = np.array(y_test)
print(y_test.shape)
print(y_test[1])
# +
# Second test split (TS2), used as the cross-domain set for the extra pairs.
arr2 = mat['TS2_10feature']
arr2 = np.array(arr2)
arr2 = arr2.reshape(-1)
print(arr2.shape)
# NOTE(review): same 14 vs arr2.shape[0] mismatch risk as the other loaders.
X_test1 = []
for i in range(0,14):
    for j in range(0,arr2[i].shape[0]):
        X_test1.append(arr2[i][j])
X_test1 = np.array(X_test1)
print(X_test1.shape)
y_test1 = []
for i in range(0,arr2.shape[0]):
    for j in range(0,arr2[i].shape[0]):
        y_test1.append(i)
y_test1 = np.array(y_test1)
print(y_test1.shape)
print(y_test1[1])
# +
# Cast to float32 and rescale; the max() prints below sanity-check the range.
# NOTE(review): the /10000 divisor appears to be a dataset-specific scale --
# confirm features really lie in [0, 10000].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_test1 = X_test1.astype('float32')
X_train = X_train/10000
X_test = X_test/10000
X_test1 = X_test1/10000
print(X_train.max())
print(X_test.max())
print(X_test1.max())
# +
def create_addi_pairs(x, y):
    """Build 500 cross-domain pairs: 100 random anchors from x, 5 random
    partners from y each.

    Every pair is [x[k1], y[k2]] with uniformly drawn indices, labelled with
    the sentinel 3 (cross-domain -- no same/different meaning).  Returns
    (pairs, labels) as numpy arrays of shapes (500, 2, ...) and (500,).

    FIX: removed the accidental double assignment `k1 = k1 = ...`.
    """
    pairs = []
    labels = []
    for _ in range(100):
        k1 = random.randrange(0, x.shape[0])
        for _ in range(5):
            k2 = random.randrange(0, y.shape[0])
            pairs += [[x[k1], y[k2]]]
            labels += [3]
    return np.array(pairs), np.array(labels)
def create_pairs(x, y, digit_indices):
    """Build alternating positive/negative sample pairs for classes 0..9.

    For every class d and each of n = (smallest class size - 1) positions:
    a positive pair of two consecutive same-class samples, then a negative
    pair of the same anchor with a sample from a random different class.

    Returns (pairs, pair_labels, same_flags): the stacked sample pairs,
    their class-label pairs, and the alternating 1/0 same-class flags.
    """
    pairs, pair_labels, same_flags = [], [], []
    n = min(len(digit_indices[d]) for d in range(10)) - 1
    for d in range(10):
        for i in range(n):
            # Positive pair: consecutive samples of the same class.
            a, b = digit_indices[d][i], digit_indices[d][i + 1]
            pairs.append([x[a], x[b]])
            pair_labels.append([y[a], y[b]])
            # Negative pair: same anchor vs a random other class (offset
            # 1..9 mod 10 guarantees a different class).
            other = (d + random.randrange(1, 10)) % 10
            a, b = digit_indices[d][i], digit_indices[other][i]
            pairs.append([x[a], x[b]])
            pair_labels.append([y[a], y[b]])
            same_flags.extend([1, 0])
    return np.array(pairs), np.array(pair_labels), np.array(same_flags)
# +
# create training+test positive and negative pairs
# NOTE(review): pairs are built only for classes 0..9 although the data has
# 14 classes -- confirm classes 10..13 are meant to be excluded here.
digit_indices = [np.where(y_train == i)[0] for i in range(10)]
tr_pairs, tr_pair_labels, tr_labels = create_pairs(X_train, y_train, digit_indices )
digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_pair_labels, te_labels = create_pairs(X_test, y_test, digit_indices)
# Cross-domain pairs between the training set and the TS2 split.
tr1_pairs, tr1_y = create_addi_pairs(X_train, X_test1)
print(tr_pairs.shape)
print(tr_pair_labels.shape)
print(te_pairs.shape)
print(tr1_pairs.shape)
# -
from sklearn.utils import shuffle
# Shuffle samples and labels together (fixed seed for reproducibility).
X_train, y_train = shuffle(X_train, y_train, random_state = 0)
X_test, y_test = shuffle(X_test, y_test, random_state=0)
X_test1, y_test1 = shuffle(X_test1, y_test1, random_state=0)
print(X_train.shape)
# model
# Siamese Network
def siamese(X_input, output_dim, reuse= False):
    """Shared embedding branch: 10 -> 16 -> 32 -> output_dim MLP (all ReLU).

    TF1.x graph-mode code: variables live under the 'siamese' scope so the
    second branch can share weights by calling with reuse=True.
    """
    with tf.variable_scope('siamese') as scope:
        if (reuse):
            # Second branch: reuse the variables created by the first call.
            tf.get_variable_scope().reuse_variables()

        #first fully connected layer
        W_fc1 = tf.get_variable('s_wfc1', [10, 16], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc1 = tf.get_variable('s_bfc1', [16], initializer=tf.constant_initializer(0))
        h_fc1 = tf.nn.relu(tf.matmul(X_input, W_fc1) + b_fc1)

        #second fully connected layer
        W_fc2 = tf.get_variable('s_wfc2', [16, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc2 = tf.get_variable('s_bfc2', [32], initializer=tf.constant_initializer(0))
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

        #third fully connected layer
        W_fc3 = tf.get_variable('s_wfc3', [32, output_dim], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc3 = tf.get_variable('s_bfc3', [output_dim], initializer=tf.constant_initializer(0))
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)

        return h_fc3
# model
# Classifier
def classifier(X_input, input_dim, num_classes, reuse=False):
    """Classification head: input_dim -> 32 -> 16 -> num_classes MLP.

    Hidden layers use ReLU; the output layer applies softmax.  Variables
    live under the shared 'classifier' scope so both siamese branches can
    share the same head when reuse=True (TF1.x graph-mode code).
    """
    with tf.variable_scope('classifier'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        # (weight name, bias name, fan-in, fan-out, activation) per layer;
        # creation order and variable names match the original exactly.
        layer_specs = [
            ('c_wfc1', 'c_bfc1', input_dim, 32, tf.nn.relu),
            ('c_wfc2', 'c_bfc2', 32, 16, tf.nn.relu),
            ('c_wfc3', 'c_bfc3', 16, num_classes, tf.nn.softmax),
        ]
        h = X_input
        for w_name, b_name, fan_in, fan_out, activation in layer_specs:
            W = tf.get_variable(w_name, [fan_in, fan_out],
                                initializer=tf.truncated_normal_initializer(stddev=0.02))
            b = tf.get_variable(b_name, [fan_out],
                                initializer=tf.constant_initializer(0))
            h = activation(tf.matmul(h, W) + b)
        return h
# +
batch_size = 32
num_classes = 14
output_dim = 32  # embedding size produced by the siamese branch

sess = tf.Session()

# placeholder for inputs: one 10-feature batch per branch
X_left = tf.placeholder('float', shape= [None, 10])
X_right = tf.placeholder('float', shape= [None, 10])

# placeholder for labels: one-hot class labels per branch, plus the
# same-class indicator used by the contrastive loss
Y_left = tf.placeholder('float', shape= [None, 14])
Y_right = tf.placeholder('float', shape= [None, 14])
Y_isSame = tf.placeholder('float', shape= [None, 1])
# +
# model outputs: the two branches share weights (reuse=True on the right),
# and the classifier head is likewise shared.
processed_left = siamese(X_left, output_dim)
processed_right = siamese(X_right, output_dim, reuse=True)
classify_left = classifier(processed_left, output_dim, num_classes, reuse=False)
classify_right = classifier(processed_right,output_dim, num_classes, reuse=True)
# -
print(processed_left.shape)
print(classify_left.shape)
# +
# lossses
# crossentropy loss: a hand-rolled binary cross-entropy over the one-hot
# labels, kept here for reference but superseded by the softmax version below.
y_clipped_left = tf.clip_by_value(classify_left, 1e-10, 0.9999999)
y_clipped_right = tf.clip_by_value(classify_right, 1e-10, 0.9999999)
cross_entropy_left = -tf.reduce_mean(tf.reduce_sum(Y_left * tf.log(y_clipped_left)
                                                   + (1 - Y_left) * tf.log(1 - y_clipped_left), axis=1))
cross_entropy_right = -tf.reduce_mean(tf.reduce_sum(Y_right * tf.log(y_clipped_right)
                                                    + (1 - Y_right) * tf.log(1 - y_clipped_right), axis=1))
#cross_entropy = (cross_entropy_left + cross_entropy_right)/2.0
# NOTE(review): classify_left/right already go through a softmax in the
# classifier, and tf.losses.softmax_cross_entropy applies another softmax to
# its logits argument — the double softmax is presumably unintended; confirm.
cross_entropy = tf.losses.softmax_cross_entropy(Y_left, classify_left)+tf.losses.softmax_cross_entropy(Y_right, classify_right)
print(cross_entropy.shape)
# contrastive loss (Hadsell et al. style): pull same-class embeddings
# together, push different-class ones apart up to `margin`.
y_pred1 = tf.sqrt(tf.reduce_sum(tf.square(processed_left - processed_right), axis=1, keep_dims=True))  # keep_dims is the TF1 spelling
y_true1 = Y_isSame
margin = 1
contrastive_loss = tf.reduce_mean(y_true1 * tf.square(y_pred1) +
                                  (1 - y_true1) * tf.square(tf.maximum(margin - y_pred1, 0)))
print(contrastive_loss.shape)
print(y_pred1.shape)
print(y_true1.shape)
# logcoral loss: align the (log of the absolute) covariance matrices of the
# left and right embeddings, a CORAL-style domain-adaptation term.
n = 32.0  # batch size used in the covariance normalisation
# Covariance of the left embeddings: (X'X - (1'X)'(1'X)/n) / (n-1)
mul1 = tf.matmul(tf.transpose(processed_left),processed_left)
one = processed_left*0+1  # all-ones tensor with the same shape as the embeddings
mul2 = tf.matmul(tf.transpose(one), processed_left)   # column sums of left embeddings
sub = tf.matmul(tf.transpose(mul2), mul2)
source = (mul1 - (sub)/n)/(n-1)
source = tf.abs(source)
source = tf.clip_by_value(source, 1e-10,10000)  # keep log() finite
source1 = tf.log(source)
# Covariance of the right embeddings, same construction.
mul11 = tf.matmul(tf.transpose(processed_right),processed_right)
mul21 = tf.matmul(tf.transpose(one), processed_right)  # column sums of right embeddings
# FIX: was tf.matmul(tf.transpose(mul2), mul2) — a copy-paste of the *left*
# branch, which made `target` use the left-branch mean term instead of the
# right-branch one.
sub1 = tf.matmul(tf.transpose(mul21), mul21)
target = (mul11 - (sub1)/n)/(n-1)
target = tf.abs(target)
target = tf.clip_by_value(target, 1e-10,10000)
target1 = tf.log(target)
# NOTE(review): this sums matmul(D, D) rather than the squared Frobenius norm
# sum(D*D) of D = source1 - target1 — confirm that is the intended distance.
logcoral_loss = (tf.reduce_sum(tf.matmul((source1-target1),(source1-target1)))/(2*32.0))
print(logcoral_loss.shape)
# -
# -
# Split trainable variables by name prefix: 's_' = siamese branch, 'c_' = classifier.
tvars = tf.trainable_variables()
s_vars = [var for var in tvars if 's_' in var.name]
c_vars = [var for var in tvars if 'c_' in var.name]
print(len(s_vars))
print(len(c_vars))
print(tf.get_variable_scope().reuse)
# One Adam optimizer, three minimisation ops targeting different variable sets.
adam = tf.train.AdamOptimizer()
trainer1 = adam.minimize(cross_entropy, var_list=c_vars)      # classifier head only
trainer2 = adam.minimize(contrastive_loss, var_list=s_vars)   # siamese branch only
trainer3 = adam.minimize(logcoral_loss, var_list=s_vars)      # siamese branch only
# +
# Classification accuracy for each branch (argmax over one-hot labels).
correct_prediction_left = tf.equal(tf.argmax(Y_left, 1), tf.argmax(classify_left, 1))
correct_prediction_right = tf.equal(tf.argmax(Y_right, 1), tf.argmax(classify_right, 1))
accuracy_left = tf.reduce_mean(tf.cast(correct_prediction_left, tf.float32))
accuracy_right = tf.reduce_mean(tf.cast(correct_prediction_right, tf.float32))
# -
# -
# One-hot encode pair labels and plain labels for the 14-class problem.
# NOTE(review): keras.utils.np_utils was removed in later Keras releases
# (use keras.utils.to_categorical there) — this is the old-Keras spelling.
from keras.utils import np_utils
tr_label1 = np_utils.to_categorical(tr_pair_labels[:,0], num_classes=14)  # first element of each training pair
tr_label2 = np_utils.to_categorical(tr_pair_labels[:,1], num_classes=14)  # second element of each training pair
te_label1 = np_utils.to_categorical(te_pair_labels[:,0], num_classes=14)
te_label2 = np_utils.to_categorical(te_pair_labels[:,1], num_classes=14)
print(tr_label1.shape)
print(te_label1.shape)
# +
y_train_onehot = np_utils.to_categorical(y_train, num_classes=14)
y_test_onehot = np_utils.to_categorical(y_test, num_classes=14)
y_test1_onehot = np_utils.to_categorical(y_test1, num_classes=14)  # second (domain-shifted) test set
print(y_train_onehot.shape)
# -
# Sanity checks on label ranges and slice shapes.
print(tr_pair_labels[:,1].max())
print(y_train.max())
print(tr_label1[0:0+32].shape)
print(y_train_onehot[0:100].shape)
print(y_train_onehot)
# +
# Start Training
# Start a new TF session
sess = tf.Session()
# Run the initializer
sess.run(tf.global_variables_initializer())
num_batch_same = int(1360/32)   # batches of pairs per epoch
num_batch_class = int(1242/32)  # batches of labelled samples per epoch
# Training: each epoch first trains the siamese branch with the contrastive
# loss over pairs, then the classifier head with cross-entropy.
for i in range(0,2000):
    k = 0
    avg_cost = 0
    # FIX: was `for j in (0,num_batch_same):`, which iterates over the
    # two-element tuple (0, num_batch_same) — i.e. only 2 batches per epoch
    # instead of num_batch_same batches.
    for j in range(num_batch_same):
        batch_left = tr_pairs[k:k+32,0]
        batch_right = tr_pairs[k:k+32,1]
        label = tr_labels[k:k+32]
        label = label.reshape(-1, 1)  # Y_isSame expects shape [batch, 1]
        k+=32
        # Run optimization op (backprop) and cost op (to get loss value)
        _, l = sess.run([trainer2, contrastive_loss], feed_dict={X_left: batch_left, X_right: batch_right, Y_isSame: label})
        avg_cost += l / num_batch_same
    print("Epoch:", (i + 1), "contrastive_loss =", "{:.8f}".format(avg_cost))
    #avg_cost = 0
    #k=0
    #_, l = sess.run([trainer3, logcoral_loss], feed_dict={X_left: tr1_pairs[:,0], X_right: tr1_pairs[:,1]})
    #print("Epoch:", (i + 1), "logcoral_loss =", "{:.8f}".format(l))
    avg_cost = 0
    k=0
    # FIX: same tuple-iteration bug as above.
    for j in range(num_batch_same):
        # Both branches see the same labelled batch for classifier training.
        batch_left = X_train[k:k+32]
        batch_right = X_train[k:k+32]
        label_left = y_train_onehot[k:k+32]
        label_right = y_train_onehot[k:k+32]
        k+=32
        # Run optimization op (backprop) and cost op (to get loss value)
        _, l = sess.run([trainer1, cross_entropy], feed_dict={X_left: batch_left, X_right: batch_right, Y_left: label_left, Y_right: label_right})
        avg_cost += l / num_batch_same
    print("Epoch:", (i + 1), "cross_entropy =", "{:.8f}".format(avg_cost))
    # Per-epoch evaluation: in-domain test, domain-shifted test, and training accuracy.
    left_te_acc, correct = sess.run([accuracy_left,classify_left], feed_dict={X_left: X_test, Y_left: y_test_onehot})
    left_te1_acc = sess.run(accuracy_left, feed_dict={X_left: X_test1, Y_left: y_test1_onehot})
    left_tr_acc = sess.run(accuracy_left, feed_dict={X_left: X_train, Y_left: y_train_onehot})
    right_tr_acc = sess.run(accuracy_right, feed_dict={X_right: X_test, Y_right: y_test_onehot})
    right_te_acc = sess.run(accuracy_right, feed_dict={X_right: X_test1, Y_right: y_test1_onehot})
    #print(correct)
    print("Epoch:", (i + 1), "train_accuracy_left =", "{:.8f}".format(left_tr_acc), "Epoch:", (i + 1), "test_accuracy_left =", "{:.8f}".format(left_te_acc))
    print("Epoch:", (i + 1), "domain_accuracy_left =", "{:.8f}".format(left_te1_acc))
    #print("Epoch:", (i + 1), "train_accuracy_right =", "{:.8f}".format(right_tr_acc), "Epoch:", (i + 1), "test_accuracy_right =", "{:.8f}".format(right_te_acc))
    print("")
# +
# Spot-check a single training sample through both branches.
n = 122
print(y_train_onehot[n])
correct = sess.run([accuracy_left, classify_left, cross_entropy_left], feed_dict={X_left: X_train[n:n+1], Y_left: y_train_onehot[n:n+1]})
print(correct)
correct1 = sess.run([accuracy_right, classify_right, cross_entropy_right], feed_dict={X_right: X_train[n:n+1], Y_right: y_train_onehot[n:n+1]})
print(correct1)
# source notebook: (logCoral-tensorlfow)Siamese_network(TR1+TS2).ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ** Step -1: Import Libraries**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# FIX: load_digits is used below but was never imported.
from sklearn.datasets import load_digits
# %matplotlib inline
# **Step -2 : Load Dataset **
digits = load_digits()
print(digits.keys())
print(digits.DESCR)
# ** Extracting data**
X = digits.data    # independent variable (n_samples x 64 pixel intensities, 0-16)
y = digits.target  # dependent variable (digit labels 0-9)
X.shape , y.shape
# Normalization [0 - 1] -> threshold: binarise pixels around intensity 7.
# Note: X.max()/X.min() are evaluated before each assignment, so the order
# of the two statements does not change the result.
X[X>7] = X.max()   # if greater than 7 replace that with max value
X[X<=7] = X.min()  # if less than 7 replace that with min value
img1 = X[0:1]
print(y[0:1])
plt.imshow(img1.reshape((8,8)),cmap = 'gray')
# ** Step -3: Standard Scaling **
X = X / X.max()
X.shape, X.max()
# **Step - 5 : Splitting data into testing and training**
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split lives in sklearn.model_selection with the same signature.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X,y, test_size = 0.20,
                                                    random_state = 0)
x_train.shape , x_test.shape, y_train.shape, y_test.shape
# **Step -6 : Bulding a Machine Learning Classifier**
# Train five classifiers on the same split for comparison.
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
model_log = LogisticRegression(C = 10.0)           # weak regularisation (C is inverse strength)
model_knn = KNeighborsClassifier(n_neighbors=3)
model_svm = SVC(C=10.0, kernel='rbf')
model_dt = DecisionTreeClassifier()
model_rf = RandomForestClassifier(n_estimators=100)
model_log.fit(x_train, y_train)  # Logistic regression
model_knn.fit(x_train, y_train)  # KNearest Neighbour
model_svm.fit(x_train, y_train)  # Support vector machine
model_dt.fit(x_train, y_train)   # Desicion Tree
model_rf.fit(x_train, y_train)   # Random Forest
# **Step-7: Evaluation **
# Predictions on the held-out test split, one array per model.
y_pred_log = model_log.predict(x_test)  # for evalutating model
y_pred_knn = model_knn.predict(x_test)  # for evalutating model
y_pred_svm = model_svm.predict(x_test)  # for evalutating model
y_pred_dt = model_dt.predict(x_test)    # for evalutating model
y_pred_rf = model_rf.predict(x_test)    # for evalutating model
# **Classification Report**
from sklearn.metrics import confusion_matrix , classification_report
# +
# Confusion matrix per model (rows = true digit, columns = predicted digit).
cm_log = confusion_matrix(y_test, y_pred_log)  # confusion matrix
cm_knn = confusion_matrix(y_test, y_pred_knn)  # confusion matrix
cm_svm = confusion_matrix(y_test, y_pred_svm)  # confusion matrix
cm_dt = confusion_matrix(y_test, y_pred_dt)    # confusion matrix
cm_rf = confusion_matrix(y_test, y_pred_rf)    # confusion matrix
# Classification report (precision/recall/F1 per class) per model.
cr_log = classification_report(y_test, y_pred_log)
cr_knn = classification_report(y_test, y_pred_knn)
cr_svm = classification_report(y_test, y_pred_svm)
cr_dt = classification_report(y_test, y_pred_dt)
cr_rf = classification_report(y_test, y_pred_rf)
# -
# Visualise each confusion matrix as an annotated heatmap.
import seaborn as sns
sns.heatmap(cm_log ,annot=True, cbar=False,cmap = 'summer')
plt.title('Logistic Regression')
plt.show()
sns.heatmap(cm_knn ,annot=True, cbar=False,cmap = 'winter')
plt.title('K Nearest Neighbour')
plt.show()
sns.heatmap(cm_svm ,annot=True, cbar=False,cmap = 'spring')
plt.title('Support Vector Machine')
plt.show()
sns.heatmap(cm_dt ,annot=True, cbar=False,cmap = 'cool')
plt.title('Desicion Tree')
plt.show()
sns.heatmap(cm_rf ,annot=True, cbar=False,cmap = 'autumn')
plt.title('Random Forest')
plt.show()
# Print all five text reports.
print('='*20+'Logistic Regression'+'='*20)
print(cr_log)
print('='*20+'KNearest Neighbour'+'='*20)
print(cr_knn)
print('='*20+'Suport Vector Machine'+'='*20)
print(cr_svm)
print('='*20+'Descion Tree'+'='*20)
print(cr_dt)
print('='*20+'Random Forest'+'='*20)
print(cr_rf)
# # Saving model
# FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23 — joblib is now a standalone package.  Fall back to the
# old location so the notebook still runs on ancient sklearn installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
joblib.dump(model_log,'number_log.pkl')
joblib.dump(model_knn,'number_knn.pkl')
joblib.dump(model_svm,'number_svm.pkl')
joblib.dump(model_dt,'number_dt.pkl')
joblib.dump(model_rf,'number_rf.pkl')
classify = joblib.load('number_rf.pkl')  # Loading model
# ** Testing my model **
import cv2
# step -1
img = cv2.imread('number2.jpg',0)  # Load image and convert that into gray scale
# step -2 : Thresholding (inverted: digit becomes white on black)
ret , thresh = cv2.threshold(img, 127,255,cv2.THRESH_BINARY_INV)
# step -3: Resize image to match the 8x8 training data
img_re = cv2.resize(thresh,(8,8))  # resizing into 8 x 8 image
# step - 4: Reshape image into a single 64-feature row
test = img_re.reshape((1,64))  # this is new test data that need to pass to model
# step - 5: Normalization to [0, 1], matching the training preprocessing
test = test / test.max()
# NOTE(review): imshow is given the (1, 64) row, not the 8x8 image —
# presumably test.reshape((8, 8)) was intended for display; confirm.
plt.imshow(test, cmap ='gray')
plt.show()
print('Logistic Regression:',model_log.predict(test))
print('KNearest Neighbour:',model_knn.predict(test))
print('Support Vector Machine:',model_svm.predict(test))
print('Desicion Tree:',model_dt.predict(test))
print('Random Forest',model_rf.predict(test))
# +
# Video
# +
# Live digit recognition from the default webcam: threshold each frame,
# find digit-like contours, classify each ROI with the random forest, and
# draw the prediction.  Press 'q' in the window to quit.
cap = cv2.VideoCapture(0)
while True:
    _,img = cap.read()
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7,7),3)
    _,th3 = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV)
    #th3 = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,21,7)
    # FIX: OpenCV 3.x returns (image, contours, hierarchy) while 4.x returns
    # (contours, hierarchy); the [-2:] slice works for both, and the unused
    # `im2` binding is dropped.
    contours, hierarchy = cv2.findContours(th3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[-2:]
    areas = [cv2.contourArea(c) for c in contours]
    ix = np.where(np.array(areas) > 300)[0]  # keep only reasonably large contours
    result = np.array([1,0,0,0,0,0,0,0,0,0])
    for i in ix:
        cnt = contours[i]
        xr,yr,wr,hr = cv2.boundingRect(cnt)
        # Clamp near-border boxes so the padded ROI slice below stays in-frame.
        if xr< 20 :
            xr = 25
        if yr < 20:
            yr = 25
        cv2.rectangle(img,(xr-10,yr-10),(xr+wr+10,yr+hr+10), (0,255,0),2)
        roi = th3[yr-20:yr+hr+20, xr-20:xr+wr+20]
        roi_re=cv2.resize(roi,(8,8))
        g = roi_re.reshape(1,64).astype('float32')
        g = g/255  # scale to [0, 1] like the training data
        result= model_rf.predict(g)
        #print(result)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img,'Number: '+str(result),(xr-10,yr-10), font, 0.4, (255,0,0), 1, cv2.LINE_AA)
    cv2.imshow('Threshold',th3)
    cv2.imshow('orginal',img)
    if cv2.waitKey(41) & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
# -
# press 'q' in the video window to close
# source notebook: 04 - Classification/Batch2/Number_Classification/06 - Classify Numbers.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Compare sic codes and descriptions from various sources
# SIC codes and descriptions are available from a variety of sources. In this document, I compare lists of four-digit SIC codes from three different sources:
#
# 1. [OSHA website](https://www.osha.gov/pls/imis/sic_manual.html)
# 1. [SEC website](https://www.sec.gov/info/edgar/siccodes.htm)
# 1. [Scientific Telephone Samples website](http://www.stssamples.com/sic-code.asp)
# ### Key findings:
# * The SEC provides a different set of four-digit SIC codes:
# * It provides fewer overall codes (444 codes, compared to OSHA's 1005)
# * Some of the SIC codes it provides cannot be found in OSHA list - these are likely various aggregations of underlying four-digit SIC codes
# * The reference list of SIC codes shares all codes in common with OSHA, though some descriptions differ sightly
# ## Setup
# +
# Make the project's src/ directory importable and resolve sibling data paths
# relative to this notebook's location.
import sys
from os import path
path_notebooks = path.abspath('.')
path_base = path.dirname(path_notebooks)
path_src = path.join(path_base, 'src')
path_data = path.join(path_base, 'data')
path_tests = path.join(path_base, 'tests')
sys.path.insert(0, path_src)
import pickle
import pandas as pd
import scrape_sic_osha as scrape_osha  # project scraper for the OSHA SIC list
import scrape_sic_sec as scrape_sec    # project scraper for the SEC SIC list
import nltk
from __future__ import division  # Python 2 notebook: true division throughout
# NLTK models needed for tokenisation and POS tagging below.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# -
# ## Compare OSHA to SEC
# ### Clean and merge data
# +
# Read OSHA data, scraping and caching to CSV on first run.
osha_fname = path.join(path_data, 'osha_combined')
if path.isfile(osha_fname + '.csv'):
    osha = pd.read_csv(osha_fname + '.csv')
else:
    scrape_osha.get_sic_all(out_fname=osha_fname)
    osha = pd.read_csv(osha_fname + '.csv')
# Read SEC data, scraping and caching likewise.
sec_fname = path.join(path_data, 'sec_combined.csv')
if path.isfile(sec_fname):
    sec = pd.read_csv(sec_fname)
else:
    scrape_sec.save_sic_sec(sec_fname)
    sec = pd.read_csv(sec_fname)
# Merge OSHA and SEC data on the shared four-digit SIC code column.
inner = osha.merge(sec, how='inner', on='SIC4_cd')
# -
# ### Compare descriptions for each four-digit SIC code in common
# +
# Compare the OSHA and SEC descriptions for every SIC code they share.
# A code "matches" if the descriptions are identical (after lower/strip), or
# if >30% of the OSHA description's non-conjunction words appear in the SEC one.
osha_desc = list(inner.SIC4_desc.str.lower().str.strip())
sec_desc = list(inner.industry_title.str.lower().str.strip())
match = []
for i in range(0, len(inner)):
    # Identify direct matches
    match_ind = sec_desc[i] == osha_desc[i]
    if not(match_ind):
        # Where not a direct match count the number of indirect matches
        tokens_taged = nltk.pos_tag(nltk.word_tokenize(osha_desc[i]))
        # FIX: removed dead local `osha_words` — it was computed but never used
        # (the comparison below iterates tokens_taged directly).
        sec_words = [word[0] for word in nltk.pos_tag(nltk.word_tokenize(sec_desc[i]))]
        # Exclude coordinating conjunctions (POS tag 'CC') from the word count.
        word_matches = [word[0] in sec_words for word in tokens_taged if word[1] != 'CC']
        match_rate = sum(word_matches)/len(word_matches)
        if match_rate > 0.3:
            match_ind = True
    match.append(match_ind)
# -
# ### Summary
# Nearly all of the shared four-digit SIC codes from the OSHA and SEC lists shared a similar description, on the basis of a direct match or an indirect (30% or more of words in common, excluding coordinating-conjunctions) match. Of mismatches, most can be attributed to punctuation, grammar or syntax (i.e., as opposed to reference to an entirely different industry).
# Identify match rate (fraction of shared codes whose descriptions agree)
print('{:.1f}% match rate'.format(sum(match)/len(inner) * 100))
# Identify specific mismatches (rows where the descriptions did not agree)
inner[[not(m) for m in match]]
# ## Compare OSHA to benchmark
#
# ### Clean and merge data
benchmark = pd.read_csv(path.join(path_tests, 'ref_list.csv'))
# Note: 'SIC4_desciption' (sic) is the column name used throughout below.
benchmark.columns = ['SIC4_cd', 'SIC4_desciption']
# ### Compare descriptions for each four-digit SIC code in common
# +
# Merge OSHA and benchmark data
inner = osha.merge(benchmark, how='inner', on='SIC4_cd')
osha_desc = list(inner.SIC4_desc.str.lower().str.strip())
benchmark_desc = list(inner.SIC4_desciption.str.lower().str.strip())
# Same match criterion as the OSHA/SEC comparison above: exact match, or >30%
# of non-conjunction words in common.  The ', nec' suffix ("not elsewhere
# classified") is stripped from benchmark descriptions before comparing.
match = []
for i in range(0, len(inner)):
    # Count direct matches
    match_ind = benchmark_desc[i] == osha_desc[i]
    if not(match_ind):
        # Where not a direct match count the number of indirect matches
        tokens_taged = nltk.pos_tag(nltk.word_tokenize(benchmark_desc[i].replace(', nec', '')))
        osha_words = [word[0] for word in nltk.pos_tag(nltk.word_tokenize(osha_desc[i]))]
        word_matches = [word[0] in osha_words for word in tokens_taged if word[1] != 'CC']
        match_rate = sum(word_matches)/len(word_matches)
        if match_rate > 0.3:
            match_ind = True
    match.append(match_ind)
# -
# ### Summary
# Nearly all of the shared four-digit SIC codes from the OSHA and SEC lists shared a similar description, on the basis of a direct match or an indirect (30% or more of words in common, excluding coordinating-conjunctions) match. Of mismatches, most can be attributed to punctuation, grammar or syntax (i.e., as opposed to reference to an entirely different industry).
# Identify match rate for the OSHA-vs-benchmark comparison
print('{:.1f}% match rate'.format(sum(match)/len(inner) * 100))
# Identify specific mismatches
inner[[not(m) for m in match]]
# source notebook: notebooks/compare_sic_lists.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Headers del archivo:
# %matplotlib inline
import time
import numpy as np
import matplotlib.pyplot as plt
def multp(N):
    """Time the elementary operations of a naive N x N integer matrix product.

    Multiplies two random N x N int64 matrices with the textbook triple loop,
    separately accumulating the wall-clock time spent in multiplications and
    in additions.

    Returns:
        (tMultp, tSum): accumulated seconds for the multiply and the add
        steps respectively; both are 0.0 when N == 0.
    """
    tMultp = 0.0
    tSum = 0.0
    A = np.random.randint(0,10,(N,N), dtype=np.int64)
    B = np.random.randint(0,10,(N,N), dtype=np.int64)
    C = np.zeros((N,N), dtype=np.int64)
    for i in range(N):
        for j in range(N):
            for k in range(N):
                # FIX: time.clock() was deprecated in 3.3 and removed in
                # Python 3.8; time.perf_counter() is the high-resolution
                # replacement.
                t0 = time.perf_counter()
                mul = A[i,k]*B[k,j]
                t1 = time.perf_counter()
                tMultp = tMultp + (t1 - t0)
                t0 = time.perf_counter()
                C[i,j] = C[i,j] + mul
                t1 = time.perf_counter()
                tSum = tSum + (t1 - t0)
    return tMultp, tSum
def test(N):
    """Plot multiply vs. add timing of multp() for matrix sizes 0..N-1.

    Returns the list of (tMultp, tSum) tuples, one per size.
    """
    sizes = np.arange(N)
    timings = []
    for size in range(N):
        timings.append(multp(size))
    plt.xlabel('N')
    plt.ylabel('Tiempo(ms)')
    plt.plot(sizes, timings)
    plt.legend(['Multp', 'Sum'])
    return timings
# Benchmark runs; test(100) in particular is very slow (O(N^4) elementary ops).
times = test(100)
times1 = test(50)
# Tiempo promedio por operación elemental:
def test1(N):
    """Plot multp() timings and print an estimated per-operation average.

    For each matrix size i, divides the accumulated multiply/add times by the
    number of output elements (i*i) to approximate time per elementary
    operation.
    """
    X = np.arange(N)
    Y = [multp(i) for i in range(N)]
    # FIX: the original wrote Y[i][0]/i*i, which Python parses as
    # (Y[i][0]/i)*i == Y[i][0] — a no-op.  Dividing by (i*i) matches the
    # visible intent.  NOTE(review): a naive i x i product performs i**3
    # multiplications, so i**3 may be the truer divisor — confirm.
    tAvg = [Y[0]] + [(Y[i][0]/(i*i), Y[i][1]/(i*i)) for i in range(1,N)]
    plt.xlabel('N')
    plt.ylabel('Tiempo(ms)')
    plt.title('Tiempo por operación elemental')
    # NOTE(review): this plots the raw totals Y, not tAvg, despite the title —
    # possibly tAvg was intended; left unchanged.
    plt.plot(X,Y)
    plt.legend(['Multp','Sum']);
    for i in range(N):
        print(i, tAvg[i])
test1(20)
# source notebook: .ipynb_checkpoints/mattest-checkpoint.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Peek at the PoseTrack validation annotation file.
import json
f=open("../lib/datasets/lists/PoseTrack/v1.0/posetrack_val.json","r")
dic=json.loads(f.read())
print(dic['images'][:10])
# -
# Video directory prefix: file_name minus the trailing frame filename.
print(dic["images"][0]["file_name"][:-12])
# A 17-keypoint coordinate list (COCO keypoint count sanity check).
print(len([1163.6406 , 1177.6237 , 1171.6309 , 1193.6041 ,
           1152.654  , 1229.5603 , 1127.6844 , 1246.5396 ,
           1108.7076 , 1259.5238 , 1096.7222 , 1217.5748 ,
           1143.665  , 1257.5261 , 1110.7052 , 1274.5055 ,
           1095.7234 ]))
import numpy as np
# Example pose prediction: 4 rows x 17 keypoints.
# Rows appear to be x-coords, y-coords, and two score-like channels
# (presumably heatmap value and a confidence) — TODO confirm against producer.
array=np.asarray([[421.74918  , 417.76352  , 421.74918  , 431.71335  ,
                   407.79935  , 442.67395  , 388.86743  , 445.66318  ,
                   364.95343  , 457.62018  , 365.94983  , 415.7707   ,
                   380.8961   , 416.7671   , 361.96417  , 397.83517  ,
                   310.15048  ],
                  [336.91174  , 359.85703  , 320.9498   , 339.9046   ,
                   333.91888  , 380.80707  , 371.82846  , 435.6762   ,
                   422.70715  , 483.56204  , 447.64767  , 470.59296  ,
                   466.60248  , 530.4502   , 536.4359   , 598.28845  ,
                   526.4597   ],
                  [ 11.2089205,  12.047041 ,   9.988202 ,   8.126896 ,
                    12.063157 ,   5.9371266,   7.615011 ,   5.286614 ,
                     5.8725324,   5.8890834,   7.0978956,   5.485666 ,
                     3.514296 ,   3.1761117,   3.4209828,   4.630197 ,
                     2.045327 ],
                  [  0.03764714,  0.04012923,  0.02225947,  0.02917847,
                     0.02939962,  0.01868254,  0.01908131,  0.02446137,
                     0.0148254 ,  0.01913127,  0.0113323 ,  0.01122672,
                     0.00835427,  0.00901907,  0.0187873 ,  0.0153054 ,
                     0.01429355]])
# +
def compute_boxes_from_pose(poses):
    """
    Args:
        poses (list of list of list of floats):
            list of poses in each frame, each list contains list of poses in
            that frame, where each pose is a 17*3 element list (COCO style).
    Returns:
        boxes: (list of list of list of floats):
            list of boxes in each frame, each list contains a list of boxes in
            that frame, where each pose is [x, y, w, h] list.
    Added by rgirdhar

    Note: relies on expand_boxes(), which is not defined in this notebook —
    it must be imported/defined before running.  An identical copy of this
    function is pasted again further down in this file.
    """
    boxes = []
    for frame_poses in poses:
        if len(frame_poses) == 0:
            boxes.append([])
            continue
        frame_boxes = []
        frame_poses_np = np.array(frame_poses)
        frame_poses_np = frame_poses_np.reshape((-1, 17, 3))
        # only consider the points that are marked "2", i.e. labeled and visible
        valid_pts = frame_poses_np[:, :, 2] == 2
        for pose_id in range(frame_poses_np.shape[0]):
            valid_pose = frame_poses_np[pose_id, valid_pts[pose_id], :]
            # TODO(rgirdhar): Need to figure what to do here... Maybe just
            # use the head box heuristic or something to proxy the box..
            # For now just letting it get a random box
            if valid_pose.shape[0] == 0:
                frame_boxes.append([0, 0, 0, 0])
                continue
            # gen a xmin, ymin, xmax, ymax
            box = np.array([
                np.min(valid_pose[:, 0]),
                np.min(valid_pose[:, 1]),
                # The +1 ensures the box is at least 1x1 in size. Such
                # small boxes will be later removed anyway I think
                np.max(valid_pose[:, 0]) + 1,
                np.max(valid_pose[:, 1]) + 1,
            ])
            # Expand by 20%
            box = expand_boxes(np.expand_dims(box, 0), 1.2)[0]
            # conver to x,y,w,h; same as COCO json format (which is what it is
            # in, at this point)
            frame_boxes.append([
                box[0], box[1], box[2] - box[0], box[3] - box[1]])
        boxes.append(frame_boxes)
    return boxes
# Transpose the 4x17 fixture to 17x4 and keep only (x, y, confidence) columns.
print(array)
print(array.transpose([1,0]).shape)
array_process=array.transpose([1,0])[:,[0,1,3]]
print(array_process)
# -
# -
def compute_boxes_from_pose(poses):
    """
    Args:
        poses (list of list of list of floats):
            list of poses in each frame, each list contains list of poses in
            that frame, where each pose is a 17*3 element list (COCO style).
    Returns:
        boxes: (list of list of list of floats):
            list of boxes in each frame, each list contains a list of boxes in
            that frame, where each pose is [x, y, w, h] list.
    Added by rgirdhar

    NOTE(review): this is a byte-for-byte duplicate of the definition pasted
    earlier in this notebook; it simply re-binds the same name.
    """
    boxes = []
    for frame_poses in poses:
        if len(frame_poses) == 0:
            boxes.append([])
            continue
        frame_boxes = []
        frame_poses_np = np.array(frame_poses)
        frame_poses_np = frame_poses_np.reshape((-1, 17, 3))
        # only consider the points that are marked "2", i.e. labeled and visible
        valid_pts = frame_poses_np[:, :, 2] == 2
        for pose_id in range(frame_poses_np.shape[0]):
            valid_pose = frame_poses_np[pose_id, valid_pts[pose_id], :]
            # TODO(rgirdhar): Need to figure what to do here... Maybe just
            # use the head box heuristic or something to proxy the box..
            # For now just letting it get a random box
            if valid_pose.shape[0] == 0:
                frame_boxes.append([0, 0, 0, 0])
                continue
            # gen a xmin, ymin, xmax, ymax
            box = np.array([
                np.min(valid_pose[:, 0]),
                np.min(valid_pose[:, 1]),
                # The +1 ensures the box is at least 1x1 in size. Such
                # small boxes will be later removed anyway I think
                np.max(valid_pose[:, 0]) + 1,
                np.max(valid_pose[:, 1]) + 1,
            ])
            # Expand by 20%
            box = expand_boxes(np.expand_dims(box, 0), 1.2)[0]
            # conver to x,y,w,h; same as COCO json format (which is what it is
            # in, at this point)
            frame_boxes.append([
                box[0], box[1], box[2] - box[0], box[3] - box[1]])
        boxes.append(frame_boxes)
    return boxes
# Demonstrates that list.append mutates in place and returns None:
# b is bound to None by the first line, then rebound to the list itself.
a=[]
b=a.append(5)
b=a
print(a,b)
# +
import numpy as np
# Minimal PCKh-style fixtures: one ground-truth keypoint, one predicted
# keypoint, and the ground-truth head rectangle used for normalisation.
ppGT={
    "id": [6],
    "x": [858.5],
    "y": [395.5],
    "is_visible": [1]
}
ppPr={
    "id": [7],
    "x": [897.5],
    "y": [413.5],
    "is_visible": [1]
}
rectGT={
    "x1": [937],
    "y1": [271],
    "x2": [980],
    "y2": [342]
}
def getHeadSize(x1,y1,x2,y2):
    """Head size used to normalise keypoint distances (PCKh):
    0.6 times the diagonal length of the head bounding box."""
    diagonal = np.subtract([x2, y2], [x1, y1])
    return 0.6 * np.linalg.norm(diagonal)
# Head-size-normalised distance between the GT and predicted keypoint.
headSize = getHeadSize(rectGT["x1"][0], rectGT["y1"][0],
                       rectGT["x2"][0], rectGT["y2"][0])
pointGT = [ppGT["x"], ppGT["y"]]
pointPr = [ppPr["x"], ppPr["y"]]
dist = np.linalg.norm(np.subtract(pointGT, pointPr)) / headSize
print(np.subtract(pointGT, pointPr))
print( np.linalg.norm(np.subtract(pointGT, pointPr)))
print(dist)
# -
# -
print(np.zeros([1, 17+ 1]).shape)
# +
# numpy masked-array demo: element 2 is masked out (mask=1 hides it).
import numpy.ma as ma
y = ma.array([1, 2, 3], mask = [0, 1, 0])
print(type(y))
print(y)
# source notebook: tools/debug.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AequilibraE Routing
#
# Inputs: demand, network
#
# Outputs: shortest path skims, routing results
#
# ## Major steps
# 1. Set up Aequilibrae environment
# 2. Obtain the shortest path skim from the network
# 3. Run routing
# 4. Generate summary statistics
#
# ## Aequilibrae environment
#needs scipy, openmatrix (pip install)
import sys
from os.path import join
import numpy as np
import pandas as pd
import openmatrix as omx
from math import log10, floor
import matplotlib.pyplot as plt
from aequilibrae.distribution import GravityCalibration, Ipf, GravityApplication, SyntheticGravityModel
from aequilibrae import Parameters
from aequilibrae.project import Project
from aequilibrae.paths import PathResults
from aequilibrae.paths import SkimResults #as skmr
from aequilibrae.paths import Graph
from aequilibrae.paths import NetworkSkimming
from aequilibrae.matrix import AequilibraeData, AequilibraeMatrix
from aequilibrae import logger
from aequilibrae.paths import TrafficAssignment, TrafficClass
import logging
# Project folder layout and logging setup for the AequilibraE run.
fldr = 'C:/Users/Scott.Smith/GMNS/Lima'  #was aeqRepro
proj_name = 'Lima.sqlite'  #the network comes from this sqlite database
dt_fldr = '0_tntp_data'          # demand matrix input
prj_fldr = '1_project'           # AequilibraE project (sqlite)
skm_fldr = '2_skim_results'      # shortest-path skim output
assg_fldr = '4_assignment_results'  # assignment output
p = Parameters()
p.parameters['system']['logging_directory'] = fldr
p.write_back()
# Because assignment takes a long time, we want the log to be shown here
stdout_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s;%(name)s;%(levelname)s ; %(message)s")
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
# ## Shortest path skim
project = Project()
project.load(join(fldr, prj_fldr, proj_name))
# +
# we build all graphs
project.network.build_graphs()
# We get warnings that several fields in the project are filled with NaNs. Which is true, but we won't
# use those fields
# we grab the graph for cars
graph = project.network.graphs['c']
# let's say we want to minimize free_flow_time #distance
graph.set_graph('free_flow_time')
# And will skim time and distance while we are at it
graph.set_skimming(['free_flow_time', 'distance'])
# And we will allow paths to be compute going through other centroids/centroid connectors
# required for the Sioux Falls network, as all nodes are centroids
# NOTE(review): the comment above says "allow", but set_blocked_centroid_flows(True)
# *blocks* through-centroid paths — confirm which behaviour is intended.
graph.set_blocked_centroid_flows(True)
# +
########## SKIMMING ###################
# setup the object result
res = SkimResults()
res.prepare(graph)
# And run the skimming
res.compute_skims()
# The result is an AequilibraEMatrix object
skims = res.skims
# We can export to OMX
skims.export(join(fldr, skm_fldr, 'sp_skim.omx'))  #change for each run
# -
# ## Routing
# +
#### Open the matrix to get its size ####
# Open read-only just to inspect shape and table names, then close.
f_demand = omx.open_file(join(fldr, dt_fldr, 'demand.omx'))
matrix_shape = f_demand.shape()
matrix_size = matrix_shape[1]  # number of zones
print('Base Skim Shape:',f_demand.shape(), "Size=",matrix_size)
print('Number of tables',len(f_demand))
print('Table names:',f_demand.list_matrices())
print('attributes:',f_demand.list_all_attributes())
f_demand.close()
# -
#### LOAD DEMAND MATRIX #####
demand = AequilibraeMatrix()
demand.load(join(fldr, dt_fldr, 'demand.omx'))
demand.computational_view(['matrix'])  # We will only assign one user class stored as 'matrix' inside the OMX file
# +
######### TRAFFIC ASSIGNMENT WITH SKIMMING
assig = TrafficAssignment()
# Creates the assignment class
assigclass = TrafficClass(graph, demand)
# The first thing to do is to add at list of traffic classes to be assigned
assig.set_classes([assigclass])
assig.set_vdf("BPR")  # This is not case-sensitive # Then we set the volume delay function
assig.set_vdf_parameters({"alpha": "b", "beta": "power"})  # Get parameters from link file
#assig.set_vdf_parameters({"alpha": 0.15, "beta": 4})
assig.set_capacity_field("capacity")  # The capacity and free flow travel times as they exist in the graph
assig.set_time_field("free_flow_time")
# And the algorithm we want to use to assign
assig.set_algorithm('bfw')
#assig.set_algorithm('msa') #all-or-nothing
# since I haven't checked the parameters file, let's make sure convergence criteria is good
assig.max_iter = 100  #was 1000 or 100
assig.rgap_target = 0.001  #was 0.00001, or 0.01
assig.execute()  # we then execute the assignment
# The link flows are easy to export.
# we do so for csv and AequilibraEData
assigclass.results.save_to_disk(join(fldr, assg_fldr, 'linkflow.csv'), output="loads")  #change for each run
#assigclass.results.save_to_disk(join(fldr, assg_fldr, 'link_flows_c1.aed'), output="loads")
# the skims are easy to get.
# The blended one are here
avg_skims = assigclass.results.skims
# The ones for the last iteration are here
last_skims = assigclass._aon_results.skims
# Assembling a single final skim file can be done like this
# We will want only the time for the last iteration and the distance averaged out for all iterations
kwargs = {'file_name': join(fldr, assg_fldr, 'rt_skim'+'.aem'),  #change
          'zones': graph.num_zones,
          'matrix_names': ['time_final', 'distance_blended']}
# Create the matrix file
out_skims = AequilibraeMatrix()
out_skims.create_empty(**kwargs)
out_skims.index[:] = avg_skims.index[:]
# Transfer the data
# The names of the skims are the name of the fields
out_skims.matrix['time_final'][:, :] = last_skims.matrix['free_flow_time'][:, :]
# It is CRITICAL to assign the matrix values using the [:,:]
out_skims.matrix['distance_blended'][:, :] = avg_skims.matrix['distance'][:, :]
out_skims.matrices.flush()  # Make sure that all data went to the disk
# Export to OMX as well
out_skims.export(join(fldr, assg_fldr, 'rt_skim'+'.omx'))
demand.close()
# -
# ## Calculate summary statistics
#
# +
# Re-open demand and both skim files read-only for the comparisons below.
f = omx.open_file(join(fldr, dt_fldr, 'demand.omx'),'r')  #change
print('DEMAND FILE Shape:',f.shape(),' Tables:',f.list_matrices(),' Mappings:',f.list_mappings())
dem = f['matrix']
spbf = omx.open_file(join(fldr, skm_fldr,'sp_skim.omx'),'r')  #change
print('SP BASE SKIM FILE Shape:',spbf.shape(),' Tables:',spbf.list_matrices(),' Mappings:',spbf.list_mappings())
spbt = spbf['free_flow_time']  # shortest-path (free-flow) time skim
spbd = spbf['distance']        # shortest-path distance skim
rtbf = omx.open_file(join(fldr, assg_fldr, 'rt_skim.omx'),'r')
print('RT BASE SKIM FILE Shape:',rtbf.shape(),' Tables:',rtbf.list_matrices(),' Mappings:',rtbf.list_mappings())
rtbt = rtbf['time_final']        # congested (assignment) time skim
rtbd = rtbf['distance_blended']  # blended distance skim from assignment
# -
#Summary information on the input trip tables
print('sum of demand trips','{:.9}'.format(np.sum(dem)))
# ### Skims as .csv files
# +
# Write a per-OD comparison of shortest-path vs. routed skims for every OD
# pair with positive demand, and accumulate demand-weighted totals.
outfile = open("combined_skim.txt","w")  #change
spb_cumtripcount = 0.0;
spb_cumtime = 0.0;
spb_cumdist = 0.0;
rtb_cumtime = 0.0;
rtb_cumdist = 0.0;
largeval = 999999;  # NOTE(review): unused in this block
#Shortest path base times and distances
print("i j demand sp_dist rt_dist sp_time rt_time",file=outfile)
for i in range(matrix_size):
    tripcount = 0.0;
    sp_timecount = 0.0;
    sp_distcount = 0.0;
    rt_timecount = 0.0;
    rt_distcount = 0.0;
    for j in range(matrix_size):
        if(dem[i][j]>0):
            # Demand-weighted time/distance accumulation for this origin row.
            tripcount = tripcount + dem[i][j]
            sp_timecount = sp_timecount + dem[i][j]*spbt[i][j]
            sp_distcount = sp_distcount + dem[i][j]*spbd[i][j]
            rt_timecount = rt_timecount + dem[i][j]*rtbt[i][j]
            rt_distcount = rt_distcount + dem[i][j]*rtbd[i][j]
            print(i,j,dem[i][j],spbd[i][j],rtbd[i][j],spbt[i][j],rtbt[i][j],file=outfile)
    #print("SP Base Row",i,'{:.6} {:.6} {:.6}'.format(tripcount,distcount,timecount),file=outfile)
    spb_cumtripcount = spb_cumtripcount + tripcount;
    spb_cumtime = spb_cumtime + sp_timecount;
    spb_cumdist = spb_cumdist + sp_distcount;
    rtb_cumtime = rtb_cumtime + rt_timecount;
    rtb_cumdist = rtb_cumdist + rt_distcount;
    #print("Row",i,tripcount,timecount,distcount)
#print("Shortest path base totals",'{:.8} {:.8} {:.8}'.format(cumtripcount,cumdist,cumtime),file=outfile)
#print("Shortest path base totals",'{:.8} {:.8} {:.8}'.format(spb_cumtripcount,spb_cumdist,spb_cumtime))
# Totals: trips, distances, and times (times converted from minutes to hours).
print(spb_cumtripcount,spb_cumdist,rtb_cumdist,spb_cumtime/60,rtb_cumtime/60)
outfile.close()
# -
# ## Alternative calculations using numpy array
# Demand-weighted totals computed directly with numpy instead of the loop above.
sp_pht = np.array(dem)*np.array(spbt)/60  # person-hours; /60 presumably converts minutes to hours -- confirm skim units
sp_pmt = np.array(dem)*np.array(spbd)  # person-distance
print('total pht',np.sum(sp_pht),' average per trip',np.sum(sp_pht)/np.sum(dem))
print('total pmt',np.sum(sp_pmt),' average per trip',np.sum(sp_pmt)/np.sum(dem))
rt_pht = np.array(dem)*np.array(rtbt)/60  # same quantities for the routed skim
rt_pmt = np.array(dem)*np.array(rtbd)
print('total pht',np.sum(rt_pht),' average per trip',np.sum(rt_pht)/np.sum(dem))
print('total pmt',np.sum(rt_pmt),' average per trip',np.sum(rt_pmt)/np.sum(dem))
# ## Close the files
f.close()
spbf.close()
rtbf.close()
# NOTE: `outfile` was already closed right after writing, above; the redundant
# second outfile.close() (a no-op on an already-closed file) was removed.
# +
| Small_Network_Examples/Lima/Route.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Ingestion of manual annotations of the HLCA and removal of low QC cells:
# In this notebook we add the manual annotations of the HLCA clusters to the HLCA object, and we will remove cells that were annotated as low quality (e.g. high mitochondrial count clusters, doublets, etc.). We will also do a comparison of original versus final (manual) annotations.
# ### Import modules and set paths:
# +
import scanpy as sc
import pandas as pd
import numpy as np
import sys
sys.path.append("../../scripts/")
import reference_based_harmonizing
# -
# For pretty code formatting (not necessary to run):
# %load_ext lab_black
# Figure parameters:
sc.set_figure_params(figsize=(5, 5))
# Paths:
# input: intermediate concatenated HLCA object (log1p-normalized)
path_input_HLCA = "../../data/HLCA_core_h5ads/HLCA_v1_intermediates/LCA_Bano_Barb_Jain_Kras_Lafy_Meye_Mish_MishBud_Nawi_Seib_Teic_log1p.h5ad"
# output: final HLCA v1 object written at the end of this notebook
path_output_HLCA = "../../data/HLCA_core_h5ads/HLCA_v1.h5ad"
# manual per-cluster annotations and the harmonized cell-type reference mapping
path_manual_anns = "../../supporting_files/celltype_reannotation/manual_cluster_annotation_20210820.csv"
path_celltype_ref_mapping = "../../supporting_files/metadata_harmonization/HLCA_cell_type_reference_mapping_20211103.csv"
# output csvs: ordered annotations (fine and grouped) with their colors
path_man_anns_levd_and_colors = "../../supporting_files/celltype_structure_and_colors/manual_anns_and_leveled_anns_ordered.csv"
path_grouped_man_anns_levd_and_colors = "../../supporting_files/celltype_structure_and_colors/manual_anns_grouped_order_and_colors.csv"
# ### Add manual annotations to HLCA:
# import atlas:
adata_full = sc.read(path_input_HLCA)
adata_full
# import manual annotations of clusters:
manual_annot_raw = pd.read_csv(path_manual_anns)
# import cell type reference: raw mapping table plus the consensus table derived from it
harmonizing_df = reference_based_harmonizing.load_harmonizing_table(path_celltype_ref_mapping)
consensus_df = reference_based_harmonizing.create_consensus_table(harmonizing_df)
# update harmonized *original* annotations in the atlas (These are not the final manual annotations!!). This is only necessary if the cell type mapping has changed.
celltype_translation_df = (
    reference_based_harmonizing.create_orig_ann_to_consensus_translation_df(
        adata_full, consensus_df, harmonizing_df, verbose=False
    )
)
adata_full = reference_based_harmonizing.consensus_annotate_anndata(
    adata_full, celltype_translation_df, verbose=True
)
adata_full = reference_based_harmonizing.add_clean_annotation(adata_full)
# pre-process manual annotation dataframe (one row per annotated cluster):
manual_annot = pd.DataFrame(index=manual_annot_raw.index)
# store the highest cluster level for each row (count of non-empty "Leiden 1".."Leiden 5" columns)
manual_annot["cl_level"] = np.sum(
    ~pd.isnull(manual_annot_raw.loc[:, [f"Leiden {lev}" for lev in range(1, 6)]]),
    axis=1,
)
# store the matching cluster name (taken from the deepest annotated Leiden column):
manual_annot["cl"] = [
    manual_annot_raw.loc[row, f"Leiden {lev}"]
    for row, lev in zip(manual_annot.index, manual_annot["cl_level"])
]
# and the matching final annotation:
manual_annot["final_ann"] = manual_annot_raw["Final annotation"]
# and the matching coarse annotations:
manual_annot["final_ann_coarse"] = manual_annot_raw["Coarse final annotation"]
# convert final annotations to matching leveled annotations (level 1-5):
# +
final_ann_set = manual_annot.final_ann.unique()
# one row per final annotation; Level_1..Level_5 plus an "ordering" column filled below
final_anns_to_leveled_anns_df = pd.DataFrame(
    index=final_ann_set, columns=[f"Level_{num}" for num in range(1, 6)] + ["ordering"]
)
def find_matching_leveled_ann(final_ann):
    """Return consensus_df index entries whose name (after the 2-char level prefix, e.g. "4_") equals final_ann."""
    return [ann for ann in consensus_df.index if ann[2:] == final_ann]
final_ann_to_leveled_ann = {
    final_ann: find_matching_leveled_ann(final_ann) for final_ann in final_ann_set
}
# -
# check where there's no translation, or two translations available (something is wrong there, unless these are clusters to be discarded, e.g. doublets):
for final_ann, leveled_anns in final_ann_to_leveled_ann.items():
    if len(leveled_anns) == 0:
        # no match in the reference: route these to the discard category
        print(
            final_ann, leveled_anns, "(setting translation to 'Unicorns_and_artefacts')"
        )
        final_ann_to_leveled_ann[final_ann] = "Unicorns_and_artefacts"
    elif len(leveled_anns) > 1:
        # ambiguous (same name at multiple levels); fall back to the first hit
        print(final_ann, leveled_anns)
        print(
            "THIS NEEDS TO BE FIXED!!! This annotation is present at more than 1 level in the reference!!"
        )
        final_ann_to_leveled_ann[final_ann] = leveled_anns[0]
    else:
        final_ann_to_leveled_ann[final_ann] = leveled_anns[0]
# create dfs with translations of manual annotations to leveled annotations
for final_ann in final_anns_to_leveled_anns_df.index:
    if final_ann_to_leveled_ann[final_ann] == "Unicorns_and_artefacts":
        # discard category: level 1 label plus forward-propagated labels, sorted last (ordering 1000)
        final_anns_to_leveled_anns_df.loc[
            final_ann, [f"Level_{num}" for num in range(1, 6)] + ["ordering"]
        ] = (["Unicorns_and_artefacts"] + 4 * ["1_Unicorns_and_artefacts"] + [1000])
    else:
        final_anns_to_leveled_anns_df.loc[
            final_ann, [f"Level_{num}" for num in range(1, 6)]
        ] = consensus_df.loc[
            final_ann_to_leveled_ann[final_ann], [f"level_{num}" for num in range(1, 6)]
        ].values
        # get row location of this annotation in consensus_df,
        # so that we can use the ordering from the consensus df:
        final_anns_to_leveled_anns_df.loc[final_ann, "ordering"] = np.where(
            consensus_df.index == final_ann_to_leveled_ann[final_ann]
        )[0][0]
# sort df:
final_anns_to_leveled_anns_df.sort_values(by="ordering", ascending=True, inplace=True)
final_anns_to_leveled_anns_df.head(5)
# now add all this info to our adata:
# store original leveled annotations under "original_ann_level_[lev number]" (these are the harmonized original annotations, and not the corrected, manual annotations)
adata_full.obs.columns = [
    col.replace("ann_level_", "original_ann_level_") for col in adata_full.obs.columns
]
# Check which cells are manually annotated at which clustering level (this will make mapping easier):
max_annotated_at_lev_3 = adata_full.obs.leiden_3.values.isin(manual_annot.cl.values)
max_annotated_at_lev_4 = adata_full.obs.leiden_4.values.isin(manual_annot.cl.values)
max_annotated_at_lev_5 = adata_full.obs.leiden_5.values.isin(manual_annot.cl.values)
# record, per cell, the cluster id at the level at which it was annotated
adata_full.obs.loc[max_annotated_at_lev_3, "cluster_annotated"] = adata_full.obs.loc[
    max_annotated_at_lev_3, "leiden_3"
]
adata_full.obs.loc[max_annotated_at_lev_4, "cluster_annotated"] = adata_full.obs.loc[
    max_annotated_at_lev_4, "leiden_4"
]
adata_full.obs.loc[max_annotated_at_lev_5, "cluster_annotated"] = adata_full.obs.loc[
    max_annotated_at_lev_5, "leiden_5"
]
# 0 cells should have None/nan:
sum(pd.isnull(adata_full.obs.cluster_annotated))
# This should add up to all cells:
sum(max_annotated_at_lev_3) + sum(max_annotated_at_lev_4) + sum(max_annotated_at_lev_5)
# i.e. should correspond to:
adata_full.n_obs
# generate cluster to manual ann mapping:
cl_to_manann = {
    cl: manann for cl, manann in zip(manual_annot.cl, manual_annot.final_ann)
}
adata_full.obs["manual_ann"] = None
# map level 3 clusters to manann
adata_full.obs.loc[max_annotated_at_lev_3, "manual_ann"] = adata_full.obs.loc[
    max_annotated_at_lev_3, "leiden_3"
].map(cl_to_manann)
# level 4 clusters
adata_full.obs.loc[max_annotated_at_lev_4, "manual_ann"] = adata_full.obs.loc[
    max_annotated_at_lev_4, "leiden_4"
].map(cl_to_manann)
# and level 5 clusters
adata_full.obs.loc[max_annotated_at_lev_5, "manual_ann"] = adata_full.obs.loc[
    max_annotated_at_lev_5, "leiden_5"
].map(cl_to_manann)
# plot to see if results make sense:
sc.pl.umap(adata_full, color="manual_ann", frameon=False)
# if wanted, check if all cell types are represented by more than one donor (with at least 10 cells of the cell type)
counts_per_subj_per_ct = adata_full.obs.groupby(["manual_ann", "subject_ID"]).agg(
    {"subject_ID": "count"}
)
# NOTE(review): the comparison below is strictly greater than 10 (i.e. >= 11
# cells), although the surrounding comments say "at least 10" -- confirm.
more_than_10_per_subj_per_ct = counts_per_subj_per_ct > 10
# This shows the number of donors with at least 10 cells of the cell type:
more_than_10_per_subj_per_ct.unstack().sum(axis=1).sort_values()[:5]
# Now also add leveled annotations for each cell (i.e. from level 1 to level 5 for every cell, based on final annotation which is somewhere in the hierarchy):
for lev in range(1, 6):
    # manual annotation -> leveled annotation at this level
    man_ann_to_lev_mapper = {
        manann: levann
        for manann, levann in zip(
            final_anns_to_leveled_anns_df.index,
            final_anns_to_leveled_anns_df[f"Level_{lev}"],
        )
    }
    # delete old colors
    if f"ann_level_{lev}_colors" in adata_full.uns.keys():
        del adata_full.uns[f"ann_level_{lev}_colors"]
    adata_full.obs[f"ann_level_{lev}"] = adata_full.obs.manual_ann.map(
        man_ann_to_lev_mapper
    )
# add clean annotations (without forward propagation of lower levels) and remove forward-propagated labels:
adata_full = reference_based_harmonizing.add_clean_annotation(adata_full)
for lev in range(1, 6):
    # replace each ann_level_{lev} with its "_clean" counterpart
    del adata_full.obs[f"ann_level_{lev}"]
    adata_full.obs.rename(
        columns={f"ann_level_{lev}_clean": f"ann_level_{lev}"}, inplace=True
    )
# Remove "Unicorns and Artefacts" (i.e. doublets, low QC etc.):
n_cells_before = adata_full.n_obs
adata_full = adata_full[
    adata_full.obs.ann_level_1 != "Unicorns_and_artefacts", :
].copy()
n_cells_after = adata_full.n_obs
print("Cells removed:", n_cells_before - n_cells_after)
# ### Re-embed (neighbor graph and umap) after removing cells based on manual annotations (incl. e.g. doublets):
sc.pp.neighbors(adata_full, n_neighbors=30, use_rep="X_scanvi_emb")
sc.tl.umap(adata_full)
# keep a copy of this scANVI-based umap under its own key
adata_full.obsm["X_umap_scanvi"] = adata_full.obsm["X_umap"]
sc.pl.umap(
    adata_full,
    color=[f"ann_level_{n}" for n in range(1, 6)],
    frameon=False,
    ncols=1,
)
# ### Add manual annotation coarse:
# This annotation, which was also determined manually, is a coarsified version of the final annotations, and will be used for e.g. GWAS mapping to HLCA cell types, and modeling of effects of age, sex etc. on cell types. Each of these coarse annotations is part of the 5-level hierarchical cell-type reference.
fine_ann_to_coarse = {
    fine: coarse
    for fine, coarse in zip(manual_annot.final_ann, manual_annot.final_ann_coarse)
}
# sanity check: check if there's not an accidental one-to-many mapping:
# (the dict keeps only the last coarse value per fine label, so any pair that
# disagrees with the final mapping reveals a duplicate fine label)
for fine, coarse in zip(manual_annot.final_ann, manual_annot.final_ann_coarse):
    if fine_ann_to_coarse[fine] != coarse:
        print(
            f"There's a one-to-many mapping, check this! Fine: {fine}, coarse: {coarse}"
        )
# Add coarse annotations to adata:
adata_full.obs["manual_ann_grouped"] = adata_full.obs.manual_ann.map(fine_ann_to_coarse)
# And plot:
sc.pl.umap(adata_full, color="manual_ann_grouped")
# ### Generate colors for all manual annotations, and store:
# Delete existing color map, as this was generated (for umap above) for all manual annotations, including low QC and doublet clusters. We can generate a color map with fewer colors after removing the cells above.
del adata_full.uns["manual_ann_colors"]
# Order the remaining manual annotations (i.e. not the doublets etc.) in a biologically sensible order, i.e. using the order of the hierarchical cell type reference.
manual_ann_ordered = [
    manann
    for manann in final_anns_to_leveled_anns_df.index.tolist()
    if manann in adata_full.obs.manual_ann.unique()
]
# Also re-order categories in manual ann adata column accordingly:
adata_full.obs.manual_ann.cat.reorder_categories(manual_ann_ordered, inplace=True)
# Do the same for the "grouped" manual annotations (i.e. the coarsified annotations), based on the ordered fine annotations:
del adata_full.uns["manual_ann_grouped_colors"]
manual_ann_grouped_ordered = list()
for manann in manual_ann_ordered:
    grouped_manann = fine_ann_to_coarse[manann]
    if not grouped_manann in manual_ann_grouped_ordered:
        manual_ann_grouped_ordered.append(grouped_manann)
adata_full.obs.manual_ann_grouped.cat.reorder_categories(
    manual_ann_grouped_ordered, inplace=True
)
# Plot umap for both, which automatically generates a colormap:
sc.pl.umap(adata_full, color="manual_ann", frameon=False)
sc.pl.umap(adata_full, color="manual_ann_grouped", frameon=False)
# store color mapping in dataframe:
colors = adata_full.uns["manual_ann_colors"]
man_ann_to_color = {man_ann: col for man_ann, col in zip(manual_ann_ordered, colors)}
final_anns_to_leveled_anns_df["colors"] = final_anns_to_leveled_anns_df.index.map(
    man_ann_to_color
)
# same for the grouped annotations:
colors_grouped_manann = adata_full.uns["manual_ann_grouped_colors"]
man_ann_grouped_to_color = {
    mananngr: col
    for mananngr, col in zip(manual_ann_grouped_ordered, colors_grouped_manann)
}
mananngrouped_df = pd.DataFrame(index=manual_ann_grouped_ordered)
mananngrouped_df["color"] = mananngrouped_df.index.map(man_ann_grouped_to_color)
# store order and colors of annotations
# remove annotations that were discarded (e.g. "Doublets")
final_anns_to_leveled_anns_df = final_anns_to_leveled_anns_df.loc[
    [
        manann
        for manann in final_anns_to_leveled_anns_df.index
        if manann in adata_full.obs.manual_ann.unique()
    ],
    :,
]
final_anns_to_leveled_anns_df.to_csv(path_man_anns_levd_and_colors)
mananngrouped_df.to_csv(path_grouped_man_anns_levd_and_colors)
# ## Quantify re-annotations:
# Check which cells, according to the manual reannotation, were correctly annotated, incorrectly annotated, or underannotated.
# First, generate two empty dictionaries that will contain the mapping of the manual annotations to the leveled annotations, plus the level of each manual annotation in the hierarchical cell type reference. This will allow us to match the (harmonized) original with the final annotations.
# Map each manual annotation onto its match among the leveled reference
# annotations (tolerating case differences, "_" vs " ", and an "s" suffix),
# recording the matched name and the level at which it was found.
manann2refann = dict()
manann2level = dict()
# check to which level the manual annotations mapped (also check for slight changes in writing, e.g. different capitalization or underscores versus spaces)
for ct in adata_full.obs.manual_ann.unique():
    ct_found = False
    for level in [2, 3, 4, 5]:
        level_cts = adata_full.obs[f"ann_level_{level}"].unique()
        if not ct_found:
            for level_ct in level_cts:
                if ct.lower() == level_ct.lower():
                    manann2refann[ct] = level_ct
                    manann2level[ct] = level
                    ct_found = True
                elif ct.replace("_", " ").lower() == level_ct.lower():
                    manann2refann[ct] = level_ct
                    manann2level[ct] = level
                    ct_found = True
                # NOTE(review): str.strip("s") removes "s" characters from BOTH
                # ends; presumably only a trailing plural "s" (rstrip) was
                # intended -- confirm before changing behavior.
                elif ct.replace("_", " ").lower().strip("s") == level_ct.lower():
                    manann2refann[ct] = level_ct
                    manann2level[ct] = level
                    ct_found = True
    # report matches whose spelling differs from the manual annotation:
    if ct_found:
        if manann2refann[ct] != ct:
            print(ct)
    if not ct_found:
        print(f"{ct} not found")
# function to determine the reannotation type for every cell. We distinguish four types:
# - "correctly annotated", i.e. the original annotation was at least as detailed as the final annotation, and corresponds to the final annotation at the final annotation's level.
# - "underannotated, correct", i.e. the original annotation was less detailed than the final annotations, and corresponds to the final annotation at a lower level than the final annotation's level
# - "underannotated, incorrect", i.e. the original annotation was less detailed than the final annotation, and does not correspond to the final annotation at that lower level (this will be called "misannotated" in figures and in the paper)
# - "misannotated", i.e. the original annotation was equally or more detailed than the final annotation, but does not correspond to the final annotation at the final annotation's level.
def get_cell_reannotation_type(
    manann, refann, manann2refann, manann2level, consensus_df
):
    """Classify how a cell's (harmonized) original annotation relates to its
    final manual annotation.

    Returns one of: "correctly annotated", "underannotated, correct",
    "underannotated, incorrect", or "misannotated".  `refann` labels with a
    "1_".."4_" prefix are forward-propagated coarse labels; for those, the
    comparison is done at that coarser level using `consensus_df`.
    """
    matched = manann2refann[manann]
    # Agreement at the final annotation's own level:
    if refann == matched:
        return "correctly annotated"
    # Without a level prefix, the original annotation was at least as detailed
    # as the final one yet disagrees with it:
    if refann[:2] not in ("1_", "2_", "3_", "4_"):
        return "misannotated"
    # Otherwise the original annotation stopped at a shallower level; look up
    # what the final annotation maps to at that shallower level.
    shallow_level = refann[0]
    reference_row = f"{manann2level[manann]}_{matched}"
    expected_at_shallow = consensus_df.loc[reference_row, f"level_{shallow_level}"]
    if expected_at_shallow == refann[2:]:
        return "underannotated, correct"
    return "underannotated, incorrect"
# Calculate "reannotation type" for all cells in the HLCA:
# (the original annotation is read at the level of the cell's final manual annotation)
adata_full.obs["reannotation_type"] = [
    get_cell_reannotation_type(
        manann=manann,
        refann=adata_full.obs.loc[cell, f"original_ann_level_{manann2level[manann]}"],
        manann2refann=manann2refann,
        manann2level=manann2level,
        consensus_df=consensus_df,
    )
    for cell, manann in zip(adata_full.obs.index, adata_full.obs.manual_ann)
]
# And plot:
sc.pl.umap(adata_full, color="reannotation_type")
# ### Store final adata:
adata_full.write(path_output_HLCA)
| notebooks/1_building_and_annotating_the_atlas_core/07_manual_ann_ingestion_and_removal_of_doublets_etc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Nibelungenlied and Völsunga saga
# Not only do the medieval Germanic people share a common ancestor concerning their languages, but they also have a common hero celebrated in several stories (poems, sagas).
#
# * **Völsunga saga** (*saga* written in Old Norse around the XIII century)
# * **Nibelungenlied** (poem written in Middle High German around the XII century)
# * the heroic lays of the **Poetic Edda** (poems written in Old Norse like **Grípisspá**, **Reginsmál**, **Fáfnismál**, **Sigrdrífumál**)
#
# Characters in such stories look similar. It would be interesting to analyse how much they look alike. Are the characters' names related? Do the characters have similar relationships with each other?
#
#
# From a statistical point of view, we can set a null hypothesis: "Characters' relationships with each other look alike in **Völsunga saga** and in **Nibelungenlied**" and an alternative hypothesis: "Characters' relationships with each other differ in **Völsunga saga** and in **Nibelungenlied**"
#
#
# In other words, we will study in what extent **Völsunga saga** and **Nibelungenlied** differ.
#
#
# ## 1. Loading texts
# ### 1.1 Völsunga saga
# #### 1.1.1 Reading the text
# Import the code to load the text.
import norsecorpus.reader as ncr
# See which texts are available (mapping keyed by file name):
available_texts = ncr.get_available_texts()
print(available_texts.keys())
# Load the selected text.
volsunga_saga = ncr.read_tei_words(available_texts["volsunga.xml"])
# Check what was loaded: first sentence of the first paragraph of chapter 0.
print(volsunga_saga[0][0][0])
# We can see that the imported text has a structure.
#
# Medieval copyists wrote the text without structure [see the original manuscript](https://handrit.is/en/manuscript/view/AM02-0006). The text structure was added later by philologists.
#
# Text => chapters. Chapter => paragraphs. Paragraph => sentences. Sentence => words. Word => character.
#
# Text: `volsunga_saga`
# +
# print(volsunga_saga)
# -
# Chapter: `volsunga_saga[i_chapter]`
# +
# print(volsunga_saga[0])
# -
# Paragraph: `volsunga_saga[i_chapter][j_paragraph]`
# +
# print(volsunga_saga[0][0])
# -
# Sentence: `volsunga_saga[i_chapter][j_paragraph][k_sentence]`
# +
# print(volsunga_saga[0][0][0])
# -
# Word (or more precisely token): `volsunga_saga[i_chapter][j_paragraph][k_sentence][l_token]`
# A single token: chapter 0, paragraph 0, sentence 0, token 0.
print(volsunga_saga[0][0][0][0])
# -----------------------------------
# This is an ideal case where we have available code to read the structure of a text. Most of the time, it is necessary to:
# - retrieve the text from a source,
# - clean the text
# - give a structure to the text (chapter, paragraph, sentence, word).
# #### 1.1.2 Remove redundant information
#
# Removal of stop words (words which appear in all kinds of texts)
from cltk.stop.old_norse.stops import STOPS_LIST
# +
# {word for word in capital_words if word.lower() not in STOPS_LIST}
# -
# Peek at ten of the Old Norse stop words.
STOPS_LIST[10:20]
# It is often necessary to remove stop words because they are too frequent to discriminate between texts.
# ### 1.2 Loading Nibelungenlied (Augsburg's corpus)
# We import the functions to read the Nibelungenlied
import sigurd.nib_augsburg.nib_reader as nibaugr
# `nibaugr.MAIN_LINKS` is the list of links to the different manuscript transcriptions of the Nibelungenlied. Each link is also the pointer to the stored text.
nibaugr.MAIN_LINKS[0]
# `nibaugr.read_tei` extracts the content from a TEI-compliant XML file.
nibelungenlied_aug = nibaugr.read_tei(nibaugr.MAIN_LINKS[0])
# (Explain why it is relevant to remove stop words from texts.)
#
# Less resourced languages often do not have many annotations and a stop word list is not always available. CLTK provides a class to extract stop words according to some criteria.
from cltk.stop import stop
mhg_stop_list = stop.BaseCorpusStoplist()
# Flatten each chapter into a single string (chapters are lists of long lines):
string_nibelungenlied_aug = [" ".join([" ".join(long_line) for long_line in chapter])
                             for chapter in nibelungenlied_aug]
# mhg_stop_list.build_stoplist(string_nibelungenlied_aug)
# NOTE: this import shadows the Old Norse STOPS_LIST imported earlier.
from cltk.stop.middle_high_german.stops import STOPS_LIST
STOPS_LIST[:20]
"von" in STOPS_LIST
nibelungenlied_aug[0][:10]
# Remove Middle High German stop words from the first ten long lines:
[[[word for word in half_line.split(" ") if word.lower() not in STOPS_LIST] for half_line in long_line] for long_line in nibelungenlied_aug[0][:10]]
# ## 2. Analysis of vocabulary
# Which words are more likely associated to the main characters?
#
# * See POS tagging and lemmatization.
# * Adjectives are more that are associated to the main character are more likely to describe him.
# The **method**:
#
# - using the POS tagger to detect words which are adjectives,
# - keep adjectives which are close to a main character,
# - keep adjectives which are inflected according to the character.
#
# The **main issue** is how words are spelled:
#
# - the complete text is not normalized, whereas the POS tagger for Middle High German was trained on normalized annotated texts,
# - the complete text is normalized, whereas the POS tagger for Old Norse was trained on Icelandic spelled texts.
#
# This part has not been implemented.
# ## 3. Tracking characters
#
#
# ### 3.1 Sigurðr's relationships
#
# I chose to analyse Sigurðr's relationships because this character is present in the Nibelungenlied and in the Völsunga saga and is prominent.
# #### 3.1.1 Proper nouns extraction
#
# The main feature of proper nouns is that... their first character is a capital character. (Just look at the name of your city, your first name and your family name).
#
# However, the first word of every sentence also has this feature. The idea is to keep the words with this feature without the ones which are after a punctuation marking a new sentence/sequence. We hope that we don't lose many proper nouns.
capital_words = set()
# Characters that mark the end of a sentence/sequence (checked as single chars).
sentence_delimiters = "?!.:``''\""
# Flatten the chapter > paragraph > sentence nesting into one token stream.
tokens = [
    token
    for chapter in volsunga_saga
    for paragraph in chapter
    for sentence in paragraph
    for token in sentence
]
# Collect capitalized tokens, skipping any token that directly follows a
# sentence delimiter (those are capitalized only because they start a sentence).
for previous, current in zip(tokens, tokens[1:]):
    if previous[-1] not in sentence_delimiters and current and current[0].isupper():
        capital_words.add(current)
# +
# capital_words
# -
# We got proper nouns in the text with their different attested inflections.
#
# Some are humans, some are dwarves, some are gods.
# +
from collections import defaultdict
def common_prefix(s1, s2):
    """Return ``(length, prefix)`` of the longest common prefix of *s1* and *s2*.

    >>> common_prefix("bonjour", "bonsoir")
    (3, 'bon')
    """
    # The original docstring named a nonexistent function and claimed the
    # return value was just the prefix string; fixed here to match reality.
    shorter = min(len(s1), len(s2))
    i = 0
    while i < shorter:
        if s1[i] != s2[i]:
            break
        i += 1
    return i, s1[:i]
def find_paradigms(words):
    """Heuristically group inflected forms of the same proper noun.

    Two forms are grouped under their longest common prefix (the "stem") when
    their lengths differ by at most 2 characters and neither form is more than
    40% longer than the shared prefix.  Stems that themselves appear as a
    member of another stem's group are dropped from the result.
    """
    grouped = defaultdict(set)
    for position, first in enumerate(words):
        for second in words[position + 1:]:
            # Register the left-hand word under itself (idempotent set add).
            grouped[first].add(first)
            prefix_len, stem = common_prefix(first, second)
            if abs(len(first) - len(second)) > 2:
                continue
            if len(first) <= prefix_len * 1.4 and len(second) <= prefix_len * 1.4:
                grouped[stem].add(first)
                grouped[stem].add(second)
    # Drop any key that is itself listed inside another key's group.
    return {
        key: grouped[key]
        for key in grouped
        if not any(key in grouped[other] for other in grouped if other != key)
    }
# Group the extracted capitalized words into paradigms (stem -> inflected forms).
proper_nouns_paradigms = find_paradigms(list(capital_words))
# Invert the mapping: each inflected form points back to its paradigm key.
volsunga_characters = {inflected_form: proper_noun_paradigm for proper_noun_paradigm in proper_nouns_paradigms
                       for inflected_form in proper_nouns_paradigms[proper_noun_paradigm]}
# print(len(proper_nouns_paradigms))
# print(proper_nouns_paradigms)
# print(volsunga_characters)
# -
# #### 3.1.2 Ego graph of Sigurðr
# From "Applied Text Analysis with Python"
import networkx as nx
import matplotlib.pyplot as plt
import itertools
def one_of_them_in(l1, l2):
    """Return True if at least one element of *l1* occurs in *l2*.

    Note: when *l2* is a string (as in `cooccurrence_nib_aug`), the ``in``
    test performs substring matching.
    """
    return any(item in l2 for item in l1)
def cooccurrence_vol(text, characters):
    """Count, for each pair of characters, the sentences where both appear.

    `text` is nested as chapters > paragraphs > sentences > words, and
    `characters` maps a paradigm key to its inflected forms; a character is
    "present" in a sentence when any of its forms occurs in it.
    """
    name_pairs = list(itertools.combinations(list(characters.keys()), 2))
    counts = dict.fromkeys(name_pairs, 0)
    flattened_sentences = (
        sentence
        for chapter in text
        for paragraph in chapter
        for sentence in paragraph
    )
    for sentence in flattened_sentences:
        for left, right in name_pairs:
            if one_of_them_in(characters[left], sentence) and one_of_them_in(
                characters[right], sentence
            ):
                counts[(left, right)] += 1
    return counts
def cooccurrence_nib_aug(text, characters):
    """Count, for each pair of characters, the chapters where both appear.

    `text` is nested as chapters > long lines > half lines; each chapter is
    flattened to one string, so `one_of_them_in` does substring matching of
    the characters' name variants against the chapter text.
    """
    possible_pairs = list(itertools.combinations(list(characters.keys()), 2))
    cooccurring = dict.fromkeys(possible_pairs, 0)
    for chapter in text:
        # Hoisted out of the pair loop: the original rebuilt this joined
        # string twice per pair, for every chapter.
        chapter_text = " ".join(" ".join(long_line) for long_line in chapter)
        for pair in possible_pairs:
            if one_of_them_in(characters[pair[0]], chapter_text) and one_of_them_in(
                characters[pair[1]], chapter_text
            ):
                cooccurring[pair] += 1
    return cooccurring
# Variants of Völsung: all attested inflected forms grouped under the stem.
proper_nouns_paradigms['Völsung']
# - 'Völsungr': nominative singular
# - 'Völsung': accusative singular
# - 'Völsungi': dative singular
# - 'Völsungs': genitive singular
# - 'Völsungar': nominative and accusative plural
# - 'Völsunga': genitive plural
# +
# Build the sentence-level co-occurrence graph for the whole saga, keep edges
# seen more than once, and plot the ego network (direct neighbors) of Sigurðr.
g_vol = nx.Graph()
g_vol.name = "Relationships of Sigurðr"
pairs = cooccurrence_vol(volsunga_saga, proper_nouns_paradigms)
for pair, weight in pairs.items():
    if weight > 1:
        g_vol.add_edge(pair[0], pair[1], weight=weight)
sigurdr = nx.ego_graph(g_vol, "Sigurð")
edges, weights = zip(*nx.get_edge_attributes(sigurdr, "weight").items())
pos = nx.spring_layout(sigurdr, k=0.5, iterations=40)
nx.draw(sigurdr, pos, node_color="gold", node_size=50, edgelist=edges, width=0.5,
        edge_color="orange", with_labels=True, font_size=12)
plt.show()
# -
# - Sigurðr is at the center of the graph.
# - We can see a close relationship between Sigurðr and Brynhildr, one of his lovers, and a more distant relationship with Gudrun.
# - Enemies of Sigurðr are Fafni (the dragon) and Regin (Fafni's brother).
# - Sigurðr's ancestors: Sigmund, Völsung
# - A sword named Gram was used to kill Regin
# #### 3.1.3 Ego graph of Guðrún
# +
# Same construction as above, but centered on Guðrún.
g_vol = nx.Graph()
g_vol.name = "Relationships of Guðrún"
pairs = cooccurrence_vol(volsunga_saga, proper_nouns_paradigms)
for pair, weight in pairs.items():
    if weight > 1:
        g_vol.add_edge(pair[0], pair[1], weight=weight)
gudrun = nx.ego_graph(g_vol, "Guðrún")
edges, weights = zip(*nx.get_edge_attributes(gudrun, "weight").items())
pos = nx.spring_layout(gudrun, k=0.5, iterations=40)
nx.draw(gudrun, pos, node_color="gold", node_size=50, edgelist=edges, width=0.5,
        edge_color="orange", with_labels=True, font_size=12)
plt.show()
# -
# ### 3.2 Sigfried's relationships
# #### 3.2.1 Proper nouns extraction
# For this text, I extracted manually the places and the characters' names.
nib_names = nibaugr.read_names()  # manually curated name variants per character
# Variants of Brünhild in the manuscript C.
nib_names['Brünhild']
# #### 3.2.2 Ego graph of Siegfried
# +
# Chapter-level co-occurrence graph for the Nibelungenlied; edges must be
# seen more than twice. Ego network of Siegfried.
g_nib_aug = nx.Graph()
g_nib_aug.name = "Relationships of Siegfried"
pairs = cooccurrence_nib_aug(nibelungenlied_aug, nib_names)
for pair, weight in pairs.items():
    if weight > 2:
        g_nib_aug.add_edge(pair[0], pair[1], weight=weight)
siegfried = nx.ego_graph(g_nib_aug, "Siegfried")
edges, weights = zip(*nx.get_edge_attributes(siegfried, "weight").items())
pos = nx.spring_layout(siegfried, k=0.5, iterations=40)
nx.draw(siegfried, pos, node_color="gold", node_size=50, edgelist=edges, width=0.5,
        edge_color="orange", with_labels=True, font_size=12)
plt.show()
# -
# It is less visible here. It seems that there are more characters.
# It has to be noted that Kriemhild and Gudrun are actually the same character. Their names do not suggest this fact, but they are both married to Siegfried/Sigurðr and play the same role in both stories.
# #### 3.2.2 Ego graph of Kriemhild
# +
# Same construction, centered on Kriemhild, with a stricter edge threshold (> 3).
g_nib_aug = nx.Graph()
g_nib_aug.name = "Relationships of Kriemhild"
pairs = cooccurrence_nib_aug(nibelungenlied_aug, nib_names)
for pair, weight in pairs.items():
    if weight > 3:
        g_nib_aug.add_edge(pair[0], pair[1], weight=weight)
kriemhild = nx.ego_graph(g_nib_aug, "Kriemhild")
edges, weights = zip(*nx.get_edge_attributes(kriemhild, "weight").items())
pos = nx.spring_layout(kriemhild, k=0.5, iterations=40)
nx.draw(kriemhild, pos, node_color="gold", node_size=50, edgelist=edges, width=0.5,
        edge_color="orange", with_labels=True, font_size=12)
plt.show()
# -
# ## Conclusion
# We could see similarities and differences between two medieval works written in two different but related languages.
# If we know enough how our data are built, then it is possible to automate processes and analyze the outputs.
# -----------------------------
# By <NAME>, CLTK contributor ([www.clementbesnier.fr](https://www.clementbesnier.fr/), [github](https://github.com/clemsciences), [twitter](https://twitter.com/clemsciences)).
| sigurd/notebooks/.ipynb_checkpoints/sigfried_or_sigurdr-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import dataimport
# Load the raw data plus its metadata through the project's Dataimport helper.
data = dataimport.Dataimport("rawData/tidy.csv","rawData/Metadata.csv")
df = data.dataframe()
df
print('Total rows in the dataset: \n',data.count())
# NOTE(review): below, `data` (the Dataimport wrapper) is indexed like a
# DataFrame (`data.columns`, `data[name]`); presumably Dataimport delegates
# to the underlying frame -- otherwise `df` was intended. Confirm.
# For each column, list its labels when there are few, else just the count.
for name in data.columns:
    if len(data[name].unique()) < 5:
        print(name ,
            '\n',
            ' Labels: ',
            data[name].unique(),
            ' Unique labels: ',
            len(data[name].unique()))
    else:
        print(name ,
            '\n',
            ' Unique labels: ',
            len(data[name].unique()))
# ### We want to check if our dataset has balanced data
def countplot(columnname):
    """Draw a bar chart of the category frequencies of one column of the global ``data`` table."""
    categorical = data[columnname].astype('category')
    categorical.value_counts().plot(kind='bar')
# Manual equivalent of countplot() for the 'Column' column:
data['Column'] = data['Column'].astype('category')
data.Column.value_counts().plot(kind='bar') # Equal set size for both columns used
countplot('Group')
plt.plot(data['conc_thpa_ugl'])
countplot('mars14') # Type of a protein extraction column
countplot('sepromix20')
countplot('status')
data['proteinName'] = data['proteinName'].astype('category')
data['proteinName'].value_counts().plot(kind='hist')
# Samples per (protein, group) combination:
new = data.iloc[:, 1:3].groupby(['proteinName', 'Group']).size()
new[:30]
data.Group.value_counts()
data.proteinName.count()
new.groupby('Group').value_counts()
# +
# 622 Proteins have 4 samples per Group, 1271 Proteins have 2 samples per Group
# -
# Correlation matrix of the numeric columns, rendered with matshow.
f = plt.figure(figsize=(20,20))
plt.matshow(data.corr(), fignum= f.number)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16)
# +
import seaborn as sns
h_labels = [x.replace('_', ' ').title() for x in
list(data.select_dtypes(include=['number', 'bool']).columns.values)]
fig, ax = plt.subplots(figsize=(20,20))
sns.heatmap(data.corr(),
annot=True,
cmap=sns.cubehelix_palette(as_cmap=True),
xticklabels=h_labels, yticklabels=h_labels,
ax=ax)
# -
| dataPreprocessingAnalysis.ipynb |
# ---
# title: "Dictionary Basics"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "Dictionary basics in Python."
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Basics
# - Not sequences, but mappings. That is, stored by key, not relative position.
# - Dictionaries are mutable.
# ### Build a dictionary via brackets
unef_org = {
    'name': 'UNEF',
    'staff': 32,
    'url': 'http://unef.org',
}
# ### View the variable
unef_org
# # Build a dict via keys
who_org = dict()
who_org['name'] = 'WHO'
who_org['staff'] = '10'
who_org['url'] = 'http://who.org'
# ### View the variable
who_org
# ## Nesting in dictionaries
# ### A dictionary value can itself be a list (or any object).
unitas_org = {
    'name': 'UNITAS',
    'staff': 32,
    'url': ['http://unitas.org', 'http://unitas.int'],
}
# ### View the variable
unitas_org
# ## Index the nested list
# ### Take the second item of the list stored under the url key.
unitas_org['url'][1]
| docs/python/basics/dictionary_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from datetime import datetime
import numpy as np
import pandas as pd
# -
def split_industries():
    """Split the FAMA_49CRSP csv into one file per industry.

    Reads the module-level FAMA_49CRSP path, writes one
    ``industries/<name>.csv`` per distinct ``FFI49_desc`` value (with the
    description column removed), and returns the set of industry names.
    """
    frame = pd.read_csv(FAMA_49CRSP)
    names = set(frame['FFI49_desc'])
    for name in names:
        subset = frame[frame['FFI49_desc'] == name]
        subset = subset.drop(labels='FFI49_desc', axis=1)
        subset.to_csv('industries/{}.csv'.format(name))
    return names
| .ipynb_checkpoints/Preprocessing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Softmax regression in sklearn
# +
from IPython.display import Image
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# -
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.feature_selection import SelectKBest, mutual_info_classif
# +
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

plt.style.use('fivethirtyeight')
# Apply all font/figure tweaks in a single rcParams update.
plt.rcParams.update({
    'font.family': 'sans-serif',
    'font.serif': 'Ubuntu',
    'font.monospace': 'Ubuntu Mono',
    'font.size': 10,
    'axes.labelsize': 10,
    'axes.labelweight': 'bold',
    'axes.titlesize': 10,
    'xtick.labelsize': 8,
    'ytick.labelsize': 8,
    'legend.fontsize': 10,
    'figure.titlesize': 12,
    'image.cmap': 'jet',
    'image.interpolation': 'none',
    'figure.figsize': (16, 8),
    'lines.linewidth': 2,
    'lines.markersize': 8,
})
# 20-entry colour cycle (10 distinct colours repeated twice) and a
# blue-heavy colormap built from the first/ninth of them.
colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c',
          '#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b',
          '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09']
cmap = mcolors.LinearSegmentedColormap.from_list("", ["#82cafc", "#069af3", "#0485d1", colors[0], colors[8]])
# +
import urllib.request

# Local dataset folder and its remote mirror.
filepath = "../dataset/"
url = "https://tvml.github.io/ml1920/dataset/"

def get_file(filename, local):
    """Return a readable path for *filename*.

    With ``local`` truthy, point at the local dataset folder; otherwise
    download the file next to the notebook and return its bare name.
    """
    if local:
        return filepath + filename
    urllib.request.urlretrieve(url + filename, filename)
    return filename
# +
# Read the data into a pandas dataframe (downloads iris.csv on first use).
data = pd.read_csv(get_file("iris.csv", False), delimiter=';')
# Number of samples.
n = len(data)
# Feature dimensionality (all columns except the class label).
nfeatures = len(data.columns)-1
# Keep only two features so decision regions can be drawn in 2-D.
X = np.array(data[['sepal_length','sepal_width']])
t = np.array(data['class']).reshape(-1,1)
# +
# Encode the string class labels as integers 0..2.
encoder = LabelEncoder()
t = encoder.fit_transform(t)
# Split the dataset into train and test sets (70/30).
X_train, X_test, t_train, t_test = train_test_split(X, t, test_size=0.3)
# -
X_train.shape
# +
# Pipeline: standardise the features, then (multinomial) logistic regression
# with very weak regularisation (large C).
scaler = StandardScaler()
logreg = LogisticRegression()
model = Pipeline([('scale', scaler), ('clf', logreg)])
model.set_params(clf__C=1e5)
model = model.fit(X_train, t_train)
# -
# Build a dense 1000x1000 grid over the (10%-padded) feature ranges; it is
# used below to draw per-class probability maps and decision boundaries.
delta1=max(X[:,0])-min(X[:,0])
delta2=max(X[:,1])-min(X[:,1])
min1=min(X[:,0])-delta1/10
max1=max(X[:,0])+delta1/10
min2=min(X[:,1])-delta2/10
max2=max(X[:,1])+delta2/10
u = np.linspace(min1, max1, 1000)
v = np.linspace(min2, max2, 1000)
u, v = np.meshgrid(u, v)
# Hard predictions and class probabilities at every grid point.
z = model.predict(np.c_[u.ravel(), v.ravel()])
p = model.predict_proba(np.c_[u.ravel(), v.ravel()])
z = z.reshape(u.shape)
# One probability surface per class.
p0 = p[:,0].reshape(u.shape)
p1 = p[:,1].reshape(u.shape)
p2 = p[:,2].reshape(u.shape)
X_s, t_s=X, t
# Probability map and 0.5 decision contour for class 0, with all samples.
# NOTE(review): the plotted features are sepal length/width, but the axis
# labels say "Petal" — confirm which feature pair was intended.
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
ax.imshow(p0, origin='lower', extent=(min1, max1, min2, max2), alpha=.3, aspect='auto')
plt.contour(u, v, p0, [0.5], colors=colors[8])
# Split samples by true class so each gets its own colour.
X0 = np.compress(t_s==0, X_s, axis=0)
X1 = np.compress(t_s==1, X_s, axis=0)
X2 = np.compress(t_s==2, X_s, axis=0)
ax.scatter(X0[:, 0], X0[:, 1], s=40, c=colors[0], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X1[:, 0], X1[:, 1], s=40, c=colors[2], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X2[:, 0], X2[:, 1], s=40, c=colors[1], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
plt.xlabel('Petal length', fontsize=10)
plt.ylabel('Petal width', fontsize=10)
plt.xlim(min1, max1)
plt.ylim(min2, max2)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Classe 0')
plt.show()
# Same plot for class 1.
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
ax.imshow(p1, origin='lower', extent=(min1, max1, min2, max2), alpha=.3, aspect='auto')
plt.contour(u, v, p1, [0.5], colors=colors[8])
X0 = np.compress(t_s==0, X_s, axis=0)
X1 = np.compress(t_s==1, X_s, axis=0)
X2 = np.compress(t_s==2, X_s, axis=0)
ax.scatter(X0[:, 0], X0[:, 1], s=40, c=colors[0], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X1[:, 0], X1[:, 1], s=40, c=colors[2], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X2[:, 0], X2[:, 1], s=40, c=colors[1], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
plt.xlabel('Petal length', fontsize=10)
plt.ylabel('Petal width', fontsize=10)
plt.xlim(min1, max1)
plt.ylim(min2, max2)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Classe 1')
plt.show()
# Same plot for class 2.
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
ax.imshow(p2, origin='lower', extent=(min1, max1, min2, max2), alpha=.3, aspect='auto')
plt.contour(u, v, p2, [0.5], colors=colors[8])
X0 = np.compress(t_s==0, X_s, axis=0)
X1 = np.compress(t_s==1, X_s, axis=0)
X2 = np.compress(t_s==2, X_s, axis=0)
ax.scatter(X0[:, 0], X0[:, 1], s=40, c=colors[0], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X1[:, 0], X1[:, 1], s=40, c=colors[2], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
ax.scatter(X2[:, 0], X2[:, 1], s=40, c=colors[1], edgecolor='k', marker= 'o', lw=.7, cmap=cmap)
plt.xlabel('Petal length', fontsize=10)
plt.ylabel('Petal width', fontsize=10)
plt.xlim(min1, max1)
plt.ylim(min2, max2)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Classe 2')
plt.show()
# Evaluate the 2-feature model on train and test splits.
y = model.predict(X_train)
y_t = model.predict(X_test)
print(confusion_matrix(y,t_train))
print(confusion_matrix(y_t,t_test))
print(classification_report(y,t_train))
print(classification_report(y_t,t_test))
print(accuracy_score(y,t_train))
print(accuracy_score(y_t,t_test))
# Refit using ALL four features.
X = np.array(data[data.columns[:-1]])
X.shape
# NOTE(review): `t` is already integer-encoded at this point, so re-encoding
# it is a no-op (LabelEncoder maps sorted ints back to themselves).
t = encoder.fit_transform(t)
model.set_params(clf__C=1e5)
model = model.fit(X, t)
y = model.predict(X)
print(confusion_matrix(y,t))
print(classification_report(y,t))
print(accuracy_score(y,t))
# Univariate feature selection by mutual information, keeping 1 feature.
# NOTE(review): the selector is fitted against the *predictions* y rather
# than the targets t — confirm this is intentional.
fs = SelectKBest(mutual_info_classif, k=1).fit(X, y)
fs.get_support()
X_new = fs.transform(X)
X_new.shape
model.set_params(clf__C=1e5)
y = model.fit(X_new, t).predict(X_new)
print(accuracy_score(y,t))
# Training accuracy as a function of the number of selected features.
accs = []
for k in range(1,5):
    X_new = SelectKBest(mutual_info_classif, k=k).fit_transform(X, y)
    y = model.fit(X_new, t).predict(X_new)
    accs.append(accuracy_score(y,t))
accs
# Grid-search C (narrow range) and penalty with 10-fold cross-validation.
domain = np.linspace(5.9,6,100)
param_grid = [{'C': domain, 'penalty': ['l1','l2']}]
r = LogisticRegression()
clf = GridSearchCV(r, param_grid, cv=10, scoring='accuracy')
clf = clf.fit(X,t)
scores = clf.cv_results_['mean_test_score']
clf.best_params_['C']
clf.best_params_['penalty']
# Final report with the best estimator found.
y = clf.predict(X)
print(confusion_matrix(y,t))
print(classification_report(y,t))
print('{0:3.5f}'.format(accuracy_score(y,t)))
| codici/.ipynb_checkpoints/softmax-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: ''
# name: pysparkkernel
# ---
# Start the Spark context (provided by the PySpark kernel).
spark
# Imports
import sparknlp_jsl
from sparknlp_jsl.annotator import *
from sparknlp_jsl import start
from pyspark.ml import PipelineModel
from sparknlp_jsl.annotator import *
from sparknlp.base import *
# +
# Sample Healthcare pipeline: raw text -> sentence embeddings -> UMLS code.
# NOTE(review): the assembler's output column is named "ner_chunk" so the
# resolver consumes the whole input text as a single chunk; confirm this
# matches the intended setup (usually "document" feeds an NER stage first).
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("ner_chunk")
# Sentence-level BioBERT embeddings of the chunk text.
sbert_embedder = BertSentenceEmbeddings\
    .pretrained('sbiobert_base_cased_mli', 'en','clinical/models')\
    .setInputCols(["ner_chunk"])\
    .setOutputCol("sbert_embeddings")
# +
# Resolve each embedded chunk to the nearest UMLS major-concept code.
umls_resolver = SentenceEntityResolverModel.pretrained("sbiobertresolve_umls_major_concepts", "en", "clinical/models") \
    .setInputCols(["ner_chunk", "sbert_embeddings"]) \
    .setOutputCol("umls_code")\
    .setDistanceFunction("EUCLIDEAN")
umls_pipelineModel = PipelineModel(
    stages = [
        documentAssembler,
        sbert_embedder,
        umls_resolver])
# LightPipeline annotates plain Python strings without a Spark DataFrame.
umls_lp = LightPipeline(umls_pipelineModel)
# -
umls_lp
text = 'type two diabetes mellitus'
umls_lp.annotate(text)
| platforms/emr/NLP_EMR_Setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # ANTsPy Tutorial
#
# In this tutorial, I will show of some of the core ANTsPy functionality. I will highlight the similarities with ANTsR.
# ## Basic IO, Processing, & Plotting
# + jupyter={"outputs_hidden": true}
import ants
import matplotlib.pyplot as plt
# %matplotlib inline
# + jupyter={"outputs_hidden": false}
# Read a 2-D sample image as float and display it.
img = ants.image_read( ants.get_ants_data('r16'), 'float' )
plt.imshow(img.numpy(), cmap='Greys_r')
plt.show()
# + jupyter={"outputs_hidden": false}
# Binary mask of the object using ANTs' default thresholding.
mask = ants.get_mask(img)
plt.imshow(mask.numpy())
plt.show()
# -
# # N4 Bias Correction
# + jupyter={"outputs_hidden": false}
# Correct low-frequency intensity inhomogeneity (shrink factor 3 for speed).
img_n4 = ants.n4_bias_field_correction(img, shrink_factor=3)
plt.imshow(img_n4.numpy(), cmap='Greys_r')
plt.show()
# -
# ### Overloaded Mathematical Operators
# + jupyter={"outputs_hidden": false}
# ANTsImage supports arithmetic operators; the difference shows the
# intensity removed by the N4 correction.
diff = img - img_n4
plt.imshow(diff.numpy())
plt.show()
# -
# # Atropos
#
# The following example has been validated with ANTsR. That is, both ANTsR and ANTsPy return the EXACT same result (images).
#
# R Version:
# ```R
# img <- antsImageRead( getANTsRData("r16") , 2 )
# img <- resampleImage( img, c(64,64), 1, 0 )
# mask <- getMask(img)
# segs1 <- atropos( a = img, m = '[0.2,1x1]',
# c = '[2,0]', i = 'kmeans[3]', x = mask )
# ```
# + jupyter={"outputs_hidden": false}
img = ants.image_read( ants.get_ants_data("r16") ).clone('float')
# Downsample to 64x64 so the segmentation runs quickly.
img = ants.resample_image( img, (64,64), 1, 0 )
mask = ants.get_mask(img)
# Atropos: MRF-regularised (weight 0.2, 1x1 neighbourhood), 3-class k-means
# initialised segmentation restricted to the mask.
segs1 = ants.atropos( a = img, m = '[0.2,1x1]',
                      c = '[2,0]', i = 'kmeans[3]', x = mask )
print(segs1)
# + jupyter={"outputs_hidden": false}
# One posterior probability image per class.
for i in range(3):
    plt.imshow(segs1['probabilityimages'][i].numpy())
    plt.title('Class %i' % i)
    plt.show()
# + jupyter={"outputs_hidden": false}
# Hard segmentation labels.
plt.imshow(segs1['segmentation'].numpy())
plt.show()
# -
# # Registration
#
# R Version:
# ```R
# fi <- antsImageRead(getANTsRData("r16") )
# mi <- antsImageRead(getANTsRData("r64") )
# fi<-resampleImage(fi,c(60,60),1,0)
# mi<-resampleImage(mi,c(60,60),1,0) # speed up
# mytx <- antsRegistration(fixed=fi, moving=mi, typeofTransform = c('SyN') )
# ```
# + jupyter={"outputs_hidden": false}
fi = ants.image_read( ants.get_ants_data('r16') ).clone('float')
mi = ants.image_read( ants.get_ants_data('r64')).clone('float')
# Downsample both images to 60x60 to speed the registration up.
fi = ants.resample_image(fi,(60,60),1,0)
mi = ants.resample_image(mi,(60,60),1,0)
# Deformable SyN registration of the moving image onto the fixed one.
mytx = ants.registration(fixed=fi, moving=mi,
                         type_of_transform = 'SyN' )
print(mytx)
# + jupyter={"outputs_hidden": false}
# Show moving, fixed, and warped-moving images side by side.
plt.imshow(mi.numpy())
plt.title('Original moving image')
plt.show()
plt.imshow(fi.numpy())
plt.title('Original fixed image')
plt.show()
plt.imshow(mytx['warpedmovout'].numpy())
plt.title('Warped moving imag')
plt.show()
# -
# # SparseDecom2
#
# Another ANTsR-validated result:
#
# ```R
# mat<-replicate(100, rnorm(20))
# mat2<-replicate(100, rnorm(20))
# mat<-scale(mat)
# mat2<-scale(mat2)
# mydecom<-sparseDecom2(inmatrix = list(mat,mat2), sparseness=c(0.1,0.3), nvecs=3, its=3, perms=0)
# ```
# The 3 correlation values from that experiment are: [0.9762784, 0.9705170, 0.7937968]
#
# After saving those exact matrices, and running the ANTsPy version, we see that we get the exact same result
# + jupyter={"outputs_hidden": false}
import numpy as np
import pandas as pd
# Load the exact matrices saved from the ANTsR session described above.
mat = pd.read_csv('~/desktop/mat.csv', index_col=0).values
mat2 = pd.read_csv('~/desktop/mat2.csv', index_col=0).values
# Sparse CCA between the two matrices: 3 sparse component pairs, 3 iterations,
# no permutation testing.
mydecom = ants.sparseDecom2(inmatrix=(mat,mat2), sparseness=(0.1,0.3),
                            nvecs=3, its=3, perms=0)
print('Available Results: ', list(mydecom.keys()))
print('Correlations: ', mydecom['corrs'])
# + jupyter={"outputs_hidden": false}
# + jupyter={"outputs_hidden": true}
| Code/tut-ANTsPy Tutorial.ipynb |
# ---
# title: "Find Uniqueness in a column without null values"
# author: "Charles"
# date: 2020-08-15
# description: "-"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# Load the training data and peek at it.
df = pd.read_csv('train.csv')
df.head()
# nunique() skips nulls by default; reshape the per-column counts into a
# two-column frame.
unique_vals = (
    df.nunique()
    .rename_axis("Column Name")
    .reset_index(name="Uniqueness")
)
unique_vals.head()
| docs/python/pandas/pd-nunique.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.0 ('base')
# language: python
# name: python3
# ---
import torch
from torch import nn
from tqdm import tqdm
# # %load_ext nb_mypy
# %nb_mypy DebugOff
# +
def conv_output_size(w, k=3, p=2, s=1):
    """Spatial size after a conv/pool layer: (w - k + 2p) / s + 1."""
    return (w - k + 2*p) / s + 1

def conv_block(img_size, n_in, nh, kernel_size, stride, padding, max_kernel=2):
    """Conv -> BatchNorm -> ReLU -> MaxPool.

    Returns the list of layers and the integer spatial size of the output.
    """
    layers = [
        nn.Conv2d(n_in, nh, kernel_size, stride, padding),
        nn.BatchNorm2d(nh),
        nn.ReLU(),
        nn.MaxPool2d(max_kernel),
    ]
    after_conv = conv_output_size(img_size, k=kernel_size, p=padding, s=stride)
    after_pool = conv_output_size(after_conv, k=max_kernel, p=0, s=max_kernel)
    return layers, int(after_pool)
class CustomModel(nn.Module):
    """Stack of `num_blocks` conv blocks (hidden width doubling each block)
    followed by a flatten + linear classifier head.
    """

    def __init__(self, n_in, n_out, nh, img_size, num_blocks, kernel_size=3, stride=1, padding=1):
        super(CustomModel, self).__init__()
        layers = []
        for _ in range(num_blocks):
            block, out_size = conv_block(img_size, n_in, nh, kernel_size, stride, padding)
            layers.extend(block)
            # Next block consumes this block's output.
            img_size = out_size
            n_in = nh
            nh = 2 * nh
        self.conv = nn.Sequential(*layers)
        # Flattened feature count after the final block.
        flat_features = out_size ** 2 * n_in
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(flat_features, n_out)
        )

    def forward(self, x):
        """Apply the conv stack, then the linear head."""
        return self.fc(self.conv(x))
# Bug fix: `img` is not defined at this point in the script (NameError).
# The dataset below resizes RGB JPEGs to 124x124, so the input has 3 channels.
net = CustomModel(3, 6, 16, 124, 4)
# -
import matplotlib.pyplot as plt
def plot_img(img):
    """Display a CHW image tensor by permuting it to HWC for matplotlib."""
    hwc = img.permute(1, 2, 0)
    plt.imshow(hwc)
# +
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
from torchvision import transforms
from PIL import Image
class CustomDataset(Dataset):
    """Image-folder dataset: one sub-directory per class, *.jpg files inside.

    Parameters
    ----------
    data_path : root directory containing one folder per class.
    transform : optional list of torchvision transforms, applied in order.
    """

    def __init__(self, data_path, transform=None):
        self.data_path = data_path
        # Bug fix: the original used a mutable default argument (transform=[]),
        # which is shared across instances; treat None as "no transforms".
        self.transform = transform if transform is not None else []
        self.total_imgs = sorted([p for p in Path(data_path).rglob("*.jpg") if p.is_file()])
        # NOTE(review): glob order is filesystem-dependent, so the
        # class -> label mapping may differ across runs/machines; consider
        # sorting `classes` if reproducible labels are required.
        classes = [p.name for p in Path(data_path).glob("*")]
        self.classes_to_label = dict(zip(classes, range(len(classes))))

    def __len__(self):
        """Number of .jpg images found under data_path."""
        return len(self.total_imgs)

    def __getitem__(self, idx):
        """Return (image, integer class label) for the idx-th image."""
        image = Image.open(self.total_imgs[idx])
        if self.transform:
            image = transforms.Compose(self.transform)(image)
        # The parent directory name is the class name.
        label = self.total_imgs[idx].parent.name
        class_label = self.classes_to_label[label]
        return image, class_label
# Resize every image to 124x124 and convert to a CHW float tensor.
transform = [transforms.Resize((124, 124)), transforms.ToTensor()]
data_path = "../tmp_files/seg_train/seg_train/"
train_ds = CustomDataset(data_path=data_path, transform=transform)
# +
from sklearn.model_selection import train_test_split
val_split = 0.2
# train_indices, valid_indices = train_test_split(range(len(train_ds)), stratify=train_ds.)
# NOTE(review): __getitem__ receives a slice here, so Image.open gets a list
# of paths and will raise — this looks like a debugging leftover.
train_ds[:20]
# -
# Scratch calculations of layer output sizes (this redefinition shadows the
# earlier conv_output_size with identical code).
def conv_output_size(w, k=3, p=2, s=1):
    return ((w - k + 2*p) / s + 1)
conv_output_size(124,3,1,1)
conv_output_size(124, 2, 0, 2)
62*62*16
# conv_output_size(1984, 1, 2)
124*124*16
def prime_factors(n):
    """Return the prime factorisation of n as an ascending list (with multiplicity)."""
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        # Divide out the current divisor completely before moving on,
        # which guarantees only primes are ever appended.
        while n % divisor == 0:
            n //= divisor
            factors.append(divisor)
        divisor += 1
    if n > 1:
        # Whatever remains is a prime larger than sqrt(original n).
        factors.append(n)
    return factors
prime_factors(63504)
7*7*3
| examples/example_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# library for data manipulation
import pandas as pd
# library that allows Python to make http requests
import requests
from requests import get
# library that allows web scraping
from bs4 import BeautifulSoup
# library that allows searching google from Python
from googlesearch import search
# regular expression in Python
import re
# Read the csv file into a dataframe
df = pd.read_csv('ipl.csv')
# Get the list of distinct bowlers in the csv file
bowlers = df['bowler'].unique()
# +
# Collected [bowler name, wikipedia url] pairs.
l = []
for bowler in bowlers:
    # Append "cricketer" to disambiguate from popular non-cricketers who
    # share the same name.
    query = bowler + " cricketer"
    # Take the first two google results for each bowler.
    for i in search(query, tld = "com", num = 10, stop = 2, pause = 1):
        # Bug fix: the original used all(w in i for w in "<string>"), which
        # iterates the *characters* of the string and therefore only checked
        # that every character appeared somewhere in the url. Use real
        # substring tests instead.
        if "https://en.wikipedia.org/wiki/" in i and "gstatic.com" not in i:
            # Keep the bowler's wikipedia link.
            l.append([bowler, i])
# -
# No. of bowlers and their wiki links found
len(l)
# Parallel lists: player names and their corresponding wikipedia links.
player = []
wiki = []
# store every bowler's name and wiki link in their respective lists
for i in range(len(l)):
    player.append(l[i][0])
    wiki.append(l[i][1])
# print to make sure bowler names match their wiki links
for n, m in zip(player, wiki):
    print(n.ljust(30), m)
# function that scrapes a bowlers type of bowling and the hand they use
def scrape_bowler_playstyle(link):
# variables that will help store a bowler's play style and the hand they use
play_style = 0
hand = 0
# for the wiki link
response = get(link)
# parse the html page
page_html = BeautifulSoup(response.text, 'html.parser')
# use 'tr' tag in the html doc as a starting point to dive deeper into the tags
containers = page_html.find_all('tr')
# counters
i = -1
j = -1
for container in containers:
i += 1
# Finding play style in tag th
if container.th is not None:
if container.th.text == "Bowling":
# get the bowling style
if containers[i].td.a != None:
play_style = containers[i].td.a.text
else:
play_style = containers[i].td.text
# repeat the process above for hand
for container2 in containers:
j += 1
# Finding hand
if container2.th is not None:
if container2.th.text == "Batting":
if containers[j].td != None:
hand = containers[j].td.text
hand = hand[:5]
return play_style, hand
# +
# For each bowler, scrape and store their bowling style and batting hand.
play_style = []
hand = []
for link in wiki:
    p, h = scrape_bowler_playstyle(link)
    play_style.append(p)
    hand.append(h)
# display final results: name, bowling style, batting hand
for k in range(len(player)):
    name = player[k]
    p = play_style[k]
    h = hand[k]
    print(name.ljust(30), p, '\t', h)
# -
| IPL problem 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Exercises 1
# # 1.
import numpy as np
import matplotlib.pyplot as plt
# Part a) row vector (1x2) times column vector (2x1) -> 1x1 matrix [[27]]
m1 = np.array([[3,6]])
m2 = np.array([[5],[2]])
mRes = np.dot(m1,m2)
# Bug fix: the original printed the undefined name `aRes` (NameError);
# the computed result is `mRes`.
print(mRes)
# Part b) column (2x1) times row (1x2) -> 2x2 outer product
mRes = np.dot(m2,m1)
print(mRes)
# +
# Part c)
# Cannot calculate [2,1] * [2,1]: inner dimensions do not match
# -
# Part d) 2x2 matrix times 2x1 column vector
p1 = np.array([[1,2],[3,5]])
p2 = np.array([[4],[6]])
pRes = np.dot(p1,p2)
print(pRes)
# Part e) 1x2 row vector times 2x2 matrix
q1 = np.array([[4,6]])
rRes = np.dot(q1, p1)
print(rRes)
# +
# Part f) quadratic form p2^T * p1 * p2, step by step...
sTrans = np.transpose(p2)
sRes1 = np.dot(sTrans, p1)
sRes = np.dot(sRes1, p2)
# ...or as one nested expression (same result)
testing = np.dot(np.dot(sTrans,p1), p2)
print(testing)
# testing = sRes
# -
# # 2
# a) two 3x2 matrices
A = np.array([[1,0],[0,2],[3,0]])
B = np.array([[0,4],[0,5],[6,0]])
print(A)
print(B)
# The matrix product A*B is not possible: (3x2)x(3x2) shapes do not align
# Part b) element-wise sum works because the shapes match
print(A + B)
| Mathematics - Cognitive Systems-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn import *
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
# %pylab
# -
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns #for graphics and figure styling
import pandas as pd
from matplotlib.colors import ListedColormap
# Load the 2018 flight-delay data and keep a ~50k-row slice for speed.
data = pd.read_csv('E:/Stony Brook/AMS560/Data/FlightDelay2018.csv')
data=data[1:50000]
# Bug fix: fillna() returns a new Series — the original call discarded the
# result, so NaNs were never actually filled. Assign the result back, and
# binarise the target with .loc instead of chained indexing (which pandas
# warns may silently write to a copy).
data['DepDelayMinutes'] = data['DepDelayMinutes'].fillna(1)
data.loc[data['DepDelayMinutes'] != 0, 'DepDelayMinutes'] = 1
# +
from collections import defaultdict
# Percentage of missing values per column, computed vectorised in pandas.
# (The original looped over every cell in Python — O(rows*cols) interpreted
# work — to compute the same percentages.)
missing = defaultdict(int, (data.isnull().mean() * 100).to_dict())
missing['Year']
# Drop every column with more than 5% missing values.
for col in list(data.columns):
    if missing[col] > 5:
        data = data.drop(col, axis=1)
# -
data=data.drop('TailNum',axis=1)
# Label-encode every column to integer codes.
enc = LabelEncoder()
data = data.apply(LabelEncoder().fit_transform)
# +
# Separate the target (was the departure delayed?) and drop columns that
# directly leak the departure delay into the features.
depDelayColumn = data.DepDelayMinutes
data = data.drop('DepDelayMinutes', axis=1)
data = data.drop('DepDelay', axis=1)
data = data.drop(['CRSDepTime','DepTime','DepartureDelayGroups'], axis=1)
# +
data_train, data_test, y_train, y_test = train_test_split(data, depDelayColumn, test_size=.3)
# Bug fix: the original fitted one scaler on the FULL data to transform the
# test set and a second scaler on the training split to transform the
# training set, so train and test were scaled inconsistently (and test-set
# statistics leaked into the scaler). Fit on the training split only and
# apply that same scaler to both splits.
scaler = StandardScaler().fit(data_train)
standard_data = scaler.transform(data_train)
standard_data_test = scaler.transform(data_test)
# -
# Using the Random Forest Classifier on our data, with depth 3.
depth=3;
n_features=5;
censusIDM = RandomForestClassifier(max_depth=depth, random_state=0)
# Recursive feature elimination keeping the 5 most predictive features.
frfe = RFE(censusIDM, n_features_to_select=n_features)
frfe.fit(data_train, y_train)
print(frfe.ranking_)
frfe.score(data_test, y_test)
# Collect the column indices RFE kept (ranking == 1 marks selected features).
feature_to_select=[0]*n_features
j=0
for i in range(len(frfe.ranking_)):
    if frfe.ranking_[i]==1:
        feature_to_select[j]=i
        j=j+1
print(feature_to_select)
data.columns[36]
# +
# Parameters
n_classes = 2
n_estimators = 30
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Three subplots, one per 2-feature pair: filled decision surface, coarse
# grid of point predictions, and the training points on top.
fig=plt.figure(figsize=[15,5])
plt.subplot(1,3, 1)
# Feature-index pairs used for the three plots.
f1=[36,28]
f2=[5,36]
f3=[5,28]
#X=standardized_test_data[:,[0,4]];
# --- Subplot 1: features f1 ---
X=standard_data[:,f1];
y=y_train
frfe.fit(X, y)
print(frfe.score(standard_data_test[:,f1], y_test))
# NOTE(review): standard_data is already standardised, so this second
# normalisation is redundant (but harmless); it is repeated per subplot.
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = frfe.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
# Coarser grid of point predictions overlaid on the filled contours.
xx_coarser, yy_coarser = np.meshgrid(
    np.arange(x_min, x_max, plot_step_coarser),
    np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = frfe.predict(np.c_[xx_coarser.ravel(),yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
plt.scatter(X[:, 0], X[:, 1], c=y,
            cmap=ListedColormap(['r', 'y', 'b']),
            edgecolor='k', s=20)
# Bare xlabel/ylabel come from the %pylab import in the first cell.
xlabel('ArrDelay')
ylabel('DepDel15')
# --- Subplot 2: features f2 (same procedure) ---
plt.subplot(1,3,2)
X=standard_data[:,f2];
frfe.fit(X, y)
print(frfe.score(standard_data_test[:,f2], y_test))
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = frfe.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
xx_coarser, yy_coarser = np.meshgrid(
    np.arange(x_min, x_max, plot_step_coarser),
    np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = frfe.predict(np.c_[xx_coarser.ravel(),yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
plt.scatter(X[:, 0], X[:, 1], c=y,
            cmap=ListedColormap(['r', 'y', 'b']),
            edgecolor='k', s=20)
xlabel('DayOfWeek')
ylabel('ArrDelay')
# --- Subplot 3: features f3 (same procedure) ---
plt.subplot(1,3,3)
X=standard_data[:,f3];
frfe.fit(X, y)
print(frfe.score(standard_data_test[:,f3], y_test))
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = frfe.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
xx_coarser, yy_coarser = np.meshgrid(
    np.arange(x_min, x_max, plot_step_coarser),
    np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = frfe.predict(np.c_[xx_coarser.ravel(),yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
plt.scatter(X[:, 0], X[:, 1], c=y,
            cmap=ListedColormap(['r', 'y', 'b']),
            edgecolor='k', s=20)
xlabel('DayOfWeek')
ylabel('DepDel15')
plt.suptitle('RandomForestTree model on feature subsets ');
#fig.savefig('RandomForest.pdf',dpi=200)
# -
| ensemble_flight.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Educat8n/Reinforcement-Learning-for-Game-Playing-and-More/blob/main/Module3/Module_3.3_Application_of_RL_in_Finance_TensorTrader_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="tp41W1qgU_V5"
# # Install TensorTrade
#
# Source: https://github.com/tensortrade-org/tensortrade Examples
# + colab={"base_uri": "https://localhost:8080/"} id="23-OzS7oUxZU" outputId="8c92e4f8-0f78-4f66-cf0d-b0aba8f9e784"
# !python3 -m pip install git+https://github.com/tensortrade-org/tensortrade.git
# + [markdown] id="uXNEGHfFVIYl"
# # Setup Data Fetching
# + id="8_7OoInSU7UG"
import pandas as pd
import tensortrade.env.default as default
from tensortrade.data.cdd import CryptoDataDownload
from tensortrade.feed.core import Stream, DataFeed
from tensortrade.oms.exchanges import Exchange
from tensortrade.oms.services.execution.simulated import execute_order
from tensortrade.oms.instruments import USD, BTC, ETH
from tensortrade.oms.wallets import Wallet, Portfolio
from tensortrade.agents import DQNAgent
# %matplotlib inline
# + id="5CH-MXm-VQ5-"
# Download hourly BTC/USD candles from Bitstamp.
cdd = CryptoDataDownload()
data = cdd.fetch("Bitstamp", "USD", "BTC", "1h")
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="XSQcFFyiVSB_" outputId="afb4ad09-3cb2-43b3-dd43-6eec3843a8be"
data.head()
# + [markdown] id="XlEZPzG4VeR6"
# # Create features with the feed module
# + id="inRWMo3rVWdR"
def rsi(price: Stream[float], period: float) -> Stream[float]:
    """Relative Strength Index over `period` (100 * rs / (1 + rs))."""
    r = price.diff()
    # Separate positive streams of gains and losses.
    upside = r.clamp_min(0).abs()
    downside = r.clamp_max(0).abs()
    # Ratio of exponentially weighted average gain to average loss.
    rs = upside.ewm(alpha=1 / period).mean() / downside.ewm(alpha=1 / period).mean()
    return 100*(1 - (1 + rs) ** -1)

def macd(price: Stream[float], fast: float, slow: float, signal: float) -> Stream[float]:
    """MACD from fast/slow EMAs.

    NOTE(review): the returned stream is the MACD line minus its
    `signal`-span EMA (the histogram), not the signal line itself, despite
    the variable name.
    """
    fm = price.ewm(span=fast, adjust=False).mean()
    sm = price.ewm(span=slow, adjust=False).mean()
    md = fm - sm
    signal = md - md.ewm(span=signal, adjust=False).mean()
    return signal

# Wrap every raw column (except the date) in a named Stream.
features = []
for c in data.columns[1:]:
    s = Stream.source(list(data[c]), dtype="float").rename(data[c].name)
    features += [s]
cp = Stream.select(features, lambda s: s.name == "close")
# Observation features: log-return, RSI(20) and MACD(10, 50, 5) of the close.
features = [
    cp.log().diff().rename("lr"),
    rsi(cp, period=20).rename("rsi"),
    macd(cp, fast=10, slow=50, signal=5).rename("macd")
]
feed = DataFeed(features)
feed.compile()
# + colab={"base_uri": "https://localhost:8080/"} id="ffE697AHVuz8" outputId="f4c72837-25e5-4773-a52f-578a48a925a8"
for i in range(5):
print(feed.next())
# + [markdown] id="4BcW4Nm3Vy-z"
# # Setup Trading Environment
# + id="6p-ZS1GWVxwh"
# Simulated exchange driven by the historical close prices.
bitstamp = Exchange("bitstamp", service=execute_order)(
    Stream.source(list(data["close"]), dtype="float").rename("USD-BTC")
)
# Starting portfolio: 10,000 USD and 10 BTC.
portfolio = Portfolio(USD, [
    Wallet(bitstamp, 10000 * USD),
    Wallet(bitstamp, 10 * BTC)
])
# OHLCV streams consumed only by the chart renderer, not the agent.
renderer_feed = DataFeed([
    Stream.source(list(data["date"])).rename("date"),
    Stream.source(list(data["open"]), dtype="float").rename("open"),
    Stream.source(list(data["high"]), dtype="float").rename("high"),
    Stream.source(list(data["low"]), dtype="float").rename("low"),
    Stream.source(list(data["close"]), dtype="float").rename("close"),
    Stream.source(list(data["volume"]), dtype="float").rename("volume")
])
# Trading environment: managed-risk action scheme, risk-adjusted reward,
# and a 20-step observation window over the feature feed.
env = default.create(
    portfolio=portfolio,
    action_scheme="managed-risk",
    reward_scheme="risk-adjusted",
    feed=feed,
    renderer_feed=renderer_feed,
    renderer=default.renderers.PlotlyTradingChart(),
    window_size=20
)
# + colab={"base_uri": "https://localhost:8080/"} id="iaCedLa3V6Ls" outputId="d251d87a-5d91-413f-d7f8-8591ee2b45b7"
env.observer.feed.next()
# + [markdown] id="g-nKpIoPWFgB"
# # Setup and Train DQN Agent
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["ed4f5a857fd34e6390b93347d894fc14"]} id="bbXrUQPHV9aU" outputId="8864e263-cf97-41f4-b864-8da6cd2ac357"
# Train a DQN agent briefly (2 episodes, 200 steps) and checkpoint to agents/.
agent = DQNAgent(env)
agent.train(n_steps=200, n_episodes=2, save_path="agents/")
# + id="ikqb7EFTWHSt"
| Module3/Module_3.3_Application_of_RL_in_Finance_TensorTrader_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import Sequential
from keras.layers import *
from keras.preprocessing import image
from keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
# Load Fashion-MNIST, scale pixels to [0, 1], add a trailing channel axis,
# and one-hot encode the labels for categorical cross-entropy training.
(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.fashion_mnist.load_data()

x_train = np.expand_dims(xtrain / 255.0, -1)
x_test = np.expand_dims(xtest / 255.0, -1)

y_train = to_categorical(ytrain, 10)
y_test = to_categorical(ytest, 10)

print(x_train.shape)
print(y_train.shape)
# +
# CNN: two conv blocks (32 then 64 filters) with pooling, dropout and batch
# norm, then a small dense head with Gaussian-noise regularisation and a
# 10-way softmax output.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same',
           input_shape=(28, 28, 1)),
    Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.15),
    BatchNormalization(),
    Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
    Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.15),
    BatchNormalization(),
    GaussianNoise(0.25),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(0.25),
    Dense(64, activation='relu'),
    Dropout(0.25),
    BatchNormalization(),
    GaussianNoise(0.25),
    Dense(10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
# -
# Train with early stopping on validation loss and keep only the best
# checkpoint (ModelCheckpoint monitors val_loss by default).
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping

mp = ModelCheckpoint('mymodel.hdf5', save_best_only=True)
es = EarlyStopping(monitor='val_loss', patience=5)
callbacks = [mp, es]

# NOTE: the original also passed steps_per_epoch=1875 alongside
# batch_size=32. With 60,000 in-memory samples that is exactly one epoch's
# worth of steps, so it was redundant — and it silently goes stale if
# batch_size ever changes. Let Keras derive the step count from batch_size.
history = model.fit(
    x_train,
    y_train,
    epochs=10,
    batch_size=32,
    validation_data=(x_test, y_test),
    callbacks=callbacks,
)
# Report loss/accuracy on the training set and on the held-out test set.
model.evaluate(x_train,y_train)
model.evaluate(x_test,y_test)
# Accuracy curves. Each metric gets its own figure: the original drew the
# loss curves onto the same axes as the accuracy curves, overwriting the
# title/axis labels and mixing four lines on one chart.
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('accuracy plot')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'validation'])
plt.show()

# Loss curves on a fresh figure.
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('loss plot')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'validation'])
plt.show()
# Compare the model's prediction for the first test image with its label.
ypred = model.predict(x_test)
pred = np.argmax(ypred[0])
truth = np.argmax(y_test[0])

# Both branches of the original printed the prediction and the truth first,
# so those prints are hoisted; only the verdict line differs.
print("the predicted class is", pred)
print("the true class is", truth)
if pred == truth:
    print("Hence the model is working properly")
else:
    print("there is an error in the model")
print('PROJECT TESTING')
from keras.preprocessing import image

# Load the user image as 28x28 grayscale and shape it like the training
# data: (1, 28, 28, 1), float32, scaled to [0, 1].
img = image.load_img('dress.jpeg', color_mode="grayscale", target_size=(28, 28))
# NOTE: the array must NOT be named `image` — the original rebound that name
# and shadowed the keras `image` module it had just imported.
img_arr = image.img_to_array(img).reshape(1, 28, 28, 1)
img_arr = img_arr.astype('float32') / 255.0

ypred = model.predict(img_arr)
output = np.argmax(ypred)

# Fashion-MNIST class names, indexed by label. Indexing replaces the
# original 10-branch if/elif chain, which had no else branch and would have
# left `y` undefined (NameError downstream) for any unexpected index.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot']
y = class_names[output]
def plot_img(img):
    """Display ``img`` (a grayscale PIL image) at 6x6 inches without axes.

    The original relied on matplotlib's default colormap, which renders a
    grayscale garment in false colour; force a gray colormap, and show the
    figure explicitly.
    """
    plt.figure(figsize=(6, 6))
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.show()
# Render the uploaded image alongside its predicted class name.
plot_img(img)
print('The uploaded image is classified as:'+str(y))
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quantum Feature Spaces and Kernels
#
# <div class="youtube-wrapper">
# <iframe src="https://www.youtube.com/embed/zw3JYUrS-v8" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# </div>
#
#
# In this lecture, Kristan covers three topics. Firstly, the theory behind feature maps, feature spaces, and kernels is introduced. This is then expanded into the idea of a quantum feature space, accompanied by examples. Secondly, Kristan introduces the circuit for the quantum kernel estimation (QKE). Next, Kristan discusses near-term applications, including a specific algorithm that uses QKE, i.e. a classification algorithm. And thirdly, Kristan discusses the choice of circuit for the unitary feature map, $U(X)$. Constraints on entries to the kernel are considered, and comparisons between QKE and classical kernels are made.
#
# ### Suggested links
#
# - Download the lecturer's notes [here](/content/summer-school/2021/resources/lecture-notes/Lecture6.2.pdf)
# - Read about [Supervised learning with quantum enhanced feature spaces](https://arxiv.org/abs/1804.11326)
# - Watch Kristan Temme on [Supervised Learning with Quantum Enhanced Feature Spaces](https://www.youtube.com/watch?v=rzSYSsTllVE)
# - Read about [Quantum machine learning in feature Hilbert spaces](https://arxiv.org/abs/1803.07128)
# - Read about [A rigorous and robust quantum speed-up in supervised machine learning](https://arxiv.org/abs/2010.02174)
#
# <!-- ::: q-block.reminder -->
#
# ### FAQ
#
# <details>
# <summary>What is a kernel?</summary>
# Given a set of data, a kernel is a distance measure between attribute vectors taken from the data. It tells us how similar any two attribute vectors are. When given a feature map from the space of attributes to a higher dimensional space, the kernel is just the inner product in that higher dimensional Euclidean space between the two feature vectors.
# </details>
#
# <details>
# <summary>Why is RBF infinite dimensional? Doesn’t it output a scalar?</summary>
# The RBF is infinite dimensional since the number of basis functions needed to construct the kernel will be infinite. See https://www.youtube.com/watch?v=XUj5JbQihlU&t=1553s for a more detailed explanation.
# </details>
#
# <details>
# <summary>What is the sign function?</summary>
# The sign function is a non-linear function that returns the sign of a real number, i.e. +1 or -1.
# </details>
#
# <details>
# <summary>What is the Hilbert–Schmidt inner product?</summary>
# The Hilbert-Schmidt (HS) inner product is the inner product between two matrices within the vector space of matrices. It is also known as the trace inner product. The HS inner product for matrices A and B is given by tr[A^{dagger}B].
# </details>
#
# <details>
# <summary>What does QKE stand for?</summary>
# QKE stands for quantum kernel estimation.
# </details>
#
# <details>
# <summary>What does IQP (circuit) stand for?</summary>
# IQP stands for instantaneous quantum polynomial circuit.
#
# See https://strawberryfields.ai/photonics/demos/run_iqp.html for a more detailed explanation.
# </details>
#
# <!-- ::: -->
#
# ### Other resources
#
# - Read <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME> on [Supervised learning with quantum-enhanced feature spaces](https://www.nature.com/articles/s41586-019-0980-2)
# - Read <NAME> and <NAME> on [Quantum Machine Learning in Feature Hilbert Spaces](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.122.040504)
#
#
| notebooks/summer-school/2021/lec6.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import qiskit as q
from qiskit.visualization import plot_bloch_multivector
from qiskit.visualization import plot_histogram
# %matplotlib inline
# +
# Backends: statevector gives exact amplitudes; qasm gives sampled counts.
statevector_simulator = q.Aer.get_backend("statevector_simulator")
qasm_sim = q.Aer.get_backend("qasm_simulator")
def do_job(circuit):
    """Run *circuit* on both simulators.

    Returns a tuple ``(state_vec, counts)``: the exact statevector taken
    BEFORE any measurement is appended, and measurement counts from 1024
    shots on the qasm simulator.

    Side effect: measurement instructions are appended to *circuit*.
    """
    # Exact amplitudes first, while the circuit is still measurement-free.
    result = q.execute(circuit, backend=statevector_simulator).result()
    state_vec = result.get_statevector()

    # Measure qubit i into classical bit i. Pairing only as many qubits as
    # there are classical bits fixes the original, which always passed
    # range(n_qubits) as the qubit list and therefore raised whenever a
    # circuit had fewer classical bits than qubits. Also uses the
    # non-deprecated ``num_qubits`` attribute.
    n_pairs = min(circuit.num_qubits, len(circuit.clbits))
    circuit.measure(list(range(n_pairs)), list(range(n_pairs)))

    qasm_job = q.execute(circuit, backend=qasm_sim, shots=1024).result()
    counts = qasm_job.get_counts()
    return state_vec, counts
# -
# Empty 2-qubit circuit: both qubits remain |0>, so every shot reads "00".
circuit = q.QuantumCircuit(2, 2)  # 2 qubits, 2 classical bits
state_vec, counts = do_job(circuit)
plot_bloch_multivector(state_vec)
plot_histogram([counts], legend=["output"])
# H on q0, X on q1, then CNOT(q0 -> q2): q0 and q2 become correlated
# (Bell-like pair) while q1 sits in |1>.
circuit = q.QuantumCircuit(3, 3)  # 3 qubits, 3 classical bits (original comment wrongly said 2)
circuit.h(0)
circuit.x(1)
circuit.cx(0, 2)
state_vec, counts = do_job(circuit)
plot_bloch_multivector(state_vec)
plot_histogram([counts], legend=['output'])
# Toffoli demo: both controls (q0, q1) are put in superposition, so the
# target q2 flips only on the |11> branch.
circuit = q.QuantumCircuit(3, 3)  # 3 qubits, 3 classical bits
circuit.h(0)
circuit.h(1)
circuit.ccx(0, 1, 2)  # Toffoli (controlled-controlled-X)
circuit.draw()
state_vec, counts = do_job(circuit)
plot_bloch_multivector(state_vec)
plot_histogram([counts], legend=['output'])
# Same Toffoli circuit, but only the target qubit q2 is measured into the
# single classical bit; it reads 1 with probability 1/4 (both controls
# must collapse to 1).
circuit = q.QuantumCircuit(3, 1)  # 3 qubits, 1 classical bit (original comment wrongly said 3)
circuit.h(0)
circuit.h(1)
circuit.ccx(0, 1, 2)
circuit.measure([2], [0])  # qubit 2 -> classical bit 0
circuit.draw()
qasm_job = q.execute(circuit, backend=qasm_sim, shots=1024).result()
counts = qasm_job.get_counts()
plot_histogram([counts], legend=['output'])
import math

# Hadamards on q0/q1 plus a partial X-rotation (pi/4) on q2; inspect the
# resulting Bloch vectors and sampled counts.
circuit = q.QuantumCircuit(3, 3)
circuit.h(0)  # Hadamard
circuit.h(1)
circuit.rx(math.pi / 4, 2)  # small rotation: q2 stays mostly |0>
# circuit.x(2)
state_vec, counts = do_job(circuit)
plot_bloch_multivector(state_vec)
plot_histogram([counts], legend=['output'])
# Measure only the rx(pi/4)-rotated qubit: P(1) = sin^2(pi/8), roughly 15%
# of the 1024 shots.
circuit = q.QuantumCircuit(3, 1)
circuit.h(0)  # Hadamard
circuit.h(1)
circuit.rx(math.pi / 4, 2)
circuit.measure([2], [0])  # qubit 2 -> classical bit 0
circuit.draw()
qasm_job = q.execute(circuit, backend=qasm_sim, shots=1024).result()
counts = qasm_job.get_counts()
plot_histogram([counts], legend=['output'])
| qiskit-3.ipynb |